OpenCores
URL https://opencores.org/ocsvn/openrisc_2011-10-31/openrisc_2011-10-31/trunk

Subversion Repositories openrisc_2011-10-31

[/] [openrisc/] [trunk/] [gnu-src/] [gcc-4.5.1/] [gcc/] [config/] [m68k/] [m68k.c] - Blame information for rev 378

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 282 jeremybenn
/* Subroutines for insn-output.c for Motorola 68000 family.
2
   Copyright (C) 1987, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3
   2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4
   Free Software Foundation, Inc.
5
 
6
This file is part of GCC.
7
 
8
GCC is free software; you can redistribute it and/or modify
9
it under the terms of the GNU General Public License as published by
10
the Free Software Foundation; either version 3, or (at your option)
11
any later version.
12
 
13
GCC is distributed in the hope that it will be useful,
14
but WITHOUT ANY WARRANTY; without even the implied warranty of
15
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16
GNU General Public License for more details.
17
 
18
You should have received a copy of the GNU General Public License
19
along with GCC; see the file COPYING3.  If not see
20
<http://www.gnu.org/licenses/>.  */
21
 
22
#include "config.h"
23
#include "system.h"
24
#include "coretypes.h"
25
#include "tm.h"
26
#include "tree.h"
27
#include "rtl.h"
28
#include "function.h"
29
#include "regs.h"
30
#include "hard-reg-set.h"
31
#include "real.h"
32
#include "insn-config.h"
33
#include "conditions.h"
34
#include "output.h"
35
#include "insn-attr.h"
36
#include "recog.h"
37
#include "toplev.h"
38
#include "expr.h"
39
#include "reload.h"
40
#include "tm_p.h"
41
#include "target.h"
42
#include "target-def.h"
43
#include "debug.h"
44
#include "flags.h"
45
#include "df.h"
46
/* ??? Need to add a dependency between m68k.o and sched-int.h.  */
47
#include "sched-int.h"
48
#include "insn-codes.h"
49
#include "ggc.h"
50
 
51
/* Map each hard register number to its register class: eight data
   registers, eight address registers, eight FP registers, and a final
   entry that is treated as an address register (presumably the fake
   argument/frame pointer — TODO confirm against m68k.h's register
   numbering).  */
enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};
61
 
62
 
63
/* The minimum number of integer registers that we want to save with the
64
   movem instruction.  Using two movel instructions instead of a single
65
   moveml is about 15% faster for the 68020 and 68030 at no expense in
66
   code size.  */
67
#define MIN_MOVEM_REGS 3
68
 
69
/* The minimum number of floating point registers that we want to save
70
   with the fmovem instruction.  */
71
#define MIN_FMOVEM_REGS 1
72
 
73
/* Structure describing stack frame layout.  Filled in and cached by
   m68k_compute_frame_layout; one instance (current_frame) describes the
   function currently being compiled.  */
struct m68k_frame
{
  /* Stack pointer to frame pointer offset.  Includes the space for the
     saved integer and FP registers (see m68k_compute_frame_layout).  */
  HOST_WIDE_INT offset;

  /* Offset of FPU registers: bytes occupied by the saved FP registers.  */
  HOST_WIDE_INT foffset;

  /* Frame size in bytes (rounded up to a multiple of 4).  */
  HOST_WIDE_INT size;

  /* Data and address register: REG_NO is the count of saved D/A
     registers; REG_MASK has bit (regno - D0_REG) set for each one,
     suitable for building a movem mask.  */
  int reg_no;
  unsigned int reg_mask;

  /* FPU registers: FPU_NO is the count of saved FP registers; FPU_MASK
     has bit (regno - FP0_REG) set for each one.  */
  int fpu_no;
  unsigned int fpu_mask;

  /* Offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* Function which the above information refers to (its funcdef_no),
     used to avoid recomputing the layout for the same function.  */
  int funcdef_no;
};
100
 
101
/* Current frame information calculated by m68k_compute_frame_layout().  */
102
static struct m68k_frame current_frame;
103
 
104
/* Structure describing an m68k address.
105
 
106
   If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
107
   with null fields evaluating to 0.  Here:
108
 
109
   - BASE satisfies m68k_legitimate_base_reg_p
110
   - INDEX satisfies m68k_legitimate_index_reg_p
111
   - OFFSET satisfies m68k_legitimate_constant_address_p
112
 
113
   INDEX is either HImode or SImode.  The other fields are SImode.
114
 
115
   If CODE is PRE_DEC, the address is -(BASE).  If CODE is POST_INC,
116
   the address is (BASE)+.  */
117
struct m68k_address {
  /* UNKNOWN for a BASE + INDEX * SCALE + OFFSET address; PRE_DEC for
     -(BASE); POST_INC for (BASE)+.  See the comment above.  */
  enum rtx_code code;
  /* Base register (satisfies m68k_legitimate_base_reg_p), or null.  */
  rtx base;
  /* Index register, HImode or SImode (m68k_legitimate_index_reg_p),
     or null.  */
  rtx index;
  /* Constant displacement (m68k_legitimate_constant_address_p), or
     null.  Null fields evaluate to 0.  */
  rtx offset;
  /* Multiplier applied to INDEX.  */
  int scale;
};
124
 
125
static int m68k_sched_adjust_cost (rtx, rtx, rtx, int);
126
static int m68k_sched_issue_rate (void);
127
static int m68k_sched_variable_issue (FILE *, int, rtx, int);
128
static void m68k_sched_md_init_global (FILE *, int, int);
129
static void m68k_sched_md_finish_global (FILE *, int);
130
static void m68k_sched_md_init (FILE *, int, int);
131
static void m68k_sched_dfa_pre_advance_cycle (void);
132
static void m68k_sched_dfa_post_advance_cycle (void);
133
static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
134
 
135
static bool m68k_can_eliminate (const int, const int);
136
static bool m68k_legitimate_address_p (enum machine_mode, rtx, bool);
137
static bool m68k_handle_option (size_t, const char *, int);
138
static rtx find_addr_reg (rtx);
139
static const char *singlemove_string (rtx *);
140
static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
141
                                          HOST_WIDE_INT, tree);
142
static rtx m68k_struct_value_rtx (tree, int);
143
static tree m68k_handle_fndecl_attribute (tree *node, tree name,
144
                                          tree args, int flags,
145
                                          bool *no_add_attrs);
146
static void m68k_compute_frame_layout (void);
147
static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
148
static bool m68k_ok_for_sibcall_p (tree, tree);
149
static bool m68k_tls_symbol_p (rtx);
150
static rtx m68k_legitimize_address (rtx, rtx, enum machine_mode);
151
static bool m68k_rtx_costs (rtx, int, int, int *, bool);
152
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
153
static bool m68k_return_in_memory (const_tree, const_tree);
154
#endif
155
static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
156
static void m68k_trampoline_init (rtx, tree, rtx);
157
static rtx m68k_delegitimize_address (rtx);
158
 
159
 
160
/* Identification of the shared library being built: the symbol whose
   value supplies the library's A5-relative data offset.  The default
   below may be replaced by -mshared-library-id=N, which substitutes a
   small literal offset (see m68k_handle_option).  */
const char *m68k_library_id_string = "_current_shared_library_a5_offset_";
162
 
163
/* Initialize the GCC target structure.  */
164
 
165
#if INT_OP_GROUP == INT_OP_DOT_WORD
166
#undef TARGET_ASM_ALIGNED_HI_OP
167
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
168
#endif
169
 
170
#if INT_OP_GROUP == INT_OP_NO_DOT
171
#undef TARGET_ASM_BYTE_OP
172
#define TARGET_ASM_BYTE_OP "\tbyte\t"
173
#undef TARGET_ASM_ALIGNED_HI_OP
174
#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
175
#undef TARGET_ASM_ALIGNED_SI_OP
176
#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
177
#endif
178
 
179
#if INT_OP_GROUP == INT_OP_DC
180
#undef TARGET_ASM_BYTE_OP
181
#define TARGET_ASM_BYTE_OP "\tdc.b\t"
182
#undef TARGET_ASM_ALIGNED_HI_OP
183
#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
184
#undef TARGET_ASM_ALIGNED_SI_OP
185
#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
186
#endif
187
 
188
#undef TARGET_ASM_UNALIGNED_HI_OP
189
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
190
#undef TARGET_ASM_UNALIGNED_SI_OP
191
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
192
 
193
#undef TARGET_ASM_OUTPUT_MI_THUNK
194
#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
195
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
196
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
197
 
198
#undef TARGET_ASM_FILE_START_APP_OFF
199
#define TARGET_ASM_FILE_START_APP_OFF true
200
 
201
#undef TARGET_LEGITIMIZE_ADDRESS
202
#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address
203
 
204
#undef TARGET_SCHED_ADJUST_COST
205
#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
206
 
207
#undef TARGET_SCHED_ISSUE_RATE
208
#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate
209
 
210
#undef TARGET_SCHED_VARIABLE_ISSUE
211
#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
212
 
213
#undef TARGET_SCHED_INIT_GLOBAL
214
#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global
215
 
216
#undef TARGET_SCHED_FINISH_GLOBAL
217
#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global
218
 
219
#undef TARGET_SCHED_INIT
220
#define TARGET_SCHED_INIT m68k_sched_md_init
221
 
222
#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
223
#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle
224
 
225
#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
226
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
227
 
228
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
229
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD        \
230
  m68k_sched_first_cycle_multipass_dfa_lookahead
231
 
232
#undef TARGET_HANDLE_OPTION
233
#define TARGET_HANDLE_OPTION m68k_handle_option
234
 
235
#undef TARGET_RTX_COSTS
236
#define TARGET_RTX_COSTS m68k_rtx_costs
237
 
238
#undef TARGET_ATTRIBUTE_TABLE
239
#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
240
 
241
#undef TARGET_PROMOTE_PROTOTYPES
242
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
243
 
244
#undef TARGET_STRUCT_VALUE_RTX
245
#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
246
 
247
#undef TARGET_CANNOT_FORCE_CONST_MEM
248
#define TARGET_CANNOT_FORCE_CONST_MEM m68k_illegitimate_symbolic_constant_p
249
 
250
#undef TARGET_FUNCTION_OK_FOR_SIBCALL
251
#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
252
 
253
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
254
#undef TARGET_RETURN_IN_MEMORY
255
#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
256
#endif
257
 
258
#ifdef HAVE_AS_TLS
259
#undef TARGET_HAVE_TLS
260
#define TARGET_HAVE_TLS (true)
261
 
262
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
263
#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
264
#endif
265
 
266
#undef TARGET_LEGITIMATE_ADDRESS_P
267
#define TARGET_LEGITIMATE_ADDRESS_P     m68k_legitimate_address_p
268
 
269
#undef TARGET_CAN_ELIMINATE
270
#define TARGET_CAN_ELIMINATE m68k_can_eliminate
271
 
272
#undef TARGET_TRAMPOLINE_INIT
273
#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init
274
 
275
#undef TARGET_DELEGITIMIZE_ADDRESS
276
#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address
277
 
278
/* Machine-specific attributes recognized by this back end.  All three
   require a FUNCTION_DECL and are validated by
   m68k_handle_fndecl_attribute.  */
static const struct attribute_spec m68k_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "interrupt", 0, 0, true,  false, false, m68k_handle_fndecl_attribute },
  { "interrupt_handler", 0, 0, true,  false, false, m68k_handle_fndecl_attribute },
  { "interrupt_thread", 0, 0, true,  false, false, m68k_handle_fndecl_attribute },
  /* Sentinel.  */
  { NULL,                0, 0, false, false, false, NULL }
};
286
 
287
struct gcc_target targetm = TARGET_INITIALIZER;
288
 
289
/* Base flags for 68k ISAs.  */
290
#define FL_FOR_isa_00    FL_ISA_68000
291
#define FL_FOR_isa_10    (FL_FOR_isa_00 | FL_ISA_68010)
292
/* FL_68881 controls the default setting of -m68881.  gcc has traditionally
293
   generated 68881 code for 68020 and 68030 targets unless explicitly told
294
   not to.  */
295
#define FL_FOR_isa_20    (FL_FOR_isa_10 | FL_ISA_68020 \
296
                          | FL_BITFIELD | FL_68881)
297
#define FL_FOR_isa_40    (FL_FOR_isa_20 | FL_ISA_68040)
298
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)
299
 
300
/* Base flags for ColdFire ISAs.  */
301
#define FL_FOR_isa_a     (FL_COLDFIRE | FL_ISA_A)
302
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
303
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
304
#define FL_FOR_isa_b     (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
305
/* ISA_C is not upwardly compatible with ISA_B.  */
306
#define FL_FOR_isa_c     (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)
307
 
308
/* The instruction-set architectures distinguished by the FL_FOR_isa_*
   flag sets above.  isa_max doubles as the sentinel value in the
   target-selection tables below.  */
enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  isa_max
};
323
 
324
/* Information about one of the -march, -mcpu or -mtune arguments.
   Entries live in the all_devices, all_isas and all_microarchs tables
   and are looked up by name via m68k_find_selection.  */
struct m68k_target_selection
{
  /* The argument being described.  */
  const char *name;

  /* For -mcpu, this is the device selected by the option.
     For -mtune and -march, it is a representative device
     for the microarchitecture or ISA respectively.  */
  enum target_device device;

  /* The M68K_DEVICE fields associated with DEVICE.  See the comment
     in m68k-devices.def for details.  FAMILY is only valid for -mcpu.  */
  const char *family;
  enum uarch_type microarch;
  enum m68k_isa isa;
  /* Union of FL_* capability flags for the device/ISA.  */
  unsigned long flags;
};
342
 
343
/* A list of all devices in m68k-devices.def.  Used for -mcpu selection.  */
static const struct m68k_target_selection all_devices[] =
{
  /* Expand each M68K_DEVICE entry into one table row; the device's own
     FLAGS are merged with the base flags of its ISA.  */
#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
  { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
#include "m68k-devices.def"
#undef M68K_DEVICE
  /* Sentinel: m68k_find_selection stops at the NULL name.  */
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};
352
 
353
/* A list of all ISAs, mapping each one to a representative device.
   Used for -march selection.  The FAMILY column is NULL throughout;
   it is only meaningful for -mcpu (see struct m68k_target_selection).  */
static const struct m68k_target_selection all_isas[] =
{
  { "68000",    m68000,     NULL,  u68000,   isa_00,    FL_FOR_isa_00 },
  { "68010",    m68010,     NULL,  u68010,   isa_10,    FL_FOR_isa_10 },
  { "68020",    m68020,     NULL,  u68020,   isa_20,    FL_FOR_isa_20 },
  { "68030",    m68030,     NULL,  u68030,   isa_20,    FL_FOR_isa_20 },
  { "68040",    m68040,     NULL,  u68040,   isa_40,    FL_FOR_isa_40 },
  { "68060",    m68060,     NULL,  u68060,   isa_40,    FL_FOR_isa_40 },
  { "cpu32",    cpu32,      NULL,  ucpu32,   isa_20,    FL_FOR_isa_cpu32 },
  { "isaa",     mcf5206e,   NULL,  ucfv2,    isa_a,     (FL_FOR_isa_a
                                                         | FL_CF_HWDIV) },
  { "isaaplus", mcf5271,    NULL,  ucfv2,    isa_aplus, (FL_FOR_isa_aplus
                                                         | FL_CF_HWDIV) },
  { "isab",     mcf5407,    NULL,  ucfv4,    isa_b,     FL_FOR_isa_b },
  { "isac",     unk_device, NULL,  ucfv4,    isa_c,     (FL_FOR_isa_c
                                                         | FL_CF_HWDIV) },
  /* Sentinel: m68k_find_selection stops at the NULL name.  */
  { NULL,       unk_device, NULL,  unk_arch, isa_max,   0 }
};
373
 
374
/* A list of all microarchitectures, mapping each one to a representative
   device.  Used for -mtune selection.  The FAMILY column is NULL; it is
   only meaningful for -mcpu.  */
static const struct m68k_target_selection all_microarchs[] =
{
  { "68000",    m68000,     NULL,  u68000,    isa_00,  FL_FOR_isa_00 },
  { "68010",    m68010,     NULL,  u68010,    isa_10,  FL_FOR_isa_10 },
  { "68020",    m68020,     NULL,  u68020,    isa_20,  FL_FOR_isa_20 },
  { "68020-40", m68020,     NULL,  u68020_40, isa_20,  FL_FOR_isa_20 },
  { "68020-60", m68020,     NULL,  u68020_60, isa_20,  FL_FOR_isa_20 },
  { "68030",    m68030,     NULL,  u68030,    isa_20,  FL_FOR_isa_20 },
  { "68040",    m68040,     NULL,  u68040,    isa_40,  FL_FOR_isa_40 },
  { "68060",    m68060,     NULL,  u68060,    isa_40,  FL_FOR_isa_40 },
  { "cpu32",    cpu32,      NULL,  ucpu32,    isa_20,  FL_FOR_isa_cpu32 },
  { "cfv1",     mcf51qe,    NULL,  ucfv1,     isa_c,   FL_FOR_isa_c },
  { "cfv2",     mcf5206,    NULL,  ucfv2,     isa_a,   FL_FOR_isa_a },
  { "cfv3",     mcf5307,    NULL,  ucfv3,     isa_a,   (FL_FOR_isa_a
                                                        | FL_CF_HWDIV) },
  { "cfv4",     mcf5407,    NULL,  ucfv4,     isa_b,   FL_FOR_isa_b },
  { "cfv4e",    mcf547x,    NULL,  ucfv4e,    isa_b,   (FL_FOR_isa_b
                                                        | FL_CF_USP
                                                        | FL_CF_EMAC
                                                        | FL_CF_FPU) },
  /* Sentinel: m68k_find_selection stops at the NULL name.  */
  { NULL,       unk_device, NULL,  unk_arch,  isa_max, 0 }
};
398
 
399
/* The entries associated with the -mcpu, -march and -mtune settings,
400
   or null for options that have not been used.  */
401
const struct m68k_target_selection *m68k_cpu_entry;
402
const struct m68k_target_selection *m68k_arch_entry;
403
const struct m68k_target_selection *m68k_tune_entry;
404
 
405
/* Which CPU we are generating code for.  */
406
enum target_device m68k_cpu;
407
 
408
/* Which microarchitecture to tune for.  */
409
enum uarch_type m68k_tune;
410
 
411
/* Which FPU to use.  */
412
enum fpu_type m68k_fpu;
413
 
414
/* The set of FL_* flags that apply to the target processor.  */
415
unsigned int m68k_cpu_flags;
416
 
417
/* The set of FL_* flags that apply to the processor to be tuned for.  */
418
unsigned int m68k_tune_flags;
419
 
420
/* Asm templates for calling or jumping to an arbitrary symbolic address,
421
   or NULL if such calls or jumps are not supported.  The address is held
422
   in operand 0.  */
423
const char *m68k_symbolic_call;
424
const char *m68k_symbolic_jump;
425
 
426
/* Enum variable that corresponds to m68k_symbolic_call values.  */
427
enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
428
 
429
 
430
/* See whether TABLE has an entry with name NAME.  Return true and
431
   store the entry in *ENTRY if so, otherwise return false and
432
   leave *ENTRY alone.  */
433
 
434
static bool
435
m68k_find_selection (const struct m68k_target_selection **entry,
436
                     const struct m68k_target_selection *table,
437
                     const char *name)
438
{
439
  size_t i;
440
 
441
  for (i = 0; table[i].name; i++)
442
    if (strcmp (table[i].name, name) == 0)
443
      {
444
        *entry = table + i;
445
        return true;
446
      }
447
  return false;
448
}
449
 
450
/* Implement TARGET_HANDLE_OPTION.  */
451
 
452
static bool
453
m68k_handle_option (size_t code, const char *arg, int value)
454
{
455
  switch (code)
456
    {
457
    case OPT_march_:
458
      return m68k_find_selection (&m68k_arch_entry, all_isas, arg);
459
 
460
    case OPT_mcpu_:
461
      return m68k_find_selection (&m68k_cpu_entry, all_devices, arg);
462
 
463
    case OPT_mtune_:
464
      return m68k_find_selection (&m68k_tune_entry, all_microarchs, arg);
465
 
466
    case OPT_m5200:
467
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206");
468
 
469
    case OPT_m5206e:
470
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206e");
471
 
472
    case OPT_m528x:
473
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "528x");
474
 
475
    case OPT_m5307:
476
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "5307");
477
 
478
    case OPT_m5407:
479
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "5407");
480
 
481
    case OPT_mcfv4e:
482
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "547x");
483
 
484
    case OPT_m68000:
485
    case OPT_mc68000:
486
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "68000");
487
 
488
    case OPT_m68010:
489
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "68010");
490
 
491
    case OPT_m68020:
492
    case OPT_mc68020:
493
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "68020");
494
 
495
    case OPT_m68020_40:
496
      return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
497
                                   "68020-40")
498
              && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));
499
 
500
    case OPT_m68020_60:
501
      return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
502
                                   "68020-60")
503
              && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));
504
 
505
    case OPT_m68030:
506
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "68030");
507
 
508
    case OPT_m68040:
509
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "68040");
510
 
511
    case OPT_m68060:
512
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "68060");
513
 
514
    case OPT_m68302:
515
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "68302");
516
 
517
    case OPT_m68332:
518
    case OPT_mcpu32:
519
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "68332");
520
 
521
    case OPT_mshared_library_id_:
522
      if (value > MAX_LIBRARY_ID)
523
        error ("-mshared-library-id=%s is not between 0 and %d",
524
               arg, MAX_LIBRARY_ID);
525
      else
526
        {
527
          char *tmp;
528
          asprintf (&tmp, "%d", (value * -4) - 4);
529
          m68k_library_id_string = tmp;
530
        }
531
      return true;
532
 
533
    default:
534
      return true;
535
    }
536
}
537
 
538
/* Sometimes certain combinations of command options do not make
539
   sense on a particular target machine.  You can define a macro
540
   `OVERRIDE_OPTIONS' to take account of this.  This macro, if
541
   defined, is executed once just after all the command options have
542
   been parsed.
543
 
544
   Don't use this macro to turn on various extra optimizations for
545
   `-O'.  That is what `OPTIMIZATION_OPTIONS' is for.  */
546
 
547
void
548
override_options (void)
549
{
550
  const struct m68k_target_selection *entry;
551
  unsigned long target_mask;
552
 
553
  /* User can choose:
554
 
555
     -mcpu=
556
     -march=
557
     -mtune=
558
 
559
     -march=ARCH should generate code that runs any processor
560
     implementing architecture ARCH.  -mcpu=CPU should override -march
561
     and should generate code that runs on processor CPU, making free
562
     use of any instructions that CPU understands.  -mtune=UARCH applies
563
     on top of -mcpu or -march and optimizes the code for UARCH.  It does
564
     not change the target architecture.  */
565
  if (m68k_cpu_entry)
566
    {
567
      /* Complain if the -march setting is for a different microarchitecture,
568
         or includes flags that the -mcpu setting doesn't.  */
569
      if (m68k_arch_entry
570
          && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
571
              || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
572
        warning (0, "-mcpu=%s conflicts with -march=%s",
573
                 m68k_cpu_entry->name, m68k_arch_entry->name);
574
 
575
      entry = m68k_cpu_entry;
576
    }
577
  else
578
    entry = m68k_arch_entry;
579
 
580
  if (!entry)
581
    entry = all_devices + TARGET_CPU_DEFAULT;
582
 
583
  m68k_cpu_flags = entry->flags;
584
 
585
  /* Use the architecture setting to derive default values for
586
     certain flags.  */
587
  target_mask = 0;
588
 
589
  /* ColdFire is lenient about alignment.  */
590
  if (!TARGET_COLDFIRE)
591
    target_mask |= MASK_STRICT_ALIGNMENT;
592
 
593
  if ((m68k_cpu_flags & FL_BITFIELD) != 0)
594
    target_mask |= MASK_BITFIELD;
595
  if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
596
    target_mask |= MASK_CF_HWDIV;
597
  if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
598
    target_mask |= MASK_HARD_FLOAT;
599
  target_flags |= target_mask & ~target_flags_explicit;
600
 
601
  /* Set the directly-usable versions of the -mcpu and -mtune settings.  */
602
  m68k_cpu = entry->device;
603
  if (m68k_tune_entry)
604
    {
605
      m68k_tune = m68k_tune_entry->microarch;
606
      m68k_tune_flags = m68k_tune_entry->flags;
607
    }
608
#ifdef M68K_DEFAULT_TUNE
609
  else if (!m68k_cpu_entry && !m68k_arch_entry)
610
    {
611
      enum target_device dev;
612
      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
613
      m68k_tune_flags = all_devices[dev]->flags;
614
    }
615
#endif
616
  else
617
    {
618
      m68k_tune = entry->microarch;
619
      m68k_tune_flags = entry->flags;
620
    }
621
 
622
  /* Set the type of FPU.  */
623
  m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
624
              : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
625
              : FPUTYPE_68881);
626
 
627
  /* Sanity check to ensure that msep-data and mid-sahred-library are not
628
   * both specified together.  Doing so simply doesn't make sense.
629
   */
630
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
631
    error ("cannot specify both -msep-data and -mid-shared-library");
632
 
633
  /* If we're generating code for a separate A5 relative data segment,
634
   * we've got to enable -fPIC as well.  This might be relaxable to
635
   * -fpic but it hasn't been tested properly.
636
   */
637
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
638
    flag_pic = 2;
639
 
640
  /* -mpcrel -fPIC uses 32-bit pc-relative displacements.  Raise an
641
     error if the target does not support them.  */
642
  if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
643
    error ("-mpcrel -fPIC is not currently supported on selected cpu");
644
 
645
  /* ??? A historic way of turning on pic, or is this intended to
646
     be an embedded thing that doesn't have the same name binding
647
     significance that it does on hosted ELF systems?  */
648
  if (TARGET_PCREL && flag_pic == 0)
649
    flag_pic = 1;
650
 
651
  if (!flag_pic)
652
    {
653
      m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;
654
 
655
      m68k_symbolic_jump = "jra %a0";
656
    }
657
  else if (TARGET_ID_SHARED_LIBRARY)
658
    /* All addresses must be loaded from the GOT.  */
659
    ;
660
  else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
661
    {
662
      if (TARGET_PCREL)
663
        m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
664
      else
665
        m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;
666
 
667
      if (TARGET_ISAC)
668
        /* No unconditional long branch */;
669
      else if (TARGET_PCREL)
670
        m68k_symbolic_jump = "bra%.l %c0";
671
      else
672
        m68k_symbolic_jump = "bra%.l %p0";
673
      /* Turn off function cse if we are doing PIC.  We always want
674
         function call to be done as `bsr foo@PLTPC'.  */
675
      /* ??? It's traditional to do this for -mpcrel too, but it isn't
676
         clear how intentional that is.  */
677
      flag_no_function_cse = 1;
678
    }
679
 
680
  switch (m68k_symbolic_call_var)
681
    {
682
    case M68K_SYMBOLIC_CALL_JSR:
683
      m68k_symbolic_call = "jsr %a0";
684
      break;
685
 
686
    case M68K_SYMBOLIC_CALL_BSR_C:
687
      m68k_symbolic_call = "bsr%.l %c0";
688
      break;
689
 
690
    case M68K_SYMBOLIC_CALL_BSR_P:
691
      m68k_symbolic_call = "bsr%.l %p0";
692
      break;
693
 
694
    case M68K_SYMBOLIC_CALL_NONE:
695
      gcc_assert (m68k_symbolic_call == NULL);
696
      break;
697
 
698
    default:
699
      gcc_unreachable ();
700
    }
701
 
702
#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
703
  if (align_labels > 2)
704
    {
705
      warning (0, "-falign-labels=%d is not supported", align_labels);
706
      align_labels = 0;
707
    }
708
  if (align_loops > 2)
709
    {
710
      warning (0, "-falign-loops=%d is not supported", align_loops);
711
      align_loops = 0;
712
    }
713
#endif
714
 
715
  SUBTARGET_OVERRIDE_OPTIONS;
716
 
717
  /* Setup scheduling options.  */
718
  if (TUNE_CFV1)
719
    m68k_sched_cpu = CPU_CFV1;
720
  else if (TUNE_CFV2)
721
    m68k_sched_cpu = CPU_CFV2;
722
  else if (TUNE_CFV3)
723
    m68k_sched_cpu = CPU_CFV3;
724
  else if (TUNE_CFV4)
725
    m68k_sched_cpu = CPU_CFV4;
726
  else
727
    {
728
      m68k_sched_cpu = CPU_UNKNOWN;
729
      flag_schedule_insns = 0;
730
      flag_schedule_insns_after_reload = 0;
731
      flag_modulo_sched = 0;
732
    }
733
 
734
  if (m68k_sched_cpu != CPU_UNKNOWN)
735
    {
736
      if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
737
        m68k_sched_mac = MAC_CF_EMAC;
738
      else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
739
        m68k_sched_mac = MAC_CF_MAC;
740
      else
741
        m68k_sched_mac = MAC_NO;
742
    }
743
}
744
 
745
/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
746
   given argument and NAME is the argument passed to -mcpu.  Return NULL
747
   if -mcpu was not passed.  */
748
 
749
const char *
750
m68k_cpp_cpu_ident (const char *prefix)
751
{
752
  if (!m68k_cpu_entry)
753
    return NULL;
754
  return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
755
}
756
 
757
/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
758
   given argument and NAME is the name of the representative device for
759
   the -mcpu argument's family.  Return NULL if -mcpu was not passed.  */
760
 
761
const char *
762
m68k_cpp_cpu_family (const char *prefix)
763
{
764
  if (!m68k_cpu_entry)
765
    return NULL;
766
  return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
767
}
768
 
769
/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
770
   "interrupt_handler" attribute and interrupt_thread if FUNC has an
771
   "interrupt_thread" attribute.  Otherwise, return
772
   m68k_fk_normal_function.  */
773
 
774
enum m68k_function_kind
775
m68k_get_function_kind (tree func)
776
{
777
  tree a;
778
 
779
  gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
780
 
781
  a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
782
  if (a != NULL_TREE)
783
    return m68k_fk_interrupt_handler;
784
 
785
  a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
786
  if (a != NULL_TREE)
787
    return m68k_fk_interrupt_handler;
788
 
789
  a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
790
  if (a != NULL_TREE)
791
    return m68k_fk_interrupt_thread;
792
 
793
  return m68k_fk_normal_function;
794
}
795
 
796
/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
797
   struct attribute_spec.handler.  */
798
static tree
799
m68k_handle_fndecl_attribute (tree *node, tree name,
800
                              tree args ATTRIBUTE_UNUSED,
801
                              int flags ATTRIBUTE_UNUSED,
802
                              bool *no_add_attrs)
803
{
804
  if (TREE_CODE (*node) != FUNCTION_DECL)
805
    {
806
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
807
               name);
808
      *no_add_attrs = true;
809
    }
810
 
811
  if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
812
    {
813
      error ("multiple interrupt attributes not allowed");
814
      *no_add_attrs = true;
815
    }
816
 
817
  if (!TARGET_FIDOA
818
      && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
819
    {
820
      error ("interrupt_thread is available only on fido");
821
      *no_add_attrs = true;
822
    }
823
 
824
  return NULL_TREE;
825
}
826
 
827
static void
828
m68k_compute_frame_layout (void)
829
{
830
  int regno, saved;
831
  unsigned int mask;
832
  enum m68k_function_kind func_kind =
833
    m68k_get_function_kind (current_function_decl);
834
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
835
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;
836
 
837
  /* Only compute the frame once per function.
838
     Don't cache information until reload has been completed.  */
839
  if (current_frame.funcdef_no == current_function_funcdef_no
840
      && reload_completed)
841
    return;
842
 
843
  current_frame.size = (get_frame_size () + 3) & -4;
844
 
845
  mask = saved = 0;
846
 
847
  /* Interrupt thread does not need to save any register.  */
848
  if (!interrupt_thread)
849
    for (regno = 0; regno < 16; regno++)
850
      if (m68k_save_reg (regno, interrupt_handler))
851
        {
852
          mask |= 1 << (regno - D0_REG);
853
          saved++;
854
        }
855
  current_frame.offset = saved * 4;
856
  current_frame.reg_no = saved;
857
  current_frame.reg_mask = mask;
858
 
859
  current_frame.foffset = 0;
860
  mask = saved = 0;
861
  if (TARGET_HARD_FLOAT)
862
    {
863
      /* Interrupt thread does not need to save any register.  */
864
      if (!interrupt_thread)
865
        for (regno = 16; regno < 24; regno++)
866
          if (m68k_save_reg (regno, interrupt_handler))
867
            {
868
              mask |= 1 << (regno - FP0_REG);
869
              saved++;
870
            }
871
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
872
      current_frame.offset += current_frame.foffset;
873
    }
874
  current_frame.fpu_no = saved;
875
  current_frame.fpu_mask = mask;
876
 
877
  /* Remember what function this frame refers to.  */
878
  current_frame.funcdef_no = current_function_funcdef_no;
879
}
880
 
881
/* Worker function for TARGET_CAN_ELIMINATE.  */
882
 
883
bool
884
m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
885
{
886
  return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
887
}
888
 
889
HOST_WIDE_INT
890
m68k_initial_elimination_offset (int from, int to)
891
{
892
  int argptr_offset;
893
  /* The arg pointer points 8 bytes before the start of the arguments,
894
     as defined by FIRST_PARM_OFFSET.  This makes it coincident with the
895
     frame pointer in most frames.  */
896
  argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
897
  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
898
    return argptr_offset;
899
 
900
  m68k_compute_frame_layout ();
901
 
902
  gcc_assert (to == STACK_POINTER_REGNUM);
903
  switch (from)
904
    {
905
    case ARG_POINTER_REGNUM:
906
      return current_frame.offset + current_frame.size - argptr_offset;
907
    case FRAME_POINTER_REGNUM:
908
      return current_frame.offset + current_frame.size;
909
    default:
910
      gcc_unreachable ();
911
    }
912
}
913
 
914
/* Refer to the array `regs_ever_live' to determine which registers
915
   to save; `regs_ever_live[I]' is nonzero if register number I
916
   is ever used in the function.  This function is responsible for
917
   knowing which registers should not be saved even if used.
918
   Return true if we need to save REGNO.  */
919
 
920
static bool
921
m68k_save_reg (unsigned int regno, bool interrupt_handler)
922
{
923
  if (flag_pic && regno == PIC_REG)
924
    {
925
      if (crtl->saves_all_registers)
926
        return true;
927
      if (crtl->uses_pic_offset_table)
928
        return true;
929
      /* Reload may introduce constant pool references into a function
930
         that thitherto didn't need a PIC register.  Note that the test
931
         above will not catch that case because we will only set
932
         crtl->uses_pic_offset_table when emitting
933
         the address reloads.  */
934
      if (crtl->uses_const_pool)
935
        return true;
936
    }
937
 
938
  if (crtl->calls_eh_return)
939
    {
940
      unsigned int i;
941
      for (i = 0; ; i++)
942
        {
943
          unsigned int test = EH_RETURN_DATA_REGNO (i);
944
          if (test == INVALID_REGNUM)
945
            break;
946
          if (test == regno)
947
            return true;
948
        }
949
    }
950
 
951
  /* Fixed regs we never touch.  */
952
  if (fixed_regs[regno])
953
    return false;
954
 
955
  /* The frame pointer (if it is such) is handled specially.  */
956
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
957
    return false;
958
 
959
  /* Interrupt handlers must also save call_used_regs
960
     if they are live or when calling nested functions.  */
961
  if (interrupt_handler)
962
    {
963
      if (df_regs_ever_live_p (regno))
964
        return true;
965
 
966
      if (!current_function_is_leaf && call_used_regs[regno])
967
        return true;
968
    }
969
 
970
  /* Never need to save registers that aren't touched.  */
971
  if (!df_regs_ever_live_p (regno))
972
    return false;
973
 
974
  /* Otherwise save everything that isn't call-clobbered.  */
975
  return !call_used_regs[regno];
976
}
977
 
978
/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.

   Returns the emitted insn (a PARALLEL of SETs).  */

static rtx
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
                 unsigned int count, unsigned int regno,
                 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  enum machine_mode mode;

  /* One SET per register, plus one extra SET for the stack adjustment
     when requested.  */
  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  /* All registers in a movem group share the raw mode of the first.  */
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      /* Stores move BASE down by the block size (pre-decrement),
         loads move it up (post-increment).  */
      src = plus_constant (base, (count
                                  * GET_MODE_SIZE (mode)
                                  * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
    }

  /* Walk MASK from bit 0 upwards, one SET per selected register.
     Indexing operands[] by !store_p/store_p selects which side is the
     memory reference and which is the register, so the same SET
     builder handles both directions.  */
  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
        addr = plus_constant (base, offset);
        operands[!store_p] = gen_frame_mem (mode, addr);
        operands[store_p] = gen_rtx_REG (mode, regno);
        XVECEXP (body, 0, i++)
          = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
        offset += GET_MODE_SIZE (mode);
      }
  /* Every slot of the PARALLEL must have been filled.  */
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
1020
 
1021
/* Make INSN a frame-related instruction.  */
1022
 
1023
static void
1024
m68k_set_frame_related (rtx insn)
1025
{
1026
  rtx body;
1027
  int i;
1028
 
1029
  RTX_FRAME_RELATED_P (insn) = 1;
1030
  body = PATTERN (insn);
1031
  if (GET_CODE (body) == PARALLEL)
1032
    for (i = 0; i < XVECLEN (body, 0); i++)
1033
      RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
1034
}
1035
 
1036
/* Emit RTL for the "prologue" define_expand.  */
1037
 
1038
void
1039
m68k_expand_prologue (void)
1040
{
1041
  HOST_WIDE_INT fsize_with_regs;
1042
  rtx limit, src, dest, insn;
1043
 
1044
  m68k_compute_frame_layout ();
1045
 
1046
  /* If the stack limit is a symbol, we can check it here,
1047
     before actually allocating the space.  */
1048
  if (crtl->limit_stack
1049
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
1050
    {
1051
      limit = plus_constant (stack_limit_rtx, current_frame.size + 4);
1052
      if (!LEGITIMATE_CONSTANT_P (limit))
1053
        {
1054
          emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
1055
          limit = gen_rtx_REG (Pmode, D0_REG);
1056
        }
1057
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
1058
                                            stack_pointer_rtx, limit),
1059
                               stack_pointer_rtx, limit,
1060
                               const1_rtx));
1061
    }
1062
 
1063
  fsize_with_regs = current_frame.size;
1064
  if (TARGET_COLDFIRE)
1065
    {
1066
      /* ColdFire's move multiple instructions do not allow pre-decrement
1067
         addressing.  Add the size of movem saves to the initial stack
1068
         allocation instead.  */
1069
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
1070
        fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
1071
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
1072
        fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
1073
    }
1074
 
1075
  if (frame_pointer_needed)
1076
    {
1077
      if (fsize_with_regs == 0 && TUNE_68040)
1078
        {
1079
          /* On the 68040, two separate moves are faster than link.w 0.  */
1080
          dest = gen_frame_mem (Pmode,
1081
                                gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
1082
          m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
1083
          m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
1084
                                                  stack_pointer_rtx));
1085
        }
1086
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
1087
        m68k_set_frame_related
1088
          (emit_insn (gen_link (frame_pointer_rtx,
1089
                                GEN_INT (-4 - fsize_with_regs))));
1090
      else
1091
        {
1092
          m68k_set_frame_related
1093
            (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
1094
          m68k_set_frame_related
1095
            (emit_insn (gen_addsi3 (stack_pointer_rtx,
1096
                                    stack_pointer_rtx,
1097
                                    GEN_INT (-fsize_with_regs))));
1098
        }
1099
 
1100
      /* If the frame pointer is needed, emit a special barrier that
1101
         will prevent the scheduler from moving stores to the frame
1102
         before the stack adjustment.  */
1103
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
1104
    }
1105
  else if (fsize_with_regs != 0)
1106
    m68k_set_frame_related
1107
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
1108
                              stack_pointer_rtx,
1109
                              GEN_INT (-fsize_with_regs))));
1110
 
1111
  if (current_frame.fpu_mask)
1112
    {
1113
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
1114
      if (TARGET_68881)
1115
        m68k_set_frame_related
1116
          (m68k_emit_movem (stack_pointer_rtx,
1117
                            current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
1118
                            current_frame.fpu_no, FP0_REG,
1119
                            current_frame.fpu_mask, true, true));
1120
      else
1121
        {
1122
          int offset;
1123
 
1124
          /* If we're using moveml to save the integer registers,
1125
             the stack pointer will point to the bottom of the moveml
1126
             save area.  Find the stack offset of the first FP register.  */
1127
          if (current_frame.reg_no < MIN_MOVEM_REGS)
1128
            offset = 0;
1129
          else
1130
            offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
1131
          m68k_set_frame_related
1132
            (m68k_emit_movem (stack_pointer_rtx, offset,
1133
                              current_frame.fpu_no, FP0_REG,
1134
                              current_frame.fpu_mask, true, false));
1135
        }
1136
    }
1137
 
1138
  /* If the stack limit is not a symbol, check it here.
1139
     This has the disadvantage that it may be too late...  */
1140
  if (crtl->limit_stack)
1141
    {
1142
      if (REG_P (stack_limit_rtx))
1143
        emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
1144
                                              stack_limit_rtx),
1145
                                 stack_pointer_rtx, stack_limit_rtx,
1146
                                 const1_rtx));
1147
 
1148
      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
1149
        warning (0, "stack limit expression is not supported");
1150
    }
1151
 
1152
  if (current_frame.reg_no < MIN_MOVEM_REGS)
1153
    {
1154
      /* Store each register separately in the same order moveml does.  */
1155
      int i;
1156
 
1157
      for (i = 16; i-- > 0; )
1158
        if (current_frame.reg_mask & (1 << i))
1159
          {
1160
            src = gen_rtx_REG (SImode, D0_REG + i);
1161
            dest = gen_frame_mem (SImode,
1162
                                  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
1163
            m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
1164
          }
1165
    }
1166
  else
1167
    {
1168
      if (TARGET_COLDFIRE)
1169
        /* The required register save space has already been allocated.
1170
           The first register should be stored at (%sp).  */
1171
        m68k_set_frame_related
1172
          (m68k_emit_movem (stack_pointer_rtx, 0,
1173
                            current_frame.reg_no, D0_REG,
1174
                            current_frame.reg_mask, true, false));
1175
      else
1176
        m68k_set_frame_related
1177
          (m68k_emit_movem (stack_pointer_rtx,
1178
                            current_frame.reg_no * -GET_MODE_SIZE (SImode),
1179
                            current_frame.reg_no, D0_REG,
1180
                            current_frame.reg_mask, true, true));
1181
    }
1182
 
1183
  if (!TARGET_SEP_DATA
1184
      && crtl->uses_pic_offset_table)
1185
    insn = emit_insn (gen_load_got (pic_offset_table_rtx));
1186
}
1187
 
1188
/* Return true if a simple (return) instruction is sufficient for this
1189
   instruction (i.e. if no epilogue is needed).  */
1190
 
1191
bool
1192
m68k_use_return_insn (void)
1193
{
1194
  if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
1195
    return false;
1196
 
1197
  m68k_compute_frame_layout ();
1198
  return current_frame.offset == 0;
1199
}
1200
 
1201
/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  /* big: offsets exceed 16-bit displacements, so an index register
     (%a1) holds the negated offset.  restore_from_sp: registers are
     popped via the stack pointer rather than addressed off %fp.  */
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME : current_function_is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
                     || (!cfun->calls_alloca
                         && current_function_is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
         addressing.  Add the size of movem loads to the final deallocation
         instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
        fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
        fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  /* Frame offsets of 0x8000 or more don't fit a 16-bit displacement
     off %fp; arrange an alternative addressing strategy.  */
  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
          && (current_frame.reg_no >= MIN_MOVEM_REGS
              || current_frame.fpu_no >= MIN_FMOVEM_REGS))
        {
          /* ColdFire's move multiple instructions do not support the
             (d8,Ax,Xi) addressing mode, so we're as well using a normal
             stack-based restore.  */
          emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
                          GEN_INT (-(current_frame.offset + fsize)));
          emit_insn (gen_addsi3 (stack_pointer_rtx,
                                 gen_rtx_REG (Pmode, A1_REG),
                                 frame_pointer_rtx));
          restore_from_sp = true;
        }
      else
        {
          /* Use %a1 as an index register: address as -OFFSET(%fp,%a1.l).  */
          emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
          fsize = 0;
          big = true;
        }
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
        if (current_frame.reg_mask & (1 << i))
          {
            rtx addr;

            if (big)
              {
                /* Generate the address -OFFSET(%fp,%a1.l).  */
                addr = gen_rtx_REG (Pmode, A1_REG);
                addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
                addr = plus_constant (addr, -offset);
              }
            else if (restore_from_sp)
              addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
            else
              addr = plus_constant (frame_pointer_rtx, -offset);
            emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
                            gen_frame_mem (SImode, addr));
            offset -= GET_MODE_SIZE (SImode);
          }
    }
  else if (current_frame.reg_mask)
    {
      /* Restore the integer registers with a single movem.  */
      if (big)
        m68k_emit_movem (gen_rtx_PLUS (Pmode,
                                       gen_rtx_REG (Pmode, A1_REG),
                                       frame_pointer_rtx),
                         -(current_frame.offset + fsize),
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false, false);
      else if (restore_from_sp)
        m68k_emit_movem (stack_pointer_rtx, 0,
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false,
                         !TARGET_COLDFIRE);
      else
        m68k_emit_movem (frame_pointer_rtx,
                         -(current_frame.offset + fsize),
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      /* Restore the floating-point registers with fmovem.  */
      if (big)
        m68k_emit_movem (gen_rtx_PLUS (Pmode,
                                       gen_rtx_REG (Pmode, A1_REG),
                                       frame_pointer_rtx),
                         -(current_frame.foffset + fsize),
                         current_frame.fpu_no, FP0_REG,
                         current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
        {
          if (TARGET_COLDFIRE)
            {
              int offset;

              /* If we used moveml to restore the integer registers, the
                 stack pointer will still point to the bottom of the moveml
                 save area.  Find the stack offset of the first FP
                 register.  */
              if (current_frame.reg_no < MIN_MOVEM_REGS)
                offset = 0;
              else
                offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
              m68k_emit_movem (stack_pointer_rtx, offset,
                               current_frame.fpu_no, FP0_REG,
                               current_frame.fpu_mask, false, false);
            }
          else
            m68k_emit_movem (stack_pointer_rtx, 0,
                             current_frame.fpu_no, FP0_REG,
                             current_frame.fpu_mask, false, true);
        }
      else
        m68k_emit_movem (frame_pointer_rtx,
                         -(current_frame.foffset + fsize),
                         current_frame.fpu_no, FP0_REG,
                         current_frame.fpu_mask, false, false);
    }

  /* Tear down the frame: unlk restores both %fp and %sp; otherwise
     pop the frame by adjusting the stack pointer explicitly.  */
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
                           stack_pointer_rtx,
                           GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
                           stack_pointer_rtx,
                           EH_RETURN_STACKADJ_RTX));

  /* A sibcall epilogue falls through to the sibling call itself.  */
  if (!sibcall_p)
    emit_jump_insn (gen_rtx_RETURN (VOIDmode));
}
1371
 
1372
/* Return true if X is a valid comparison operator for the dbcc
1373
   instruction.
1374
 
1375
   Note it rejects floating point comparison operators.
1376
   (In the future we could use Fdbcc).
1377
 
1378
   It also rejects some comparisons when CC_NO_OVERFLOW is set.  */
1379
 
1380
int
1381
valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
1382
{
1383
  switch (GET_CODE (x))
1384
    {
1385
      case EQ: case NE: case GTU: case LTU:
1386
      case GEU: case LEU:
1387
        return 1;
1388
 
1389
      /* Reject some when CC_NO_OVERFLOW is set.  This may be over
1390
         conservative */
1391
      case GT: case LT: case GE: case LE:
1392
        return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
1393
      default:
1394
        return 0;
1395
    }
1396
}
1397
 
1398
/* Return nonzero if flags are currently in the 68881 flag register.
   Note: the return value is the raw CC_IN_68881 bit, not 0/1 —
   callers must only test it for zero/nonzero.  */
int
flags_in_68881 (void)
{
  /* We could add support for these in the future */
  return cc_status.flags & CC_IN_68881;
}
1405
 
1406
/* Return true if PARALLEL contains register REGNO.  */
1407
static bool
1408
m68k_reg_present_p (const_rtx parallel, unsigned int regno)
1409
{
1410
  int i;
1411
 
1412
  if (REG_P (parallel) && REGNO (parallel) == regno)
1413
    return true;
1414
 
1415
  if (GET_CODE (parallel) != PARALLEL)
1416
    return false;
1417
 
1418
  for (i = 0; i < XVECLEN (parallel, 0); ++i)
1419
    {
1420
      const_rtx x;
1421
 
1422
      x = XEXP (XVECEXP (parallel, 0, i), 0);
1423
      if (REG_P (x) && REGNO (x) == regno)
1424
        return true;
1425
    }
1426
 
1427
  return false;
1428
}
1429
 
1430
/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P.  */
1431
 
1432
static bool
1433
m68k_ok_for_sibcall_p (tree decl, tree exp)
1434
{
1435
  enum m68k_function_kind kind;
1436
 
1437
  /* We cannot use sibcalls for nested functions because we use the
1438
     static chain register for indirect calls.  */
1439
  if (CALL_EXPR_STATIC_CHAIN (exp))
1440
    return false;
1441
 
1442
  if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
1443
    {
1444
      /* Check that the return value locations are the same.  For
1445
         example that we aren't returning a value from the sibling in
1446
         a D0 register but then need to transfer it to a A0 register.  */
1447
      rtx cfun_value;
1448
      rtx call_value;
1449
 
1450
      cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
1451
                                   cfun->decl);
1452
      call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);
1453
 
1454
      /* Check that the values are equal or that the result the callee
1455
         function returns is superset of what the current function returns.  */
1456
      if (!(rtx_equal_p (cfun_value, call_value)
1457
            || (REG_P (cfun_value)
1458
                && m68k_reg_present_p (call_value, REGNO (cfun_value)))))
1459
        return false;
1460
    }
1461
 
1462
  kind = m68k_get_function_kind (current_function_decl);
1463
  if (kind == m68k_fk_normal_function)
1464
    /* We can always sibcall from a normal function, because it's
1465
       undefined if it is calling an interrupt function.  */
1466
    return true;
1467
 
1468
  /* Otherwise we can only sibcall if the function kind is known to be
1469
     the same.  */
1470
  if (decl && m68k_get_function_kind (decl) == kind)
1471
    return true;
1472
 
1473
  return false;
1474
}
1475
 
1476
/* Convert X to a legitimate function call memory reference and return the
1477
   result.  */
1478
 
1479
rtx
1480
m68k_legitimize_call_address (rtx x)
1481
{
1482
  gcc_assert (MEM_P (x));
1483
  if (call_operand (XEXP (x, 0), VOIDmode))
1484
    return x;
1485
  return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
1486
}
1487
 
1488
/* Likewise for sibling calls.  */
1489
 
1490
rtx
1491
m68k_legitimize_sibcall_address (rtx x)
1492
{
1493
  gcc_assert (MEM_P (x));
1494
  if (sibcall_operand (XEXP (x, 0), VOIDmode))
1495
    return x;
1496
 
1497
  emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
1498
  return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
1499
}
1500
 
1501
/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      /* ch is nonzero once X differs from the address we started
         with; copied is nonzero once we have made a private copy of X
         (so later in-place edits don't clobber shared rtl).  */
      int ch = (x) != (oldx);
      int copied = 0;

/* Copy X at most once before mutating it, and record the change.  */
#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      /* Break out a multiplication on either side of the PLUS into a
         register; the product can go in an index register.  */
      if (GET_CODE (XEXP (x, 0)) == MULT)
        {
          COPY_ONCE (x);
          XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
        }
      if (GET_CODE (XEXP (x, 1)) == MULT)
        {
          COPY_ONCE (x);
          XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
        }
      if (ch)
        {
          if (GET_CODE (XEXP (x, 1)) == REG
              && GET_CODE (XEXP (x, 0)) == REG)
            {
              /* REG+REG is valid, except that ColdFire FPU loads do
                 not support indexed addressing.  */
              if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
                {
                  COPY_ONCE (x);
                  x = force_operand (x, 0);
                }
              return x;
            }
          if (memory_address_p (mode, x))
            return x;
        }
      /* One side is a (possibly sign-extended HImode) register: load
         the other side into a fresh register so indexing applies.  */
      if (GET_CODE (XEXP (x, 0)) == REG
          || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
              && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
              && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
        {
          rtx temp = gen_reg_rtx (Pmode);
          rtx val = force_operand (XEXP (x, 1), 0);
          emit_move_insn (temp, val);
          COPY_ONCE (x);
          XEXP (x, 1) = temp;
          if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
              && GET_CODE (XEXP (x, 0)) == REG)
            x = force_operand (x, 0);
        }
      else if (GET_CODE (XEXP (x, 1)) == REG
               || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
                   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
                   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
        {
          rtx temp = gen_reg_rtx (Pmode);
          rtx val = force_operand (XEXP (x, 0), 0);
          emit_move_insn (temp, val);
          COPY_ONCE (x);
          XEXP (x, 0) = temp;
          if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
              && GET_CODE (XEXP (x, 1)) == REG)
            x = force_operand (x, 0);
        }
    }

  return x;
}
1579
 
1580
 
1581
/* Output a dbCC; jCC sequence.  Note we do not handle the
1582
   floating point version of this sequence (Fdbcc).  We also
1583
   do not handle alternative conditions when CC_NO_OVERFLOW is
1584
   set.  It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
1585
   kick those out before we get here.  */
1586
 
1587
void
1588
output_dbcc_and_branch (rtx *operands)
1589
{
1590
  switch (GET_CODE (operands[3]))
1591
    {
1592
      case EQ:
1593
        output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
1594
        break;
1595
 
1596
      case NE:
1597
        output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
1598
        break;
1599
 
1600
      case GT:
1601
        output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
1602
        break;
1603
 
1604
      case GTU:
1605
        output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
1606
        break;
1607
 
1608
      case LT:
1609
        output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
1610
        break;
1611
 
1612
      case LTU:
1613
        output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
1614
        break;
1615
 
1616
      case GE:
1617
        output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
1618
        break;
1619
 
1620
      case GEU:
1621
        output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
1622
        break;
1623
 
1624
      case LE:
1625
        output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
1626
        break;
1627
 
1628
      case LEU:
1629
        output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
1630
        break;
1631
 
1632
      default:
1633
        gcc_unreachable ();
1634
    }
1635
 
1636
  /* If the decrement is to be done in SImode, then we have
1637
     to compensate for the fact that dbcc decrements in HImode.  */
1638
  switch (GET_MODE (operands[0]))
1639
    {
1640
      case SImode:
1641
        output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
1642
        break;
1643
 
1644
      case HImode:
1645
        break;
1646
 
1647
      default:
1648
        gcc_unreachable ();
1649
    }
1650
}
1651
 
1652
/* Output code to set byte DEST according to DImode comparison OP between
   OPERAND1 and OPERAND2.  The 64-bit operands are compared as two 32-bit
   halves: first the high words, then (if equal) the low words, branching
   to an internal label between the two sCC instructions that materialize
   the result.  Always returns "" since the insns are emitted directly.  */

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  /* loperands[0]/[1] are the high/low words of operand1; for a register
     the low word lives in the next-numbered register, for memory it is
     4 bytes further on.  */
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      /* Likewise loperands[2]/[3] for operand2 when it is not zero.  */
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
        loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
        loperands[3] = adjust_address (operand2, SImode, 4);
    }
  /* loperands[4]: label reached when the high words already decide.  */
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      /* Comparison against zero: tst is usable except for address
         registers on pre-68020 non-ColdFire parts.  */
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
        output_asm_insn ("tst%.l %0", loperands);
      else
        output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
        output_asm_insn ("tst%.l %1", loperands);
      else
        output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  /* For EQ/NE and the unsigned codes one sCC suffices; the signed codes
     need a signed sCC on the high-word path (label %4) and an unsigned
     sCC on the low-word path, joined at label %6.  */
  switch (op_code)
    {
      case EQ:
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("seq %5", loperands);
        break;

      case NE:
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("sne %5", loperands);
        break;

      case GT:
        loperands[6] = gen_label_rtx ();
        output_asm_insn ("shi %5\n\tjra %l6", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("sgt %5", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[6]));
        break;

      case GTU:
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("shi %5", loperands);
        break;

      case LT:
        loperands[6] = gen_label_rtx ();
        output_asm_insn ("scs %5\n\tjra %l6", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("slt %5", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[6]));
        break;

      case LTU:
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("scs %5", loperands);
        break;

      case GE:
        loperands[6] = gen_label_rtx ();
        output_asm_insn ("scc %5\n\tjra %l6", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("sge %5", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[6]));
        break;

      case GEU:
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("scc %5", loperands);
        break;

      case LE:
        loperands[6] = gen_label_rtx ();
        output_asm_insn ("sls %5\n\tjra %l6", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("sle %5", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[6]));
        break;

      case LEU:
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("sls %5", loperands);
        break;

      default:
        gcc_unreachable ();
    }
  return "";
}
1788
 
1789
/* Output a bit-test of bit COUNTOP of DATAOP, setting the condition codes
   for INSN.  SIGNPOS is the bit number of the sign bit of the storage unit
   in use (e.g. 7 for a byte access -- TODO confirm against callers).
   Where the bit position and the following insn allow, cheaper tst or
   move-to-ccr sequences are emitted instead of btst.  */

const char *
output_btst (rtx *operands, rtx countop, rtx dataop, rtx insn, int signpos)
{
  operands[0] = countop;
  operands[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      register int count = INTVAL (countop);
      /* If COUNT is bigger than size of storage unit in use,
         advance to the containing unit of same size.  */
      if (count > signpos)
        {
          int offset = (count & ~signpos) / 8;
          count = count & signpos;
          operands[1] = dataop = adjust_address (dataop, QImode, offset);
        }
      /* Testing the sign bit tells us about N as well as Z.  */
      if (count == signpos)
        cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
      else
        cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;

      /* These three statements used to use next_insns_test_no...
         but it appears that this should do the same job.  */
      if (count == 31
          && next_insn_tests_no_inequality (insn))
        return "tst%.l %1";
      if (count == 15
          && next_insn_tests_no_inequality (insn))
        return "tst%.w %1";
      if (count == 7
          && next_insn_tests_no_inequality (insn))
        return "tst%.b %1";
      /* Try to use `movew to ccr' followed by the appropriate branch insn.
         On some m68k variants unfortunately that's slower than btst.
         On 68000 and higher, that should also work for all HImode operands. */
      if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
        {
          if (count == 3 && DATA_REG_P (operands[1])
              && next_insn_tests_no_inequality (insn))
            {
            /* Bit 3 lands in the N flag of the CCR.  */
            cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
            return "move%.w %1,%%ccr";
            }
          if (count == 2 && DATA_REG_P (operands[1])
              && next_insn_tests_no_inequality (insn))
            {
            /* Bit 2 lands in the Z flag of the CCR, hence CC_INVERTED.  */
            cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
            return "move%.w %1,%%ccr";
            }
          /* count == 1 followed by bvc/bvs and
             count == 0 followed by bcc/bcs are also possible, but need
             m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
        }

      cc_status.flags = CC_NOT_NEGATIVE;
    }
  return "btst %0,%1";
}
1848
 
1849
/* Return true if X is a legitimate base register.  STRICT_P says
1850
   whether we need strict checking.  */
1851
 
1852
bool
1853
m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1854
{
1855
  /* Allow SUBREG everywhere we allow REG.  This results in better code.  */
1856
  if (!strict_p && GET_CODE (x) == SUBREG)
1857
    x = SUBREG_REG (x);
1858
 
1859
  return (REG_P (x)
1860
          && (strict_p
1861
              ? REGNO_OK_FOR_BASE_P (REGNO (x))
1862
              : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
1863
}
1864
 
1865
/* Return true if X is a legitimate index register.  STRICT_P says
1866
   whether we need strict checking.  */
1867
 
1868
bool
1869
m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1870
{
1871
  if (!strict_p && GET_CODE (x) == SUBREG)
1872
    x = SUBREG_REG (x);
1873
 
1874
  return (REG_P (x)
1875
          && (strict_p
1876
              ? REGNO_OK_FOR_INDEX_P (REGNO (x))
1877
              : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
1878
}
1879
 
1880
/* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1881
   (bd,An,Xn) addressing mode.  Fill in the INDEX and SCALE fields of
1882
   ADDRESS if so.  STRICT_P says whether we need strict checking.  */
1883
 
1884
static bool
1885
m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1886
{
1887
  int scale;
1888
 
1889
  /* Check for a scale factor.  */
1890
  scale = 1;
1891
  if ((TARGET_68020 || TARGET_COLDFIRE)
1892
      && GET_CODE (x) == MULT
1893
      && GET_CODE (XEXP (x, 1)) == CONST_INT
1894
      && (INTVAL (XEXP (x, 1)) == 2
1895
          || INTVAL (XEXP (x, 1)) == 4
1896
          || (INTVAL (XEXP (x, 1)) == 8
1897
              && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1898
    {
1899
      scale = INTVAL (XEXP (x, 1));
1900
      x = XEXP (x, 0);
1901
    }
1902
 
1903
  /* Check for a word extension.  */
1904
  if (!TARGET_COLDFIRE
1905
      && GET_CODE (x) == SIGN_EXTEND
1906
      && GET_MODE (XEXP (x, 0)) == HImode)
1907
    x = XEXP (x, 0);
1908
 
1909
  if (m68k_legitimate_index_reg_p (x, strict_p))
1910
    {
1911
      address->scale = scale;
1912
      address->index = x;
1913
      return true;
1914
    }
1915
 
1916
  return false;
1917
}
1918
 
1919
/* Return true if X is an illegitimate symbolic constant.  */
1920
 
1921
bool
1922
m68k_illegitimate_symbolic_constant_p (rtx x)
1923
{
1924
  rtx base, offset;
1925
 
1926
  if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1927
    {
1928
      split_const (x, &base, &offset);
1929
      if (GET_CODE (base) == SYMBOL_REF
1930
          && !offset_within_block_p (base, INTVAL (offset)))
1931
        return true;
1932
    }
1933
  return m68k_tls_reference_p (x, false);
1934
}
1935
 
1936
/* Return true if X is a legitimate constant address that can reach
1937
   bytes in the range [X, X + REACH).  STRICT_P says whether we need
1938
   strict checking.  */
1939
 
1940
static bool
1941
m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1942
{
1943
  rtx base, offset;
1944
 
1945
  if (!CONSTANT_ADDRESS_P (x))
1946
    return false;
1947
 
1948
  if (flag_pic
1949
      && !(strict_p && TARGET_PCREL)
1950
      && symbolic_operand (x, VOIDmode))
1951
    return false;
1952
 
1953
  if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1954
    {
1955
      split_const (x, &base, &offset);
1956
      if (GET_CODE (base) == SYMBOL_REF
1957
          && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1958
        return false;
1959
    }
1960
 
1961
  return !m68k_tls_reference_p (x, false);
1962
}
1963
 
1964
/* Return true if X is a LABEL_REF for a jump table.  Assume that unplaced
1965
   labels will become jump tables.  */
1966
 
1967
static bool
1968
m68k_jump_table_ref_p (rtx x)
1969
{
1970
  if (GET_CODE (x) != LABEL_REF)
1971
    return false;
1972
 
1973
  x = XEXP (x, 0);
1974
  if (!NEXT_INSN (x) && !PREV_INSN (x))
1975
    return true;
1976
 
1977
  x = next_nonnote_insn (x);
1978
  return x && JUMP_TABLE_DATA_P (x);
1979
}
1980
 
1981
/* Return true if X is a legitimate address for values of mode MODE.
   STRICT_P says whether strict checking is needed.  If the address
   is valid, describe its components in *ADDRESS.

   The checks are tried in order of the m68k addressing modes; each
   match fills in *ADDRESS and returns immediately.  */

static bool
m68k_decompose_address (enum machine_mode mode, rtx x,
                        bool strict_p, struct m68k_address *address)
{
  /* Number of bytes the access must be able to reach from X.  */
  unsigned int reach;

  memset (address, 0, sizeof (*address));

  if (mode == BLKmode)
    reach = 1;
  else
    reach = GET_MODE_SIZE (mode);

  /* Check for (An) (mode 2).  */
  if (m68k_legitimate_base_reg_p (x, strict_p))
    {
      address->base = x;
      return true;
    }

  /* Check for -(An) and (An)+ (modes 3 and 4).  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->code = GET_CODE (x);
      address->base = XEXP (x, 0);
      return true;
    }

  /* Check for (d16,An) (mode 5).  The whole REACH-byte access must fit
     in the signed 16-bit displacement.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Check for GOT loads.  These are (bd,An,Xn) addresses if
     TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
     addresses.  */
  if (GET_CODE (x) == PLUS
      && XEXP (x, 0) == pic_offset_table_rtx)
    {
      /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
         they are invalid in this context.  */
      if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
        {
          address->base = XEXP (x, 0);
          address->offset = XEXP (x, 1);
          return true;
        }
    }

  /* The ColdFire FPU only accepts addressing modes 2-5.  */
  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return false;

  /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
     check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
     All these modes are variations of mode 7.  */
  if (m68k_legitimate_constant_address_p (x, reach, strict_p))
    {
      address->offset = x;
      return true;
    }

  /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
     tablejumps.

     ??? do_tablejump creates these addresses before placing the target
     label, so we have to assume that unplaced labels are jump table
     references.  It seems unlikely that we would ever generate indexed
     accesses to unplaced labels in other cases.  */
  if (GET_CODE (x) == PLUS
      && m68k_jump_table_ref_p (XEXP (x, 1))
      && m68k_decompose_index (XEXP (x, 0), strict_p, address))
    {
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
     (bd,An,Xn.SIZE*SCALE) addresses.  */

  if (TARGET_68020)
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
          && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
        {
          address->offset = XEXP (x, 1);
          x = XEXP (x, 0);
        }

      /* Check for a suppressed index register.  */
      if (m68k_legitimate_base_reg_p (x, strict_p))
        {
          address->base = x;
          return true;
        }

      /* Check for a suppressed base register.  Do not allow this case
         for non-symbolic offsets as it effectively gives gcc freedom
         to treat data registers as base registers, which can generate
         worse code.  */
      if (address->offset
          && symbolic_operand (address->offset, VOIDmode)
          && m68k_decompose_index (x, strict_p, address))
        return true;
    }
  else
    {
      /* Check for a nonzero base displacement.  Pre-68020 only allows
         a signed 8-bit displacement here.  */
      if (GET_CODE (x) == PLUS
          && GET_CODE (XEXP (x, 1)) == CONST_INT
          && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
        {
          address->offset = XEXP (x, 1);
          x = XEXP (x, 0);
        }
    }

  /* We now expect the sum of a base and an index.  Try both operand
     orders, since canonicalization is not guaranteed here.  */
  if (GET_CODE (x) == PLUS)
    {
      if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
          && m68k_decompose_index (XEXP (x, 1), strict_p, address))
        {
          address->base = XEXP (x, 0);
          return true;
        }

      if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
          && m68k_decompose_index (XEXP (x, 0), strict_p, address))
        {
          address->base = XEXP (x, 1);
          return true;
        }
    }
  return false;
}
2129
 
2130
/* Return true if X is a legitimate address for values of mode MODE.
2131
   STRICT_P says whether strict checking is needed.  */
2132
 
2133
bool
2134
m68k_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
2135
{
2136
  struct m68k_address address;
2137
 
2138
  return m68k_decompose_address (mode, x, strict_p, &address);
2139
}
2140
 
2141
/* Return true if X is a memory, describing its address in ADDRESS if so.
2142
   Apply strict checking if called during or after reload.  */
2143
 
2144
static bool
2145
m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2146
{
2147
  return (MEM_P (x)
2148
          && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2149
                                     reload_in_progress || reload_completed,
2150
                                     address));
2151
}
2152
 
2153
/* Return true if X matches the 'Q' constraint.  It must be a memory
2154
   with a base address and no constant offset or index.  */
2155
 
2156
bool
2157
m68k_matches_q_p (rtx x)
2158
{
2159
  struct m68k_address address;
2160
 
2161
  return (m68k_legitimate_mem_p (x, &address)
2162
          && address.code == UNKNOWN
2163
          && address.base
2164
          && !address.offset
2165
          && !address.index);
2166
}
2167
 
2168
/* Return true if X matches the 'U' constraint.  It must be a base address
2169
   with a constant offset and no index.  */
2170
 
2171
bool
2172
m68k_matches_u_p (rtx x)
2173
{
2174
  struct m68k_address address;
2175
 
2176
  return (m68k_legitimate_mem_p (x, &address)
2177
          && address.code == UNKNOWN
2178
          && address.base
2179
          && address.offset
2180
          && !address.index);
2181
}
2182
 
2183
/* Return GOT pointer.  */
2184
 
2185
static rtx
2186
m68k_get_gp (void)
2187
{
2188
  if (pic_offset_table_rtx == NULL_RTX)
2189
    pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2190
 
2191
  crtl->uses_pic_offset_table = 1;
2192
 
2193
  return pic_offset_table_rtx;
2194
}
2195
 
2196
/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
   wrappers.  */
enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
                  RELOC_TLSIE, RELOC_TLSLE };

/* True if RELOC denotes a TLS (rather than GOT) relocation.  */
#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2202
 
2203
/* Wrap symbol X into unspec representing relocation RELOC.
   BASE_REG - register that should be added to the result.
   TEMP_REG - if non-null, temporary register.

   Returns either a (plus BASE_REG (const (unspec ...))) expression or,
   in the ColdFire -mxgot/-mxtls case, a register holding the already
   computed sum (insns are emitted as a side effect).  */

static rtx
m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
{
  bool use_x_p;

  /* -mxgot applies to GOT accesses (BASE_REG is the PIC register),
     -mxtls to TLS accesses.  */
  use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;

  if (TARGET_COLDFIRE && use_x_p)
    /* When compiling with -mx{got, tls} switch the code will look like this:

       move.l <X>@<RELOC>,<TEMP_REG>
       add.l <BASE_REG>,<TEMP_REG>  */
    {
      /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
         to put @RELOC after reference.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
                          UNSPEC_RELOC32);
      x = gen_rtx_CONST (Pmode, x);

      if (temp_reg == NULL)
        {
          gcc_assert (can_create_pseudo_p ());
          temp_reg = gen_reg_rtx (Pmode);
        }

      emit_move_insn (temp_reg, x);
      emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
      x = temp_reg;
    }
  else
    {
      /* Small-offset case: a 16-bit relocation added to BASE_REG.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
                          UNSPEC_RELOC16);
      x = gen_rtx_CONST (Pmode, x);

      x = gen_rtx_PLUS (Pmode, base_reg, x);
    }

  return x;
}
2247
 
2248
/* Helper for m68k_unwrap_symbol.
   Also, if unwrapping was successful (that is if (ORIG != <return value>)),
   sets *RELOC_PTR to relocation type for the symbol.

   ORIG is expected to be a (const ...) wrapper; anything else is
   returned unchanged.  UNWRAP_RELOC32_P controls whether UNSPEC_RELOC32
   wrappers are unwrapped in addition to UNSPEC_RELOC16 ones.  */

static rtx
m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
                      enum m68k_reloc *reloc_ptr)
{
  if (GET_CODE (orig) == CONST)
    {
      rtx x;
      enum m68k_reloc dummy;

      x = XEXP (orig, 0);

      /* Allow callers that don't care about the relocation type.  */
      if (reloc_ptr == NULL)
        reloc_ptr = &dummy;

      /* Handle an addend.  */
      if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
          && CONST_INT_P (XEXP (x, 1)))
        x = XEXP (x, 0);

      if (GET_CODE (x) == UNSPEC)
        {
          /* The unspec's operand 0 is the wrapped symbol, operand 1 the
             relocation type (see m68k_wrap_symbol).  */
          switch (XINT (x, 1))
            {
            case UNSPEC_RELOC16:
              orig = XVECEXP (x, 0, 0);
              *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
              break;

            case UNSPEC_RELOC32:
              if (unwrap_reloc32_p)
                {
                  orig = XVECEXP (x, 0, 0);
                  *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
                }
              break;

            default:
              break;
            }
        }
    }

  return orig;
}
2296
 
2297
/* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2298
   UNSPEC_RELOC32 wrappers.  */
2299
 
2300
rtx
2301
m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2302
{
2303
  return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2304
}
2305
 
2306
/* Helper for m68k_final_prescan_insn.  Called via for_each_rtx on each
   sub-rtx of an operand; rewrites
     (const (plus (unspec [(symbol)]) (const_int N)))
   into
     (const (unspec [(plus (symbol) (const_int N))]))
   in place.  Returns -1 to stop for_each_rtx descending into a rewritten
   rtx, 0 to continue traversal.  */

static int
m68k_final_prescan_insn_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *x_ptr;

  if (m68k_unwrap_symbol (x, true) != x)
    /* For rationale of the below, see comment in m68k_final_prescan_insn.  */
    {
      rtx plus;

      gcc_assert (GET_CODE (x) == CONST);
      plus = XEXP (x, 0);

      if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
        {
          rtx unspec;
          rtx addend;

          unspec = XEXP (plus, 0);
          gcc_assert (GET_CODE (unspec) == UNSPEC);
          addend = XEXP (plus, 1);
          gcc_assert (CONST_INT_P (addend));

          /* We now have all the pieces, rearrange them.  */

          /* Move symbol to plus.  */
          XEXP (plus, 0) = XVECEXP (unspec, 0, 0);

          /* Move plus inside unspec.  */
          XVECEXP (unspec, 0, 0) = plus;

          /* Move unspec to top level of const.  */
          XEXP (x, 0) = unspec;
        }

      return -1;
    }

  return 0;
}
2348
 
2349
/* Prescan insn before outputing assembler for it.  */
2350
 
2351
void
2352
m68k_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
2353
                         rtx *operands, int n_operands)
2354
{
2355
  int i;
2356
 
2357
  /* Combine and, possibly, other optimizations may do good job
2358
     converting
2359
       (const (unspec [(symbol)]))
2360
     into
2361
       (const (plus (unspec [(symbol)])
2362
                    (const_int N))).
2363
     The problem with this is emitting @TLS or @GOT decorations.
2364
     The decoration is emitted when processing (unspec), so the
2365
     result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2366
 
2367
     It seems that the easiest solution to this is to convert such
2368
     operands to
2369
       (const (unspec [(plus (symbol)
2370
                             (const_int N))])).
2371
     Note, that the top level of operand remains intact, so we don't have
2372
     to patch up anything outside of the operand.  */
2373
 
2374
  for (i = 0; i < n_operands; ++i)
2375
    {
2376
      rtx op;
2377
 
2378
      op = operands[i];
2379
 
2380
      for_each_rtx (&op, m68k_final_prescan_insn_1, NULL);
2381
    }
2382
}
2383
 
2384
/* Move X to a register and add REG_EQUAL note pointing to ORIG.
2385
   If REG is non-null, use it; generate new pseudo otherwise.  */
2386
 
2387
static rtx
2388
m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2389
{
2390
  rtx insn;
2391
 
2392
  if (reg == NULL_RTX)
2393
    {
2394
      gcc_assert (can_create_pseudo_p ());
2395
      reg = gen_reg_rtx (Pmode);
2396
    }
2397
 
2398
  insn = emit_move_insn (reg, x);
2399
  /* Put a REG_EQUAL note on this insn, so that it can be optimized
2400
     by loop.  */
2401
  set_unique_reg_note (insn, REG_EQUAL, orig);
2402
 
2403
  return reg;
2404
}
2405
 
2406
/* Does the same as m68k_wrap_symbol, but returns a memory reference to
2407
   GOT slot.  */
2408
 
2409
static rtx
2410
m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2411
{
2412
  x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2413
 
2414
  x = gen_rtx_MEM (Pmode, x);
2415
  MEM_READONLY_P (x) = 1;
2416
 
2417
  return x;
2418
}
2419
 
2420
/* Legitimize PIC addresses.  If the address is already
2421
   position-independent, we return ORIG.  Newly generated
2422
   position-independent addresses go to REG.  If we need more
2423
   than one register, we lose.
2424
 
2425
   An address is legitimized by making an indirect reference
2426
   through the Global Offset Table with the name of the symbol
2427
   used as an offset.
2428
 
2429
   The assembler and linker are responsible for placing the
2430
   address of the symbol in the GOT.  The function prologue
2431
   is responsible for initializing a5 to the starting address
2432
   of the GOT.
2433
 
2434
   The assembler is also responsible for translating a symbol name
2435
   into a constant displacement from the start of the GOT.
2436
 
2437
   A quick example may make things a little clearer:
2438
 
2439
   When not generating PIC code to store the value 12345 into _foo
2440
   we would generate the following code:
2441
 
2442
        movel #12345, _foo
2443
 
2444
   When generating PIC two transformations are made.  First, the compiler
2445
   loads the address of foo into a register.  So the first transformation makes:
2446
 
2447
        lea     _foo, a0
2448
        movel   #12345, a0@
2449
 
2450
   The code in movsi will intercept the lea instruction and call this
2451
   routine which will transform the instructions into:
2452
 
2453
        movel   a5@(_foo:w), a0
2454
        movel   #12345, a0@
2455
 
2456
 
2457
   That (in a nutshell) is how *all* symbol and label references are
2458
   handled.  */
2459
 
2460
rtx
legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
                        rtx reg)
{
  rtx pic_ref = orig;

  /* First handle a simple SYMBOL_REF or LABEL_REF */
  if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
    {
      /* A scratch register is required; callers must supply one.  */
      gcc_assert (reg);

      pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
      pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      /* Make sure this has not already been legitimized.  */
      if (m68k_unwrap_symbol (orig, true) != orig)
        return orig;

      gcc_assert (reg);

      /* legitimize both operands of the PLUS */
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Recurse: legitimize the base first, then the offset, reusing
         REG only if the base did not consume it.  */
      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        pic_ref = plus_constant (base, INTVAL (orig));
      else
        pic_ref = gen_rtx_PLUS (Pmode, base, orig);
    }

  return pic_ref;
}
2499
 
2500
/* Cached SYMBOL_REF for the __tls_get_addr libfunc (lazily initialized
   by m68k_get_tls_get_addr).  */
static GTY(()) rtx m68k_tls_get_addr;
2502
 
2503
/* Return SYMBOL_REF for __tls_get_addr.  */
2504
 
2505
static rtx
2506
m68k_get_tls_get_addr (void)
2507
{
2508
  if (m68k_tls_get_addr == NULL_RTX)
2509
    m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2510
 
2511
  return m68k_tls_get_addr;
2512
}
2513
 
2514
/* When true, libcall results are taken from A0 instead of the usual D0.
   Set temporarily around calls to __tls_get_addr / __m68k_read_tp, which
   return their result in A0.  */
static bool m68k_libcall_value_in_a0_p = false;
2516
 
2517
/* Emit instruction sequence that calls __tls_get_addr.  X is
   the TLS symbol we are referencing and RELOC is the symbol type to use
   (either TLSGD or TLSLDM).  EQV is the REG_EQUAL note for the sequence
   emitted.  A pseudo register with result of __tls_get_addr call is
   returned.  */

static rtx
m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
{
  rtx a0;
  rtx insns;
  rtx dest;

  /* Emit the call sequence.  */
  start_sequence ();

  /* FIXME: Unfortunately, emit_library_call_value does not
     consider (plus (%a5) (const (unspec))) to be a good enough
     operand for push, so it forces it into a register.  The bad
     thing about this is that combiner, due to copy propagation and other
     optimizations, sometimes can not later fix this.  As a consequence,
     additional register may be allocated resulting in a spill.
     For reference, see args processing loops in
     calls.c:emit_library_call_value_1.
     For testcase, see gcc.target/m68k/tls-{gd, ld}.c  */
  x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);

  /* __tls_get_addr() is not a libcall, but emitting a libcall_value
     is the simpliest way of generating a call.  The difference between
     __tls_get_addr() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
                                Pmode, 1, x, Pmode);
  m68k_libcall_value_in_a0_p = false;

  insns = get_insns ();
  end_sequence ();

  /* Copy the result into a fresh pseudo, attaching EQV as the
     equivalent value for the whole libcall block.  */
  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2564
 
2565
/* Cached SYMBOL_REF for the __m68k_read_tp libfunc (lazily initialized
   by m68k_get_m68k_read_tp).  */
static GTY(()) rtx m68k_read_tp;
2567
 
2568
/* Return SYMBOL_REF for __m68k_read_tp.  */
2569
 
2570
static rtx
2571
m68k_get_m68k_read_tp (void)
2572
{
2573
  if (m68k_read_tp == NULL_RTX)
2574
    m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2575
 
2576
  return m68k_read_tp;
2577
}
2578
 
2579
/* Emit instruction sequence that calls __m68k_read_tp.
   A pseudo register with result of __m68k_read_tp call is returned.  */

static rtx
m68k_call_m68k_read_tp (void)
{
  rtx a0;	/* Value returned by the call, in hard register A0.  */
  rtx eqv;	/* Equivalence note for the libcall block.  */
  rtx insns;	/* The emitted call sequence.  */
  rtx dest;	/* Pseudo register that receives the final result.  */

  start_sequence ();

  /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
     is the simplest way of generating a call.  The difference between
     __m68k_read_tp() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  /* Emit the call sequence.  */
  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
				Pmode, 0);
  m68k_libcall_value_in_a0_p = false;
  insns = get_insns ();
  end_sequence ();

  /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
     share the m68k_read_tp result with other IE/LE model accesses.
     const1_rtx distinguishes this equivalence from the TLSLDM one,
     which uses const0_rtx (see m68k_legitimize_tls_address).  */
  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);

  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2616
 
2617
/* Return a legitimized address for accessing TLS SYMBOL_REF ORIG,
   dispatching on its TLS access model (GD, LD, IE or LE).
   For explanations on instructions sequences see TLS/NPTL ABI for m68k and
   ColdFire.  */

rtx
m68k_legitimize_tls_address (rtx orig)
{
  switch (SYMBOL_REF_TLS_MODEL (orig))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      /* GD: one __tls_get_addr call per symbol; the symbol itself
	 serves as the equivalence note.  */
      orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      {
	rtx eqv;
	rtx a0;
	rtx x;

	/* Attach a unique REG_EQUIV, to allow the RTL optimizers to
	   share the LDM result with other LD model accesses.  */
	eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
			      UNSPEC_RELOC32);

	a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);

	/* Add the symbol's module-relative offset to the module base.  */
	x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_INITIAL_EXEC:
      {
	rtx a0;
	rtx x;

	/* IE: thread pointer plus a GOT-loaded offset.  */
	a0 = m68k_call_m68k_read_tp ();

	x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
	x = gen_rtx_PLUS (Pmode, x, a0);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_LOCAL_EXEC:
      {
	rtx a0;
	rtx x;

	/* LE: thread pointer plus a link-time constant offset.  */
	a0 = m68k_call_m68k_read_tp ();

	x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    default:
      gcc_unreachable ();
    }

  return orig;
}
2691
 
2692
/* Return true if X is a TLS symbol.  */
2693
 
2694
static bool
2695
m68k_tls_symbol_p (rtx x)
2696
{
2697
  if (!TARGET_HAVE_TLS)
2698
    return false;
2699
 
2700
  if (GET_CODE (x) != SYMBOL_REF)
2701
    return false;
2702
 
2703
  return SYMBOL_REF_TLS_MODEL (x) != 0;
2704
}
2705
 
2706
/* Helper for m68k_tls_referenced_p: callback for for_each_rtx.
   Returns 1 (stop the walk, reference found) when *X_PTR is a bare TLS
   SYMBOL_REF, -1 (skip sub-rtxes) for an already-legitimate TLS
   reference, and 0 (keep walking) otherwise.  */

static int
m68k_tls_reference_p_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
{
  /* Note: this is not the same as m68k_tls_symbol_p — there is no
     TARGET_HAVE_TLS check here; the caller has already performed it.  */
  if (GET_CODE (*x_ptr) == SYMBOL_REF)
    return SYMBOL_REF_TLS_MODEL (*x_ptr) != 0 ? 1 : 0;

  /* Don't recurse into legitimate TLS references.  */
  if (m68k_tls_reference_p (*x_ptr, true))
    return -1;

  return 0;
}
2721
 
2722
/* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2723
   though illegitimate one.
2724
   If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference.  */
2725
 
2726
bool
2727
m68k_tls_reference_p (rtx x, bool legitimate_p)
2728
{
2729
  if (!TARGET_HAVE_TLS)
2730
    return false;
2731
 
2732
  if (!legitimate_p)
2733
    return for_each_rtx (&x, m68k_tls_reference_p_1, NULL) == 1 ? true : false;
2734
  else
2735
    {
2736
      enum m68k_reloc reloc = RELOC_GOT;
2737
 
2738
      return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2739
              && TLS_RELOC_P (reloc));
2740
    }
2741
}
2742
 
2743
 
2744
 
2745
/* True if I fits the signed 8-bit immediate of a moveq (-128..127).  */
#define USE_MOVQ(i)	((unsigned) ((i) + 128) <= 255)
2746
 
2747
/* Return the type of move that should be used for integer I:
   the cheapest single-instruction (or moveq-plus-one) way to load I
   into a data register.  The checks are ordered cheapest-first.  */

M68K_CONST_METHOD
m68k_const_method (HOST_WIDE_INT i)
{
  unsigned u;

  if (USE_MOVQ (i))
    return MOVQ;

  /* The ColdFire doesn't have byte or word operations.  */
  /* FIXME: This may not be useful for the m68060 either.  */
  if (!TARGET_COLDFIRE)
    {
      /* if -256 < N < 256 but N is not in range for a moveq
	 N^ff will be, so use moveq #N^ff, dreg; not.b dreg.  */
      if (USE_MOVQ (i ^ 0xff))
	return NOTB;
      /* Likewise, try with not.w */
      if (USE_MOVQ (i ^ 0xffff))
	return NOTW;
      /* This is the only value where neg.w is useful */
      if (i == -65408)
	return NEGW;
    }

  /* Try also with swap: moveq the halfword-rotated value, then swap.  */
  u = i;
  if (USE_MOVQ ((u >> 16) | (u << 16)))
    return SWAP;

  if (TARGET_ISAB)
    {
      /* Try using MVZ/MVS with an immediate value to load constants.  */
      if (i >= 0 && i <= 65535)
	return MVZ;
      if (i >= -32768 && i <= 32767)
	return MVS;
    }

  /* Otherwise, use move.l */
  return MOVL;
}
2790
 
2791
/* Return the cost of moving constant I into a data register.  */
2792
 
2793
static int
2794
const_int_cost (HOST_WIDE_INT i)
2795
{
2796
  switch (m68k_const_method (i))
2797
    {
2798
    case MOVQ:
2799
      /* Constants between -128 and 127 are cheap due to moveq.  */
2800
      return 0;
2801
    case MVZ:
2802
    case MVS:
2803
    case NOTB:
2804
    case NOTW:
2805
    case NEGW:
2806
    case SWAP:
2807
      /* Constants easily generated by moveq + not.b/not.w/neg.w/swap.  */
2808
      return 1;
2809
    case MOVL:
2810
      return 2;
2811
    default:
2812
      gcc_unreachable ();
2813
    }
2814
}
2815
 
2816
/* Compute a cost estimate for rtx X (with outer rtx code OUTER_CODE),
   storing it in *TOTAL.  Return true when *TOTAL is final and the
   caller should not recurse into X's operands, false otherwise.  */

static bool
m68k_rtx_costs (rtx x, int code, int outer_code, int *total,
		bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      /* Constant zero is super cheap due to clr instruction.  */
      if (x == const0_rtx)
	*total = 0;
      else
	*total = const_int_cost (INTVAL (x));
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 3;
      return true;

    case CONST_DOUBLE:
      /* Make 0.0 cheaper than other floating constants to
	 encourage creating tstsf and tstdf insns.  */
      if (outer_code == COMPARE
	  && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
	*total = 4;
      else
	*total = 5;
      return true;

    /* These are vaguely right for a 68020.  */
    /* The costs for long multiply have been adjusted to work properly
       in synth_mult on the 68020, relative to an average of the time
       for add and the time for shift, taking away a little more because
       sometimes move insns are needed.  */
    /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
       terms.  */
#define MULL_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 4		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 3 : 13)

#define MULW_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 3				\
   : TUNE_68000_10 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 2		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 2 : 8)

#define DIVW_COST				\
  (TARGET_CF_HWDIV ? 11				\
   : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)

    case PLUS:
      /* An lea costs about three times as much as a simple add.  */
      if (GET_MODE (x) == SImode
	  && GET_CODE (XEXP (x, 1)) == REG
	  && GET_CODE (XEXP (x, 0)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
	{
	    /* lea an@(dx:l:i),am */
	    *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
	    return true;
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (TUNE_68060)
	{
	  *total = COSTS_N_INSNS(1);
	  return true;
	}
      if (TUNE_68000_10)
	{
	  if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	    {
	      if (INTVAL (XEXP (x, 1)) < 16)
		*total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
	      else
		/* We're using clrw + swap for these cases.  */
		*total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
	    }
	  else
	    *total = COSTS_N_INSNS (10); /* Worst case.  */
	  return true;
	}
      /* A shift by a big integer takes an extra instruction.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) == 16))
	{
	  *total = COSTS_N_INSNS (2);	 /* clrw;swap */
	  return true;
	}
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && !(INTVAL (XEXP (x, 1)) > 0
	       && INTVAL (XEXP (x, 1)) <= 8))
	{
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3);	 /* lsr #i,dn */
	  return true;
	}
      return false;

    case MULT:
      /* Widening multiplies and narrow-mode multiplies use mul?.w.  */
      if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	   || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && GET_MODE (x) == SImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else
	*total = COSTS_N_INSNS (MULL_COST);
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
	*total = COSTS_N_INSNS (DIVW_COST);	/* div.w */
      else if (TARGET_CF_HWDIV)
	*total = COSTS_N_INSNS (18);
      else
	*total = COSTS_N_INSNS (43);		/* div.l */
      return true;

    case ZERO_EXTRACT:
      /* Bit-field extraction inside a compare is folded into the
	 compare itself; still return false so operands get costed.  */
      if (outer_code == COMPARE)
	*total = 0;
      return false;

    default:
      return false;
    }
}
2961
 
2962
/* Return an instruction to move CONST_INT OPERANDS[1] into data register
   OPERANDS[0], using the strategy chosen by m68k_const_method.
   May rewrite OPERANDS[1] to the immediate actually emitted (e.g. the
   complemented or swapped value), and resets cc tracking for the
   two-instruction forms.  */

static const char *
output_move_const_into_data_reg (rtx *operands)
{
  HOST_WIDE_INT i;

  i = INTVAL (operands[1]);
  switch (m68k_const_method (i))
    {
    case MVZ:
      return "mvzw %1,%0";
    case MVS:
      return "mvsw %1,%0";
    case MOVQ:
      return "moveq %1,%0";
    case NOTB:
      CC_STATUS_INIT;
      /* Emit the byte-complemented value, then undo it with not.b.  */
      operands[1] = GEN_INT (i ^ 0xff);
      return "moveq %1,%0\n\tnot%.b %0";
    case NOTW:
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xffff);
      return "moveq %1,%0\n\tnot%.w %0";
    case NEGW:
      CC_STATUS_INIT;
      /* -65408 == neg.w of (moveq #-128 sign-extended).  */
      return "moveq #-128,%0\n\tneg%.w %0";
    case SWAP:
      {
	unsigned u = i;

	/* Load the halfword-rotated value, then swap the halves back.  */
	operands[1] = GEN_INT ((u << 16) | (u >> 16));
	return "moveq %1,%0\n\tswap %0";
      }
    case MOVL:
      return "move%.l %1,%0";
    default:
      gcc_unreachable ();
    }
}
3003
 
3004
/* Return true if I can be handled by ISA B's mov3q instruction.  */
3005
 
3006
bool
3007
valid_mov3q_const (HOST_WIDE_INT i)
3008
{
3009
  return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
3010
}
3011
 
3012
/* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0]
   as a fullword.  The alternatives are tried cheapest-first; the pea
   case relies on the destination being a push onto the stack.  */

static const char *
output_move_simode_const (rtx *operands)
{
  rtx dest;
  HOST_WIDE_INT src;

  dest = operands[0];
  src = INTVAL (operands[1]);
  if (src == 0
      && (DATA_REG_P (dest) || MEM_P (dest))
      /* clr insns on 68000 read before writing.  */
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
    return "clr%.l %0";
  else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
    return "mov3q%.l %1,%0";
  else if (src == 0 && ADDRESS_REG_P (dest))
    return "sub%.l %0,%0";
  else if (DATA_REG_P (dest))
    return output_move_const_into_data_reg (operands);
  else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
    {
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%0";
      /* move.w to an address register sign-extends to 32 bits.  */
      return "move%.w %1,%0";
    }
  else if (MEM_P (dest)
	   && GET_CODE (XEXP (dest, 0)) == PRE_DEC
	   && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
	   && IN_RANGE (src, -0x8000, 0x7fff))
    {
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%-";
      /* Push a small constant via pea with an absolute-short address.  */
      return "pea %a1";
    }
  return "move%.l %1,%0";
}
3052
 
3053
const char *
3054
output_move_simode (rtx *operands)
3055
{
3056
  if (GET_CODE (operands[1]) == CONST_INT)
3057
    return output_move_simode_const (operands);
3058
  else if ((GET_CODE (operands[1]) == SYMBOL_REF
3059
            || GET_CODE (operands[1]) == CONST)
3060
           && push_operand (operands[0], SImode))
3061
    return "pea %a1";
3062
  else if ((GET_CODE (operands[1]) == SYMBOL_REF
3063
            || GET_CODE (operands[1]) == CONST)
3064
           && ADDRESS_REG_P (operands[0]))
3065
    return "lea %a1,%0";
3066
  return "move%.l %1,%0";
3067
}
3068
 
3069
const char *
3070
output_move_himode (rtx *operands)
3071
{
3072
 if (GET_CODE (operands[1]) == CONST_INT)
3073
    {
3074
      if (operands[1] == const0_rtx
3075
          && (DATA_REG_P (operands[0])
3076
              || GET_CODE (operands[0]) == MEM)
3077
          /* clr insns on 68000 read before writing.  */
3078
          && ((TARGET_68010 || TARGET_COLDFIRE)
3079
              || !(GET_CODE (operands[0]) == MEM
3080
                   && MEM_VOLATILE_P (operands[0]))))
3081
        return "clr%.w %0";
3082
      else if (operands[1] == const0_rtx
3083
               && ADDRESS_REG_P (operands[0]))
3084
        return "sub%.l %0,%0";
3085
      else if (DATA_REG_P (operands[0])
3086
               && INTVAL (operands[1]) < 128
3087
               && INTVAL (operands[1]) >= -128)
3088
        return "moveq %1,%0";
3089
      else if (INTVAL (operands[1]) < 0x8000
3090
               && INTVAL (operands[1]) >= -0x8000)
3091
        return "move%.w %1,%0";
3092
    }
3093
  else if (CONSTANT_P (operands[1]))
3094
    return "move%.l %1,%0";
3095
  return "move%.w %1,%0";
3096
}
3097
 
3098
/* Return the assembler template for moving OPERANDS[1] into OPERANDS[0]
   as a byte (QImode), working around 68k address-register and
   stack-push restrictions.  */

const char *
output_move_qimode (rtx *operands)
{
  /* 68k family always modifies the stack pointer by at least 2, even for
     byte pushes.  The 5200 (ColdFire) does not do this.  */

  /* This case is generated by pushqi1 pattern now.  */
  gcc_assert (!(GET_CODE (operands[0]) == MEM
		&& GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
		&& XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
		&& ! ADDRESS_REG_P (operands[1])
		&& ! TARGET_COLDFIRE));

  /* clr and st insns on 68000 read before writing.  */
  if (!ADDRESS_REG_P (operands[0])
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
    {
      if (operands[1] == const0_rtx)
	return "clr%.b %0";
      /* st sets all 8 bits, so it only works when the low byte is 0xff.
	 On ColdFire, st is restricted to data registers.  */
      if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
	  && GET_CODE (operands[1]) == CONST_INT
	  && (INTVAL (operands[1]) & 255) == 255)
	{
	  CC_STATUS_INIT;
	  return "st %0";
	}
    }
  if (GET_CODE (operands[1]) == CONST_INT
      && DATA_REG_P (operands[0])
      && INTVAL (operands[1]) < 128
      && INTVAL (operands[1]) >= -128)
    return "moveq %1,%0";
  if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
    return "sub%.l %0,%0";
  if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
    return "move%.l %1,%0";
  /* 68k family (including the 5200 ColdFire) does not support byte moves to
     from address registers.  */
  if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
    return "move%.w %1,%0";
  return "move%.b %1,%0";
}
3141
 
3142
const char *
3143
output_move_stricthi (rtx *operands)
3144
{
3145
  if (operands[1] == const0_rtx
3146
      /* clr insns on 68000 read before writing.  */
3147
      && ((TARGET_68010 || TARGET_COLDFIRE)
3148
          || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3149
    return "clr%.w %0";
3150
  return "move%.w %1,%0";
3151
}
3152
 
3153
const char *
3154
output_move_strictqi (rtx *operands)
3155
{
3156
  if (operands[1] == const0_rtx
3157
      /* clr insns on 68000 read before writing.  */
3158
      && ((TARGET_68010 || TARGET_COLDFIRE)
3159
          || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3160
    return "clr%.b %0";
3161
  return "move%.b %1,%0";
3162
}
3163
 
3164
/* Return the best assembler insn template
3165
   for moving operands[1] into operands[0] as a fullword.  */
3166
 
3167
static const char *
3168
singlemove_string (rtx *operands)
3169
{
3170
  if (GET_CODE (operands[1]) == CONST_INT)
3171
    return output_move_simode_const (operands);
3172
  return "move%.l %1,%0";
3173
}
3174
 
3175
 
3176
/* Output assembler or rtl code to perform a doubleword (8-byte) or
   long-double (12-byte) move insn with operands OPERANDS, one SImode
   word at a time, choosing the word order that avoids clobbering a
   source word before it is read.
   Pointers to 3 helper functions should be specified:
   HANDLE_REG_ADJUST to adjust a register by a small value,
   HANDLE_COMPADR to compute an address and
   HANDLE_MOVSI to move 4 bytes.  */

static void
handle_move_double (rtx operands[2],
		    void (*handle_reg_adjust) (rtx, int),
		    void (*handle_compadr) (rtx [2]),
		    void (*handle_movsi) (rtx [2]))
{
  /* Addressing-form classification of each operand.  */
  enum
    {
      REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
    } optype0, optype1;
  rtx latehalf[2];	/* High-numbered (last) word of each operand.  */
  rtx middlehalf[2];	/* Middle word; only used when size == 12.  */
  rtx xops[2];
  rtx addreg0 = 0, addreg1 = 0;
  int dest_overlapped_low = 0;
  int size = GET_MODE_SIZE (GET_MODE (operands[0]));

  middlehalf[0] = 0;
  middlehalf[1] = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    optype0 = POPOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    optype0 = PUSHOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    optype1 = POPOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
    optype1 = PUSHOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not supposed
     to allow to happen.  Generating code for these cases is
     painful.  */
  gcc_assert (optype0 != RNDOP && optype1 != RNDOP);

  /* If one operand is decrementing and one is incrementing
     decrement the former register explicitly
     and change that operand into ordinary indexing.  */

  if (optype0 == PUSHOP && optype1 == POPOP)
    {
      operands[0] = XEXP (XEXP (operands[0], 0), 0);

      handle_reg_adjust (operands[0], -size);

      /* NOTE(review): the mode test below reads operands[1]; for a move
	 both operands share a mode, so this is presumably equivalent to
	 testing operands[0] — confirm before changing.  */
      if (GET_MODE (operands[1]) == XFmode)
	operands[0] = gen_rtx_MEM (XFmode, operands[0]);
      else if (GET_MODE (operands[0]) == DFmode)
	operands[0] = gen_rtx_MEM (DFmode, operands[0]);
      else
	operands[0] = gen_rtx_MEM (DImode, operands[0]);
      optype0 = OFFSOP;
    }
  if (optype0 == POPOP && optype1 == PUSHOP)
    {
      operands[1] = XEXP (XEXP (operands[1], 0), 0);

      handle_reg_adjust (operands[1], -size);

      if (GET_MODE (operands[1]) == XFmode)
	operands[1] = gen_rtx_MEM (XFmode, operands[1]);
      else if (GET_MODE (operands[1]) == DFmode)
	operands[1] = gen_rtx_MEM (DFmode, operands[1]);
      else
	operands[1] = gen_rtx_MEM (DImode, operands[1]);
      optype1 = OFFSOP;
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first,
     but if either operand is autodecrementing then we
     do the high-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (size == 12)
    {
      if (optype0 == REGOP)
	{
	  latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
	  middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
	}
      else if (optype0 == OFFSOP)
	{
	  middlehalf[0] = adjust_address (operands[0], SImode, 4);
	  latehalf[0] = adjust_address (operands[0], SImode, size - 4);
	}
      else
	{
	  middlehalf[0] = adjust_address (operands[0], SImode, 0);
	  latehalf[0] = adjust_address (operands[0], SImode, 0);
	}

      if (optype1 == REGOP)
	{
	  latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
	  middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
	}
      else if (optype1 == OFFSOP)
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 4);
	  latehalf[1] = adjust_address (operands[1], SImode, size - 4);
	}
      else if (optype1 == CNSTOP)
	{
	  if (GET_CODE (operands[1]) == CONST_DOUBLE)
	    {
	      REAL_VALUE_TYPE r;
	      long l[3];

	      /* Split a long-double constant into its three target
		 words.  */
	      REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
	      REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
	      operands[1] = GEN_INT (l[0]);
	      middlehalf[1] = GEN_INT (l[1]);
	      latehalf[1] = GEN_INT (l[2]);
	    }
	  else
	    {
	      /* No non-CONST_DOUBLE constant should ever appear
		 here.  */
	      gcc_assert (!CONSTANT_P (operands[1]));
	    }
	}
      else
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 0);
	  latehalf[1] = adjust_address (operands[1], SImode, 0);
	}
    }
  else
    /* size is not 12: */
    {
      if (optype0 == REGOP)
	latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      else if (optype0 == OFFSOP)
	latehalf[0] = adjust_address (operands[0], SImode, size - 4);
      else
	latehalf[0] = adjust_address (operands[0], SImode, 0);

      if (optype1 == REGOP)
	latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
      else if (optype1 == OFFSOP)
	latehalf[1] = adjust_address (operands[1], SImode, size - 4);
      else if (optype1 == CNSTOP)
	split_double (operands[1], &operands[1], &latehalf[1]);
      else
	latehalf[1] = adjust_address (operands[1], SImode, 0);
    }

  /* If insn is effectively movd N(sp),-(sp) then we will do the
     high word first.  We should use the adjusted operand 1 (which is N+4(sp))
     for the low word as well, to compensate for the first decrement of sp.  */
  if (optype0 == PUSHOP
      && REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM
      && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
    operands[1] = middlehalf[1] = latehalf[1];

  /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
     if the upper part of reg N does not appear in the MEM, arrange to
     emit the move late-half first.  Otherwise, compute the MEM address
     into the upper part of N and use that as a pointer to the memory
     operand.  */
  if (optype0 == REGOP
      && (optype1 == OFFSOP || optype1 == MEMOP))
    {
      rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));

      if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	  && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	{
	  /* If both halves of dest are used in the src memory address,
	     compute the address into latehalf of dest.
	     Note that this can't happen if the dest is two data regs.  */
	compadr:
	  xops[0] = latehalf[0];
	  xops[1] = XEXP (operands[1], 0);

	  handle_compadr (xops);
	  if (GET_MODE (operands[1]) == XFmode)
	    {
	      operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
	      middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	  else
	    {
	      operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	}
      else if (size == 12
	       && reg_overlap_mentioned_p (middlehalf[0],
					   XEXP (operands[1], 0)))
	{
	  /* Check for two regs used by both source and dest.
	     Note that this can't happen if the dest is all data regs.
	     It can happen if the dest is d6, d7, a0.
	     But in that case, latehalf is an addr reg, so
	     the code at compadr does ok.  */

	  if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	      || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	    goto compadr;

	  /* JRV says this can't happen: */
	  gcc_assert (!addreg0 && !addreg1);

	  /* Only the middle reg conflicts; simply put it last.  */
	  handle_movsi (operands);
	  handle_movsi (latehalf);
	  handle_movsi (middlehalf);

	  return;
	}
      else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
	/* If the low half of dest is mentioned in the source memory
	   address, then arrange to emit the move late half first.  */
	dest_overlapped_low = 1;
    }

  /* If one or both operands autodecrementing,
     do the two words, high-numbered first.  */

  /* Likewise,  the first move would clobber the source of the second one,
     do them in the other order.  This happens only for registers;
     such overlap can't happen in memory unless the user explicitly
     sets it up, and that is an undefined circumstance.  */

  if (optype0 == PUSHOP || optype1 == PUSHOP
      || (optype0 == REGOP && optype1 == REGOP
	  && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
	      || REGNO (operands[0]) == REGNO (latehalf[1])))
      || dest_overlapped_low)
    {
      /* Make any unoffsettable addresses point at high-numbered word.  */
      if (addreg0)
	handle_reg_adjust (addreg0, size - 4);
      if (addreg1)
	handle_reg_adjust (addreg1, size - 4);

      /* Do that word.  */
      handle_movsi (latehalf);

      /* Undo the adds we just did.  */
      if (addreg0)
	handle_reg_adjust (addreg0, -4);
      if (addreg1)
	handle_reg_adjust (addreg1, -4);

      if (size == 12)
	{
	  handle_movsi (middlehalf);

	  if (addreg0)
	    handle_reg_adjust (addreg0, -4);
	  if (addreg1)
	    handle_reg_adjust (addreg1, -4);
	}

      /* Do low-numbered word.  */

      handle_movsi (operands);
      return;
    }

  /* Normal case: do the two words, low-numbered first.  */

  handle_movsi (operands);

  /* Do the middle one of the three words for long double */
  if (size == 12)
    {
      if (addreg0)
	handle_reg_adjust (addreg0, 4);
      if (addreg1)
	handle_reg_adjust (addreg1, 4);

      handle_movsi (middlehalf);
    }

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    handle_reg_adjust (addreg0, 4);
  if (addreg1)
    handle_reg_adjust (addreg1, 4);

  /* Do that word.  */
  handle_movsi (latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    handle_reg_adjust (addreg0, -(size - 4));
  if (addreg1)
    handle_reg_adjust (addreg1, -(size - 4));

  return;
}
3511
 
3512
/* Output assembler code to adjust REG by N.  */
3513
static void
3514
output_reg_adjust (rtx reg, int n)
3515
{
3516
  const char *s;
3517
 
3518
  gcc_assert (GET_MODE (reg) == SImode
3519
              && -12 <= n && n != 0 && n <= 12);
3520
 
3521
  switch (n)
3522
    {
3523
    case 12:
3524
      s = "add%.l #12,%0";
3525
      break;
3526
 
3527
    case 8:
3528
      s = "addq%.l #8,%0";
3529
      break;
3530
 
3531
    case 4:
3532
      s = "addq%.l #4,%0";
3533
      break;
3534
 
3535
    case -12:
3536
      s = "sub%.l #12,%0";
3537
      break;
3538
 
3539
    case -8:
3540
      s = "subq%.l #8,%0";
3541
      break;
3542
 
3543
    case -4:
3544
      s = "subq%.l #4,%0";
3545
      break;
3546
 
3547
    default:
3548
      gcc_unreachable ();
3549
      s = NULL;
3550
    }
3551
 
3552
  output_asm_insn (s, &reg);
3553
}
3554
 
3555
/* Emit rtl code to adjust REG by N.  */
3556
static void
3557
emit_reg_adjust (rtx reg1, int n)
3558
{
3559
  rtx reg2;
3560
 
3561
  gcc_assert (GET_MODE (reg1) == SImode
3562
              && -12 <= n && n != 0 && n <= 12);
3563
 
3564
  reg1 = copy_rtx (reg1);
3565
  reg2 = copy_rtx (reg1);
3566
 
3567
  if (n < 0)
3568
    emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3569
  else if (n > 0)
3570
    emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3571
  else
3572
    gcc_unreachable ();
3573
}
3574
 
3575
/* Output assembler to load address OPERANDS[0] to register OPERANDS[1].  */
3576
static void
3577
output_compadr (rtx operands[2])
3578
{
3579
  output_asm_insn ("lea %a1,%0", operands);
3580
}
3581
 
3582
/* Output the best assembler insn for moving operands[1] into operands[0]
3583
   as a fullword.  */
3584
static void
3585
output_movsi (rtx operands[2])
3586
{
3587
  output_asm_insn (singlemove_string (operands), operands);
3588
}
3589
 
3590
/* Copy OP and change its mode to MODE.  */
3591
static rtx
3592
copy_operand (rtx op, enum machine_mode mode)
3593
{
3594
  /* ??? This looks really ugly.  There must be a better way
3595
     to change a mode on the operand.  */
3596
  if (GET_MODE (op) != VOIDmode)
3597
    {
3598
      if (REG_P (op))
3599
        op = gen_rtx_REG (mode, REGNO (op));
3600
      else
3601
        {
3602
          op = copy_rtx (op);
3603
          PUT_MODE (op, mode);
3604
        }
3605
    }
3606
 
3607
  return op;
3608
}
3609
 
3610
/* Emit rtl code for moving operands[1] into operands[0] as a fullword.  */
3611
static void
3612
emit_movsi (rtx operands[2])
3613
{
3614
  operands[0] = copy_operand (operands[0], SImode);
3615
  operands[1] = copy_operand (operands[1], SImode);
3616
 
3617
  emit_insn (gen_movsi (operands[0], operands[1]));
3618
}
3619
 
3620
/* Output assembler code to perform a doubleword move insn
3621
   with operands OPERANDS.  */
3622
const char *
3623
output_move_double (rtx *operands)
3624
{
3625
  handle_move_double (operands,
3626
                      output_reg_adjust, output_compadr, output_movsi);
3627
 
3628
  return "";
3629
}
3630
 
3631
/* Output rtl code to perform a doubleword move insn
3632
   with operands OPERANDS.  */
3633
void
3634
m68k_emit_move_double (rtx operands[2])
3635
{
3636
  handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
3637
}
3638
 
3639
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
3640
   new rtx with the correct mode.  */
3641
 
3642
static rtx
3643
force_mode (enum machine_mode mode, rtx orig)
3644
{
3645
  if (mode == GET_MODE (orig))
3646
    return orig;
3647
 
3648
  if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3649
    abort ();
3650
 
3651
  return gen_rtx_REG (mode, REGNO (orig));
3652
}
3653
 
3654
static int
3655
fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3656
{
3657
  return reg_renumber && FP_REG_P (op);
3658
}
3659
 
3660
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* During reload, a destination pseudo that never got a hard register
     is accessed through its memory equivalent (reg_equiv_mem).  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand0) == SUBREG
           && GET_CODE (SUBREG_REG (operand0)) == REG
           && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
        the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
                                 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
                                 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  /* Likewise substitute the memory equivalent for a source pseudo.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand1) == SUBREG
           && GET_CODE (SUBREG_REG (operand1)) == REG
           && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
        the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
                                 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
                                 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  /* Pick up any address replacements reload has recorded for the MEM
     addresses, so we operate on the final form of each address.  */
  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
          != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
          != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers where
     the address is symbolic by using the scratch register */
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
           && ! memory_address_p (DFmode, XEXP (operand1, 0)))
          || ((GET_CODE (operand1) == SUBREG
               && GET_CODE (XEXP (operand1, 0)) == MEM
               && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      /* Load into an FP register from an address the FP move patterns
         cannot handle directly: compute the address into SCRATCH_REG
         first, then load through it.  */
      if (GET_CODE (operand1) == SUBREG)
        operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address.  We want
         it in SImode regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
        {
          /* Materialize the displacement, then rebuild the address
             arithmetic with the scratch register as second operand.  */
          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
                                                       Pmode,
                                                       XEXP (XEXP (operand1, 0), 0),
                                                       scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
                              gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (fp_reg_operand (operand1, mode)
           && ((GET_CODE (operand0) == MEM
                && ! memory_address_p (DFmode, XEXP (operand0, 0)))
               || ((GET_CODE (operand0) == SUBREG)
                   && GET_CODE (XEXP (operand0, 0)) == MEM
                   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
           && scratch_reg)
    {
      /* Mirror case: store an FP register to an awkward address.  */
      if (GET_CODE (operand0) == SUBREG)
        operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in SIMODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
          emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
                                                                        0)),
                                                       Pmode,
                                                       XEXP (XEXP (operand0, 0),
                                                                   0),
                                                       scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
                              operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and an register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
           && CONSTANT_P (operand1)
           && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in SIMODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* Force the constant into memory and put the address of the
         memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_insn (gen_rtx_SET (mode, scratch_reg, xoperands[1]));

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
                              gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
3823
 
3824
/* Split one or more DImode RTL references into pairs of SImode
3825
   references.  The RTL can be REG, offsettable MEM, integer constant, or
3826
   CONST_DOUBLE.  "operands" is a pointer to an array of DImode RTL to
3827
   split and "num" is its length.  lo_half and hi_half are output arrays
3828
   that parallel "operands".  */
3829
 
3830
void
3831
split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3832
{
3833
  while (num--)
3834
    {
3835
      rtx op = operands[num];
3836
 
3837
      /* simplify_subreg refuses to split volatile memory addresses,
3838
         but we still have to handle it.  */
3839
      if (GET_CODE (op) == MEM)
3840
        {
3841
          lo_half[num] = adjust_address (op, SImode, 4);
3842
          hi_half[num] = adjust_address (op, SImode, 0);
3843
        }
3844
      else
3845
        {
3846
          lo_half[num] = simplify_gen_subreg (SImode, op,
3847
                                              GET_MODE (op) == VOIDmode
3848
                                              ? DImode : GET_MODE (op), 4);
3849
          hi_half[num] = simplify_gen_subreg (SImode, op,
3850
                                              GET_MODE (op) == VOIDmode
3851
                                              ? DImode : GET_MODE (op), 0);
3852
        }
3853
    }
3854
}
3855
 
3856
/* Split X into a base and a constant offset, storing them in *BASE
3857
   and *OFFSET respectively.  */
3858
 
3859
static void
3860
m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3861
{
3862
  *offset = 0;
3863
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3864
    {
3865
      *offset += INTVAL (XEXP (x, 1));
3866
      x = XEXP (x, 0);
3867
    }
3868
  *base = x;
3869
}
3870
 
3871
/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
   instruction.  STORE_P says whether the move is a load or store.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
   adjustment.  This adjustment will be made by the first element of
   PARALLEL, with the loads or stores starting at element 1.  If the
   instruction does not use post-increment or pre-decrement addressing,
   AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
   start at element 0.  */

bool
m68k_movem_pattern_p (rtx pattern, rtx automod_base,
                      HOST_WIDE_INT automod_offset, bool store_p)
{
  rtx base, mem_base, set, mem, reg, last_reg;
  HOST_WIDE_INT offset, mem_offset;
  int i, first, len;
  enum reg_class rclass;

  len = XVECLEN (pattern, 0);
  first = (automod_base != NULL);

  if (automod_base)
    {
      /* Stores must be pre-decrement and loads must be post-increment.  */
      if (store_p != (automod_offset < 0))
        return false;

      /* Work out the base and offset for lowest memory location.  */
      base = automod_base;
      offset = (automod_offset < 0 ? automod_offset : 0);
    }
  else
    {
      /* Allow any valid base and offset in the first access.  */
      base = NULL;
      offset = 0;
    }

  last_reg = NULL;
  rclass = NO_REGS;
  for (i = first; i < len; i++)
    {
      /* We need a plain SET.  */
      set = XVECEXP (pattern, 0, i);
      if (GET_CODE (set) != SET)
        return false;

      /* Check that we have a memory location...  For a store the MEM is
         the SET_DEST (index 0), otherwise the SET_SRC (index 1).  */
      mem = XEXP (set, !store_p);
      if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
        return false;

      /* ...with the right address.  */
      if (base == NULL)
        {
          m68k_split_offset (XEXP (mem, 0), &base, &offset);
          /* The ColdFire instruction only allows (An) and (d16,An) modes.
             There are no mode restrictions for 680x0 besides the
             automodification rules enforced above.  */
          if (TARGET_COLDFIRE
              && !m68k_legitimate_base_reg_p (base, reload_completed))
            return false;
        }
      else
        {
          /* Every later access must use the same base and the running
             offset, i.e. the accesses must be consecutive in memory.  */
          m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
          if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
            return false;
        }

      /* Check that we have a register of the required mode and class.  */
      reg = XEXP (set, store_p);
      if (!REG_P (reg)
          || !HARD_REGISTER_P (reg)
          || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
        return false;

      if (last_reg)
        {
          /* The register must belong to RCLASS and have a higher number
             than the register in the previous SET.  */
          if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
              || REGNO (last_reg) >= REGNO (reg))
            return false;
        }
      else
        {
          /* Work out which register class we need.  */
          if (INT_REGNO_P (REGNO (reg)))
            rclass = GENERAL_REGS;
          else if (FP_REGNO_P (REGNO (reg)))
            rclass = FP_REGS;
          else
            return false;
        }

      /* Advance past this transfer; the next SET must access the
         immediately following memory word(s).  */
      last_reg = reg;
      offset += GET_MODE_SIZE (GET_MODE (reg));
    }

  /* If we have an automodification, check whether the final offset is OK.  */
  if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
    return false;

  /* Reject unprofitable cases.  */
  if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
    return false;

  return true;
}
3983
 
3984
/* Return the assembly code template for a movem or fmovem instruction
   whose pattern is given by PATTERN.  Store the template's operands
   in OPERANDS.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_OFFSET is the total adjustment, otherwise it is 0.  STORE_P
   is true if this is a store instruction.  */

const char *
m68k_output_movem (rtx *operands, rtx pattern,
                   HOST_WIDE_INT automod_offset, bool store_p)
{
  unsigned int mask;
  int i, first;

  gcc_assert (GET_CODE (pattern) == PARALLEL);
  mask = 0;
  /* With automodification, element 0 of the PARALLEL is the base-register
     adjustment, so the transfers start at element 1.  */
  first = (automod_offset != 0);
  for (i = first; i < XVECLEN (pattern, 0); i++)
    {
      /* When using movem with pre-decrement addressing, register X + D0_REG
         is controlled by bit 15 - X.  For all other addressing modes,
         register X + D0_REG is controlled by bit X.  Confusingly, the
         register mask for fmovem is in the opposite order to that for
         movem.  */
      unsigned int regno;

      gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
      gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
      regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
      if (automod_offset < 0)
        {
          if (FP_REGNO_P (regno))
            mask |= 1 << (regno - FP0_REG);
          else
            mask |= 1 << (15 - (regno - D0_REG));
        }
      else
        {
          if (FP_REGNO_P (regno))
            mask |= 1 << (7 - (regno - FP0_REG));
          else
            mask |= 1 << (regno - D0_REG);
        }
    }
  /* Discard any cached condition-code information.  */
  CC_STATUS_INIT;

  /* Operand 0 is the memory reference: either the first access's address,
     or an explicit pre-decrement/post-increment of the base register.  */
  if (automod_offset == 0)
    operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
  else if (automod_offset < 0)
    operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  else
    operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  operands[1] = GEN_INT (mask);
  if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
    {
      if (store_p)
        return "fmovem %1,%a0";
      else
        return "fmovem %a0,%1";
    }
  else
    {
      if (store_p)
        return "movem%.l %1,%a0";
      else
        return "movem%.l %a0,%1";
    }
}
4053
 
4054
/* Return a REG that occurs in ADDR with coefficient 1.
4055
   ADDR can be effectively incremented by incrementing REG.  */
4056
 
4057
static rtx
4058
find_addr_reg (rtx addr)
4059
{
4060
  while (GET_CODE (addr) == PLUS)
4061
    {
4062
      if (GET_CODE (XEXP (addr, 0)) == REG)
4063
        addr = XEXP (addr, 0);
4064
      else if (GET_CODE (XEXP (addr, 1)) == REG)
4065
        addr = XEXP (addr, 1);
4066
      else if (CONSTANT_P (XEXP (addr, 0)))
4067
        addr = XEXP (addr, 1);
4068
      else if (CONSTANT_P (XEXP (addr, 1)))
4069
        addr = XEXP (addr, 0);
4070
      else
4071
        gcc_unreachable ();
4072
    }
4073
  gcc_assert (GET_CODE (addr) == REG);
4074
  return addr;
4075
}
4076
 
4077
/* Output assembler code to perform a 32-bit 3-operand add.  */

const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* Three-operand form: ensure that if either source is an address
         register it sits in operands[1] (addition is commutative).  */
      if (!ADDRESS_REG_P (operands[1]))
        {
          rtx tmp = operands[1];

          operands[1] = operands[2];
          operands[2] = tmp;
        }

      /* These insns can result from reloads to access
         stack slots over 64k from the frame pointer.  */
      if (GET_CODE (operands[2]) == CONST_INT
          && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
        return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
        return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }
  /* Two-operand form (dest == src1): pick the cheapest immediate add.  */
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* addq/subq cover immediates 1..8.  */
      if (INTVAL (operands[2]) > 0
          && INTVAL (operands[2]) <= 8)
        return "addq%.l %2,%0";
      if (INTVAL (operands[2]) < 0
          && INTVAL (operands[2]) >= -8)
        {
          operands[2] = GEN_INT (- INTVAL (operands[2]));
          return "subq%.l %2,%0";
        }
      /* On the CPU32 it is faster to use two addql instructions to
         add a small integer (8 < N <= 16) to a register.
         Likewise for subql.  */
      if (TUNE_CPU32 && REG_P (operands[0]))
        {
          if (INTVAL (operands[2]) > 8
              && INTVAL (operands[2]) <= 16)
            {
              operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
              return "addq%.l #8,%0\n\taddq%.l %2,%0";
            }
          if (INTVAL (operands[2]) < -8
              && INTVAL (operands[2]) >= -16)
            {
              operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
              return "subq%.l #8,%0\n\tsubq%.l %2,%0";
            }
        }
      /* 16-bit immediates added to an address register: add.w or lea,
         depending on tuning.  */
      if (ADDRESS_REG_P (operands[0])
          && INTVAL (operands[2]) >= -0x8000
          && INTVAL (operands[2]) < 0x8000)
        {
          if (TUNE_68040)
            return "add%.w %2,%0";
          else
            return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
        }
    }
  return "add%.l %2,%0";
}
4142
 
4143
/* Store in cc_status the expressions that the condition codes will
   describe after execution of an instruction whose pattern is EXP.
   Do not alter them if the instruction would not alter the cc's.  */

/* On the 68000, all the insns to store in an address register fail to
   set the cc's.  However, in some cases these instructions can make it
   possibly invalid to use the saved cc's.  In those cases we clear out
   some or all of the saved cc's so they won't be used.  */

void
notice_update_cc (rtx exp, rtx insn)
{
  if (GET_CODE (exp) == SET)
    {
      if (GET_CODE (SET_SRC (exp)) == CALL)
        CC_STATUS_INIT;
      else if (ADDRESS_REG_P (SET_DEST (exp)))
        {
          /* Moves into an address register leave the cc's alone, but may
             clobber values the cached cc expressions depend on.  */
          if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
            cc_status.value1 = 0;
          if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
            cc_status.value2 = 0;
        }
      /* fmoves to memory or data registers do not set the condition
         codes.  Normal moves _do_ set the condition codes, but not in
         a way that is appropriate for comparison with 0, because -0.0
         would be treated as a negative nonzero number.  Note that it
         isn't appropriate to conditionalize this restriction on
         HONOR_SIGNED_ZEROS because that macro merely indicates whether
         we care about the difference between -0.0 and +0.0.  */
      else if (!FP_REG_P (SET_DEST (exp))
               && SET_DEST (exp) != cc0_rtx
               && (FP_REG_P (SET_SRC (exp))
                   || GET_CODE (SET_SRC (exp)) == FIX
                   || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
        CC_STATUS_INIT;
      /* A pair of move insns doesn't produce a useful overall cc.  */
      else if (!FP_REG_P (SET_DEST (exp))
               && !FP_REG_P (SET_SRC (exp))
               && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
               && (GET_CODE (SET_SRC (exp)) == REG
                   || GET_CODE (SET_SRC (exp)) == MEM
                   || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
        CC_STATUS_INIT;
      else if (SET_DEST (exp) != pc_rtx)
        {
          /* Record that the cc's now reflect SET_DEST compared with 0,
             and equivalently the computation SET_SRC.  */
          cc_status.flags = 0;
          cc_status.value1 = SET_DEST (exp);
          cc_status.value2 = SET_SRC (exp);
        }
    }
  else if (GET_CODE (exp) == PARALLEL
           && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
    {
      /* Only the first SET of a PARALLEL is tracked.  */
      rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
      rtx src  = SET_SRC  (XVECEXP (exp, 0, 0));

      if (ADDRESS_REG_P (dest))
        CC_STATUS_INIT;
      else if (dest != pc_rtx)
        {
          cc_status.flags = 0;
          cc_status.value1 = dest;
          cc_status.value2 = src;
        }
    }
  else
    CC_STATUS_INIT;
  /* NOTE(review): a QImode value in an address register is rejected here —
     presumably such accesses don't set the cc's usefully; confirm against
     the machine description.  */
  if (cc_status.value2 != 0
      && ADDRESS_REG_P (cc_status.value2)
      && GET_MODE (cc_status.value2) == QImode)
    CC_STATUS_INIT;
  if (cc_status.value2 != 0)
    switch (GET_CODE (cc_status.value2))
      {
      case ASHIFT: case ASHIFTRT: case LSHIFTRT:
      case ROTATE: case ROTATERT:
        /* These instructions always clear the overflow bit, and set
           the carry to the bit shifted out.  */
        cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
        break;

      case PLUS: case MINUS: case MULT:
      case DIV: case UDIV: case MOD: case UMOD: case NEG:
        if (GET_MODE (cc_status.value2) != VOIDmode)
          cc_status.flags |= CC_NO_OVERFLOW;
        break;
      case ZERO_EXTEND:
        /* (SET r1 (ZERO_EXTEND r2)) on this machine
           ends with a move insn moving r2 in r2's mode.
           Thus, the cc's are set for r2.
           This can set N bit spuriously.  */
        cc_status.flags |= CC_NOT_NEGATIVE;
        /* Fall through.  */

      default:
        break;
      }
  /* If the tracked destination register also appears in the tracked
     source expression, the source no longer describes the cc's.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
      && cc_status.value2
      && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
    cc_status.value2 = 0;
  if (((cc_status.value1 && FP_REG_P (cc_status.value1))
       || (cc_status.value2 && FP_REG_P (cc_status.value2))))
    cc_status.flags = CC_IN_68881;
  if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
      && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
    {
      cc_status.flags = CC_IN_68881;
      if (!FP_REG_P (XEXP (cc_status.value2, 0)))
        cc_status.flags |= CC_REVERSED;
    }
}
4255
 
4256
const char *
4257
output_move_const_double (rtx *operands)
4258
{
4259
  int code = standard_68881_constant_p (operands[1]);
4260
 
4261
  if (code != 0)
4262
    {
4263
      static char buf[40];
4264
 
4265
      sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4266
      return buf;
4267
    }
4268
  return "fmove%.d %1,%0";
4269
}
4270
 
4271
const char *
4272
output_move_const_single (rtx *operands)
4273
{
4274
  int code = standard_68881_constant_p (operands[1]);
4275
 
4276
  if (code != 0)
4277
    {
4278
      static char buf[40];
4279
 
4280
      sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4281
      return buf;
4282
    }
4283
  return "fmove%.s %f1,%0";
4284
}
4285
 
4286
/* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4287
   from the "fmovecr" instruction.
4288
   The value, anded with 0xff, gives the code to use in fmovecr
4289
   to get the desired constant.  */
4290
 
4291
/* This code has been fixed for cross-compilation.  */
4292
 
4293
/* Nonzero once values_68881 has been filled in by init_68881_table.  */
static int inited_68881_table = 0;

/* Decimal spellings of the constants available in the 68881's
   constant ROM (used via fmovecr).  */
static const char *const strings_68881[7] = {
  "0.0",
  "1.0",
  "10.0",
  "100.0",
  "10000.0",
  "1e8",
  "1e16"
};

/* fmovecr ROM offsets, parallel to strings_68881.  */
static const int codes_68881[7] = {
  0x0f,
  0x32,
  0x33,
  0x34,
  0x35,
  0x36,
  0x37
};

/* Binary (REAL_VALUE_TYPE) forms of strings_68881, parallel to the
   arrays above; initialized lazily by init_68881_table.  */
REAL_VALUE_TYPE values_68881[7];
4316
 
4317
/* Set up values_68881 array by converting the decimal values
4318
   strings_68881 to binary.  */
4319
 
4320
void
4321
init_68881_table (void)
4322
{
4323
  int i;
4324
  REAL_VALUE_TYPE r;
4325
  enum machine_mode mode;
4326
 
4327
  mode = SFmode;
4328
  for (i = 0; i < 7; i++)
4329
    {
4330
      if (i == 6)
4331
        mode = DFmode;
4332
      r = REAL_VALUE_ATOF (strings_68881[i], mode);
4333
      values_68881[i] = r;
4334
    }
4335
  inited_68881_table = 1;
4336
}
4337
 
4338
/* Return the fmovecr code (a value whose low byte selects the constant
   ROM entry) for CONST_DOUBLE X, or 0 if X is not one of the available
   ROM constants or fmovecr should not be used.  */

int
standard_68881_constant_p (rtx x)
{
  REAL_VALUE_TYPE r;
  int i;

  /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
     used at all on those chips.  */
  if (TUNE_68040_60)
    return 0;

  /* Lazily compute the binary forms of the ROM constants.  */
  if (! inited_68881_table)
    init_68881_table ();

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
     is rejected.  */
  for (i = 0; i < 6; i++)
    {
      if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
        return (codes_68881[i]);
    }

  /* values_68881[6] (1e16) was parsed in DFmode by init_68881_table,
     so don't let an SFmode operand match it.  */
  if (GET_MODE (x) == SFmode)
    return 0;

  if (REAL_VALUES_EQUAL (r, values_68881[6]))
    return (codes_68881[6]);

  /* larger powers of ten in the constants ram are not used
     because they are not equal to a `double' C constant.  */
  return 0;
}
4372
 
4373
/* If X is a floating-point constant, return the logarithm of X base 2,
4374
   or 0 if X is not a power of 2.  */
4375
 
4376
int
floating_exact_log2 (rtx x)
{
  REAL_VALUE_TYPE r, r1;
  int exp;

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  /* Values below 1.0 (including zero and negatives) are never treated
     as powers of two here, so the returned exponent is always > 0 when
     a match is found.  */
  if (REAL_VALUES_LESS (r, dconst1))
    return 0;

  /* X is an exact power of two iff it equals 2**exponent(X).  */
  exp = real_exponent (&r);
  real_2expN (&r1, exp, DFmode);
  if (REAL_VALUES_EQUAL (r1, r))
    return exp;

  return 0;
}
4394
 
4395
/* A C compound statement to output to stdio stream STREAM the
4396
   assembler syntax for an instruction operand X.  X is an RTL
4397
   expression.
4398
 
4399
   CODE is a value that can be used to specify one of several ways
4400
   of printing the operand.  It is used when identical operands
4401
   must be printed differently depending on the context.  CODE
4402
   comes from the `%' specification that was used to request
4403
   printing of the operand.  If the specification was just `%DIGIT'
4404
   then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4405
   is the ASCII code for LTR.
4406
 
4407
   If X is a register, this macro should print the register's name.
4408
   The names can be found in an array `reg_names' whose type is
4409
   `char *[]'.  `reg_names' is initialized from `REGISTER_NAMES'.
4410
 
4411
   When the machine description has a specification `%PUNCT' (a `%'
4412
   followed by a punctuation character), this macro is called with
4413
   a null pointer for X and the punctuation character for CODE.
4414
 
4415
   The m68k specific codes are:
4416
 
4417
   '.' for dot needed in Motorola-style opcode names.
4418
   '-' for an operand pushing on the stack:
4419
       sp@-, -(sp) or -(%sp) depending on the style of syntax.
4420
   '+' for an operand pushing on the stack:
4421
       sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4422
   '@' for a reference to the top word on the stack:
4423
       sp@, (sp) or (%sp) depending on the style of syntax.
4424
   '#' for an immediate operand prefix (# in MIT and Motorola syntax
4425
       but & in SGS syntax).
4426
   '!' for the cc register (used in an `and to cc' insn).
4427
   '$' for the letter `s' in an op code, but only on the 68040.
4428
   '&' for the letter `d' in an op code, but only on the 68040.
4429
   '/' for register prefix needed by longlong.h.
4430
   '?' for m68k_library_id_string
4431
 
4432
   'b' for byte insn (no effect, on the Sun; this is for the ISI).
4433
   'd' to force memory addressing to be absolute, not relative.
4434
   'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4435
   'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4436
       or print pair of registers as rx:ry.
4437
   'p' print an address with @PLTPC attached, but only if the operand
4438
       is not locally-bound.  */
4439
 
4440
/* Print operand OP to FILE according to modifier LETTER.  The full set
   of modifiers is documented in the comment above.  */

void
print_operand (FILE *file, rtx op, int letter)
{
  /* Pure punctuation codes first: these ignore OP (which may be null).  */
  if (letter == '.')
    {
      if (MOTOROLA)
        fprintf (file, ".");
    }
  else if (letter == '#')
    asm_fprintf (file, "%I");
  else if (letter == '-')
    asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
  else if (letter == '+')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
  else if (letter == '@')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
  else if (letter == '!')
    asm_fprintf (file, "%Rfpcr");
  else if (letter == '$')
    {
      if (TARGET_68040)
        fprintf (file, "s");
    }
  else if (letter == '&')
    {
      if (TARGET_68040)
        fprintf (file, "d");
    }
  else if (letter == '/')
    asm_fprintf (file, "%R");
  else if (letter == '?')
    asm_fprintf (file, m68k_library_id_string);
  else if (letter == 'p')
    {
      output_addr_const (file, op);
      /* Only locally-bound symbols may skip the PLT.  */
      if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
        fprintf (file, "@PLTPC");
    }
  else if (GET_CODE (op) == REG)
    {
      if (letter == 'R')
        /* Print out the second register name of a register pair.
           I.e., R (6) => 7.  */
        fputs (M68K_REGNAME(REGNO (op) + 1), file);
      else
        fputs (M68K_REGNAME(REGNO (op)), file);
    }
  else if (GET_CODE (op) == MEM)
    {
      output_address (XEXP (op, 0));
      /* For 'd', force a long absolute reference when a constant address
         is not already guaranteed to be a 16-bit absolute (pre-68020).  */
      if (letter == 'd' && ! TARGET_68020
          && CONSTANT_ADDRESS_P (XEXP (op, 0))
          && !(GET_CODE (XEXP (op, 0)) == CONST_INT
               && INTVAL (XEXP (op, 0)) < 0x8000
               && INTVAL (XEXP (op, 0)) >= -0x8000))
        fprintf (file, MOTOROLA ? ".l" : ":l");
    }
  /* Float constants are emitted as immediate hex images of their
     target representation (single, extended, or double).  */
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
    {
      REAL_VALUE_TYPE r;
      long l;
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, l);
      asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
    {
      REAL_VALUE_TYPE r;
      long l[3];
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
      asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
                   l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
    {
      REAL_VALUE_TYPE r;
      long l[2];
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_DOUBLE (r, l);
      asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
    }
  else
    {
      /* Use `print_operand_address' instead of `output_addr_const'
         to ensure that we print relevant PIC stuff.  */
      asm_fprintf (file, "%I");
      if (TARGET_PCREL
          && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
        print_operand_address (file, op);
      else
        output_addr_const (file, op);
    }
}
4534
 
4535
/* Return string for TLS relocation RELOC.  */
4536
 
4537
static const char *
4538
m68k_get_reloc_decoration (enum m68k_reloc reloc)
4539
{
4540
  /* To my knowledge, !MOTOROLA assemblers don't support TLS.  */
4541
  gcc_assert (MOTOROLA || reloc == RELOC_GOT);
4542
 
4543
  switch (reloc)
4544
    {
4545
    case RELOC_GOT:
4546
      if (MOTOROLA)
4547
        {
4548
          if (flag_pic == 1 && TARGET_68020)
4549
            return "@GOT.w";
4550
          else
4551
            return "@GOT";
4552
        }
4553
      else
4554
        {
4555
          if (TARGET_68020)
4556
            {
4557
              switch (flag_pic)
4558
                {
4559
                case 1:
4560
                  return ":w";
4561
                case 2:
4562
                  return ":l";
4563
                default:
4564
                  return "";
4565
                }
4566
            }
4567
        }
4568
 
4569
    case RELOC_TLSGD:
4570
      return "@TLSGD";
4571
 
4572
    case RELOC_TLSLDM:
4573
      return "@TLSLDM";
4574
 
4575
    case RELOC_TLSLDO:
4576
      return "@TLSLDO";
4577
 
4578
    case RELOC_TLSIE:
4579
      return "@TLSIE";
4580
 
4581
    case RELOC_TLSLE:
4582
      return "@TLSLE";
4583
 
4584
    default:
4585
      gcc_unreachable ();
4586
    }
4587
}
4588
 
4589
/* m68k implementation of OUTPUT_ADDR_CONST_EXTRA.  */
4590
 
4591
bool
4592
m68k_output_addr_const_extra (FILE *file, rtx x)
4593
{
4594
  if (GET_CODE (x) == UNSPEC)
4595
    {
4596
      switch (XINT (x, 1))
4597
        {
4598
        case UNSPEC_RELOC16:
4599
        case UNSPEC_RELOC32:
4600
          output_addr_const (file, XVECEXP (x, 0, 0));
4601
          fputs (m68k_get_reloc_decoration (INTVAL (XVECEXP (x, 0, 1))), file);
4602
          return true;
4603
 
4604
        default:
4605
          break;
4606
        }
4607
    }
4608
 
4609
  return false;
4610
}
4611
 
4612
/* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL.  */
4613
 
4614
static void
m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  /* Only 4-byte DTP-relative entries are supported here.  */
  gcc_assert (size == 4);
  fputs ("\t.long\t", file);
  output_addr_const (file, x);
  /* NOTE(review): the +0x8000 bias presumably matches the m68k
     TLSLDO offset convention used by the linker -- confirm against
     the corresponding bfd/ld relocation handling.  */
  fputs ("@TLSLDO+0x8000", file);
}
4622
 
4623
/* In the name of slightly smaller debug output, and to cater to
4624
   general assembler lossage, recognize various UNSPEC sequences
4625
   and turn them back into a direct symbol reference.  */
4626
 
4627
static rtx
m68k_delegitimize_address (rtx orig_x)
{
  rtx x, y;
  rtx addend = NULL_RTX;
  rtx result;

  orig_x = delegitimize_mem_from_attrs (orig_x);
  if (! MEM_P (orig_x))
    return orig_x;

  x = XEXP (orig_x, 0);

  /* Look for the PIC form (plus PIC_REG (const ...)).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) == PIC_REG)
    {
      /* Y remembers the full CONST body so we can tell PLUS from MINUS
         when rebuilding the addend below.  */
      y = x = XEXP (XEXP (x, 1), 0);

      /* Handle an addend.  */
      if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
          && CONST_INT_P (XEXP (x, 1)))
        {
          addend = XEXP (x, 1);
          x = XEXP (x, 0);
        }

      /* Strip our relocation UNSPEC wrapper and return the bare symbol,
         re-applying any addend found above.  */
      if (GET_CODE (x) == UNSPEC
          && (XINT (x, 1) == UNSPEC_RELOC16
              || XINT (x, 1) == UNSPEC_RELOC32))
        {
          result = XVECEXP (x, 0, 0);
          if (addend)
            {
              if (GET_CODE (y) == PLUS)
                result = gen_rtx_PLUS (Pmode, result, addend);
              else
                result = gen_rtx_MINUS (Pmode, result, addend);
              result = gen_rtx_CONST (Pmode, result);
            }
          return result;
        }
    }

  return orig_x;
}
4674
 
4675
 
4676
/* A C compound statement to output to stdio stream STREAM the
4677
   assembler syntax for an instruction operand that is a memory
4678
   reference whose address is ADDR.  ADDR is an RTL expression.
4679
 
4680
   Note that this contains a kludge that knows that the only reason
4681
   we have an address (plus (label_ref...) (reg...)) when not generating
4682
   PIC code is in the insn before a tablejump, and we know that m68k.md
4683
   generates a label LInnn: on such an insn.
4684
 
4685
   It is possible for PIC to generate a (plus (label_ref...) (reg...))
4686
   and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4687
 
4688
   This routine is responsible for distinguishing between -fpic and -fPIC
4689
   style relocations in an address.  When generating -fpic code the
4690
   offset is output in word mode (e.g. movel a5@(_foo:w), a0).  When generating
4691
   -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */
4692
 
4693
/* Print memory address ADDR to FILE in the current assembler syntax
   (Motorola or MIT); see the large comment above for the kludges this
   routine implements.  */

void
print_operand_address (FILE *file, rtx addr)
{
  struct m68k_address address;

  if (!m68k_decompose_address (QImode, addr, true, &address))
    gcc_unreachable ();

  if (address.code == PRE_DEC)
    fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
             M68K_REGNAME (REGNO (address.base)));
  else if (address.code == POST_INC)
    fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
             M68K_REGNAME (REGNO (address.base)));
  else if (!address.base && !address.index)
    {
      /* A constant address.  */
      gcc_assert (address.offset == addr);
      if (GET_CODE (addr) == CONST_INT)
        {
          /* (xxx).w or (xxx).l.  */
          if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
            fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
          else
            fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
        }
      else if (TARGET_PCREL)
        {
          /* (d16,PC) or (bd,PC,Xn) (with suppressed index register).
             The :w/:l suffix picks -fpic vs -fPIC sized offsets.  */
          fputc ('(', file);
          output_addr_const (file, addr);
          asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
        }
      else
        {
          /* (xxx).l.  We need a special case for SYMBOL_REF if the symbol
             name ends in `.<letter>', as the last 2 characters can be
             mistaken as a size suffix.  Put the name in parentheses.  */
          if (GET_CODE (addr) == SYMBOL_REF
              && strlen (XSTR (addr, 0)) > 2
              && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
            {
              putc ('(', file);
              output_addr_const (file, addr);
              putc (')', file);
            }
          else
            output_addr_const (file, addr);
        }
    }
  else
    {
      int labelno;

      /* If ADDR is a (d8,pc,Xn) address, this is the number of the
         label being accessed, otherwise it is -1.  */
      labelno = (address.offset
                 && !address.base
                 && GET_CODE (address.offset) == LABEL_REF
                 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
                 : -1);
      if (MOTOROLA)
        {
          /* Print the "offset(base" component.  */
          if (labelno >= 0)
            asm_fprintf (file, "%LL%d(%Rpc,", labelno);
          else
            {
              if (address.offset)
                output_addr_const (file, address.offset);

              putc ('(', file);
              if (address.base)
                fputs (M68K_REGNAME (REGNO (address.base)), file);
            }
          /* Print the ",index" component, if any.  */
          if (address.index)
            {
              if (address.base)
                putc (',', file);
              fprintf (file, "%s.%c",
                       M68K_REGNAME (REGNO (address.index)),
                       GET_MODE (address.index) == HImode ? 'w' : 'l');
              if (address.scale != 1)
                fprintf (file, "*%d", address.scale);
            }
          putc (')', file);
        }
      else /* !MOTOROLA */
        {
          /* Plain register-indirect needs no offset/index decoration.  */
          if (!address.offset && !address.index)
            fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
          else
            {
              /* Print the "base@(offset" component.  */
              if (labelno >= 0)
                asm_fprintf (file, "%Rpc@(%LL%d", labelno);
              else
                {
                  if (address.base)
                    fputs (M68K_REGNAME (REGNO (address.base)), file);
                  fprintf (file, "@(");
                  if (address.offset)
                    output_addr_const (file, address.offset);
                }
              /* Print the ",index" component, if any.  */
              if (address.index)
                {
                  fprintf (file, ",%s:%c",
                           M68K_REGNAME (REGNO (address.index)),
                           GET_MODE (address.index) == HImode ? 'w' : 'l');
                  if (address.scale != 1)
                    fprintf (file, ":%d", address.scale);
                }
              putc (')', file);
            }
        }
    }
}
4812
 
4813
/* Check for cases where a clr insns can be omitted from code using
4814
   strict_low_part sets.  For example, the second clrl here is not needed:
4815
   clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...
4816
 
4817
   MODE is the mode of this STRICT_LOW_PART set.  FIRST_INSN is the clear
4818
   insn we are checking for redundancy.  TARGET is the register set by the
4819
   clear insn.  */
4820
 
4821
bool
strict_low_part_peephole_ok (enum machine_mode mode, rtx first_insn,
                             rtx target)
{
  rtx p = first_insn;

  /* Walk backwards from FIRST_INSN looking for an earlier full clear
     of TARGET that makes FIRST_INSN redundant.  */
  while ((p = PREV_INSN (p)))
    {
      /* Don't look past the start of the basic block.  */
      if (NOTE_INSN_BASIC_BLOCK_P (p))
        return false;

      if (NOTE_P (p))
        continue;

      /* If it isn't an insn, then give up.  */
      if (!INSN_P (p))
        return false;

      if (reg_set_p (target, p))
        {
          rtx set = single_set (p);
          rtx dest;

          /* If it isn't an easy to recognize insn, then give up.  */
          if (! set)
            return false;

          dest = SET_DEST (set);

          /* If this sets the entire target register to zero, then our
             first_insn is redundant.  */
          if (rtx_equal_p (dest, target)
              && SET_SRC (set) == const0_rtx)
            return true;
          else if (GET_CODE (dest) == STRICT_LOW_PART
                   && GET_CODE (XEXP (dest, 0)) == REG
                   && REGNO (XEXP (dest, 0)) == REGNO (target)
                   && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
                       <= GET_MODE_SIZE (mode)))
            /* This is a strict low part set which modifies less than
               we are using, so it is safe.  */
            ;
          else
            return false;
        }
    }

  return false;
}
4870
 
4871
/* Operand predicates for implementing asymmetric pc-relative addressing
4872
   on m68k.  The m68k supports pc-relative addressing (mode 7, register 2)
4873
   when used as a source operand, but not as a destination operand.
4874
 
4875
   We model this by restricting the meaning of the basic predicates
4876
   (general_operand, memory_operand, etc) to forbid the use of this
4877
   addressing mode, and then define the following predicates that permit
4878
   this addressing mode.  These predicates can then be used for the
4879
   source operands of the appropriate instructions.
4880
 
4881
   n.b.  While it is theoretically possible to change all machine patterns
4882
   to use this addressing more where permitted by the architecture,
4883
   it has only been implemented for "common" cases: SImode, HImode, and
4884
   QImode operands, and only for the principle operations that would
4885
   require this addressing mode: data movement and simple integer operations.
4886
 
4887
   In parallel with these new predicates, two new constraint letters
4888
   were defined: 'S' and 'T'.  'S' is the -mpcrel analog of 'm'.
4889
   'T' replaces 's' in the non-pcrel case.  It is a no-op in the pcrel case.
4890
   In the pcrel case 's' is only valid in combination with 'a' registers.
4891
   See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4892
   of how these constraints are used.
4893
 
4894
   The use of these predicates is strictly optional, though patterns that
4895
   don't will cause an extra reload register to be allocated where one
4896
   was not necessary:
4897
 
4898
        lea (abc:w,%pc),%a0     ; need to reload address
4899
        moveq &1,%d1            ; since write to pc-relative space
4900
        movel %d1,%a0@          ; is not allowed
4901
        ...
4902
        lea (abc:w,%pc),%a1     ; no need to reload address here
4903
        movel %a1@,%d0          ; since "movel (abc:w,%pc),%d0" is ok
4904
 
4905
   For more info, consult tiemann@cygnus.com.
4906
 
4907
 
4908
   All of the ugliness with predicates and constraints is due to the
4909
   simple fact that the m68k does not allow a pc-relative addressing
4910
   mode as a destination.  gcc does not distinguish between source and
4911
   destination addresses.  Hence, if we claim that pc-relative address
4912
   modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
4913
   end up with invalid code.  To get around this problem, we left
4914
   pc-relative modes as invalid addresses, and then added special
4915
   predicates and constraints to accept them.
4916
 
4917
   A cleaner way to handle this is to modify gcc to distinguish
4918
   between source and destination addresses.  We can then say that
4919
   pc-relative is a valid source address but not a valid destination
4920
   address, and hopefully avoid a lot of the predicate and constraint
4921
   hackery.  Unfortunately, this would be a pretty big change.  It would
4922
   be a useful change for a number of ports, but there aren't any current
4923
   plans to undertake this.
4924
 
4925
   ***************************************************************************/
4926
 
4927
 
4928
/* Output the assembler for an SImode AND of OPERANDS, using a word
   AND/CLR or a single bclr when the mask permits.  May rewrite
   OPERANDS in place to match the chosen template.  */

const char *
output_andsi3 (rtx *operands)
{
  int logval;
  /* Mask with all upper 16 bits set: only the low word is affected,
     so a word-sized AND (or CLR) suffices.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (INTVAL (operands[2]) | 0xffff) == -1
      && (DATA_REG_P (operands[0])
          || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      /* For a memory operand, address the low-order word directly
         (offset 2 of the 4-byte operand).  */
      if (GET_CODE (operands[0]) != REG)
        operands[0] = adjust_address (operands[0], HImode, 2);
      operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      if (operands[2] == const0_rtx)
        return "clr%.w %0";
      return "and%.w %2,%0";
    }
  /* Mask that clears exactly one bit: use bclr on that bit.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
          || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
        operands[1] = GEN_INT (logval);
      else
        {
          /* Address the byte that contains the bit and renumber the
             bit within that byte.  */
          operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
          operands[1] = GEN_INT (logval % 8);
        }
      /* This does not set condition codes in a standard way.  */
      CC_STATUS_INIT;
      return "bclr %1,%0";
    }
  return "and%.l %2,%0";
}
4965
 
4966
/* Output the assembler for an SImode OR of OPERANDS, using a word
   OR/MOV or a single bset when the mask permits.  May rewrite
   OPERANDS in place to match the chosen template.  */

const char *
output_iorsi3 (rtx *operands)
{
  register int logval;
  /* Mask confined to the low 16 bits: a word-sized OR suffices.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) >> 16 == 0
      && (DATA_REG_P (operands[0])
          || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      /* For a memory operand, address the low-order word directly.  */
      if (GET_CODE (operands[0]) != REG)
        operands[0] = adjust_address (operands[0], HImode, 2);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      /* OR with 0xffff sets the whole word; a move is cheaper.  */
      if (INTVAL (operands[2]) == 0xffff)
        return "mov%.w %2,%0";
      return "or%.w %2,%0";
    }
  /* Mask that sets exactly one bit: use bset on that bit.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
          || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
        operands[1] = GEN_INT (logval);
      else
        {
          /* Address the byte that contains the bit and renumber the
             bit within that byte.  */
          operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
          operands[1] = GEN_INT (logval % 8);
        }
      CC_STATUS_INIT;
      return "bset %1,%0";
    }
  return "or%.l %2,%0";
}
5001
 
5002
/* Output the assembler for an SImode XOR of OPERANDS, using a word
   EOR/NOT or a single bchg when the mask permits.  May rewrite
   OPERANDS in place to match the chosen template.  */

const char *
output_xorsi3 (rtx *operands)
{
  register int logval;
  /* Mask confined to the low 16 bits: a word-sized EOR suffices.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) >> 16 == 0
      && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
      && !TARGET_COLDFIRE)
    {
      /* For a memory operand, address the low-order word directly.  */
      if (! DATA_REG_P (operands[0]))
        operands[0] = adjust_address (operands[0], HImode, 2);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      /* XOR with 0xffff flips the whole word; not%.w is cheaper.  */
      if (INTVAL (operands[2]) == 0xffff)
        return "not%.w %0";
      return "eor%.w %2,%0";
    }
  /* Mask that flips exactly one bit: use bchg on that bit.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
          || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
        operands[1] = GEN_INT (logval);
      else
        {
          /* Address the byte that contains the bit and renumber the
             bit within that byte.  */
          operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
          operands[1] = GEN_INT (logval % 8);
        }
      CC_STATUS_INIT;
      return "bchg %1,%0";
    }
  return "eor%.l %2,%0";
}
5036
 
5037
/* Return the instruction that should be used for a call to address X,
5038
   which is known to be in operand 0.  */
5039
 
5040
const char *
5041
output_call (rtx x)
5042
{
5043
  if (symbolic_operand (x, VOIDmode))
5044
    return m68k_symbolic_call;
5045
  else
5046
    return "jsr %a0";
5047
}
5048
 
5049
/* Likewise sibling calls.  */
5050
 
5051
const char *
5052
output_sibcall (rtx x)
5053
{
5054
  if (symbolic_operand (x, VOIDmode))
5055
    return m68k_symbolic_jump;
5056
  else
5057
    return "jmp %a0";
5058
}
5059
 
5060
/* Emit the assembly for a thunk to FUNCTION: adjust the incoming
   `this' pointer (stored on the stack) by DELTA and, if nonzero, by
   *(*this + VCALL_OFFSET), then tail-call FUNCTION.  RTL is generated
   directly and run through final.  */

static void
m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
                      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                      tree function)
{
  rtx this_slot, offset, addr, mem, insn, tmp;

  /* Avoid clobbering the struct value reg by using the
     static chain reg as a temporary.  */
  tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);

  /* Pretend to be a post-reload pass while generating rtl.  */
  reload_completed = 1;

  /* The "this" pointer is stored at 4(%sp).  */
  this_slot = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, 4));

  /* Add DELTA to THIS.  */
  if (delta != 0)
    {
      /* Make the offset a legitimate operand for memory addition.  */
      offset = GEN_INT (delta);
      if ((delta < -8 || delta > 8)
          && (TARGET_COLDFIRE || USE_MOVQ (delta)))
        {
          emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
          offset = gen_rtx_REG (Pmode, D0_REG);
        }
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
                                copy_rtx (this_slot), offset));
    }

  /* If needed, add *(*THIS + VCALL_OFFSET) to THIS.  */
  if (vcall_offset != 0)
    {
      /* Set the static chain register to *THIS.  */
      emit_move_insn (tmp, this_slot);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));

      /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET.  */
      addr = plus_constant (tmp, vcall_offset);
      if (!m68k_legitimate_address_p (Pmode, addr, true))
        {
          emit_insn (gen_rtx_SET (VOIDmode, tmp, addr));
          addr = tmp;
        }

      /* Load the offset into %d0 and add it to THIS.  */
      emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
                      gen_rtx_MEM (Pmode, addr));
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
                                copy_rtx (this_slot),
                                gen_rtx_REG (Pmode, D0_REG)));
    }

  /* Jump to the target function.  Use a sibcall if direct jumps are
     allowed, otherwise load the address into a register first.  */
  mem = DECL_RTL (function);
  if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
    {
      gcc_assert (flag_pic);

      if (!TARGET_SEP_DATA)
        {
          /* Use the static chain register as a temporary (call-clobbered)
             GOT pointer for this function.  We can use the static chain
             register because it isn't live on entry to the thunk.  */
          SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
          emit_insn (gen_load_got (pic_offset_table_rtx));
        }
      legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
      mem = replace_equiv_address (mem, tmp);
    }
  insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation.  */
  insn = get_insns ();
  split_all_insns_noflow ();
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  /* Clean up the vars set above.  */
  reload_completed = 0;

  /* Restore the original PIC register.  */
  if (flag_pic)
    SET_REGNO (pic_offset_table_rtx, PIC_REG);
}
5150
 
5151
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */
5152
 
5153
/* Return the register in which a structure-value address is passed;
   FNTYPE and INCOMING are ignored — the same register is always used.  */

static rtx
m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
                       int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
}
5159
 
5160
/* Return nonzero if register old_reg can be renamed to register new_reg.  */
5161
int
5162
m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5163
                           unsigned int new_reg)
5164
{
5165
 
5166
  /* Interrupt functions can only use registers that have already been
5167
     saved by the prologue, even if they would normally be
5168
     call-clobbered.  */
5169
 
5170
  if ((m68k_get_function_kind (current_function_decl)
5171
       == m68k_fk_interrupt_handler)
5172
      && !df_regs_ever_live_p (new_reg))
5173
    return 0;
5174
 
5175
  return 1;
5176
}
5177
 
5178
/* Value is true if hard register REGNO can hold a value of machine-mode
5179
   MODE.  On the 68000, we let the cpu registers can hold any mode, but
5180
   restrict the 68881 registers to floating-point modes.  */
5181
 
5182
bool
m68k_regno_mode_ok (int regno, enum machine_mode mode)
{
  if (DATA_REGNO_P (regno))
    {
      /* Data Registers, can hold aggregate if fits in.
         (Data registers occupy regnos below 8; the value must not
         spill past the last one.)  */
      if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
        return true;
    }
  else if (ADDRESS_REGNO_P (regno))
    {
      /* Address registers occupy regnos below 16; same spill check.  */
      if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
        return true;
    }
  else if (FP_REGNO_P (regno))
    {
      /* FPU registers, hold float or complex float of long double or
         smaller.  */
      if ((GET_MODE_CLASS (mode) == MODE_FLOAT
           || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
          && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
        return true;
    }
  return false;
}
5207
 
5208
/* Implement SECONDARY_RELOAD_CLASS.
   Return the class of an intermediate register needed to move X of
   mode MODE into a register of class RCLASS, or NO_REGS if a direct
   move is possible.  */

enum reg_class
m68k_secondary_reload_class (enum reg_class rclass,
                             enum machine_mode mode, rtx x)
{
  int regno;

  regno = true_regnum (x);

  /* If one operand of a movqi is an address register, the other
     operand must be a general register or constant.  Other types
     of operand must be reloaded through a data register.  */
  if (GET_MODE_SIZE (mode) == 1
      && reg_classes_intersect_p (rclass, ADDR_REGS)
      && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
    return DATA_REGS;

  /* PC-relative addresses must be loaded into an address register first.  */
  if (TARGET_PCREL
      && !reg_class_subset_p (rclass, ADDR_REGS)
      && symbolic_operand (x, VOIDmode))
    return ADDR_REGS;

  return NO_REGS;
}
5234
 
5235
/* Implement PREFERRED_RELOAD_CLASS.
   Narrow RCLASS to the subclass best suited for reloading X, or return
   RCLASS unchanged if no narrowing helps.  */

enum reg_class
m68k_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class secondary_class;

  /* If RCLASS might need a secondary reload, try restricting it to
     a class that doesn't.  */
  secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
  if (secondary_class != NO_REGS
      && reg_class_subset_p (secondary_class, rclass))
    return secondary_class;

  /* Prefer to use moveq for in-range constants.  -0x80..0x7f is the
     signed 8-bit immediate range of the moveq instruction.  */
  if (GET_CODE (x) == CONST_INT
      && reg_class_subset_p (DATA_REGS, rclass)
      && IN_RANGE (INTVAL (x), -0x80, 0x7f))
    return DATA_REGS;

  /* ??? Do we really need this now?  */
  if (GET_CODE (x) == CONST_DOUBLE
      && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    {
      if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
        return FP_REGS;

      return NO_REGS;
    }

  return rclass;
}
5267
 
5268
/* Return floating point values in a 68881 register.  This makes 68881 code
   a little bit faster.  It also makes -msoft-float code incompatible with
   hard-float code, so people have to be careful not to mix the two.
   For ColdFire it was decided the ABI incompatibility is undesirable.
   If there is need for a hard-float ABI it is probably worth doing it
   properly and also passing function arguments in FP registers.
   Non-float libcall values go in a0 or d0 depending on the target ABI
   (m68k_libcall_value_in_a0_p).  */
rtx
m68k_libcall_value (enum machine_mode mode)
{
  switch (mode) {
  case SFmode:
  case DFmode:
  case XFmode:
    if (TARGET_68881)
      return gen_rtx_REG (mode, FP0_REG);
    break;
  default:
    break;
  }

  return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
}
5290
 
5291
/* Location in which function value is returned.
   VALTYPE is the returned type; FUNC is the function decl, if known.
   NOTE: Due to differences in ABIs, don't call this function directly,
   use FUNCTION_VALUE instead.  */
rtx
m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;

  mode = TYPE_MODE (valtype);
  switch (mode) {
  case SFmode:
  case DFmode:
  case XFmode:
    /* FP results go in fp0 when hardware float is available; see the
       compatibility discussion above m68k_libcall_value.  */
    if (TARGET_68881)
      return gen_rtx_REG (mode, FP0_REG);
    break;
  default:
    break;
  }

  /* If the function returns a pointer, push that into %a0.  */
  if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
    /* For compatibility with the large body of existing code which
       does not always properly declare external functions returning
       pointer types, the m68k/SVR4 convention is to copy the value
       returned for pointer functions from a0 to d0 in the function
       epilogue, so that callers that have neglected to properly
       declare the callee can still find the correct return value in
       d0.  */
    return gen_rtx_PARALLEL
      (mode,
       gen_rtvec (2,
                  gen_rtx_EXPR_LIST (VOIDmode,
                                     gen_rtx_REG (mode, A0_REG),
                                     const0_rtx),
                  gen_rtx_EXPR_LIST (VOIDmode,
                                     gen_rtx_REG (mode, D0_REG),
                                     const0_rtx)));
  else if (POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, A0_REG);
  else
    return gen_rtx_REG (mode, D0_REG);
}
5334
 
5335
/* Worker function for TARGET_RETURN_IN_MEMORY.
   Return true if a value of type TYPE must be returned in memory
   rather than in registers.  Only compiled in when strict alignment
   is honored, since the alignment check below exists solely to keep
   -mstrict-align and -mno-strict-align code link-compatible.  */
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool
m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = TYPE_MODE (type);

  if (mode == BLKmode)
    return true;

  /* If TYPE's known alignment is less than the alignment of MODE that
     would contain the structure, then return in memory.  We need to
     do so to maintain the compatibility between code compiled with
     -mstrict-align and that compiled with -mno-strict-align.  */
  if (AGGREGATE_TYPE_P (type)
      && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
    return true;

  return false;
}
#endif
5356
 
5357
/* CPU to schedule the program for.  Set from the command-line options
   and consulted throughout the scheduler hooks below.  */
enum attr_cpu m68k_sched_cpu;

/* MAC (multiply-accumulate unit) to schedule the program for.  */
enum attr_mac m68k_sched_mac;
5362
 
5363
/* Operand type.  Classification of an instruction operand used by the
   scheduler to estimate instruction size and memory traffic.  The MEM
   variants correspond to the m68k effective-address (EA) modes.  */
enum attr_op_type
  {
    /* No operand.  */
    OP_TYPE_NONE,

    /* Integer register.  */
    OP_TYPE_RN,

    /* FP register.  */
    OP_TYPE_FPN,

    /* Implicit mem reference (e.g. stack).  */
    OP_TYPE_MEM1,

    /* Memory without offset or indexing.  EA modes 2, 3 and 4.  */
    OP_TYPE_MEM234,

    /* Memory with offset but without indexing.  EA mode 5.  */
    OP_TYPE_MEM5,

    /* Memory with indexing.  EA mode 6.  */
    OP_TYPE_MEM6,

    /* Memory referenced by absolute address.  EA mode 7.  */
    OP_TYPE_MEM7,

    /* Immediate operand that doesn't require extension word.  */
    OP_TYPE_IMM_Q,

    /* Immediate 16 bit operand.  */
    OP_TYPE_IMM_W,

    /* Immediate 32 bit operand.  */
    OP_TYPE_IMM_L
  };
5399
 
5400
/* Return type of memory ADDR_RTX refers to, classified as one of the
   OP_TYPE_MEM* values.  MODE is the mode of the access.  */
static enum attr_op_type
sched_address_type (enum machine_mode mode, rtx addr_rtx)
{
  struct m68k_address address;

  if (symbolic_operand (addr_rtx, VOIDmode))
    return OP_TYPE_MEM7;

  if (!m68k_decompose_address (mode, addr_rtx,
                               reload_completed, &address))
    {
      gcc_assert (!reload_completed);
      /* Reload will likely fix the address to be in the register.  */
      return OP_TYPE_MEM234;
    }

  /* A scale implies an index register, hence EA mode 6.  */
  if (address.scale != 0)
    return OP_TYPE_MEM6;

  if (address.base != NULL_RTX)
    {
      if (address.offset == NULL_RTX)
        return OP_TYPE_MEM234;

      return OP_TYPE_MEM5;
    }

  /* No base and no scale: must be an absolute (offset-only) address.  */
  gcc_assert (address.offset != NULL_RTX);

  return OP_TYPE_MEM7;
}
5432
 
5433
/* Return X or Y (depending on OPX_P) operand of INSN,
   as indicated by the insn's opx/opy attributes, or NULL if the
   attribute indexes past the insn's operands.  */
static rtx
sched_get_operand (rtx insn, bool opx_p)
{
  int i;

  if (recog_memoized (insn) < 0)
    gcc_unreachable ();

  extract_constrain_insn_cached (insn);

  if (opx_p)
    i = get_attr_opx (insn);
  else
    i = get_attr_opy (insn);

  if (i >= recog_data.n_operands)
    return NULL;

  return recog_data.operand[i];
}
5454
 
5455
/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
   If ADDRESS_P is true, return type of memory location operand refers to.
   Classifies registers, memory references, and the various immediate
   sizes; before reload some classifications are best guesses.  */
static enum attr_op_type
sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
{
  rtx op;

  op = sched_get_operand (insn, opx_p);

  if (op == NULL)
    {
      gcc_assert (!reload_completed);
      return OP_TYPE_RN;
    }

  if (address_p)
    /* The mode is irrelevant for address decomposition; QImode is a
       placeholder.  */
    return sched_address_type (QImode, op);

  if (memory_operand (op, VOIDmode))
    return sched_address_type (GET_MODE (op), XEXP (op, 0));

  if (register_operand (op, VOIDmode))
    {
      if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
          || (reload_completed && FP_REG_P (op)))
        return OP_TYPE_FPN;

      return OP_TYPE_RN;
    }

  if (GET_CODE (op) == CONST_INT)
    {
      int ival;

      ival = INTVAL (op);

      /* Check for quick constants.  */
      switch (get_attr_type (insn))
        {
        case TYPE_ALUQ_L:
          if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
            return OP_TYPE_IMM_Q;

          gcc_assert (!reload_completed);
          break;

        case TYPE_MOVEQ_L:
          if (USE_MOVQ (ival))
            return OP_TYPE_IMM_Q;

          gcc_assert (!reload_completed);
          break;

        case TYPE_MOV3Q_L:
          if (valid_mov3q_const (ival))
            return OP_TYPE_IMM_Q;

          gcc_assert (!reload_completed);
          break;

        default:
          break;
        }

      if (IN_RANGE (ival, -0x8000, 0x7fff))
        return OP_TYPE_IMM_W;

      return OP_TYPE_IMM_L;
    }

  if (GET_CODE (op) == CONST_DOUBLE)
    {
      switch (GET_MODE (op))
        {
        case SFmode:
          return OP_TYPE_IMM_W;

        case VOIDmode:
        case DFmode:
          return OP_TYPE_IMM_L;

        default:
          gcc_unreachable ();
        }
    }

  if (GET_CODE (op) == CONST
      || symbolic_operand (op, VOIDmode)
      || LABEL_P (op))
    {
      switch (GET_MODE (op))
        {
        case QImode:
          return OP_TYPE_IMM_Q;

        case HImode:
          return OP_TYPE_IMM_W;

        case SImode:
          return OP_TYPE_IMM_L;

        default:
          if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
            /* Just a guess.  */
            return OP_TYPE_IMM_W;

          return OP_TYPE_IMM_L;
        }
    }

  /* Anything else (e.g. a SUBREG) should only occur before reload;
     treat it as a plain register.  */
  gcc_assert (!reload_completed);

  if (FLOAT_MODE_P (GET_MODE (op)))
    return OP_TYPE_FPN;

  return OP_TYPE_RN;
}
5572
 
5573
/* Implement opx_type attribute.
   Return type of INSN's operand X.
   If ADDRESS_P is true, return type of memory location operand refers to.
   This is a 1:1 translation from the generic OP_TYPE_* classification
   to the generated OPX_TYPE_* attribute enum.  */
enum attr_opx_type
m68k_sched_attr_opx_type (rtx insn, int address_p)
{
  switch (sched_attr_op_type (insn, true, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPX_TYPE_RN;

    case OP_TYPE_FPN:
      return OPX_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPX_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPX_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPX_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPX_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPX_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPX_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPX_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPX_TYPE_IMM_L;

    default:
      gcc_unreachable ();
      return 0;
    }
}
5616
 
5617
/* Implement opy_type attribute.
   Return type of INSN's operand Y.
   If ADDRESS_P is true, return type of memory location operand refers to.
   Mirror of m68k_sched_attr_opx_type for the Y operand.  */
enum attr_opy_type
m68k_sched_attr_opy_type (rtx insn, int address_p)
{
  switch (sched_attr_op_type (insn, false, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPY_TYPE_RN;

    case OP_TYPE_FPN:
      return OPY_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPY_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPY_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPY_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPY_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPY_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPY_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPY_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPY_TYPE_IMM_L;

    default:
      gcc_unreachable ();
      return 0;
    }
}
5660
 
5661
/* Return size of INSN as int.  */
5662
static int
5663
sched_get_attr_size_int (rtx insn)
5664
{
5665
  int size;
5666
 
5667
  switch (get_attr_type (insn))
5668
    {
5669
    case TYPE_IGNORE:
5670
      /* There should be no references to m68k_sched_attr_size for 'ignore'
5671
         instructions.  */
5672
      gcc_unreachable ();
5673
      return 0;
5674
 
5675
    case TYPE_MUL_L:
5676
      size = 2;
5677
      break;
5678
 
5679
    default:
5680
      size = 1;
5681
      break;
5682
    }
5683
 
5684
  switch (get_attr_opx_type (insn))
5685
    {
5686
    case OPX_TYPE_NONE:
5687
    case OPX_TYPE_RN:
5688
    case OPX_TYPE_FPN:
5689
    case OPX_TYPE_MEM1:
5690
    case OPX_TYPE_MEM234:
5691
    case OPY_TYPE_IMM_Q:
5692
      break;
5693
 
5694
    case OPX_TYPE_MEM5:
5695
    case OPX_TYPE_MEM6:
5696
      /* Here we assume that most absolute references are short.  */
5697
    case OPX_TYPE_MEM7:
5698
    case OPY_TYPE_IMM_W:
5699
      ++size;
5700
      break;
5701
 
5702
    case OPY_TYPE_IMM_L:
5703
      size += 2;
5704
      break;
5705
 
5706
    default:
5707
      gcc_unreachable ();
5708
    }
5709
 
5710
  switch (get_attr_opy_type (insn))
5711
    {
5712
    case OPY_TYPE_NONE:
5713
    case OPY_TYPE_RN:
5714
    case OPY_TYPE_FPN:
5715
    case OPY_TYPE_MEM1:
5716
    case OPY_TYPE_MEM234:
5717
    case OPY_TYPE_IMM_Q:
5718
      break;
5719
 
5720
    case OPY_TYPE_MEM5:
5721
    case OPY_TYPE_MEM6:
5722
      /* Here we assume that most absolute references are short.  */
5723
    case OPY_TYPE_MEM7:
5724
    case OPY_TYPE_IMM_W:
5725
      ++size;
5726
      break;
5727
 
5728
    case OPY_TYPE_IMM_L:
5729
      size += 2;
5730
      break;
5731
 
5732
    default:
5733
      gcc_unreachable ();
5734
    }
5735
 
5736
  if (size > 3)
5737
    {
5738
      gcc_assert (!reload_completed);
5739
 
5740
      size = 3;
5741
    }
5742
 
5743
  return size;
5744
}
5745
 
5746
/* Return size of INSN as attribute enum value (SIZE_1 .. SIZE_3),
   mapping the integer word count from sched_get_attr_size_int.  */
enum attr_size
m68k_sched_attr_size (rtx insn)
{
  switch (sched_get_attr_size_int (insn))
    {
    case 1:
      return SIZE_1;

    case 2:
      return SIZE_2;

    case 3:
      return SIZE_3;

    default:
      gcc_unreachable ();
      return 0;
    }
}
5766
 
5767
/* Classify operand X or Y (depending on OPX_P) of INSN for memory-access
   modeling: OP_TYPE_RN for non-memory operands, OP_TYPE_MEM6 for indexed
   memory, and OP_TYPE_MEM1 for all other memory references.
   (Note: despite the coarse buckets, the return value is an
   attr_op_type classification, not an rtx.)  */
static enum attr_op_type
sched_get_opxy_mem_type (rtx insn, bool opx_p)
{
  if (opx_p)
    {
      switch (get_attr_opx_type (insn))
        {
        case OPX_TYPE_NONE:
        case OPX_TYPE_RN:
        case OPX_TYPE_FPN:
        case OPX_TYPE_IMM_Q:
        case OPX_TYPE_IMM_W:
        case OPX_TYPE_IMM_L:
          return OP_TYPE_RN;

        case OPX_TYPE_MEM1:
        case OPX_TYPE_MEM234:
        case OPX_TYPE_MEM5:
        case OPX_TYPE_MEM7:
          return OP_TYPE_MEM1;

        case OPX_TYPE_MEM6:
          return OP_TYPE_MEM6;

        default:
          gcc_unreachable ();
          return 0;
        }
    }
  else
    {
      switch (get_attr_opy_type (insn))
        {
        case OPY_TYPE_NONE:
        case OPY_TYPE_RN:
        case OPY_TYPE_FPN:
        case OPY_TYPE_IMM_Q:
        case OPY_TYPE_IMM_W:
        case OPY_TYPE_IMM_L:
          return OP_TYPE_RN;

        case OPY_TYPE_MEM1:
        case OPY_TYPE_MEM234:
        case OPY_TYPE_MEM5:
        case OPY_TYPE_MEM7:
          return OP_TYPE_MEM1;

        case OPY_TYPE_MEM6:
          return OP_TYPE_MEM6;

        default:
          gcc_unreachable ();
          return 0;
        }
    }
}
5825
 
5826
/* Implement op_mem attribute.
   Encode the memory behavior of INSN's Y (first digit) and X (second
   digit) operands: 0 = no memory, 1 = plain memory, I = indexed memory.
   For operand X the read/write direction (opx_access) also matters.  */
enum attr_op_mem
m68k_sched_attr_op_mem (rtx insn)
{
  enum attr_op_type opx;
  enum attr_op_type opy;

  opx = sched_get_opxy_mem_type (insn, true);
  opy = sched_get_opxy_mem_type (insn, false);

  if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
    return OP_MEM_00;

  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
        {
        case OPX_ACCESS_R:
          return OP_MEM_10;

        case OPX_ACCESS_W:
          return OP_MEM_01;

        case OPX_ACCESS_RW:
          return OP_MEM_11;

        default:
          gcc_unreachable ();
          return 0;
        }
    }

  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
        {
        case OPX_ACCESS_R:
          return OP_MEM_I0;

        case OPX_ACCESS_W:
          return OP_MEM_0I;

        case OPX_ACCESS_RW:
          return OP_MEM_I1;

        default:
          gcc_unreachable ();
          return 0;
        }
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
    return OP_MEM_10;

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
        {
        case OPX_ACCESS_W:
          return OP_MEM_11;

        default:
          /* Mem-to-mem with a non-write X operand can only occur
             before reload; assume the worst case.  */
          gcc_assert (!reload_completed);
          return OP_MEM_11;
        }
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
        {
        case OPX_ACCESS_W:
          return OP_MEM_1I;

        default:
          gcc_assert (!reload_completed);
          return OP_MEM_1I;
        }
    }

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
    return OP_MEM_I0;

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
        {
        case OPX_ACCESS_W:
          return OP_MEM_I1;

        default:
          gcc_assert (!reload_completed);
          return OP_MEM_I1;
        }
    }

  /* The only remaining combination: both operands indexed memory.  */
  gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
  gcc_assert (!reload_completed);
  return OP_MEM_I1;
}
5926
 
5927
/* Jump instructions types.  Indexed by INSN_UID.
   The same rtl insn can be expanded into different asm instructions
   depending on the cc0_status.  To properly determine type of jump
   instructions we scan instruction stream and map jumps types to this
   array.  Allocated in m68k_sched_md_init_global and freed in
   m68k_sched_md_finish_global.  */
static enum attr_type *sched_branch_type;

/* Return the type of the jump insn, as recorded during the
   pre-scheduling scan.  */
enum attr_type
m68k_sched_branch_type (rtx insn)
{
  enum attr_type type;

  type = sched_branch_type[INSN_UID (insn)];

  /* Zero means the insn was never classified — a scan bug.  */
  gcc_assert (type != 0);

  return type;
}
5946
 
5947
/* Data for ColdFire V4 index bypass.
   Producer modifies register that is used as index in consumer with
   specified scale.  Filled in by the bypass guard and consumed (and
   reset) by m68k_sched_adjust_cost.  */
static struct
{
  /* Producer instruction.  */
  rtx pro;

  /* Consumer instruction.  */
  rtx con;

  /* Scale of indexed memory access within consumer.
     Or zero if bypass should not be effective at the moment.  */
  int scale;
} sched_cfv4_bypass_data;
5962
 
5963
/* An empty DFA state used in m68k_sched_adjust_cost to compute minimal
   issue-conflict delays from a clean pipeline.  */
static state_t sched_adjust_cost_state;

/* Implement adjust_cost scheduler hook.
   Return adjusted COST of dependency LINK between DEF_INSN and INSN.  */
static int
m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
                        int cost)
{
  int delay;

  if (recog_memoized (def_insn) < 0
      || recog_memoized (insn) < 0)
    return cost;

  if (sched_cfv4_bypass_data.scale == 1)
    /* Handle ColdFire V4 bypass for indexed address with 1x scale.  */
    {
      /* haifa-sched.c: insn_cost () calls bypass_p () just before
         targetm.sched.adjust_cost ().  Hence, we can be relatively sure
         that the data in sched_cfv4_bypass_data is up to date.  */
      gcc_assert (sched_cfv4_bypass_data.pro == def_insn
                  && sched_cfv4_bypass_data.con == insn);

      if (cost < 3)
        cost = 3;

      /* Consume the bypass record so stale data is never reused.  */
      sched_cfv4_bypass_data.pro = NULL;
      sched_cfv4_bypass_data.con = NULL;
      sched_cfv4_bypass_data.scale = 0;
    }
  else
    gcc_assert (sched_cfv4_bypass_data.pro == NULL
                && sched_cfv4_bypass_data.con == NULL
                && sched_cfv4_bypass_data.scale == 0);

  /* Don't try to issue INSN earlier than DFA permits.
     This is especially useful for instructions that write to memory,
     as their true dependence (default) latency is better to be set to 0
     to workaround alias analysis limitations.
     This is, in fact, a machine independent tweak, so, probably,
     it should be moved to haifa-sched.c: insn_cost ().  */
  delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
  if (delay > cost)
    cost = delay;

  return cost;
}
6011
 
6012
/* Return maximal number of insns that can be scheduled on a single cycle.  */
6013
static int
6014
m68k_sched_issue_rate (void)
6015
{
6016
  switch (m68k_sched_cpu)
6017
    {
6018
    case CPU_CFV1:
6019
    case CPU_CFV2:
6020
    case CPU_CFV3:
6021
      return 1;
6022
 
6023
    case CPU_CFV4:
6024
      return 2;
6025
 
6026
    default:
6027
      gcc_unreachable ();
6028
      return 0;
6029
    }
6030
}
6031
 
6032
/* Maximal length of instruction for current CPU.
   E.g. it is 3 for any ColdFire core.  */
static int max_insn_size;

/* Data to model instruction buffer of CPU.  */
struct _sched_ib
{
  /* True if instruction buffer model is modeled for current CPU.  */
  bool enabled_p;

  /* Size of the instruction buffer in words.  */
  int size;

  /* Number of filled words in the instruction buffer.  */
  int filled;

  /* Additional information about instruction buffer for CPUs that have
     a buffer of instruction records, rather then a plain buffer
     of instruction words.  */
  struct _sched_ib_records
  {
    /* Size of buffer in records.  */
    int n_insns;

    /* Array to hold data on adjustements made to the size of the buffer.
       Used as a circular buffer of N_INSNS entries.  */
    int *adjust;

    /* Index of the above array.  */
    int adjust_index;
  } records;

  /* An insn that reserves (marks empty) one word in the instruction buffer.  */
  rtx insn;
};

/* The single instruction-buffer model instance for the current
   scheduling pass.  */
static struct _sched_ib sched_ib;

/* ID of memory unit.  */
static int sched_mem_unit_code;
6071
 
6072
/* Implementation of the targetm.sched.variable_issue () hook.
   It is called after INSN was issued.  It returns the number of insns
   that can possibly get scheduled on the current cycle.
   It is used here to determine the effect of INSN on the instruction
   buffer: the insn's size in words is subtracted from the number of
   filled buffer words.  */
static int
m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
                           int sched_verbose ATTRIBUTE_UNUSED,
                           rtx insn, int can_issue_more)
{
  int insn_size;

  if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
    {
      switch (m68k_sched_cpu)
        {
        case CPU_CFV1:
        case CPU_CFV2:
          insn_size = sched_get_attr_size_int (insn);
          break;

        case CPU_CFV3:
          insn_size = sched_get_attr_size_int (insn);

          /* ColdFire V3 and V4 cores have instruction buffers that can
             accumulate up to 8 instructions regardless of instructions'
             sizes.  So we should take care not to "prefetch" 24 one-word
             or 12 two-words instructions.
             To model this behavior we temporarily decrease size of the
             buffer by (max_insn_size - insn_size) for next 7 instructions.  */
          {
            int adjust;

            adjust = max_insn_size - insn_size;
            sched_ib.size -= adjust;

            if (sched_ib.filled > sched_ib.size)
              sched_ib.filled = sched_ib.size;

            /* Record this shrink in the circular buffer so it can be
               undone once 8 more instructions have issued.  */
            sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
          }

          ++sched_ib.records.adjust_index;
          if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
            sched_ib.records.adjust_index = 0;

          /* Undo adjustement we did 7 instructions ago.  */
          sched_ib.size
            += sched_ib.records.adjust[sched_ib.records.adjust_index];

          break;

        case CPU_CFV4:
          gcc_assert (!sched_ib.enabled_p);
          insn_size = 0;
          break;

        default:
          gcc_unreachable ();
        }

      gcc_assert (insn_size <= sched_ib.filled);
      --can_issue_more;
    }
  else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
           || asm_noperands (PATTERN (insn)) >= 0)
    /* Inline asm has unknown size: assume it drains the whole buffer.  */
    insn_size = sched_ib.filled;
  else
    insn_size = 0;

  sched_ib.filled -= insn_size;

  return can_issue_more;
}
6146
 
6147
/* Return how many instructions should scheduler lookahead to choose the
   best one.  One less than the issue rate: lookahead is only useful
   when more than one insn can issue per cycle.  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  int issue_rate = m68k_sched_issue_rate ();

  return issue_rate - 1;
}
6154
 
6155
/* Implementation of targetm.sched.md_init_global () hook.
   It is invoked once per scheduling pass and is used here
   to initialize scheduler constants: the branch-type map, the
   instruction-buffer model for the selected CPU, and the DFA state
   used by the adjust_cost hook.  */
static void
m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
                           int sched_verbose ATTRIBUTE_UNUSED,
                           int n_insns ATTRIBUTE_UNUSED)
{
  /* Init branch types.  */
  {
    rtx insn;

    sched_branch_type = XCNEWVEC (enum attr_type, get_max_uid () + 1);

    for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
      {
        if (JUMP_P (insn))
          /* !!! FIXME: Implement real scan here.  */
          sched_branch_type[INSN_UID (insn)] = TYPE_BCC;
      }
  }

#ifdef ENABLE_CHECKING
  /* Check that all instructions have DFA reservations and
     that all instructions can be issued from a clean state.  */
  {
    rtx insn;
    state_t state;

    state = alloca (state_size ());

    for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
      {
        if (INSN_P (insn) && recog_memoized (insn) >= 0)
          {
            gcc_assert (insn_has_dfa_reservation_p (insn));

            state_reset (state);
            if (state_transition (state, insn) >= 0)
              gcc_unreachable ();
          }
      }
  }
#endif

  /* Setup target cpu.  */

  /* ColdFire V4 has a set of features to keep its instruction buffer full
     (e.g., a separate memory bus for instructions) and, hence, we do not model
     buffer for this CPU.  */
  sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);

  switch (m68k_sched_cpu)
    {
    case CPU_CFV4:
      sched_ib.filled = 0;

      /* FALLTHRU */

    case CPU_CFV1:
    case CPU_CFV2:
      max_insn_size = 3;
      sched_ib.records.n_insns = 0;
      sched_ib.records.adjust = NULL;
      break;

    case CPU_CFV3:
      max_insn_size = 3;
      sched_ib.records.n_insns = 8;
      sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
      break;

    default:
      gcc_unreachable ();
    }

  sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");

  sched_adjust_cost_state = xmalloc (state_size ());
  state_reset (sched_adjust_cost_state);

  /* Build the dummy 'ib' insn used to reserve one buffer word per
     state transition in the DFA (see dfa_post_advance_cycle).  */
  start_sequence ();
  emit_insn (gen_ib ());
  sched_ib.insn = get_insns ();
  end_sequence ();
}
6241
 
6242
/* Scheduling pass is now finished.  Free/reset static variables
   allocated in m68k_sched_md_init_global.  */
static void
m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
                             int verbose ATTRIBUTE_UNUSED)
{
  sched_ib.insn = NULL;

  free (sched_adjust_cost_state);
  sched_adjust_cost_state = NULL;

  sched_mem_unit_code = 0;

  free (sched_ib.records.adjust);
  sched_ib.records.adjust = NULL;
  sched_ib.records.n_insns = 0;
  max_insn_size = 0;

  free (sched_branch_type);
  sched_branch_type = NULL;
}
6262
 
6263
/* Implementation of targetm.sched.md_init () hook.
   It is invoked each time scheduler starts on the new block (basic block or
   extended basic block).  Resets the instruction-buffer model to its
   per-block initial state.  */
static void
m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
                    int sched_verbose ATTRIBUTE_UNUSED,
                    int n_insns ATTRIBUTE_UNUSED)
{
  switch (m68k_sched_cpu)
    {
    case CPU_CFV1:
    case CPU_CFV2:
      /* V1/V2 buffer holds 6 instruction words.  */
      sched_ib.size = 6;
      break;

    case CPU_CFV3:
      sched_ib.size = sched_ib.records.n_insns * max_insn_size;

      memset (sched_ib.records.adjust, 0,
              sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
      sched_ib.records.adjust_index = 0;
      break;

    case CPU_CFV4:
      gcc_assert (!sched_ib.enabled_p);
      sched_ib.size = 0;
      break;

    default:
      gcc_unreachable ();
    }

  if (sched_ib.enabled_p)
    /* haifa-sched.c: schedule_block () calls advance_cycle () just before
       the first cycle.  Workaround that.  */
    sched_ib.filled = -2;
}
6300
 
6301
/* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
   It is invoked just before current cycle finishes and is used here
   to track if instruction buffer got its two words this cycle: when
   the memory unit was idle, the fetch pipeline delivered two words.  */
static void
m68k_sched_dfa_pre_advance_cycle (void)
{
  if (!sched_ib.enabled_p)
    return;

  if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
    {
      sched_ib.filled += 2;

      /* The buffer cannot hold more than its (possibly shrunk) size.  */
      if (sched_ib.filled > sched_ib.size)
        sched_ib.filled = sched_ib.size;
    }
}
6318
 
6319
/* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6320
   It is invoked just after new cycle begins and is used here
6321
   to setup number of filled words in the instruction buffer so that
6322
   instructions which won't have all their words prefetched would be
6323
   stalled for a cycle.  */
6324
static void
6325
m68k_sched_dfa_post_advance_cycle (void)
6326
{
6327
  int i;
6328
 
6329
  if (!sched_ib.enabled_p)
6330
    return;
6331
 
6332
  /* Setup number of prefetched instruction words in the instruction
6333
     buffer.  */
6334
  i = max_insn_size - sched_ib.filled;
6335
 
6336
  while (--i >= 0)
6337
    {
6338
      if (state_transition (curr_state, sched_ib.insn) >= 0)
6339
        gcc_unreachable ();
6340
    }
6341
}
6342
 
6343
/* Return X or Y (depending on OPX_P) operand of INSN,
6344
   if it is an integer register, or NULL overwise.  */
6345
static rtx
6346
sched_get_reg_operand (rtx insn, bool opx_p)
6347
{
6348
  rtx op = NULL;
6349
 
6350
  if (opx_p)
6351
    {
6352
      if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6353
        {
6354
          op = sched_get_operand (insn, true);
6355
          gcc_assert (op != NULL);
6356
 
6357
          if (!reload_completed && !REG_P (op))
6358
            return NULL;
6359
        }
6360
    }
6361
  else
6362
    {
6363
      if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6364
        {
6365
          op = sched_get_operand (insn, false);
6366
          gcc_assert (op != NULL);
6367
 
6368
          if (!reload_completed && !REG_P (op))
6369
            return NULL;
6370
        }
6371
    }
6372
 
6373
  return op;
6374
}
6375
 
6376
/* Return true, if X or Y (depending on OPX_P) operand of INSN
6377
   is a MEM.  */
6378
static bool
6379
sched_mem_operand_p (rtx insn, bool opx_p)
6380
{
6381
  switch (sched_get_opxy_mem_type (insn, opx_p))
6382
    {
6383
    case OP_TYPE_MEM1:
6384
    case OP_TYPE_MEM6:
6385
      return true;
6386
 
6387
    default:
6388
      return false;
6389
    }
6390
}
6391
 
6392
/* Return X or Y (depending on OPX_P) operand of INSN,
6393
   if it is a MEM, or NULL overwise.  */
6394
static rtx
6395
sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p)
6396
{
6397
  bool opx_p;
6398
  bool opy_p;
6399
 
6400
  opx_p = false;
6401
  opy_p = false;
6402
 
6403
  if (must_read_p)
6404
    {
6405
      opx_p = true;
6406
      opy_p = true;
6407
    }
6408
 
6409
  if (must_write_p)
6410
    {
6411
      opx_p = true;
6412
      opy_p = false;
6413
    }
6414
 
6415
  if (opy_p && sched_mem_operand_p (insn, false))
6416
    return sched_get_operand (insn, false);
6417
 
6418
  if (opx_p && sched_mem_operand_p (insn, true))
6419
    return sched_get_operand (insn, true);
6420
 
6421
  gcc_unreachable ();
6422
  return NULL;
6423
}
6424
 
6425
/* Return non-zero if PRO modifies register used as part of
6426
   address in CON.  */
6427
int
6428
m68k_sched_address_bypass_p (rtx pro, rtx con)
6429
{
6430
  rtx pro_x;
6431
  rtx con_mem_read;
6432
 
6433
  pro_x = sched_get_reg_operand (pro, true);
6434
  if (pro_x == NULL)
6435
    return 0;
6436
 
6437
  con_mem_read = sched_get_mem_operand (con, true, false);
6438
  gcc_assert (con_mem_read != NULL);
6439
 
6440
  if (reg_mentioned_p (pro_x, con_mem_read))
6441
    return 1;
6442
 
6443
  return 0;
6444
}
6445
 
6446
/* Helper function for m68k_sched_indexed_address_bypass_p.
6447
   if PRO modifies register used as index in CON,
6448
   return scale of indexed memory access in CON.  Return zero overwise.  */
6449
static int
6450
sched_get_indexed_address_scale (rtx pro, rtx con)
6451
{
6452
  rtx reg;
6453
  rtx mem;
6454
  struct m68k_address address;
6455
 
6456
  reg = sched_get_reg_operand (pro, true);
6457
  if (reg == NULL)
6458
    return 0;
6459
 
6460
  mem = sched_get_mem_operand (con, true, false);
6461
  gcc_assert (mem != NULL && MEM_P (mem));
6462
 
6463
  if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
6464
                               &address))
6465
    gcc_unreachable ();
6466
 
6467
  if (REGNO (reg) == REGNO (address.index))
6468
    {
6469
      gcc_assert (address.scale != 0);
6470
      return address.scale;
6471
    }
6472
 
6473
  return 0;
6474
}
6475
 
6476
/* Return non-zero if PRO modifies register used
6477
   as index with scale 2 or 4 in CON.  */
6478
int
6479
m68k_sched_indexed_address_bypass_p (rtx pro, rtx con)
6480
{
6481
  gcc_assert (sched_cfv4_bypass_data.pro == NULL
6482
              && sched_cfv4_bypass_data.con == NULL
6483
              && sched_cfv4_bypass_data.scale == 0);
6484
 
6485
  switch (sched_get_indexed_address_scale (pro, con))
6486
    {
6487
    case 1:
6488
      /* We can't have a variable latency bypass, so
6489
         remember to adjust the insn cost in adjust_cost hook.  */
6490
      sched_cfv4_bypass_data.pro = pro;
6491
      sched_cfv4_bypass_data.con = con;
6492
      sched_cfv4_bypass_data.scale = 1;
6493
      return 0;
6494
 
6495
    case 2:
6496
    case 4:
6497
      return 1;
6498
 
6499
    default:
6500
      return 0;
6501
    }
6502
}
6503
 
6504
/* We generate a two-instructions program at M_TRAMP :
6505
        movea.l &CHAIN_VALUE,%a0
6506
        jmp FNADDR
6507
   where %a0 can be modified by changing STATIC_CHAIN_REGNUM.  */
6508
 
6509
static void
6510
m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
6511
{
6512
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
6513
  rtx mem;
6514
 
6515
  gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));
6516
 
6517
  mem = adjust_address (m_tramp, HImode, 0);
6518
  emit_move_insn (mem, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM-8) << 9)));
6519
  mem = adjust_address (m_tramp, SImode, 2);
6520
  emit_move_insn (mem, chain_value);
6521
 
6522
  mem = adjust_address (m_tramp, HImode, 6);
6523
  emit_move_insn (mem, GEN_INT(0x4EF9));
6524
  mem = adjust_address (m_tramp, SImode, 8);
6525
  emit_move_insn (mem, fnaddr);
6526
 
6527
  FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
6528
}
6529
 
6530
#include "gt-m68k.h"

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.