OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [trunk/] [gnu-dev/] [or1k-gcc/] [gcc/] [config/] [m68k/] [m68k.c] - Blame information for rev 709

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 709 jeremybenn
/* Subroutines for insn-output.c for Motorola 68000 family.
2
   Copyright (C) 1987, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3
   2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4
   Free Software Foundation, Inc.
5
 
6
This file is part of GCC.
7
 
8
GCC is free software; you can redistribute it and/or modify
9
it under the terms of the GNU General Public License as published by
10
the Free Software Foundation; either version 3, or (at your option)
11
any later version.
12
 
13
GCC is distributed in the hope that it will be useful,
14
but WITHOUT ANY WARRANTY; without even the implied warranty of
15
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16
GNU General Public License for more details.
17
 
18
You should have received a copy of the GNU General Public License
19
along with GCC; see the file COPYING3.  If not see
20
<http://www.gnu.org/licenses/>.  */
21
 
22
#include "config.h"
23
#include "system.h"
24
#include "coretypes.h"
25
#include "tm.h"
26
#include "tree.h"
27
#include "rtl.h"
28
#include "function.h"
29
#include "regs.h"
30
#include "hard-reg-set.h"
31
#include "insn-config.h"
32
#include "conditions.h"
33
#include "output.h"
34
#include "insn-attr.h"
35
#include "recog.h"
36
#include "diagnostic-core.h"
37
#include "expr.h"
38
#include "reload.h"
39
#include "tm_p.h"
40
#include "target.h"
41
#include "target-def.h"
42
#include "debug.h"
43
#include "flags.h"
44
#include "df.h"
45
/* ??? Need to add a dependency between m68k.o and sched-int.h.  */
46
#include "sched-int.h"
47
#include "insn-codes.h"
48
#include "ggc.h"
49
#include "opts.h"
50
#include "optabs.h"
51
 
52
/* Map each hard register number to its register class: registers 0-7
   are the data registers (DATA_REGS), 8-15 the address registers
   (ADDR_REGS) and 16-23 the FPU registers (FP_REGS) -- the save loops
   in m68k_compute_frame_layout rely on exactly this numbering.  The
   final entry covers register 24, which is classed with the address
   registers (presumably the fake argument pointer -- confirm against
   the register definitions in m68k.h).  */
enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};
62
 
63
 
64
/* The minimum number of integer registers that we want to save with the
65
   movem instruction.  Using two movel instructions instead of a single
66
   moveml is about 15% faster for the 68020 and 68030 at no expense in
67
   code size.  */
68
#define MIN_MOVEM_REGS 3
69
 
70
/* The minimum number of floating point registers that we want to save
71
   with the fmovem instruction.  */
72
#define MIN_FMOVEM_REGS 1
73
 
74
/* Structure describing stack frame layout.  */
75
struct m68k_frame
76
{
77
  /* Stack pointer to frame pointer offset.  */
78
  HOST_WIDE_INT offset;
79
 
80
  /* Offset of FPU registers.  */
81
  HOST_WIDE_INT foffset;
82
 
83
  /* Frame size in bytes (rounded up).  */
84
  HOST_WIDE_INT size;
85
 
86
  /* Data and address register.  */
87
  int reg_no;
88
  unsigned int reg_mask;
89
 
90
  /* FPU registers.  */
91
  int fpu_no;
92
  unsigned int fpu_mask;
93
 
94
  /* Offsets relative to ARG_POINTER.  */
95
  HOST_WIDE_INT frame_pointer_offset;
96
  HOST_WIDE_INT stack_pointer_offset;
97
 
98
  /* Function which the above information refers to.  */
99
  int funcdef_no;
100
};
101
 
102
/* Current frame information calculated by m68k_compute_frame_layout().  */
103
static struct m68k_frame current_frame;
104
 
105
/* Structure describing an m68k address.
106
 
107
   If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
108
   with null fields evaluating to 0.  Here:
109
 
110
   - BASE satisfies m68k_legitimate_base_reg_p
111
   - INDEX satisfies m68k_legitimate_index_reg_p
112
   - OFFSET satisfies m68k_legitimate_constant_address_p
113
 
114
   INDEX is either HImode or SImode.  The other fields are SImode.
115
 
116
   If CODE is PRE_DEC, the address is -(BASE).  If CODE is POST_INC,
117
   the address is (BASE)+.  */
118
struct m68k_address {
119
  enum rtx_code code;
120
  rtx base;
121
  rtx index;
122
  rtx offset;
123
  int scale;
124
};
125
 
126
static int m68k_sched_adjust_cost (rtx, rtx, rtx, int);
127
static int m68k_sched_issue_rate (void);
128
static int m68k_sched_variable_issue (FILE *, int, rtx, int);
129
static void m68k_sched_md_init_global (FILE *, int, int);
130
static void m68k_sched_md_finish_global (FILE *, int);
131
static void m68k_sched_md_init (FILE *, int, int);
132
static void m68k_sched_dfa_pre_advance_cycle (void);
133
static void m68k_sched_dfa_post_advance_cycle (void);
134
static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
135
 
136
static bool m68k_can_eliminate (const int, const int);
137
static void m68k_conditional_register_usage (void);
138
static bool m68k_legitimate_address_p (enum machine_mode, rtx, bool);
139
static void m68k_option_override (void);
140
static void m68k_override_options_after_change (void);
141
static rtx find_addr_reg (rtx);
142
static const char *singlemove_string (rtx *);
143
static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
144
                                          HOST_WIDE_INT, tree);
145
static rtx m68k_struct_value_rtx (tree, int);
146
static tree m68k_handle_fndecl_attribute (tree *node, tree name,
147
                                          tree args, int flags,
148
                                          bool *no_add_attrs);
149
static void m68k_compute_frame_layout (void);
150
static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
151
static bool m68k_ok_for_sibcall_p (tree, tree);
152
static bool m68k_tls_symbol_p (rtx);
153
static rtx m68k_legitimize_address (rtx, rtx, enum machine_mode);
154
static bool m68k_rtx_costs (rtx, int, int, int, int *, bool);
155
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
156
static bool m68k_return_in_memory (const_tree, const_tree);
157
#endif
158
static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
159
static void m68k_trampoline_init (rtx, tree, rtx);
160
static int m68k_return_pops_args (tree, tree, int);
161
static rtx m68k_delegitimize_address (rtx);
162
static void m68k_function_arg_advance (cumulative_args_t, enum machine_mode,
163
                                       const_tree, bool);
164
static rtx m68k_function_arg (cumulative_args_t, enum machine_mode,
165
                              const_tree, bool);
166
static bool m68k_cannot_force_const_mem (enum machine_mode mode, rtx x);
167
static bool m68k_output_addr_const_extra (FILE *, rtx);
168
static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;
169
 
170
/* Initialize the GCC target structure.  */
171
 
172
#if INT_OP_GROUP == INT_OP_DOT_WORD
173
#undef TARGET_ASM_ALIGNED_HI_OP
174
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
175
#endif
176
 
177
#if INT_OP_GROUP == INT_OP_NO_DOT
178
#undef TARGET_ASM_BYTE_OP
179
#define TARGET_ASM_BYTE_OP "\tbyte\t"
180
#undef TARGET_ASM_ALIGNED_HI_OP
181
#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
182
#undef TARGET_ASM_ALIGNED_SI_OP
183
#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
184
#endif
185
 
186
#if INT_OP_GROUP == INT_OP_DC
187
#undef TARGET_ASM_BYTE_OP
188
#define TARGET_ASM_BYTE_OP "\tdc.b\t"
189
#undef TARGET_ASM_ALIGNED_HI_OP
190
#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
191
#undef TARGET_ASM_ALIGNED_SI_OP
192
#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
193
#endif
194
 
195
#undef TARGET_ASM_UNALIGNED_HI_OP
196
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
197
#undef TARGET_ASM_UNALIGNED_SI_OP
198
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
199
 
200
#undef TARGET_ASM_OUTPUT_MI_THUNK
201
#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
202
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
203
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
204
 
205
#undef TARGET_ASM_FILE_START_APP_OFF
206
#define TARGET_ASM_FILE_START_APP_OFF true
207
 
208
#undef TARGET_LEGITIMIZE_ADDRESS
209
#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address
210
 
211
#undef TARGET_SCHED_ADJUST_COST
212
#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
213
 
214
#undef TARGET_SCHED_ISSUE_RATE
215
#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate
216
 
217
#undef TARGET_SCHED_VARIABLE_ISSUE
218
#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
219
 
220
#undef TARGET_SCHED_INIT_GLOBAL
221
#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global
222
 
223
#undef TARGET_SCHED_FINISH_GLOBAL
224
#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global
225
 
226
#undef TARGET_SCHED_INIT
227
#define TARGET_SCHED_INIT m68k_sched_md_init
228
 
229
#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
230
#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle
231
 
232
#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
233
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
234
 
235
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
236
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD        \
237
  m68k_sched_first_cycle_multipass_dfa_lookahead
238
 
239
#undef TARGET_OPTION_OVERRIDE
240
#define TARGET_OPTION_OVERRIDE m68k_option_override
241
 
242
#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
243
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change
244
 
245
#undef TARGET_RTX_COSTS
246
#define TARGET_RTX_COSTS m68k_rtx_costs
247
 
248
#undef TARGET_ATTRIBUTE_TABLE
249
#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
250
 
251
#undef TARGET_PROMOTE_PROTOTYPES
252
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
253
 
254
#undef TARGET_STRUCT_VALUE_RTX
255
#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
256
 
257
#undef TARGET_CANNOT_FORCE_CONST_MEM
258
#define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem
259
 
260
#undef TARGET_FUNCTION_OK_FOR_SIBCALL
261
#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
262
 
263
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
264
#undef TARGET_RETURN_IN_MEMORY
265
#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
266
#endif
267
 
268
#ifdef HAVE_AS_TLS
269
#undef TARGET_HAVE_TLS
270
#define TARGET_HAVE_TLS (true)
271
 
272
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
273
#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
274
#endif
275
 
276
#undef TARGET_LEGITIMATE_ADDRESS_P
277
#define TARGET_LEGITIMATE_ADDRESS_P     m68k_legitimate_address_p
278
 
279
#undef TARGET_CAN_ELIMINATE
280
#define TARGET_CAN_ELIMINATE m68k_can_eliminate
281
 
282
#undef TARGET_CONDITIONAL_REGISTER_USAGE
283
#define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage
284
 
285
#undef TARGET_TRAMPOLINE_INIT
286
#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init
287
 
288
#undef TARGET_RETURN_POPS_ARGS
289
#define TARGET_RETURN_POPS_ARGS m68k_return_pops_args
290
 
291
#undef TARGET_DELEGITIMIZE_ADDRESS
292
#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address
293
 
294
#undef TARGET_FUNCTION_ARG
295
#define TARGET_FUNCTION_ARG m68k_function_arg
296
 
297
#undef TARGET_FUNCTION_ARG_ADVANCE
298
#define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance
299
 
300
#undef TARGET_LEGITIMATE_CONSTANT_P
301
#define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p
302
 
303
#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
304
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra
305
 
306
/* The value stored by TAS.  */
307
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
308
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128
309
 
310
static const struct attribute_spec m68k_attribute_table[] =
311
{
312
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
313
       affects_type_identity } */
314
  { "interrupt", 0, 0, true,  false, false, m68k_handle_fndecl_attribute,
315
    false },
316
  { "interrupt_handler", 0, 0, true,  false, false,
317
    m68k_handle_fndecl_attribute, false },
318
  { "interrupt_thread", 0, 0, true,  false, false,
319
    m68k_handle_fndecl_attribute, false },
320
  { NULL,                0, 0, false, false, false, NULL, false }
321
};
322
 
323
struct gcc_target targetm = TARGET_INITIALIZER;
324
 
325
/* Base flags for 68k ISAs.  */
326
#define FL_FOR_isa_00    FL_ISA_68000
327
#define FL_FOR_isa_10    (FL_FOR_isa_00 | FL_ISA_68010)
328
/* FL_68881 controls the default setting of -m68881.  gcc has traditionally
329
   generated 68881 code for 68020 and 68030 targets unless explicitly told
330
   not to.  */
331
#define FL_FOR_isa_20    (FL_FOR_isa_10 | FL_ISA_68020 \
332
                          | FL_BITFIELD | FL_68881 | FL_CAS)
333
#define FL_FOR_isa_40    (FL_FOR_isa_20 | FL_ISA_68040)
334
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)
335
 
336
/* Base flags for ColdFire ISAs.  */
337
#define FL_FOR_isa_a     (FL_COLDFIRE | FL_ISA_A)
338
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
339
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
340
#define FL_FOR_isa_b     (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
341
/* ISA_C is not upwardly compatible with ISA_B.  */
342
#define FL_FOR_isa_c     (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)
343
 
344
enum m68k_isa
345
{
346
  /* Traditional 68000 instruction sets.  */
347
  isa_00,
348
  isa_10,
349
  isa_20,
350
  isa_40,
351
  isa_cpu32,
352
  /* ColdFire instruction set variants.  */
353
  isa_a,
354
  isa_aplus,
355
  isa_b,
356
  isa_c,
357
  isa_max
358
};
359
 
360
/* Information about one of the -march, -mcpu or -mtune arguments.  */
361
struct m68k_target_selection
362
{
363
  /* The argument being described.  */
364
  const char *name;
365
 
366
  /* For -mcpu, this is the device selected by the option.
367
     For -mtune and -march, it is a representative device
368
     for the microarchitecture or ISA respectively.  */
369
  enum target_device device;
370
 
371
  /* The M68K_DEVICE fields associated with DEVICE.  See the comment
372
     in m68k-devices.def for details.  FAMILY is only valid for -mcpu.  */
373
  const char *family;
374
  enum uarch_type microarch;
375
  enum m68k_isa isa;
376
  unsigned long flags;
377
};
378
 
379
/* A list of all devices in m68k-devices.def.  Used for -mcpu selection.  */
380
static const struct m68k_target_selection all_devices[] =
381
{
382
#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
383
  { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
384
#include "m68k-devices.def"
385
#undef M68K_DEVICE
386
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
387
};
388
 
389
/* A list of all ISAs, mapping each one to a representative device.
390
   Used for -march selection.  */
391
static const struct m68k_target_selection all_isas[] =
392
{
393
#define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
394
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
395
#include "m68k-isas.def"
396
#undef M68K_ISA
397
  { NULL,       unk_device, NULL,  unk_arch, isa_max,   0 }
398
};
399
 
400
/* A list of all microarchitectures, mapping each one to a representative
401
   device.  Used for -mtune selection.  */
402
static const struct m68k_target_selection all_microarchs[] =
403
{
404
#define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
405
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
406
#include "m68k-microarchs.def"
407
#undef M68K_MICROARCH
408
  { NULL,       unk_device, NULL,  unk_arch,  isa_max, 0 }
409
};
410
 
411
/* The entries associated with the -mcpu, -march and -mtune settings,
412
   or null for options that have not been used.  */
413
const struct m68k_target_selection *m68k_cpu_entry;
414
const struct m68k_target_selection *m68k_arch_entry;
415
const struct m68k_target_selection *m68k_tune_entry;
416
 
417
/* Which CPU we are generating code for.  */
418
enum target_device m68k_cpu;
419
 
420
/* Which microarchitecture to tune for.  */
421
enum uarch_type m68k_tune;
422
 
423
/* Which FPU to use.  */
424
enum fpu_type m68k_fpu;
425
 
426
/* The set of FL_* flags that apply to the target processor.  */
427
unsigned int m68k_cpu_flags;
428
 
429
/* The set of FL_* flags that apply to the processor to be tuned for.  */
430
unsigned int m68k_tune_flags;
431
 
432
/* Asm templates for calling or jumping to an arbitrary symbolic address,
433
   or NULL if such calls or jumps are not supported.  The address is held
434
   in operand 0.  */
435
const char *m68k_symbolic_call;
436
const char *m68k_symbolic_jump;
437
 
438
/* Enum variable that corresponds to m68k_symbolic_call values.  */
439
enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
440
 
441
 
442
/* Implement TARGET_OPTION_OVERRIDE.  */
443
 
444
static void
445
m68k_option_override (void)
446
{
447
  const struct m68k_target_selection *entry;
448
  unsigned long target_mask;
449
 
450
  if (global_options_set.x_m68k_arch_option)
451
    m68k_arch_entry = &all_isas[m68k_arch_option];
452
 
453
  if (global_options_set.x_m68k_cpu_option)
454
    m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];
455
 
456
  if (global_options_set.x_m68k_tune_option)
457
    m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];
458
 
459
  /* User can choose:
460
 
461
     -mcpu=
462
     -march=
463
     -mtune=
464
 
465
     -march=ARCH should generate code that runs any processor
466
     implementing architecture ARCH.  -mcpu=CPU should override -march
467
     and should generate code that runs on processor CPU, making free
468
     use of any instructions that CPU understands.  -mtune=UARCH applies
469
     on top of -mcpu or -march and optimizes the code for UARCH.  It does
470
     not change the target architecture.  */
471
  if (m68k_cpu_entry)
472
    {
473
      /* Complain if the -march setting is for a different microarchitecture,
474
         or includes flags that the -mcpu setting doesn't.  */
475
      if (m68k_arch_entry
476
          && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
477
              || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
478
        warning (0, "-mcpu=%s conflicts with -march=%s",
479
                 m68k_cpu_entry->name, m68k_arch_entry->name);
480
 
481
      entry = m68k_cpu_entry;
482
    }
483
  else
484
    entry = m68k_arch_entry;
485
 
486
  if (!entry)
487
    entry = all_devices + TARGET_CPU_DEFAULT;
488
 
489
  m68k_cpu_flags = entry->flags;
490
 
491
  /* Use the architecture setting to derive default values for
492
     certain flags.  */
493
  target_mask = 0;
494
 
495
  /* ColdFire is lenient about alignment.  */
496
  if (!TARGET_COLDFIRE)
497
    target_mask |= MASK_STRICT_ALIGNMENT;
498
 
499
  if ((m68k_cpu_flags & FL_BITFIELD) != 0)
500
    target_mask |= MASK_BITFIELD;
501
  if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
502
    target_mask |= MASK_CF_HWDIV;
503
  if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
504
    target_mask |= MASK_HARD_FLOAT;
505
  target_flags |= target_mask & ~target_flags_explicit;
506
 
507
  /* Set the directly-usable versions of the -mcpu and -mtune settings.  */
508
  m68k_cpu = entry->device;
509
  if (m68k_tune_entry)
510
    {
511
      m68k_tune = m68k_tune_entry->microarch;
512
      m68k_tune_flags = m68k_tune_entry->flags;
513
    }
514
#ifdef M68K_DEFAULT_TUNE
515
  else if (!m68k_cpu_entry && !m68k_arch_entry)
516
    {
517
      enum target_device dev;
518
      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
519
      m68k_tune_flags = all_devices[dev]->flags;
520
    }
521
#endif
522
  else
523
    {
524
      m68k_tune = entry->microarch;
525
      m68k_tune_flags = entry->flags;
526
    }
527
 
528
  /* Set the type of FPU.  */
529
  m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
530
              : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
531
              : FPUTYPE_68881);
532
 
533
  /* Sanity check to ensure that msep-data and mid-sahred-library are not
534
   * both specified together.  Doing so simply doesn't make sense.
535
   */
536
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
537
    error ("cannot specify both -msep-data and -mid-shared-library");
538
 
539
  /* If we're generating code for a separate A5 relative data segment,
540
   * we've got to enable -fPIC as well.  This might be relaxable to
541
   * -fpic but it hasn't been tested properly.
542
   */
543
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
544
    flag_pic = 2;
545
 
546
  /* -mpcrel -fPIC uses 32-bit pc-relative displacements.  Raise an
547
     error if the target does not support them.  */
548
  if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
549
    error ("-mpcrel -fPIC is not currently supported on selected cpu");
550
 
551
  /* ??? A historic way of turning on pic, or is this intended to
552
     be an embedded thing that doesn't have the same name binding
553
     significance that it does on hosted ELF systems?  */
554
  if (TARGET_PCREL && flag_pic == 0)
555
    flag_pic = 1;
556
 
557
  if (!flag_pic)
558
    {
559
      m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;
560
 
561
      m68k_symbolic_jump = "jra %a0";
562
    }
563
  else if (TARGET_ID_SHARED_LIBRARY)
564
    /* All addresses must be loaded from the GOT.  */
565
    ;
566
  else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
567
    {
568
      if (TARGET_PCREL)
569
        m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
570
      else
571
        m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;
572
 
573
      if (TARGET_ISAC)
574
        /* No unconditional long branch */;
575
      else if (TARGET_PCREL)
576
        m68k_symbolic_jump = "bra%.l %c0";
577
      else
578
        m68k_symbolic_jump = "bra%.l %p0";
579
      /* Turn off function cse if we are doing PIC.  We always want
580
         function call to be done as `bsr foo@PLTPC'.  */
581
      /* ??? It's traditional to do this for -mpcrel too, but it isn't
582
         clear how intentional that is.  */
583
      flag_no_function_cse = 1;
584
    }
585
 
586
  switch (m68k_symbolic_call_var)
587
    {
588
    case M68K_SYMBOLIC_CALL_JSR:
589
      m68k_symbolic_call = "jsr %a0";
590
      break;
591
 
592
    case M68K_SYMBOLIC_CALL_BSR_C:
593
      m68k_symbolic_call = "bsr%.l %c0";
594
      break;
595
 
596
    case M68K_SYMBOLIC_CALL_BSR_P:
597
      m68k_symbolic_call = "bsr%.l %p0";
598
      break;
599
 
600
    case M68K_SYMBOLIC_CALL_NONE:
601
      gcc_assert (m68k_symbolic_call == NULL);
602
      break;
603
 
604
    default:
605
      gcc_unreachable ();
606
    }
607
 
608
#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
609
  if (align_labels > 2)
610
    {
611
      warning (0, "-falign-labels=%d is not supported", align_labels);
612
      align_labels = 0;
613
    }
614
  if (align_loops > 2)
615
    {
616
      warning (0, "-falign-loops=%d is not supported", align_loops);
617
      align_loops = 0;
618
    }
619
#endif
620
 
621
  SUBTARGET_OVERRIDE_OPTIONS;
622
 
623
  /* Setup scheduling options.  */
624
  if (TUNE_CFV1)
625
    m68k_sched_cpu = CPU_CFV1;
626
  else if (TUNE_CFV2)
627
    m68k_sched_cpu = CPU_CFV2;
628
  else if (TUNE_CFV3)
629
    m68k_sched_cpu = CPU_CFV3;
630
  else if (TUNE_CFV4)
631
    m68k_sched_cpu = CPU_CFV4;
632
  else
633
    {
634
      m68k_sched_cpu = CPU_UNKNOWN;
635
      flag_schedule_insns = 0;
636
      flag_schedule_insns_after_reload = 0;
637
      flag_modulo_sched = 0;
638
    }
639
 
640
  if (m68k_sched_cpu != CPU_UNKNOWN)
641
    {
642
      if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
643
        m68k_sched_mac = MAC_CF_EMAC;
644
      else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
645
        m68k_sched_mac = MAC_CF_MAC;
646
      else
647
        m68k_sched_mac = MAC_NO;
648
    }
649
}
650
 
651
/* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE.  */
652
 
653
static void
654
m68k_override_options_after_change (void)
655
{
656
  if (m68k_sched_cpu == CPU_UNKNOWN)
657
    {
658
      flag_schedule_insns = 0;
659
      flag_schedule_insns_after_reload = 0;
660
      flag_modulo_sched = 0;
661
    }
662
}
663
 
664
/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
665
   given argument and NAME is the argument passed to -mcpu.  Return NULL
666
   if -mcpu was not passed.  */
667
 
668
const char *
669
m68k_cpp_cpu_ident (const char *prefix)
670
{
671
  if (!m68k_cpu_entry)
672
    return NULL;
673
  return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
674
}
675
 
676
/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
677
   given argument and NAME is the name of the representative device for
678
   the -mcpu argument's family.  Return NULL if -mcpu was not passed.  */
679
 
680
const char *
681
m68k_cpp_cpu_family (const char *prefix)
682
{
683
  if (!m68k_cpu_entry)
684
    return NULL;
685
  return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
686
}
687
 
688
/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
689
   "interrupt_handler" attribute and interrupt_thread if FUNC has an
690
   "interrupt_thread" attribute.  Otherwise, return
691
   m68k_fk_normal_function.  */
692
 
693
enum m68k_function_kind
694
m68k_get_function_kind (tree func)
695
{
696
  tree a;
697
 
698
  gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
699
 
700
  a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
701
  if (a != NULL_TREE)
702
    return m68k_fk_interrupt_handler;
703
 
704
  a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
705
  if (a != NULL_TREE)
706
    return m68k_fk_interrupt_handler;
707
 
708
  a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
709
  if (a != NULL_TREE)
710
    return m68k_fk_interrupt_thread;
711
 
712
  return m68k_fk_normal_function;
713
}
714
 
715
/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
716
   struct attribute_spec.handler.  */
717
static tree
718
m68k_handle_fndecl_attribute (tree *node, tree name,
719
                              tree args ATTRIBUTE_UNUSED,
720
                              int flags ATTRIBUTE_UNUSED,
721
                              bool *no_add_attrs)
722
{
723
  if (TREE_CODE (*node) != FUNCTION_DECL)
724
    {
725
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
726
               name);
727
      *no_add_attrs = true;
728
    }
729
 
730
  if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
731
    {
732
      error ("multiple interrupt attributes not allowed");
733
      *no_add_attrs = true;
734
    }
735
 
736
  if (!TARGET_FIDOA
737
      && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
738
    {
739
      error ("interrupt_thread is available only on fido");
740
      *no_add_attrs = true;
741
    }
742
 
743
  return NULL_TREE;
744
}
745
 
746
/* Compute the layout of the current function's stack frame and cache
   it in current_frame: the rounded local-frame size, which integer
   and FPU registers need saving (as counts and as bit masks suitable
   for movem/fmovem), and the resulting save-area offsets.  */
static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask;
  enum m68k_function_kind func_kind =
    m68k_get_function_kind (current_function_decl);
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  /* Round the local variable area up to a multiple of 4 bytes.  */
  current_frame.size = (get_frame_size () + 3) & -4;

  mask = saved = 0;

  /* Scan the 16 integer registers (0-15).
     Interrupt thread does not need to save any register.  */
  if (!interrupt_thread)
    for (regno = 0; regno < 16; regno++)
      if (m68k_save_reg (regno, interrupt_handler))
        {
          /* Bit I of the mask corresponds to register D0_REG + I.  */
          mask |= 1 << (regno - D0_REG);
          saved++;
        }
  /* Integer registers are saved 4 bytes each.  */
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;

  current_frame.foffset = 0;
  mask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      /* Scan the 8 FPU registers (16-23).
         Interrupt thread does not need to save any register.  */
      if (!interrupt_thread)
        for (regno = 16; regno < 24; regno++)
          if (m68k_save_reg (regno, interrupt_handler))
            {
              /* Bit I of the mask corresponds to register FP0_REG + I.  */
              mask |= 1 << (regno - FP0_REG);
              saved++;
            }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      /* Total register-save size includes the FPU save area.  */
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}
799
 
800
/* Worker function for TARGET_CAN_ELIMINATE.  */
801
 
802
bool
803
m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
804
{
805
  return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
806
}
807
 
808
HOST_WIDE_INT
809
m68k_initial_elimination_offset (int from, int to)
810
{
811
  int argptr_offset;
812
  /* The arg pointer points 8 bytes before the start of the arguments,
813
     as defined by FIRST_PARM_OFFSET.  This makes it coincident with the
814
     frame pointer in most frames.  */
815
  argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
816
  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
817
    return argptr_offset;
818
 
819
  m68k_compute_frame_layout ();
820
 
821
  gcc_assert (to == STACK_POINTER_REGNUM);
822
  switch (from)
823
    {
824
    case ARG_POINTER_REGNUM:
825
      return current_frame.offset + current_frame.size - argptr_offset;
826
    case FRAME_POINTER_REGNUM:
827
      return current_frame.offset + current_frame.size;
828
    default:
829
      gcc_unreachable ();
830
    }
831
}
832
 
833
/* Refer to the array `regs_ever_live' to determine which registers
   to save; `regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This function is responsible for
   knowing which registers should not be saved even if used.
   Return true if we need to save REGNO.

   Note: the order of the tests matters.  The PIC-register and
   EH-return checks come before the fixed_regs check, so those
   registers are saved even when marked fixed.  */

static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  /* The PIC register must be preserved whenever the function may
     need it.  */
  if (flag_pic && regno == PIC_REG)
    {
      if (crtl->saves_all_registers)
        return true;
      if (crtl->uses_pic_offset_table)
        return true;
      /* Reload may introduce constant pool references into a function
         that thitherto didn't need a PIC register.  Note that the test
         above will not catch that case because we will only set
         crtl->uses_pic_offset_table when emitting
         the address reloads.  */
      if (crtl->uses_const_pool)
        return true;
    }

  /* Functions that call __builtin_eh_return must preserve the EH data
     registers (the EH_RETURN_DATA_REGNO list is terminated by
     INVALID_REGNUM).  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
        {
          unsigned int test = EH_RETURN_DATA_REGNO (i);
          if (test == INVALID_REGNUM)
            break;
          if (test == regno)
            return true;
        }
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (df_regs_ever_live_p (regno))
        return true;

      if (!current_function_is_leaf && call_used_regs[regno])
        return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!df_regs_ever_live_p (regno))
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_regs[regno];
}
896
 
897
/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
898
   the lowest memory address.  COUNT is the number of registers to be
899
   moved, with register REGNO + I being moved if bit I of MASK is set.
900
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
901
   whether or not this is pre-decrement (if STORE_P) or post-increment
902
   (if !STORE_P) operation.  */
903
 
904
static rtx
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
                 unsigned int count, unsigned int regno,
                 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  enum machine_mode mode;

  /* One SET per moved register, plus one extra SET for the stack
     adjustment when ADJUST_STACK_P.  */
  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  /* The raw mode of the first register is used for every move;
     callers pass a contiguous class of registers (data/address or FP).  */
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      /* The pointer moves down by the block size for a store (push)
         and up for a load (pop).  */
      src = plus_constant (base, (count
                                  * GET_MODE_SIZE (mode)
                                  * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
    }

  /* Walk MASK from bit 0 upward; bit I selects register REGNO + I.  */
  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
        addr = plus_constant (base, offset);
        /* operands[0] is the SET destination and operands[1] the source,
           so these boolean indices give mem<-reg when STORE_P and
           reg<-mem otherwise.  */
        operands[!store_p] = gen_frame_mem (mode, addr);
        operands[store_p] = gen_rtx_REG (mode, regno);
        XVECEXP (body, 0, i++)
          = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
        offset += GET_MODE_SIZE (mode);
      }
  /* Every slot allocated for the PARALLEL must have been filled.  */
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
939
 
940
/* Make INSN a frame-related instruction.  */
941
 
942
static void
943
m68k_set_frame_related (rtx insn)
944
{
945
  rtx body;
946
  int i;
947
 
948
  RTX_FRAME_RELATED_P (insn) = 1;
949
  body = PATTERN (insn);
950
  if (GET_CODE (body) == PARALLEL)
951
    for (i = 0; i < XVECLEN (body, 0); i++)
952
      RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
953
}
954
 
955
/* Emit RTL for the "prologue" define_expand.  */
956
 
957
void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  /* Record the static stack size for -fstack-usage.  */
  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
        {
          /* The symbolic limit is not usable directly; materialize it
             in %d0 first.  */
          emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
          limit = gen_rtx_REG (Pmode, D0_REG);
        }
      /* Trap if the stack pointer is already below the limit.  */
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
                                            stack_pointer_rtx, limit),
                               stack_pointer_rtx, limit,
                               const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
         addressing.  Add the size of movem saves to the initial stack
         allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
        fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
        fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
        {
          /* On the 68040, two separate moves are faster than link.w 0.  */
          dest = gen_frame_mem (Pmode,
                                gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
          m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
          m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
                                                  stack_pointer_rtx));
        }
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
        /* A single link insn covers the whole allocation; link.w is
           limited to a 16-bit displacement, link.l (68020+) is not.  */
        m68k_set_frame_related
          (emit_insn (gen_link (frame_pointer_rtx,
                                GEN_INT (-4 - fsize_with_regs))));
      else
        {
          /* Displacement too big for link.w: link with 0 extra space,
             then adjust the stack pointer separately.  */
          m68k_set_frame_related
            (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
          m68k_set_frame_related
            (emit_insn (gen_addsi3 (stack_pointer_rtx,
                                    stack_pointer_rtx,
                                    GEN_INT (-fsize_with_regs))));
        }

      /* If the frame pointer is needed, emit a special barrier that
         will prevent the scheduler from moving stores to the frame
         before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    /* No frame pointer: just drop the stack pointer.  */
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
                              stack_pointer_rtx,
                              GEN_INT (-fsize_with_regs))));

  /* Save call-saved FP registers, if any.  */
  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
        /* 68881 fmovem can pre-decrement; save below the current SP.  */
        m68k_set_frame_related
          (m68k_emit_movem (stack_pointer_rtx,
                            current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
                            current_frame.fpu_no, FP0_REG,
                            current_frame.fpu_mask, true, true));
      else
        {
          int offset;

          /* If we're using moveml to save the integer registers,
             the stack pointer will point to the bottom of the moveml
             save area.  Find the stack offset of the first FP register.  */
          if (current_frame.reg_no < MIN_MOVEM_REGS)
            offset = 0;
          else
            offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
          m68k_set_frame_related
            (m68k_emit_movem (stack_pointer_rtx, offset,
                              current_frame.fpu_no, FP0_REG,
                              current_frame.fpu_mask, true, false));
        }
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
        emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
                                              stack_limit_rtx),
                                 stack_pointer_rtx, stack_limit_rtx,
                                 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
        warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
        if (current_frame.reg_mask & (1 << i))
          {
            src = gen_rtx_REG (SImode, D0_REG + i);
            dest = gen_frame_mem (SImode,
                                  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
            m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
          }
    }
  else
    {
      if (TARGET_COLDFIRE)
        /* The required register save space has already been allocated.
           The first register should be stored at (%sp).  */
        m68k_set_frame_related
          (m68k_emit_movem (stack_pointer_rtx, 0,
                            current_frame.reg_no, D0_REG,
                            current_frame.reg_mask, true, false));
      else
        m68k_set_frame_related
          (m68k_emit_movem (stack_pointer_rtx,
                            current_frame.reg_no * -GET_MODE_SIZE (SImode),
                            current_frame.reg_no, D0_REG,
                            current_frame.reg_mask, true, true));
    }

  /* Load the GOT pointer if this function uses PIC and the data
     segment is not shared between processes.  */
  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
1110
 
1111
/* Return true if a simple (return) instruction is sufficient for this
1112
   instruction (i.e. if no epilogue is needed).  */
1113
 
1114
bool
1115
m68k_use_return_insn (void)
1116
{
1117
  if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
1118
    return false;
1119
 
1120
  m68k_compute_frame_layout ();
1121
  return current_frame.offset == 0;
1122
}
1123
 
1124
/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
1125
   SIBCALL_P says which.
1126
 
1127
   The function epilogue should not depend on the current stack pointer!
1128
   It should use the frame pointer only, if there is a frame pointer.
1129
   This is mandatory because of alloca; we also take advantage of it to
1130
   omit stack adjustments before returning.  */
1131
 
1132
void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME : current_function_is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
                     || (!cfun->calls_alloca
                         && current_function_is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
         addressing.  Add the size of movem loads to the final deallocation
         instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
        fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
        fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  /* Restore offsets of 0x8000 or more do not fit in a 16-bit
     displacement; arrange an alternative addressing strategy.  */
  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
          && (current_frame.reg_no >= MIN_MOVEM_REGS
              || current_frame.fpu_no >= MIN_FMOVEM_REGS))
        {
          /* ColdFire's move multiple instructions do not support the
             (d8,Ax,Xi) addressing mode, so we're as well using a normal
             stack-based restore.  Point SP at the save area first.  */
          emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
                          GEN_INT (-(current_frame.offset + fsize)));
          emit_insn (gen_addsi3 (stack_pointer_rtx,
                                 gen_rtx_REG (Pmode, A1_REG),
                                 frame_pointer_rtx));
          restore_from_sp = true;
        }
      else
        {
          /* Keep the large displacement in %a1 and use the indexed
             (d8,%fp,%a1.l) addressing mode for each restore ("big").  */
          emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
          fsize = 0;
          big = true;
        }
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
        if (current_frame.reg_mask & (1 << i))
          {
            rtx addr;

            if (big)
              {
                /* Generate the address -OFFSET(%fp,%a1.l).  */
                addr = gen_rtx_REG (Pmode, A1_REG);
                addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
                addr = plus_constant (addr, -offset);
              }
            else if (restore_from_sp)
              addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
            else
              addr = plus_constant (frame_pointer_rtx, -offset);
            emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
                            gen_frame_mem (SImode, addr));
            offset -= GET_MODE_SIZE (SImode);
          }
    }
  else if (current_frame.reg_mask)
    {
      /* Bulk-restore the integer registers with a single moveml.  */
      if (big)
        m68k_emit_movem (gen_rtx_PLUS (Pmode,
                                       gen_rtx_REG (Pmode, A1_REG),
                                       frame_pointer_rtx),
                         -(current_frame.offset + fsize),
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false, false);
      else if (restore_from_sp)
        /* Post-increment only on non-ColdFire; ColdFire adds the save
           size to the final SP adjustment instead (see above).  */
        m68k_emit_movem (stack_pointer_rtx, 0,
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false,
                         !TARGET_COLDFIRE);
      else
        m68k_emit_movem (frame_pointer_rtx,
                         -(current_frame.offset + fsize),
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      /* Restore the FP registers with fmovem, mirroring the cases above.  */
      if (big)
        m68k_emit_movem (gen_rtx_PLUS (Pmode,
                                       gen_rtx_REG (Pmode, A1_REG),
                                       frame_pointer_rtx),
                         -(current_frame.foffset + fsize),
                         current_frame.fpu_no, FP0_REG,
                         current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
        {
          if (TARGET_COLDFIRE)
            {
              int offset;

              /* If we used moveml to restore the integer registers, the
                 stack pointer will still point to the bottom of the moveml
                 save area.  Find the stack offset of the first FP
                 register.  */
              if (current_frame.reg_no < MIN_MOVEM_REGS)
                offset = 0;
              else
                offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
              m68k_emit_movem (stack_pointer_rtx, offset,
                               current_frame.fpu_no, FP0_REG,
                               current_frame.fpu_mask, false, false);
            }
          else
            m68k_emit_movem (stack_pointer_rtx, 0,
                             current_frame.fpu_no, FP0_REG,
                             current_frame.fpu_mask, false, true);
        }
      else
        m68k_emit_movem (frame_pointer_rtx,
                         -(current_frame.foffset + fsize),
                         current_frame.fpu_no, FP0_REG,
                         current_frame.fpu_mask, false, false);
    }

  /* Tear down the frame: unlk restores both %fp and %sp; otherwise pop
     any remaining allocation explicitly.  */
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
                           stack_pointer_rtx,
                           GEN_INT (fsize_with_regs)));

  /* Apply the dynamic stack adjustment required by __builtin_eh_return.  */
  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
                           stack_pointer_rtx,
                           EH_RETURN_STACKADJ_RTX));

  /* A sibcall epilogue falls through to the sibling call itself.  */
  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
1294
 
1295
/* Return true if X is a valid comparison operator for the dbcc
1296
   instruction.
1297
 
1298
   Note it rejects floating point comparison operators.
1299
   (In the future we could use Fdbcc).
1300
 
1301
   It also rejects some comparisons when CC_NO_OVERFLOW is set.  */
1302
 
1303
int
1304
valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
1305
{
1306
  switch (GET_CODE (x))
1307
    {
1308
      case EQ: case NE: case GTU: case LTU:
1309
      case GEU: case LEU:
1310
        return 1;
1311
 
1312
      /* Reject some when CC_NO_OVERFLOW is set.  This may be over
1313
         conservative */
1314
      case GT: case LT: case GE: case LE:
1315
        return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
1316
      default:
1317
        return 0;
1318
    }
1319
}
1320
 
1321
/* Return nonzero if flags are currently in the 68881 flag register.  */
1322
int
1323
flags_in_68881 (void)
1324
{
1325
  /* We could add support for these in the future */
1326
  return cc_status.flags & CC_IN_68881;
1327
}
1328
 
1329
/* Return true if PARALLEL contains register REGNO.  */
1330
static bool
1331
m68k_reg_present_p (const_rtx parallel, unsigned int regno)
1332
{
1333
  int i;
1334
 
1335
  if (REG_P (parallel) && REGNO (parallel) == regno)
1336
    return true;
1337
 
1338
  if (GET_CODE (parallel) != PARALLEL)
1339
    return false;
1340
 
1341
  for (i = 0; i < XVECLEN (parallel, 0); ++i)
1342
    {
1343
      const_rtx x;
1344
 
1345
      x = XEXP (XVECEXP (parallel, 0, i), 0);
1346
      if (REG_P (x) && REGNO (x) == regno)
1347
        return true;
1348
    }
1349
 
1350
  return false;
1351
}
1352
 
1353
/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P.  */
1354
 
1355
static bool
1356
m68k_ok_for_sibcall_p (tree decl, tree exp)
1357
{
1358
  enum m68k_function_kind kind;
1359
 
1360
  /* We cannot use sibcalls for nested functions because we use the
1361
     static chain register for indirect calls.  */
1362
  if (CALL_EXPR_STATIC_CHAIN (exp))
1363
    return false;
1364
 
1365
  if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
1366
    {
1367
      /* Check that the return value locations are the same.  For
1368
         example that we aren't returning a value from the sibling in
1369
         a D0 register but then need to transfer it to a A0 register.  */
1370
      rtx cfun_value;
1371
      rtx call_value;
1372
 
1373
      cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
1374
                                   cfun->decl);
1375
      call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);
1376
 
1377
      /* Check that the values are equal or that the result the callee
1378
         function returns is superset of what the current function returns.  */
1379
      if (!(rtx_equal_p (cfun_value, call_value)
1380
            || (REG_P (cfun_value)
1381
                && m68k_reg_present_p (call_value, REGNO (cfun_value)))))
1382
        return false;
1383
    }
1384
 
1385
  kind = m68k_get_function_kind (current_function_decl);
1386
  if (kind == m68k_fk_normal_function)
1387
    /* We can always sibcall from a normal function, because it's
1388
       undefined if it is calling an interrupt function.  */
1389
    return true;
1390
 
1391
  /* Otherwise we can only sibcall if the function kind is known to be
1392
     the same.  */
1393
  if (decl && m68k_get_function_kind (decl) == kind)
1394
    return true;
1395
 
1396
  return false;
1397
}
1398
 
1399
/* On the m68k all args are always pushed.  */
1400
 
1401
static rtx
m68k_function_arg (cumulative_args_t cum ATTRIBUTE_UNUSED,
                   enum machine_mode mode ATTRIBUTE_UNUSED,
                   const_tree type ATTRIBUTE_UNUSED,
                   bool named ATTRIBUTE_UNUSED)
{
  /* Returning NULL_RTX tells the middle end to pass the argument on the
     stack; the m68k ABI never passes arguments in registers.  */
  return NULL_RTX;
}
1409
 
1410
static void
1411
m68k_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
1412
                           const_tree type, bool named ATTRIBUTE_UNUSED)
1413
{
1414
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1415
 
1416
  *cum += (mode != BLKmode
1417
           ? (GET_MODE_SIZE (mode) + 3) & ~3
1418
           : (int_size_in_bytes (type) + 3) & ~3);
1419
}
1420
 
1421
/* Convert X to a legitimate function call memory reference and return the
1422
   result.  */
1423
 
1424
rtx
1425
m68k_legitimize_call_address (rtx x)
1426
{
1427
  gcc_assert (MEM_P (x));
1428
  if (call_operand (XEXP (x, 0), VOIDmode))
1429
    return x;
1430
  return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
1431
}
1432
 
1433
/* Likewise for sibling calls.  */
1434
 
1435
rtx
1436
m68k_legitimize_sibcall_address (rtx x)
1437
{
1438
  gcc_assert (MEM_P (x));
1439
  if (sibcall_operand (XEXP (x, 0), VOIDmode))
1440
    return x;
1441
 
1442
  emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
1443
  return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
1444
}
1445
 
1446
/* Convert X to a legitimate address and return it if successful.  Otherwise
1447
   return X.
1448
 
1449
   For the 68000, we handle X+REG by loading X into a register R and
1450
   using R+REG.  R will go in an address reg and indexing will be used.
1451
   However, if REG is a broken-out memory address or multiplication,
1452
   nothing needs to be done because REG can certainly go in an address reg.  */
1453
 
1454
static rtx
m68k_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
{
  /* TLS references always take the dedicated TLS legitimization path.  */
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      /* CH tracks whether X differs from the incoming address (it starts
         out true if a prior pass already rewrote it); COPIED tracks
         whether X has been unshared so it may be modified in place.  */
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      /* Break a multiplication (index scaling) out of either operand so
         that the rest can go in an address register.  */
      if (GET_CODE (XEXP (x, 0)) == MULT)
        {
          COPY_ONCE (x);
          XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
        }
      if (GET_CODE (XEXP (x, 1)) == MULT)
        {
          COPY_ONCE (x);
          XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
        }
      if (ch)
        {
          if (GET_CODE (XEXP (x, 1)) == REG
              && GET_CODE (XEXP (x, 0)) == REG)
            {
              /* REG + REG is legitimate, except that ColdFire FPU
                 memory operands cannot use indexed addressing.  */
              if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
                {
                  COPY_ONCE (x);
                  x = force_operand (x, 0);
                }
              return x;
            }
          if (memory_address_p (mode, x))
            return x;
        }
      /* If one operand is already a register (or a sign-extended HImode
         register, which indexing supports), load the other operand into
         a fresh register to obtain REG + REG.  */
      if (GET_CODE (XEXP (x, 0)) == REG
          || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
              && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
              && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
        {
          rtx temp = gen_reg_rtx (Pmode);
          rtx val = force_operand (XEXP (x, 1), 0);
          emit_move_insn (temp, val);
          COPY_ONCE (x);
          XEXP (x, 1) = temp;
          /* Again, ColdFire FPU operands cannot use REG + REG.  */
          if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
              && GET_CODE (XEXP (x, 0)) == REG)
            x = force_operand (x, 0);
        }
      else if (GET_CODE (XEXP (x, 1)) == REG
               || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
                   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
                   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
        {
          /* Mirror image of the case above, with the operands swapped.  */
          rtx temp = gen_reg_rtx (Pmode);
          rtx val = force_operand (XEXP (x, 0), 0);
          emit_move_insn (temp, val);
          COPY_ONCE (x);
          XEXP (x, 0) = temp;
          if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
              && GET_CODE (XEXP (x, 1)) == REG)
            x = force_operand (x, 0);
        }
    }

  return x;
}
1524
 
1525
 
1526
/* Output a dbCC; jCC sequence.  Note we do not handle the
1527
   floating point version of this sequence (Fdbcc).  We also
1528
   do not handle alternative conditions when CC_NO_OVERFLOW is
1529
   set.  It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
1530
   kick those out before we get here.  */
1531
 
1532
void
1533
output_dbcc_and_branch (rtx *operands)
1534
{
1535
  switch (GET_CODE (operands[3]))
1536
    {
1537
      case EQ:
1538
        output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
1539
        break;
1540
 
1541
      case NE:
1542
        output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
1543
        break;
1544
 
1545
      case GT:
1546
        output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
1547
        break;
1548
 
1549
      case GTU:
1550
        output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
1551
        break;
1552
 
1553
      case LT:
1554
        output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
1555
        break;
1556
 
1557
      case LTU:
1558
        output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
1559
        break;
1560
 
1561
      case GE:
1562
        output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
1563
        break;
1564
 
1565
      case GEU:
1566
        output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
1567
        break;
1568
 
1569
      case LE:
1570
        output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
1571
        break;
1572
 
1573
      case LEU:
1574
        output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
1575
        break;
1576
 
1577
      default:
1578
        gcc_unreachable ();
1579
    }
1580
 
1581
  /* If the decrement is to be done in SImode, then we have
1582
     to compensate for the fact that dbcc decrements in HImode.  */
1583
  switch (GET_MODE (operands[0]))
1584
    {
1585
      case SImode:
1586
        output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
1587
        break;
1588
 
1589
      case HImode:
1590
        break;
1591
 
1592
      default:
1593
        gcc_unreachable ();
1594
    }
1595
}
1596
 
1597
const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  /* loperands[0]/[1] are the high/low words of operand1; likewise
     loperands[2]/[3] for operand2 when it is not a constant zero.  */
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
        loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
        loperands[3] = adjust_address (operand2, SImode, 4);
    }
  /* loperands[4] labels the point reached when the high words already
     decide the comparison (they compare unequal).  */
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      /* Comparison against zero: tst.l does not work on address
         registers before the 68020, so fall back to cmp.w #0.  */
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
        output_asm_insn ("tst%.l %0", loperands);
      else
        output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
        output_asm_insn ("tst%.l %1", loperands);
      else
        output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  /* loperands[5] receives the scc result byte.  For the signed ordered
     comparisons the condition differs depending on whether the high
     words decided (signed condition at %l4) or the low words did
     (unsigned condition, then skip over %l4 via label %l6).  */
  loperands[5] = dest;

  switch (op_code)
    {
      case EQ:
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("seq %5", loperands);
        break;

      case NE:
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("sne %5", loperands);
        break;

      case GT:
        loperands[6] = gen_label_rtx ();
        output_asm_insn ("shi %5\n\tjra %l6", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("sgt %5", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[6]));
        break;

      case GTU:
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("shi %5", loperands);
        break;

      case LT:
        loperands[6] = gen_label_rtx ();
        output_asm_insn ("scs %5\n\tjra %l6", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("slt %5", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[6]));
        break;

      case LTU:
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("scs %5", loperands);
        break;

      case GE:
        loperands[6] = gen_label_rtx ();
        output_asm_insn ("scc %5\n\tjra %l6", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("sge %5", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[6]));
        break;

      case GEU:
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("scc %5", loperands);
        break;

      case LE:
        loperands[6] = gen_label_rtx ();
        output_asm_insn ("sls %5\n\tjra %l6", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("sle %5", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[6]));
        break;

      case LEU:
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                           CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("sls %5", loperands);
        break;

      default:
        gcc_unreachable ();
    }
  return "";
}
1733
 
1734
/* Output the assembler template for a bit-test instruction.
   OPERANDS[0] and OPERANDS[1] are set to COUNTOP (the bit number) and
   DATAOP (the value under test) respectively.  INSN is the insn being
   output; SIGNPOS appears to be the bit number of the sign bit of the
   addressed storage unit, also usable as a bit-number mask (e.g. 7 for
   a byte) -- TODO confirm at callers.  Sets cc_status to describe the
   condition-code effect of whatever template is returned.  */

const char *
output_btst (rtx *operands, rtx countop, rtx dataop, rtx insn, int signpos)
{
  operands[0] = countop;
  operands[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      register int count = INTVAL (countop);
      /* If COUNT is bigger than size of storage unit in use,
         advance to the containing unit of same size.  */
      if (count > signpos)
        {
          /* Whole storage units skipped, in bytes.  */
          int offset = (count & ~signpos) / 8;
          count = count & signpos;
          operands[1] = dataop = adjust_address (dataop, QImode, offset);
        }
      /* Testing the sign bit sets N from the bit itself; otherwise only
         Z is meaningful.  */
      if (count == signpos)
        cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
      else
        cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;

      /* These three statements used to use next_insns_test_no...
         but it appears that this should do the same job.  */
      if (count == 31
          && next_insn_tests_no_inequality (insn))
        return "tst%.l %1";
      if (count == 15
          && next_insn_tests_no_inequality (insn))
        return "tst%.w %1";
      if (count == 7
          && next_insn_tests_no_inequality (insn))
        return "tst%.b %1";
      /* Try to use `movew to ccr' followed by the appropriate branch insn.
         On some m68k variants unfortunately that's slower than btst.
         On 68000 and higher, that should also work for all HImode operands. */
      if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
        {
          /* Bit 3 of a data register lands in the CCR's N flag.  */
          if (count == 3 && DATA_REG_P (operands[1])
              && next_insn_tests_no_inequality (insn))
            {
            cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
            return "move%.w %1,%%ccr";
            }
          /* Bit 2 lands in the Z flag, hence CC_INVERTED.  */
          if (count == 2 && DATA_REG_P (operands[1])
              && next_insn_tests_no_inequality (insn))
            {
            cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
            return "move%.w %1,%%ccr";
            }
          /* count == 1 followed by bvc/bvs and
             count == 0 followed by bcc/bcs are also possible, but need
             m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
        }

      cc_status.flags = CC_NOT_NEGATIVE;
    }
  return "btst %0,%1";
}
1793
 
1794
/* Return true if X is a legitimate base register.  STRICT_P says
1795
   whether we need strict checking.  */
1796
 
1797
bool
1798
m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1799
{
1800
  /* Allow SUBREG everywhere we allow REG.  This results in better code.  */
1801
  if (!strict_p && GET_CODE (x) == SUBREG)
1802
    x = SUBREG_REG (x);
1803
 
1804
  return (REG_P (x)
1805
          && (strict_p
1806
              ? REGNO_OK_FOR_BASE_P (REGNO (x))
1807
              : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
1808
}
1809
 
1810
/* Return true if X is a legitimate index register.  STRICT_P says
1811
   whether we need strict checking.  */
1812
 
1813
bool
1814
m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1815
{
1816
  if (!strict_p && GET_CODE (x) == SUBREG)
1817
    x = SUBREG_REG (x);
1818
 
1819
  return (REG_P (x)
1820
          && (strict_p
1821
              ? REGNO_OK_FOR_INDEX_P (REGNO (x))
1822
              : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
1823
}
1824
 
1825
/* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1826
   (bd,An,Xn) addressing mode.  Fill in the INDEX and SCALE fields of
1827
   ADDRESS if so.  STRICT_P says whether we need strict checking.  */
1828
 
1829
static bool
1830
m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1831
{
1832
  int scale;
1833
 
1834
  /* Check for a scale factor.  */
1835
  scale = 1;
1836
  if ((TARGET_68020 || TARGET_COLDFIRE)
1837
      && GET_CODE (x) == MULT
1838
      && GET_CODE (XEXP (x, 1)) == CONST_INT
1839
      && (INTVAL (XEXP (x, 1)) == 2
1840
          || INTVAL (XEXP (x, 1)) == 4
1841
          || (INTVAL (XEXP (x, 1)) == 8
1842
              && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1843
    {
1844
      scale = INTVAL (XEXP (x, 1));
1845
      x = XEXP (x, 0);
1846
    }
1847
 
1848
  /* Check for a word extension.  */
1849
  if (!TARGET_COLDFIRE
1850
      && GET_CODE (x) == SIGN_EXTEND
1851
      && GET_MODE (XEXP (x, 0)) == HImode)
1852
    x = XEXP (x, 0);
1853
 
1854
  if (m68k_legitimate_index_reg_p (x, strict_p))
1855
    {
1856
      address->scale = scale;
1857
      address->index = x;
1858
      return true;
1859
    }
1860
 
1861
  return false;
1862
}
1863
 
1864
/* Return true if X is an illegitimate symbolic constant.  */
1865
 
1866
bool
1867
m68k_illegitimate_symbolic_constant_p (rtx x)
1868
{
1869
  rtx base, offset;
1870
 
1871
  if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1872
    {
1873
      split_const (x, &base, &offset);
1874
      if (GET_CODE (base) == SYMBOL_REF
1875
          && !offset_within_block_p (base, INTVAL (offset)))
1876
        return true;
1877
    }
1878
  return m68k_tls_reference_p (x, false);
1879
}
1880
 
1881
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
m68k_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  /* A symbolic constant that is illegitimate as an operand is equally
     unusable as a constant-pool entry.  */
  return m68k_illegitimate_symbolic_constant_p (x);
}
1888
 
1889
/* Return true if X is a legitimate constant address that can reach
1890
   bytes in the range [X, X + REACH).  STRICT_P says whether we need
1891
   strict checking.  */
1892
 
1893
static bool
1894
m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1895
{
1896
  rtx base, offset;
1897
 
1898
  if (!CONSTANT_ADDRESS_P (x))
1899
    return false;
1900
 
1901
  if (flag_pic
1902
      && !(strict_p && TARGET_PCREL)
1903
      && symbolic_operand (x, VOIDmode))
1904
    return false;
1905
 
1906
  if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1907
    {
1908
      split_const (x, &base, &offset);
1909
      if (GET_CODE (base) == SYMBOL_REF
1910
          && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1911
        return false;
1912
    }
1913
 
1914
  return !m68k_tls_reference_p (x, false);
1915
}
1916
 
1917
/* Return true if X is a LABEL_REF for a jump table.  Assume that unplaced
1918
   labels will become jump tables.  */
1919
 
1920
static bool
1921
m68k_jump_table_ref_p (rtx x)
1922
{
1923
  if (GET_CODE (x) != LABEL_REF)
1924
    return false;
1925
 
1926
  x = XEXP (x, 0);
1927
  if (!NEXT_INSN (x) && !PREV_INSN (x))
1928
    return true;
1929
 
1930
  x = next_nonnote_insn (x);
1931
  return x && JUMP_TABLE_DATA_P (x);
1932
}
1933
 
1934
/* Return true if X is a legitimate address for values of mode MODE.
   STRICT_P says whether strict checking is needed.  If the address
   is valid, describe its components in *ADDRESS.

   The checks below are ordered: each m68k addressing mode is tried in
   turn and the first match wins, so *ADDRESS describes the simplest
   interpretation of X.  */

static bool
m68k_decompose_address (enum machine_mode mode, rtx x,
                        bool strict_p, struct m68k_address *address)
{
  /* Number of bytes the access spans; limits how large a displacement
     may be while keeping the last byte reachable.  */
  unsigned int reach;

  memset (address, 0, sizeof (*address));

  if (mode == BLKmode)
    reach = 1;
  else
    reach = GET_MODE_SIZE (mode);

  /* Check for (An) (mode 2).  */
  if (m68k_legitimate_base_reg_p (x, strict_p))
    {
      address->base = x;
      return true;
    }

  /* Check for -(An) and (An)+ (modes 3 and 4).  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->code = GET_CODE (x);
      address->base = XEXP (x, 0);
      return true;
    }

  /* Check for (d16,An) (mode 5).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Check for GOT loads.  These are (bd,An,Xn) addresses if
     TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
     addresses.  */
  if (GET_CODE (x) == PLUS
      && XEXP (x, 0) == pic_offset_table_rtx)
    {
      /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
         they are invalid in this context.  */
      if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
        {
          address->base = XEXP (x, 0);
          address->offset = XEXP (x, 1);
          return true;
        }
    }

  /* The ColdFire FPU only accepts addressing modes 2-5.  */
  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return false;

  /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
     check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
     All these modes are variations of mode 7.  */
  if (m68k_legitimate_constant_address_p (x, reach, strict_p))
    {
      address->offset = x;
      return true;
    }

  /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
     tablejumps.

     ??? do_tablejump creates these addresses before placing the target
     label, so we have to assume that unplaced labels are jump table
     references.  It seems unlikely that we would ever generate indexed
     accesses to unplaced labels in other cases.  */
  if (GET_CODE (x) == PLUS
      && m68k_jump_table_ref_p (XEXP (x, 1))
      && m68k_decompose_index (XEXP (x, 0), strict_p, address))
    {
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
     (bd,An,Xn.SIZE*SCALE) addresses.  */

  if (TARGET_68020)
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
          && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
        {
          address->offset = XEXP (x, 1);
          x = XEXP (x, 0);
        }

      /* Check for a suppressed index register.  */
      if (m68k_legitimate_base_reg_p (x, strict_p))
        {
          address->base = x;
          return true;
        }

      /* Check for a suppressed base register.  Do not allow this case
         for non-symbolic offsets as it effectively gives gcc freedom
         to treat data registers as base registers, which can generate
         worse code.  */
      if (address->offset
          && symbolic_operand (address->offset, VOIDmode)
          && m68k_decompose_index (x, strict_p, address))
        return true;
    }
  else
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
          && GET_CODE (XEXP (x, 1)) == CONST_INT
          && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
        {
          address->offset = XEXP (x, 1);
          x = XEXP (x, 0);
        }
    }

  /* We now expect the sum of a base and an index.  */
  if (GET_CODE (x) == PLUS)
    {
      if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
          && m68k_decompose_index (XEXP (x, 1), strict_p, address))
        {
          address->base = XEXP (x, 0);
          return true;
        }

      if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
          && m68k_decompose_index (XEXP (x, 0), strict_p, address))
        {
          address->base = XEXP (x, 1);
          return true;
        }
    }
  return false;
}
2082
 
2083
/* Return true if X is a legitimate address for values of mode MODE.
2084
   STRICT_P says whether strict checking is needed.  */
2085
 
2086
bool
2087
m68k_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
2088
{
2089
  struct m68k_address address;
2090
 
2091
  return m68k_decompose_address (mode, x, strict_p, &address);
2092
}
2093
 
2094
/* Return true if X is a memory, describing its address in ADDRESS if so.
2095
   Apply strict checking if called during or after reload.  */
2096
 
2097
static bool
2098
m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2099
{
2100
  return (MEM_P (x)
2101
          && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2102
                                     reload_in_progress || reload_completed,
2103
                                     address));
2104
}
2105
 
2106
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  */
2107
 
2108
bool
2109
m68k_legitimate_constant_p (enum machine_mode mode, rtx x)
2110
{
2111
  return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2112
}
2113
 
2114
/* Return true if X matches the 'Q' constraint.  It must be a memory
2115
   with a base address and no constant offset or index.  */
2116
 
2117
bool
2118
m68k_matches_q_p (rtx x)
2119
{
2120
  struct m68k_address address;
2121
 
2122
  return (m68k_legitimate_mem_p (x, &address)
2123
          && address.code == UNKNOWN
2124
          && address.base
2125
          && !address.offset
2126
          && !address.index);
2127
}
2128
 
2129
/* Return true if X matches the 'U' constraint.  It must be a base address
2130
   with a constant offset and no index.  */
2131
 
2132
bool
2133
m68k_matches_u_p (rtx x)
2134
{
2135
  struct m68k_address address;
2136
 
2137
  return (m68k_legitimate_mem_p (x, &address)
2138
          && address.code == UNKNOWN
2139
          && address.base
2140
          && address.offset
2141
          && !address.index);
2142
}
2143
 
2144
/* Return GOT pointer.  */

static rtx
m68k_get_gp (void)
{
  /* Create the PIC register RTX lazily on first use.  */
  if (pic_offset_table_rtx == NULL_RTX)
    pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);

  /* Any function that asks for the GOT pointer uses it; record that
     so the prologue sets it up.  */
  crtl->uses_pic_offset_table = 1;

  return pic_offset_table_rtx;
}
2156
 
2157
/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
   wrappers.  */
enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
                  RELOC_TLSIE, RELOC_TLSLE };

/* True for every relocation kind except the (non-TLS) GOT one.  */
#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2163
 
2164
/* Wrap symbol X into unspec representing relocation RELOC.
   BASE_REG - register that should be added to the result.
   TEMP_REG - if non-null, temporary register.
   Returns either TEMP_REG (after emitting a load/add sequence) or a
   (plus BASE_REG (const (unspec ...))) expression with no code emitted.  */

static rtx
m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
{
  bool use_x_p;

  /* -mxgot governs GOT accesses (BASE_REG is the PIC register),
     -mxtls governs TLS accesses (any other base).  */
  use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;

  if (TARGET_COLDFIRE && use_x_p)
    /* When compiling with -mx{got, tls} switch the code will look like this:

       move.l <X>@<RELOC>,<TEMP_REG>
       add.l <BASE_REG>,<TEMP_REG>  */
    {
      /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
         to put @RELOC after reference.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
                          UNSPEC_RELOC32);
      x = gen_rtx_CONST (Pmode, x);

      if (temp_reg == NULL)
        {
          /* No temporary supplied: we must still be able to create
             a fresh pseudo.  */
          gcc_assert (can_create_pseudo_p ());
          temp_reg = gen_reg_rtx (Pmode);
        }

      /* Emit the 32-bit load followed by the base addition.  */
      emit_move_insn (temp_reg, x);
      emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
      x = temp_reg;
    }
  else
    {
      /* 16-bit form: build the address expression without emitting
         any instructions.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
                          UNSPEC_RELOC16);
      x = gen_rtx_CONST (Pmode, x);

      x = gen_rtx_PLUS (Pmode, base_reg, x);
    }

  return x;
}
2208
 
2209
/* Helper for m68k_unwrap_symbol.
   Strip an UNSPEC_RELOC16 (and, if UNWRAP_RELOC32_P, an UNSPEC_RELOC32)
   wrapper from ORIG, looking through an optional integer addend.
   Returns ORIG unchanged if it is not such a wrapper.
   Also, if unwrapping was successful (that is if (ORIG != <return value>)),
   sets *RELOC_PTR to relocation type for the symbol.  */

static rtx
m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
                      enum m68k_reloc *reloc_ptr)
{
  if (GET_CODE (orig) == CONST)
    {
      rtx x;
      enum m68k_reloc dummy;

      x = XEXP (orig, 0);

      /* Let callers pass NULL when they do not care about the
         relocation kind.  */
      if (reloc_ptr == NULL)
        reloc_ptr = &dummy;

      /* Handle an addend.  */
      if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
          && CONST_INT_P (XEXP (x, 1)))
        x = XEXP (x, 0);

      if (GET_CODE (x) == UNSPEC)
        {
          switch (XINT (x, 1))
            {
            case UNSPEC_RELOC16:
              /* Element 0 is the wrapped symbol, element 1 the
                 m68k_reloc code stored as a CONST_INT.  */
              orig = XVECEXP (x, 0, 0);
              *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
              break;

            case UNSPEC_RELOC32:
              if (unwrap_reloc32_p)
                {
                  orig = XVECEXP (x, 0, 0);
                  *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
                }
              break;

            default:
              break;
            }
        }
    }

  return orig;
}
2257
 
2258
/* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
   UNSPEC_RELOC32 wrappers.  Returns ORIG itself when it is not
   wrapped; the relocation kind is discarded.  */

rtx
m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
{
  return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
}
2266
 
2267
/* Helper for m68k_final_prescan_insn, used as a for_each_rtx callback.
   Rewrites (const (plus (unspec [SYM]) N)) into
   (const (unspec [(plus SYM N)])) in place.
   Returns -1 to stop for_each_rtx descending into a rewritten subtree,
   0 to continue the walk.  */

static int
m68k_final_prescan_insn_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *x_ptr;

  /* Only wrapped symbols (possibly with an addend) need fixing.  */
  if (m68k_unwrap_symbol (x, true) != x)
    /* For rationale of the below, see comment in m68k_final_prescan_insn.  */
    {
      rtx plus;

      gcc_assert (GET_CODE (x) == CONST);
      plus = XEXP (x, 0);

      if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
        {
          rtx unspec;
          rtx addend;

          unspec = XEXP (plus, 0);
          gcc_assert (GET_CODE (unspec) == UNSPEC);
          addend = XEXP (plus, 1);
          gcc_assert (CONST_INT_P (addend));

          /* We now have all the pieces, rearrange them.  */

          /* Move symbol to plus.  */
          XEXP (plus, 0) = XVECEXP (unspec, 0, 0);

          /* Move plus inside unspec.  */
          XVECEXP (unspec, 0, 0) = plus;

          /* Move unspec to top level of const.  */
          XEXP (x, 0) = unspec;
        }

      return -1;
    }

  return 0;
}
2309
 
2310
/* Prescan insn before outputing assembler for it.  */
2311
 
2312
void
2313
m68k_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
2314
                         rtx *operands, int n_operands)
2315
{
2316
  int i;
2317
 
2318
  /* Combine and, possibly, other optimizations may do good job
2319
     converting
2320
       (const (unspec [(symbol)]))
2321
     into
2322
       (const (plus (unspec [(symbol)])
2323
                    (const_int N))).
2324
     The problem with this is emitting @TLS or @GOT decorations.
2325
     The decoration is emitted when processing (unspec), so the
2326
     result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2327
 
2328
     It seems that the easiest solution to this is to convert such
2329
     operands to
2330
       (const (unspec [(plus (symbol)
2331
                             (const_int N))])).
2332
     Note, that the top level of operand remains intact, so we don't have
2333
     to patch up anything outside of the operand.  */
2334
 
2335
  for (i = 0; i < n_operands; ++i)
2336
    {
2337
      rtx op;
2338
 
2339
      op = operands[i];
2340
 
2341
      for_each_rtx (&op, m68k_final_prescan_insn_1, NULL);
2342
    }
2343
}
2344
 
2345
/* Move X to a register and add REG_EQUAL note pointing to ORIG.
   If REG is non-null, use it; generate new pseudo otherwise.
   Returns the register holding X.  */

static rtx
m68k_move_to_reg (rtx x, rtx orig, rtx reg)
{
  rtx insn;

  if (reg == NULL_RTX)
    {
      /* We may only create fresh pseudos before reload.  */
      gcc_assert (can_create_pseudo_p ());
      reg = gen_reg_rtx (Pmode);
    }

  insn = emit_move_insn (reg, x);
  /* Put a REG_EQUAL note on this insn, so that it can be optimized
     by loop.  */
  set_unique_reg_note (insn, REG_EQUAL, orig);

  return reg;
}
2366
 
2367
/* Does the same as m68k_wrap_symbol, but returns a memory reference to
2368
   GOT slot.  */
2369
 
2370
static rtx
2371
m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2372
{
2373
  x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2374
 
2375
  x = gen_rtx_MEM (Pmode, x);
2376
  MEM_READONLY_P (x) = 1;
2377
 
2378
  return x;
2379
}
2380
 
2381
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.

   An address is legitimized by making an indirect reference
   through the Global Offset Table with the name of the symbol
   used as an offset.

   The assembler and linker are responsible for placing the
   address of the symbol in the GOT.  The function prologue
   is responsible for initializing a5 to the starting address
   of the GOT.

   The assembler is also responsible for translating a symbol name
   into a constant displacement from the start of the GOT.

   A quick example may make things a little clearer:

   When not generating PIC code to store the value 12345 into _foo
   we would generate the following code:

        movel #12345, _foo

   When generating PIC two transformations are made.  First, the compiler
   loads the address of foo into a register.  So the first transformation makes:

        lea     _foo, a0
        movel   #12345, a0@

   The code in movsi will intercept the lea instruction and call this
   routine which will transform the instructions into:

        movel   a5@(_foo:w), a0
        movel   #12345, a0@


   That (in a nutshell) is how *all* symbol and label references are
   handled.  */

/* ORIG is the address to legitimize; MODE is unused.  REG receives any
   newly generated PIC address and must be non-null whenever code has to
   be emitted (asserted below).  Returns the legitimized address, which
   is ORIG itself when it is already position-independent.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
                        rtx reg)
{
  rtx pic_ref = orig;

  /* First handle a simple SYMBOL_REF or LABEL_REF */
  if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
    {
      gcc_assert (reg);

      /* Load the symbol's address from its GOT slot into REG.  */
      pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
      pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      /* Make sure this has not already been legitimized.  */
      if (m68k_unwrap_symbol (orig, true) != orig)
        return orig;

      gcc_assert (reg);

      /* legitimize both operands of the PLUS */
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Recursively legitimize; reuse REG for the second operand only
         when the first one did not consume it.  */
      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        pic_ref = plus_constant (base, INTVAL (orig));
      else
        pic_ref = gen_rtx_PLUS (Pmode, base, orig);
    }

  return pic_ref;
}
2460
 
2461
/* The __tls_get_addr symbol.  Cached across calls; GTY so the GC does
   not collect it.  */
static GTY(()) rtx m68k_tls_get_addr;

/* Return SYMBOL_REF for __tls_get_addr.  */

static rtx
m68k_get_tls_get_addr (void)
{
  /* Initialize the libfunc symbol lazily on first use.  */
  if (m68k_tls_get_addr == NULL_RTX)
    m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");

  return m68k_tls_get_addr;
}
2474
 
2475
/* Return libcall result in A0 instead of usual D0.  Temporarily set by
   the TLS call helpers below and read by the libcall-value hook.  */
static bool m68k_libcall_value_in_a0_p = false;

/* Emit instruction sequence that calls __tls_get_addr.  X is
   the TLS symbol we are referencing and RELOC is the symbol type to use
   (either TLSGD or TLSLDM).  EQV is the REG_EQUAL note for the sequence
   emitted.  A pseudo register with result of __tls_get_addr call is
   returned.  */

static rtx
m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
{
  rtx a0;
  rtx insns;
  rtx dest;

  /* Emit the call sequence.  */
  start_sequence ();

  /* FIXME: Unfortunately, emit_library_call_value does not
     consider (plus (%a5) (const (unspec))) to be a good enough
     operand for push, so it forces it into a register.  The bad
     thing about this is that combiner, due to copy propagation and other
     optimizations, sometimes can not later fix this.  As a consequence,
     additional register may be allocated resulting in a spill.
     For reference, see args processing loops in
     calls.c:emit_library_call_value_1.
     For testcase, see gcc.target/m68k/tls-{gd, ld}.c  */
  x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);

  /* __tls_get_addr() is not a libcall, but emitting a libcall_value
     is the simplest way of generating a call.  The difference between
     __tls_get_addr() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
                                Pmode, 1, x, Pmode);
  m68k_libcall_value_in_a0_p = false;

  insns = get_insns ();
  end_sequence ();

  /* Wrap the whole sequence in a libcall block so the optimizers can
     treat it as a single unit equivalent to EQV.  */
  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2525
 
2526
/* The __m68k_read_tp symbol.  Cached across calls; GTY so the GC does
   not collect it.  */
static GTY(()) rtx m68k_read_tp;

/* Return SYMBOL_REF for __m68k_read_tp.  */

static rtx
m68k_get_m68k_read_tp (void)
{
  /* Initialize the libfunc symbol lazily on first use.  */
  if (m68k_read_tp == NULL_RTX)
    m68k_read_tp = init_one_libfunc ("__m68k_read_tp");

  return m68k_read_tp;
}
2539
 
2540
/* Emit instruction sequence that calls __m68k_read_tp.
   A pseudo register with result of __m68k_read_tp call is returned.  */

static rtx
m68k_call_m68k_read_tp (void)
{
  rtx a0;
  rtx eqv;
  rtx insns;
  rtx dest;

  start_sequence ();

  /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
     is the simplest way of generating a call.  The difference between
     __m68k_read_tp() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  /* Emit the call sequence.  */
  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
                                Pmode, 0);
  m68k_libcall_value_in_a0_p = false;
  insns = get_insns ();
  end_sequence ();

  /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
     share the m68k_read_tp result with other IE/LE model accesses.  */
  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);

  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2577
 
2578
/* Return a legitimized address for accessing TLS SYMBOL_REF X.
   For explanations on instructions sequences see TLS/NPTL ABI for m68k and
   ColdFire.  */

rtx
m68k_legitimize_tls_address (rtx orig)
{
  switch (SYMBOL_REF_TLS_MODEL (orig))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      /* One __tls_get_addr call per symbol; the call result is the
         address itself.  */
      orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      {
        rtx eqv;
        rtx a0;
        rtx x;

        /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
           share the LDM result with other LD model accesses.  */
        eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                              UNSPEC_RELOC32);

        a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);

        /* Add the symbol's module-relative (TLSLDO) offset to the
           module base returned in A0.  */
        x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);

        if (can_create_pseudo_p ())
          x = m68k_move_to_reg (x, orig, NULL_RTX);

        orig = x;
        break;
      }

    case TLS_MODEL_INITIAL_EXEC:
      {
        rtx a0;
        rtx x;

        /* Read the thread pointer, then add the offset loaded from the
           symbol's GOT (TLSIE) slot.  */
        a0 = m68k_call_m68k_read_tp ();

        x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
        x = gen_rtx_PLUS (Pmode, x, a0);

        if (can_create_pseudo_p ())
          x = m68k_move_to_reg (x, orig, NULL_RTX);

        orig = x;
        break;
      }

    case TLS_MODEL_LOCAL_EXEC:
      {
        rtx a0;
        rtx x;

        /* Read the thread pointer and add the symbol's link-time
           (TLSLE) offset directly.  */
        a0 = m68k_call_m68k_read_tp ();

        x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);

        if (can_create_pseudo_p ())
          x = m68k_move_to_reg (x, orig, NULL_RTX);

        orig = x;
        break;
      }

    default:
      gcc_unreachable ();
    }

  return orig;
}
2652
 
2653
/* Return true if X is a TLS symbol.  */
2654
 
2655
static bool
2656
m68k_tls_symbol_p (rtx x)
2657
{
2658
  if (!TARGET_HAVE_TLS)
2659
    return false;
2660
 
2661
  if (GET_CODE (x) != SYMBOL_REF)
2662
    return false;
2663
 
2664
  return SYMBOL_REF_TLS_MODEL (x) != 0;
2665
}
2666
 
2667
/* Helper for m68k_tls_referenced_p.  */
2668
 
2669
static int
2670
m68k_tls_reference_p_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
2671
{
2672
  /* Note: this is not the same as m68k_tls_symbol_p.  */
2673
  if (GET_CODE (*x_ptr) == SYMBOL_REF)
2674
    return SYMBOL_REF_TLS_MODEL (*x_ptr) != 0 ? 1 : 0;
2675
 
2676
  /* Don't recurse into legitimate TLS references.  */
2677
  if (m68k_tls_reference_p (*x_ptr, true))
2678
    return -1;
2679
 
2680
  return 0;
2681
}
2682
 
2683
/* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2684
   though illegitimate one.
2685
   If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference.  */
2686
 
2687
bool
2688
m68k_tls_reference_p (rtx x, bool legitimate_p)
2689
{
2690
  if (!TARGET_HAVE_TLS)
2691
    return false;
2692
 
2693
  if (!legitimate_p)
2694
    return for_each_rtx (&x, m68k_tls_reference_p_1, NULL) == 1 ? true : false;
2695
  else
2696
    {
2697
      enum m68k_reloc reloc = RELOC_GOT;
2698
 
2699
      return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2700
              && TLS_RELOC_P (reloc));
2701
    }
2702
}
2703
 
2704
 
2705
 
2706
#define USE_MOVQ(i)     ((unsigned) ((i) + 128) <= 255)
2707
 
2708
/* Return the type of move that should be used for integer I.  */
2709
 
2710
M68K_CONST_METHOD
2711
m68k_const_method (HOST_WIDE_INT i)
2712
{
2713
  unsigned u;
2714
 
2715
  if (USE_MOVQ (i))
2716
    return MOVQ;
2717
 
2718
  /* The ColdFire doesn't have byte or word operations.  */
2719
  /* FIXME: This may not be useful for the m68060 either.  */
2720
  if (!TARGET_COLDFIRE)
2721
    {
2722
      /* if -256 < N < 256 but N is not in range for a moveq
2723
         N^ff will be, so use moveq #N^ff, dreg; not.b dreg.  */
2724
      if (USE_MOVQ (i ^ 0xff))
2725
        return NOTB;
2726
      /* Likewise, try with not.w */
2727
      if (USE_MOVQ (i ^ 0xffff))
2728
        return NOTW;
2729
      /* This is the only value where neg.w is useful */
2730
      if (i == -65408)
2731
        return NEGW;
2732
    }
2733
 
2734
  /* Try also with swap.  */
2735
  u = i;
2736
  if (USE_MOVQ ((u >> 16) | (u << 16)))
2737
    return SWAP;
2738
 
2739
  if (TARGET_ISAB)
2740
    {
2741
      /* Try using MVZ/MVS with an immediate value to load constants.  */
2742
      if (i >= 0 && i <= 65535)
2743
        return MVZ;
2744
      if (i >= -32768 && i <= 32767)
2745
        return MVS;
2746
    }
2747
 
2748
  /* Otherwise, use move.l */
2749
  return MOVL;
2750
}
2751
 
2752
/* Return the cost of moving constant I into a data register.  */
2753
 
2754
static int
2755
const_int_cost (HOST_WIDE_INT i)
2756
{
2757
  switch (m68k_const_method (i))
2758
    {
2759
    case MOVQ:
2760
      /* Constants between -128 and 127 are cheap due to moveq.  */
2761
      return 0;
2762
    case MVZ:
2763
    case MVS:
2764
    case NOTB:
2765
    case NOTW:
2766
    case NEGW:
2767
    case SWAP:
2768
      /* Constants easily generated by moveq + not.b/not.w/neg.w/swap.  */
2769
      return 1;
2770
    case MOVL:
2771
      return 2;
2772
    default:
2773
      gcc_unreachable ();
2774
    }
2775
}
2776
 
2777
/* Cost function for rtx expressions.  Sets *TOTAL to the estimated cost
   of X (whose rtx code is CODE, appearing inside OUTER_CODE) and returns
   true when *TOTAL is final; returning false lets the generic code cost
   the sub-expressions.  */

static bool
m68k_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                int *total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      /* Constant zero is super cheap due to clr instruction.  */
      if (x == const0_rtx)
        *total = 0;
      else
        *total = const_int_cost (INTVAL (x));
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 3;
      return true;

    case CONST_DOUBLE:
      /* Make 0.0 cheaper than other floating constants to
         encourage creating tstsf and tstdf insns.  */
      if (outer_code == COMPARE
          && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
        *total = 4;
      else
        *total = 5;
      return true;

    /* These are vaguely right for a 68020.  */
    /* The costs for long multiply have been adjusted to work properly
       in synth_mult on the 68020, relative to an average of the time
       for add and the time for shift, taking away a little more because
       sometimes move insns are needed.  */
    /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
       terms.  */
#define MULL_COST                               \
  (TUNE_68060 ? 2                               \
   : TUNE_68040 ? 5                             \
   : (TUNE_CFV2 && TUNE_EMAC) ? 3               \
   : (TUNE_CFV2 && TUNE_MAC) ? 4                \
   : TUNE_CFV2 ? 8                              \
   : TARGET_COLDFIRE ? 3 : 13)

#define MULW_COST                               \
  (TUNE_68060 ? 2                               \
   : TUNE_68040 ? 3                             \
   : TUNE_68000_10 ? 5                          \
   : (TUNE_CFV2 && TUNE_EMAC) ? 3               \
   : (TUNE_CFV2 && TUNE_MAC) ? 2                \
   : TUNE_CFV2 ? 8                              \
   : TARGET_COLDFIRE ? 2 : 8)

#define DIVW_COST                               \
  (TARGET_CF_HWDIV ? 11                         \
   : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)

    case PLUS:
      /* An lea costs about three times as much as a simple add.  */
      /* Recognize reg + (reg * {2,4,8}), i.e. a scaled-index address
         computable with a single lea.  */
      if (GET_MODE (x) == SImode
          && GET_CODE (XEXP (x, 1)) == REG
          && GET_CODE (XEXP (x, 0)) == MULT
          && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
          && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
          && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
              || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
              || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
        {
            /* lea an@(dx:l:i),am */
            *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
            return true;
        }
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (TUNE_68060)
        {
          *total = COSTS_N_INSNS(1);
          return true;
        }
      if (TUNE_68000_10)
        {
          /* On 68000/68010 the shift cost grows with the shift count.  */
          if (GET_CODE (XEXP (x, 1)) == CONST_INT)
            {
              if (INTVAL (XEXP (x, 1)) < 16)
                *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
              else
                /* We're using clrw + swap for these cases.  */
                *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
            }
          else
            *total = COSTS_N_INSNS (10); /* Worst case.  */
          return true;
        }
      /* A shift by a big integer takes an extra instruction.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && (INTVAL (XEXP (x, 1)) == 16))
        {
          *total = COSTS_N_INSNS (2);    /* clrw;swap */
          return true;
        }
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && !(INTVAL (XEXP (x, 1)) > 0
               && INTVAL (XEXP (x, 1)) <= 8))
        {
          *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3);      /* lsr #i,dn */
          return true;
        }
      return false;

    case MULT:
      /* A widening or narrow multiply can use the word form, mulu.w.  */
      if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
           || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
          && GET_MODE (x) == SImode)
        *total = COSTS_N_INSNS (MULW_COST);
      else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
        *total = COSTS_N_INSNS (MULW_COST);
      else
        *total = COSTS_N_INSNS (MULL_COST);
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
        *total = COSTS_N_INSNS (DIVW_COST);     /* div.w */
      else if (TARGET_CF_HWDIV)
        *total = COSTS_N_INSNS (18);
      else
        *total = COSTS_N_INSNS (43);            /* div.l */
      return true;

    case ZERO_EXTRACT:
      /* Bit-field extraction compared against a constant is free
         (btst-style sequences); otherwise defer to generic costing.  */
      if (outer_code == COMPARE)
        *total = 0;
      return false;

    default:
      return false;
    }
}
2922
 
2923
/* Return an instruction to move CONST_INT OPERANDS[1] into data register
   OPERANDS[0].  May rewrite OPERANDS[1] to the intermediate immediate
   actually loaded by the first insn of a two-insn sequence.  */

static const char *
output_move_const_into_data_reg (rtx *operands)
{
  HOST_WIDE_INT i;

  i = INTVAL (operands[1]);
  switch (m68k_const_method (i))
    {
    case MVZ:
      return "mvzw %1,%0";
    case MVS:
      return "mvsw %1,%0";
    case MOVQ:
      return "moveq %1,%0";
    case NOTB:
      /* Two-insn sequence; invalidate any tracked cc state and load
         the byte-complemented value for the moveq.  */
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xff);
      return "moveq %1,%0\n\tnot%.b %0";
    case NOTW:
      /* Likewise with a word-sized complement.  */
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xffff);
      return "moveq %1,%0\n\tnot%.w %0";
    case NEGW:
      /* Only used for i == -65408 (see m68k_const_method).  */
      CC_STATUS_INIT;
      return "moveq #-128,%0\n\tneg%.w %0";
    case SWAP:
      {
        unsigned u = i;

        /* Load the half-swapped value, then swap the 16-bit halves.  */
        operands[1] = GEN_INT ((u << 16) | (u >> 16));
        return "moveq %1,%0\n\tswap %0";
      }
    case MOVL:
      return "move%.l %1,%0";
    default:
      gcc_unreachable ();
    }
}
2964
 
2965
/* Return true if I can be handled by ISA B's mov3q instruction.  */
2966
 
2967
bool
2968
valid_mov3q_const (HOST_WIDE_INT i)
2969
{
2970
  return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
2971
}
2972
 
2973
/* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0],
   choosing among clr/mov3q/sub/moveq/move.w/pea based on the value and
   the destination's register class.  */

static const char *
output_move_simode_const (rtx *operands)
{
  rtx dest;
  HOST_WIDE_INT src;

  dest = operands[0];
  src = INTVAL (operands[1]);
  if (src == 0
      && (DATA_REG_P (dest) || MEM_P (dest))
      /* clr insns on 68000 read before writing.  */
      && ((TARGET_68010 || TARGET_COLDFIRE)
          || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
    return "clr%.l %0";
  else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
    return "mov3q%.l %1,%0";
  else if (src == 0 && ADDRESS_REG_P (dest))
    /* Zero an address register by subtracting it from itself.  */
    return "sub%.l %0,%0";
  else if (DATA_REG_P (dest))
    return output_move_const_into_data_reg (operands);
  else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
    {
      if (valid_mov3q_const (src))
        return "mov3q%.l %1,%0";
      /* A word move to an address register sign-extends to 32 bits.  */
      return "move%.w %1,%0";
    }
  else if (MEM_P (dest)
           && GET_CODE (XEXP (dest, 0)) == PRE_DEC
           && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
           && IN_RANGE (src, -0x8000, 0x7fff))
    {
      /* Pushing a small constant: pea pushes the (sign-extended)
         immediate as an address.  */
      if (valid_mov3q_const (src))
        return "mov3q%.l %1,%-";
      return "pea %a1";
    }
  return "move%.l %1,%0";
}
3013
 
3014
const char *
3015
output_move_simode (rtx *operands)
3016
{
3017
  if (GET_CODE (operands[1]) == CONST_INT)
3018
    return output_move_simode_const (operands);
3019
  else if ((GET_CODE (operands[1]) == SYMBOL_REF
3020
            || GET_CODE (operands[1]) == CONST)
3021
           && push_operand (operands[0], SImode))
3022
    return "pea %a1";
3023
  else if ((GET_CODE (operands[1]) == SYMBOL_REF
3024
            || GET_CODE (operands[1]) == CONST)
3025
           && ADDRESS_REG_P (operands[0]))
3026
    return "lea %a1,%0";
3027
  return "move%.l %1,%0";
3028
}
3029
 
3030
const char *
3031
output_move_himode (rtx *operands)
3032
{
3033
 if (GET_CODE (operands[1]) == CONST_INT)
3034
    {
3035
      if (operands[1] == const0_rtx
3036
          && (DATA_REG_P (operands[0])
3037
              || GET_CODE (operands[0]) == MEM)
3038
          /* clr insns on 68000 read before writing.  */
3039
          && ((TARGET_68010 || TARGET_COLDFIRE)
3040
              || !(GET_CODE (operands[0]) == MEM
3041
                   && MEM_VOLATILE_P (operands[0]))))
3042
        return "clr%.w %0";
3043
      else if (operands[1] == const0_rtx
3044
               && ADDRESS_REG_P (operands[0]))
3045
        return "sub%.l %0,%0";
3046
      else if (DATA_REG_P (operands[0])
3047
               && INTVAL (operands[1]) < 128
3048
               && INTVAL (operands[1]) >= -128)
3049
        return "moveq %1,%0";
3050
      else if (INTVAL (operands[1]) < 0x8000
3051
               && INTVAL (operands[1]) >= -0x8000)
3052
        return "move%.w %1,%0";
3053
    }
3054
  else if (CONSTANT_P (operands[1]))
3055
    return "move%.l %1,%0";
3056
  return "move%.w %1,%0";
3057
}
3058
 
3059
/* Return the assembler template for a QImode move from OPERANDS[1]
   to OPERANDS[0].  */

const char *
output_move_qimode (rtx *operands)
{
  /* 68k family always modifies the stack pointer by at least 2, even for
     byte pushes.  The 5200 (ColdFire) does not do this.  */

  /* This case is generated by pushqi1 pattern now.  */
  gcc_assert (!(GET_CODE (operands[0]) == MEM
                && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
                && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
                && ! ADDRESS_REG_P (operands[1])
                && ! TARGET_COLDFIRE));

  /* clr and st insns on 68000 read before writing.  */
  if (!ADDRESS_REG_P (operands[0])
      && ((TARGET_68010 || TARGET_COLDFIRE)
          || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
    {
      if (operands[1] == const0_rtx)
        return "clr%.b %0";
      /* An all-ones byte can be produced by st; invalidate the cached
         condition-code state since st does not set it like a move.  */
      if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
          && GET_CODE (operands[1]) == CONST_INT
          && (INTVAL (operands[1]) & 255) == 255)
        {
          CC_STATUS_INIT;
          return "st %0";
        }
    }
  if (GET_CODE (operands[1]) == CONST_INT
      && DATA_REG_P (operands[0])
      && INTVAL (operands[1]) < 128
      && INTVAL (operands[1]) >= -128)
    return "moveq %1,%0";
  if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
    return "sub%.l %0,%0";
  if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
    return "move%.l %1,%0";
  /* 68k family (including the 5200 ColdFire) does not support byte moves
     to or from address registers.  */
  if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
    return "move%.w %1,%0";
  return "move%.b %1,%0";
}
3102
 
3103
const char *
3104
output_move_stricthi (rtx *operands)
3105
{
3106
  if (operands[1] == const0_rtx
3107
      /* clr insns on 68000 read before writing.  */
3108
      && ((TARGET_68010 || TARGET_COLDFIRE)
3109
          || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3110
    return "clr%.w %0";
3111
  return "move%.w %1,%0";
3112
}
3113
 
3114
const char *
3115
output_move_strictqi (rtx *operands)
3116
{
3117
  if (operands[1] == const0_rtx
3118
      /* clr insns on 68000 read before writing.  */
3119
      && ((TARGET_68010 || TARGET_COLDFIRE)
3120
          || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3121
    return "clr%.b %0";
3122
  return "move%.b %1,%0";
3123
}
3124
 
3125
/* Return the best assembler insn template
3126
   for moving operands[1] into operands[0] as a fullword.  */
3127
 
3128
static const char *
3129
singlemove_string (rtx *operands)
3130
{
3131
  if (GET_CODE (operands[1]) == CONST_INT)
3132
    return output_move_simode_const (operands);
3133
  return "move%.l %1,%0";
3134
}
3135
 
3136
 
3137
/* Output assembler or rtl code to perform a doubleword move insn
   with operands OPERANDS.  Handles 8-byte (two-word) and, when the
   operand mode is 12 bytes wide, three-word moves, splitting them into
   SImode word moves.
   Pointers to 3 helper functions should be specified:
   HANDLE_REG_ADJUST to adjust a register by a small value,
   HANDLE_COMPADR to compute an address and
   HANDLE_MOVSI to move 4 bytes.  */

static void
handle_move_double (rtx operands[2],
                    void (*handle_reg_adjust) (rtx, int),
                    void (*handle_compadr) (rtx [2]),
                    void (*handle_movsi) (rtx [2]))
{
  enum
    {
      REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
    } optype0, optype1;
  rtx latehalf[2];
  rtx middlehalf[2];
  rtx xops[2];
  rtx addreg0 = 0, addreg1 = 0;
  /* Set when the low half of the destination is used in the source
     address, forcing the high word to be moved first.  */
  int dest_overlapped_low = 0;
  int size = GET_MODE_SIZE (GET_MODE (operands[0]));

  middlehalf[0] = 0;
  middlehalf[1] = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    optype0 = POPOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    optype0 = PUSHOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    optype1 = POPOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
    optype1 = PUSHOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not supposed
     to allow to happen.  Generating code for these cases is
     painful.  */
  gcc_assert (optype0 != RNDOP && optype1 != RNDOP);

  /* If one operand is decrementing and one is incrementing
     decrement the former register explicitly
     and change that operand into ordinary indexing.  */

  if (optype0 == PUSHOP && optype1 == POPOP)
    {
      operands[0] = XEXP (XEXP (operands[0], 0), 0);

      handle_reg_adjust (operands[0], -size);

      /* NOTE(review): this tests operands[1] for XFmode but operands[0]
         for DFmode; presumably both operands always share a mode here —
         confirm before relying on the distinction.  */
      if (GET_MODE (operands[1]) == XFmode)
        operands[0] = gen_rtx_MEM (XFmode, operands[0]);
      else if (GET_MODE (operands[0]) == DFmode)
        operands[0] = gen_rtx_MEM (DFmode, operands[0]);
      else
        operands[0] = gen_rtx_MEM (DImode, operands[0]);
      optype0 = OFFSOP;
    }
  if (optype0 == POPOP && optype1 == PUSHOP)
    {
      operands[1] = XEXP (XEXP (operands[1], 0), 0);

      handle_reg_adjust (operands[1], -size);

      if (GET_MODE (operands[1]) == XFmode)
        operands[1] = gen_rtx_MEM (XFmode, operands[1]);
      else if (GET_MODE (operands[1]) == DFmode)
        operands[1] = gen_rtx_MEM (DFmode, operands[1]);
      else
        operands[1] = gen_rtx_MEM (DImode, operands[1]);
      optype1 = OFFSOP;
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first,
     but if either operand is autodecrementing then we
     do the high-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (size == 12)
    {
      /* Three-word move: also set up MIDDLEHALF.  */
      if (optype0 == REGOP)
        {
          latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
          middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
        }
      else if (optype0 == OFFSOP)
        {
          middlehalf[0] = adjust_address (operands[0], SImode, 4);
          latehalf[0] = adjust_address (operands[0], SImode, size - 4);
        }
      else
        {
          middlehalf[0] = adjust_address (operands[0], SImode, 0);
          latehalf[0] = adjust_address (operands[0], SImode, 0);
        }

      if (optype1 == REGOP)
        {
          latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
          middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
        }
      else if (optype1 == OFFSOP)
        {
          middlehalf[1] = adjust_address (operands[1], SImode, 4);
          latehalf[1] = adjust_address (operands[1], SImode, size - 4);
        }
      else if (optype1 == CNSTOP)
        {
          if (GET_CODE (operands[1]) == CONST_DOUBLE)
            {
              /* Split a long-double constant into its three target
                 words.  */
              REAL_VALUE_TYPE r;
              long l[3];

              REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
              REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
              operands[1] = GEN_INT (l[0]);
              middlehalf[1] = GEN_INT (l[1]);
              latehalf[1] = GEN_INT (l[2]);
            }
          else
            {
              /* No non-CONST_DOUBLE constant should ever appear
                 here.  */
              gcc_assert (!CONSTANT_P (operands[1]));
            }
        }
      else
        {
          middlehalf[1] = adjust_address (operands[1], SImode, 0);
          latehalf[1] = adjust_address (operands[1], SImode, 0);
        }
    }
  else
    /* size is not 12: */
    {
      if (optype0 == REGOP)
        latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      else if (optype0 == OFFSOP)
        latehalf[0] = adjust_address (operands[0], SImode, size - 4);
      else
        latehalf[0] = adjust_address (operands[0], SImode, 0);

      if (optype1 == REGOP)
        latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
      else if (optype1 == OFFSOP)
        latehalf[1] = adjust_address (operands[1], SImode, size - 4);
      else if (optype1 == CNSTOP)
        split_double (operands[1], &operands[1], &latehalf[1]);
      else
        latehalf[1] = adjust_address (operands[1], SImode, 0);
    }

  /* If insn is effectively movd N(sp),-(sp) then we will do the
     high word first.  We should use the adjusted operand 1 (which is N+4(sp))
     for the low word as well, to compensate for the first decrement of sp.  */
  if (optype0 == PUSHOP
      && REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM
      && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
    operands[1] = middlehalf[1] = latehalf[1];

  /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
     if the upper part of reg N does not appear in the MEM, arrange to
     emit the move late-half first.  Otherwise, compute the MEM address
     into the upper part of N and use that as a pointer to the memory
     operand.  */
  if (optype0 == REGOP
      && (optype1 == OFFSOP || optype1 == MEMOP))
    {
      rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));

      if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
          && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
        {
          /* If both halves of dest are used in the src memory address,
             compute the address into latehalf of dest.
             Note that this can't happen if the dest is two data regs.  */
        compadr:
          xops[0] = latehalf[0];
          xops[1] = XEXP (operands[1], 0);

          handle_compadr (xops);
          if (GET_MODE (operands[1]) == XFmode)
            {
              operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
              middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
              latehalf[1] = adjust_address (operands[1], DImode, size - 4);
            }
          else
            {
              operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
              latehalf[1] = adjust_address (operands[1], DImode, size - 4);
            }
        }
      else if (size == 12
               && reg_overlap_mentioned_p (middlehalf[0],
                                           XEXP (operands[1], 0)))
        {
          /* Check for two regs used by both source and dest.
             Note that this can't happen if the dest is all data regs.
             It can happen if the dest is d6, d7, a0.
             But in that case, latehalf is an addr reg, so
             the code at compadr does ok.  */

          if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
              || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
            goto compadr;

          /* JRV says this can't happen: */
          gcc_assert (!addreg0 && !addreg1);

          /* Only the middle reg conflicts; simply put it last.  */
          handle_movsi (operands);
          handle_movsi (latehalf);
          handle_movsi (middlehalf);

          return;
        }
      else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
        /* If the low half of dest is mentioned in the source memory
           address, then arrange to emit the move late half first.  */
        dest_overlapped_low = 1;
    }

  /* If one or both operands autodecrementing,
     do the two words, high-numbered first.  */

  /* Likewise, if the first move would clobber the source of the second one,
     do them in the other order.  This happens only for registers;
     such overlap can't happen in memory unless the user explicitly
     sets it up, and that is an undefined circumstance.  */

  if (optype0 == PUSHOP || optype1 == PUSHOP
      || (optype0 == REGOP && optype1 == REGOP
          && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
              || REGNO (operands[0]) == REGNO (latehalf[1])))
      || dest_overlapped_low)
    {
      /* Make any unoffsettable addresses point at high-numbered word.  */
      if (addreg0)
        handle_reg_adjust (addreg0, size - 4);
      if (addreg1)
        handle_reg_adjust (addreg1, size - 4);

      /* Do that word.  */
      handle_movsi (latehalf);

      /* Undo the adds we just did.  */
      if (addreg0)
        handle_reg_adjust (addreg0, -4);
      if (addreg1)
        handle_reg_adjust (addreg1, -4);

      if (size == 12)
        {
          handle_movsi (middlehalf);

          if (addreg0)
            handle_reg_adjust (addreg0, -4);
          if (addreg1)
            handle_reg_adjust (addreg1, -4);
        }

      /* Do low-numbered word.  */

      handle_movsi (operands);
      return;
    }

  /* Normal case: do the two words, low-numbered first.  */

  m68k_final_prescan_insn (NULL, operands, 2);
  handle_movsi (operands);

  /* Do the middle one of the three words for long double */
  if (size == 12)
    {
      if (addreg0)
        handle_reg_adjust (addreg0, 4);
      if (addreg1)
        handle_reg_adjust (addreg1, 4);

      m68k_final_prescan_insn (NULL, middlehalf, 2);
      handle_movsi (middlehalf);
    }

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    handle_reg_adjust (addreg0, 4);
  if (addreg1)
    handle_reg_adjust (addreg1, 4);

  /* Do that word.  */
  m68k_final_prescan_insn (NULL, latehalf, 2);
  handle_movsi (latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    handle_reg_adjust (addreg0, -(size - 4));
  if (addreg1)
    handle_reg_adjust (addreg1, -(size - 4));

  return;
}
3475
 
3476
/* Output assembler code to adjust REG by N.  */
3477
static void
3478
output_reg_adjust (rtx reg, int n)
3479
{
3480
  const char *s;
3481
 
3482
  gcc_assert (GET_MODE (reg) == SImode
3483
              && -12 <= n && n != 0 && n <= 12);
3484
 
3485
  switch (n)
3486
    {
3487
    case 12:
3488
      s = "add%.l #12,%0";
3489
      break;
3490
 
3491
    case 8:
3492
      s = "addq%.l #8,%0";
3493
      break;
3494
 
3495
    case 4:
3496
      s = "addq%.l #4,%0";
3497
      break;
3498
 
3499
    case -12:
3500
      s = "sub%.l #12,%0";
3501
      break;
3502
 
3503
    case -8:
3504
      s = "subq%.l #8,%0";
3505
      break;
3506
 
3507
    case -4:
3508
      s = "subq%.l #4,%0";
3509
      break;
3510
 
3511
    default:
3512
      gcc_unreachable ();
3513
      s = NULL;
3514
    }
3515
 
3516
  output_asm_insn (s, &reg);
3517
}
3518
 
3519
/* Emit rtl code to adjust REG by N.  */
3520
static void
3521
emit_reg_adjust (rtx reg1, int n)
3522
{
3523
  rtx reg2;
3524
 
3525
  gcc_assert (GET_MODE (reg1) == SImode
3526
              && -12 <= n && n != 0 && n <= 12);
3527
 
3528
  reg1 = copy_rtx (reg1);
3529
  reg2 = copy_rtx (reg1);
3530
 
3531
  if (n < 0)
3532
    emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3533
  else if (n > 0)
3534
    emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3535
  else
3536
    gcc_unreachable ();
3537
}
3538
 
3539
/* Output assembler to load address OPERANDS[0] to register OPERANDS[1].  */
3540
static void
3541
output_compadr (rtx operands[2])
3542
{
3543
  output_asm_insn ("lea %a1,%0", operands);
3544
}
3545
 
3546
/* Output the best assembler insn for moving operands[1] into operands[0]
3547
   as a fullword.  */
3548
static void
3549
output_movsi (rtx operands[2])
3550
{
3551
  output_asm_insn (singlemove_string (operands), operands);
3552
}
3553
 
3554
/* Copy OP and change its mode to MODE.  */
3555
static rtx
3556
copy_operand (rtx op, enum machine_mode mode)
3557
{
3558
  /* ??? This looks really ugly.  There must be a better way
3559
     to change a mode on the operand.  */
3560
  if (GET_MODE (op) != VOIDmode)
3561
    {
3562
      if (REG_P (op))
3563
        op = gen_rtx_REG (mode, REGNO (op));
3564
      else
3565
        {
3566
          op = copy_rtx (op);
3567
          PUT_MODE (op, mode);
3568
        }
3569
    }
3570
 
3571
  return op;
3572
}
3573
 
3574
/* Emit rtl code for moving operands[1] into operands[0] as a fullword.  */
3575
static void
3576
emit_movsi (rtx operands[2])
3577
{
3578
  operands[0] = copy_operand (operands[0], SImode);
3579
  operands[1] = copy_operand (operands[1], SImode);
3580
 
3581
  emit_insn (gen_movsi (operands[0], operands[1]));
3582
}
3583
 
3584
/* Output assembler code to perform a doubleword move insn
3585
   with operands OPERANDS.  */
3586
const char *
3587
output_move_double (rtx *operands)
3588
{
3589
  handle_move_double (operands,
3590
                      output_reg_adjust, output_compadr, output_movsi);
3591
 
3592
  return "";
3593
}
3594
 
3595
/* Output rtl code to perform a doubleword move insn
3596
   with operands OPERANDS.  */
3597
void
3598
m68k_emit_move_double (rtx operands[2])
3599
{
3600
  handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
3601
}
3602
 
3603
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
3604
   new rtx with the correct mode.  */
3605
 
3606
static rtx
3607
force_mode (enum machine_mode mode, rtx orig)
3608
{
3609
  if (mode == GET_MODE (orig))
3610
    return orig;
3611
 
3612
  if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3613
    abort ();
3614
 
3615
  return gen_rtx_REG (mode, REGNO (orig));
3616
}
3617
 
3618
static int
3619
fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3620
{
3621
  return reg_renumber && FP_REG_P (op);
3622
}
3623
 
3624
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.

   NOTE(review): this routine appears to be inherited from the PA back
   end; some comments below (e.g. the "14 bits" displacement limit)
   describe the donor port's encodings rather than m68k's -- confirm
   before relying on them.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* During reload, replace a pseudo destination by its equivalent
     memory slot so the secondary-reload cases below see the real
     location.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand0) == SUBREG
           && GET_CODE (SUBREG_REG (operand0)) == REG
           && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
        the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
                                 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
                                 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  /* Likewise for a pseudo (or SUBREG of a pseudo) source operand.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand1) == SUBREG
           && GET_CODE (SUBREG_REG (operand1)) == REG
           && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand1) since that would confuse
        the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
                                 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
                                 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  /* Pick up any address replacements reload has recorded for the MEM
     operands, so we test the addresses reload will actually use.  */
  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
          != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
          != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers where
     the address is symbolic by using the scratch register */
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
           && ! memory_address_p (DFmode, XEXP (operand1, 0)))
          || ((GET_CODE (operand1) == SUBREG
               && GET_CODE (XEXP (operand1, 0)) == MEM
               && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      if (GET_CODE (operand1) == SUBREG)
        operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address.  We want
         it in SImode regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
        {
          /* First load the displacement, then rebuild the whole
             (base OP disp) address in the scratch register itself.  */
          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
                                                       Pmode,
                                                       XEXP (XEXP (operand1, 0), 0),
                                                       scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand1, 0));
      /* Load the FP destination through the address in scratch_reg.  */
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
                              gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  /* Mirror image: FP register stored to a symbolic address.  */
  else if (fp_reg_operand (operand1, mode)
           && ((GET_CODE (operand0) == MEM
                && ! memory_address_p (DFmode, XEXP (operand0, 0)))
               || ((GET_CODE (operand0) == SUBREG)
                   && GET_CODE (XEXP (operand0, 0)) == MEM
                   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
           && scratch_reg)
    {
      if (GET_CODE (operand0) == SUBREG)
        operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in SIMODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
          emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
                                                                        0)),
                                                       Pmode,
                                                       XEXP (XEXP (operand0, 0),
                                                                   0),
                                                       scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand0, 0));
      /* Store the FP source through the address in scratch_reg.  */
      emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
                              operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and an register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
           && CONSTANT_P (operand1)
           && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in SIMODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* Force the constant into memory and put the address of the
         memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_insn (gen_rtx_SET (mode, scratch_reg, xoperands[1]));

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
                              gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
3787
 
3788
/* Split one or more DImode RTL references into pairs of SImode
3789
   references.  The RTL can be REG, offsettable MEM, integer constant, or
3790
   CONST_DOUBLE.  "operands" is a pointer to an array of DImode RTL to
3791
   split and "num" is its length.  lo_half and hi_half are output arrays
3792
   that parallel "operands".  */
3793
 
3794
void
3795
split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3796
{
3797
  while (num--)
3798
    {
3799
      rtx op = operands[num];
3800
 
3801
      /* simplify_subreg refuses to split volatile memory addresses,
3802
         but we still have to handle it.  */
3803
      if (GET_CODE (op) == MEM)
3804
        {
3805
          lo_half[num] = adjust_address (op, SImode, 4);
3806
          hi_half[num] = adjust_address (op, SImode, 0);
3807
        }
3808
      else
3809
        {
3810
          lo_half[num] = simplify_gen_subreg (SImode, op,
3811
                                              GET_MODE (op) == VOIDmode
3812
                                              ? DImode : GET_MODE (op), 4);
3813
          hi_half[num] = simplify_gen_subreg (SImode, op,
3814
                                              GET_MODE (op) == VOIDmode
3815
                                              ? DImode : GET_MODE (op), 0);
3816
        }
3817
    }
3818
}
3819
 
3820
/* Split X into a base and a constant offset, storing them in *BASE
3821
   and *OFFSET respectively.  */
3822
 
3823
static void
3824
m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3825
{
3826
  *offset = 0;
3827
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3828
    {
3829
      *offset += INTVAL (XEXP (x, 1));
3830
      x = XEXP (x, 0);
3831
    }
3832
  *base = x;
3833
}
3834
 
3835
/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
   instruction.  STORE_P says whether the move is a load or store.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
   adjustment.  This adjustment will be made by the first element of
   PARALLEL, with the loads or stores starting at element 1.  If the
   instruction does not use post-increment or pre-decrement addressing,
   AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
   start at element 0.  */

bool
m68k_movem_pattern_p (rtx pattern, rtx automod_base,
                      HOST_WIDE_INT automod_offset, bool store_p)
{
  rtx base, mem_base, set, mem, reg, last_reg;
  HOST_WIDE_INT offset, mem_offset;
  int i, first, len;
  enum reg_class rclass;

  len = XVECLEN (pattern, 0);
  first = (automod_base != NULL);

  if (automod_base)
    {
      /* Stores must be pre-decrement and loads must be post-increment.  */
      if (store_p != (automod_offset < 0))
        return false;

      /* Work out the base and offset for lowest memory location.  */
      base = automod_base;
      offset = (automod_offset < 0 ? automod_offset : 0);
    }
  else
    {
      /* Allow any valid base and offset in the first access.  */
      base = NULL;
      offset = 0;
    }

  last_reg = NULL;
  rclass = NO_REGS;
  for (i = first; i < len; i++)
    {
      /* We need a plain SET.  */
      set = XVECEXP (pattern, 0, i);
      if (GET_CODE (set) != SET)
        return false;

      /* Check that we have a memory location...  */
      mem = XEXP (set, !store_p);
      if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
        return false;

      /* ...with the right address.  */
      if (base == NULL)
        {
          /* First access: record the base and offset everything else
             must be contiguous with.  */
          m68k_split_offset (XEXP (mem, 0), &base, &offset);
          /* The ColdFire instruction only allows (An) and (d16,An) modes.
             There are no mode restrictions for 680x0 besides the
             automodification rules enforced above.  */
          if (TARGET_COLDFIRE
              && !m68k_legitimate_base_reg_p (base, reload_completed))
            return false;
        }
      else
        {
          /* Subsequent accesses must use the same base register and the
             offset expected from accumulating the previous sizes.  */
          m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
          if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
            return false;
        }

      /* Check that we have a register of the required mode and class.  */
      reg = XEXP (set, store_p);
      if (!REG_P (reg)
          || !HARD_REGISTER_P (reg)
          || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
        return false;

      if (last_reg)
        {
          /* The register must belong to RCLASS and have a higher number
             than the register in the previous SET.  */
          if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
              || REGNO (last_reg) >= REGNO (reg))
            return false;
        }
      else
        {
          /* Work out which register class we need.  */
          if (INT_REGNO_P (REGNO (reg)))
            rclass = GENERAL_REGS;
          else if (FP_REGNO_P (REGNO (reg)))
            rclass = FP_REGS;
          else
            return false;
        }

      last_reg = reg;
      offset += GET_MODE_SIZE (GET_MODE (reg));
    }

  /* If we have an automodification, check whether the final offset is OK.  */
  if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
    return false;

  /* Reject unprofitable cases.  */
  if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
    return false;

  return true;
}
3947
 
3948
/* Return the assembly code template for a movem or fmovem instruction
   whose pattern is given by PATTERN.  Store the template's operands
   in OPERANDS.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_OFFSET is the total adjustment, otherwise it is 0.  STORE_P
   is true if this is a store instruction.  */

const char *
m68k_output_movem (rtx *operands, rtx pattern,
                   HOST_WIDE_INT automod_offset, bool store_p)
{
  unsigned int mask;
  int i, first;

  gcc_assert (GET_CODE (pattern) == PARALLEL);
  mask = 0;
  /* With automodification, element 0 of the PARALLEL is the base
     register adjustment; the actual moves start at element 1.  */
  first = (automod_offset != 0);
  for (i = first; i < XVECLEN (pattern, 0); i++)
    {
      /* When using movem with pre-decrement addressing, register X + D0_REG
         is controlled by bit 15 - X.  For all other addressing modes,
         register X + D0_REG is controlled by bit X.  Confusingly, the
         register mask for fmovem is in the opposite order to that for
         movem.  */
      unsigned int regno;

      gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
      gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
      regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
      if (automod_offset < 0)
        {
          /* Pre-decrement form.  */
          if (FP_REGNO_P (regno))
            mask |= 1 << (regno - FP0_REG);
          else
            mask |= 1 << (15 - (regno - D0_REG));
        }
      else
        {
          /* Post-increment or plain addressing.  */
          if (FP_REGNO_P (regno))
            mask |= 1 << (7 - (regno - FP0_REG));
          else
            mask |= 1 << (regno - D0_REG);
        }
    }
  /* The tracked cc state is no longer meaningful after this insn.  */
  CC_STATUS_INIT;

  /* Operand 0 is the address (with any automodification wrapper);
     operand 1 is the register mask computed above.  */
  if (automod_offset == 0)
    operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
  else if (automod_offset < 0)
    operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  else
    operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  operands[1] = GEN_INT (mask);
  if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
    {
      if (store_p)
        return "fmovem %1,%a0";
      else
        return "fmovem %a0,%1";
    }
  else
    {
      if (store_p)
        return "movem%.l %1,%a0";
      else
        return "movem%.l %a0,%1";
    }
}
4017
 
4018
/* Return a REG that occurs in ADDR with coefficient 1.
4019
   ADDR can be effectively incremented by incrementing REG.  */
4020
 
4021
static rtx
4022
find_addr_reg (rtx addr)
4023
{
4024
  while (GET_CODE (addr) == PLUS)
4025
    {
4026
      if (GET_CODE (XEXP (addr, 0)) == REG)
4027
        addr = XEXP (addr, 0);
4028
      else if (GET_CODE (XEXP (addr, 1)) == REG)
4029
        addr = XEXP (addr, 1);
4030
      else if (CONSTANT_P (XEXP (addr, 0)))
4031
        addr = XEXP (addr, 1);
4032
      else if (CONSTANT_P (XEXP (addr, 1)))
4033
        addr = XEXP (addr, 0);
4034
      else
4035
        gcc_unreachable ();
4036
    }
4037
  gcc_assert (GET_CODE (addr) == REG);
4038
  return addr;
4039
}
4040
 
4041
/* Output assembler code to perform a 32-bit 3-operand add.  */

const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* True three-address form: emitted with lea, which needs an
         address register as the base.  Addition is commutative, so the
         two source operands may be swapped to get one into place.  */
      if (!ADDRESS_REG_P (operands[1]))
        {
          rtx tmp = operands[1];

          operands[1] = operands[2];
          operands[2] = tmp;
        }

      /* These insns can result from reloads to access
         stack slots over 64k from the frame pointer.  */
      if (GET_CODE (operands[2]) == CONST_INT
          && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
        return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
        return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* addq/subq take immediates in the range 1..8.  */
      if (INTVAL (operands[2]) > 0
          && INTVAL (operands[2]) <= 8)
        return "addq%.l %2,%0";
      if (INTVAL (operands[2]) < 0
          && INTVAL (operands[2]) >= -8)
        {
          operands[2] = GEN_INT (- INTVAL (operands[2]));
          return "subq%.l %2,%0";
        }
      /* On the CPU32 it is faster to use two addql instructions to
         add a small integer (8 < N <= 16) to a register.
         Likewise for subql.  */
      if (TUNE_CPU32 && REG_P (operands[0]))
        {
          if (INTVAL (operands[2]) > 8
              && INTVAL (operands[2]) <= 16)
            {
              operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
              return "addq%.l #8,%0\n\taddq%.l %2,%0";
            }
          if (INTVAL (operands[2]) < -8
              && INTVAL (operands[2]) >= -16)
            {
              operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
              return "subq%.l #8,%0\n\tsubq%.l %2,%0";
            }
        }
      /* Address-register destination with a 16-bit signed immediate:
         a word-sized add (or lea, except when tuning for the 68040)
         is preferred over the full add.l.  */
      if (ADDRESS_REG_P (operands[0])
          && INTVAL (operands[2]) >= -0x8000
          && INTVAL (operands[2]) < 0x8000)
        {
          if (TUNE_68040)
            return "add%.w %2,%0";
          else
            return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
        }
    }
  /* Default: plain two-operand 32-bit add.  */
  return "add%.l %2,%0";
}
4106
 
4107
/* Store in cc_status the expressions that the condition codes will
   describe after execution of an instruction whose pattern is EXP.
   Do not alter them if the instruction would not alter the cc's.  */

/* On the 68000, all the insns to store in an address register fail to
   set the cc's.  However, in some cases these instructions can make it
   possibly invalid to use the saved cc's.  In those cases we clear out
   some or all of the saved cc's so they won't be used.  */

void
notice_update_cc (rtx exp, rtx insn)
{
  if (GET_CODE (exp) == SET)
    {
      if (GET_CODE (SET_SRC (exp)) == CALL)
        CC_STATUS_INIT;
      else if (ADDRESS_REG_P (SET_DEST (exp)))
        {
          /* The move doesn't set the cc's, but it may clobber values
             the saved cc expressions refer to.  */
          if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
            cc_status.value1 = 0;
          if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
            cc_status.value2 = 0;
        }
      /* fmoves to memory or data registers do not set the condition
         codes.  Normal moves _do_ set the condition codes, but not in
         a way that is appropriate for comparison with 0, because -0.0
         would be treated as a negative nonzero number.  Note that it
         isn't appropriate to conditionalize this restriction on
         HONOR_SIGNED_ZEROS because that macro merely indicates whether
         we care about the difference between -0.0 and +0.0.  */
      else if (!FP_REG_P (SET_DEST (exp))
               && SET_DEST (exp) != cc0_rtx
               && (FP_REG_P (SET_SRC (exp))
                   || GET_CODE (SET_SRC (exp)) == FIX
                   || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
        CC_STATUS_INIT;
      /* A pair of move insns doesn't produce a useful overall cc.  */
      else if (!FP_REG_P (SET_DEST (exp))
               && !FP_REG_P (SET_SRC (exp))
               && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
               && (GET_CODE (SET_SRC (exp)) == REG
                   || GET_CODE (SET_SRC (exp)) == MEM
                   || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
        CC_STATUS_INIT;
      else if (SET_DEST (exp) != pc_rtx)
        {
          /* Record that the cc's now describe a comparison of SET_SRC
             (equivalently, of the value now in SET_DEST) against 0.  */
          cc_status.flags = 0;
          cc_status.value1 = SET_DEST (exp);
          cc_status.value2 = SET_SRC (exp);
        }
    }
  else if (GET_CODE (exp) == PARALLEL
           && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
    {
      /* Only the first SET of the PARALLEL is inspected.  */
      rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
      rtx src  = SET_SRC  (XVECEXP (exp, 0, 0));

      if (ADDRESS_REG_P (dest))
        CC_STATUS_INIT;
      else if (dest != pc_rtx)
        {
          cc_status.flags = 0;
          cc_status.value1 = dest;
          cc_status.value2 = src;
        }
    }
  else
    CC_STATUS_INIT;
  /* NOTE(review): a QImode value in an address register is treated as
     unusable for cc purposes here -- presumably because byte-mode
     address-register operations don't set the flags usefully; confirm
     against the insn patterns before relying on this.  */
  if (cc_status.value2 != 0
      && ADDRESS_REG_P (cc_status.value2)
      && GET_MODE (cc_status.value2) == QImode)
    CC_STATUS_INIT;
  if (cc_status.value2 != 0)
    switch (GET_CODE (cc_status.value2))
      {
      case ASHIFT: case ASHIFTRT: case LSHIFTRT:
      case ROTATE: case ROTATERT:
        /* These instructions always clear the overflow bit, and set
           the carry to the bit shifted out.  */
        cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
        break;

      case PLUS: case MINUS: case MULT:
      case DIV: case UDIV: case MOD: case UMOD: case NEG:
        if (GET_MODE (cc_status.value2) != VOIDmode)
          cc_status.flags |= CC_NO_OVERFLOW;
        break;
      case ZERO_EXTEND:
        /* (SET r1 (ZERO_EXTEND r2)) on this machine
           ends with a move insn moving r2 in r2's mode.
           Thus, the cc's are set for r2.
           This can set N bit spuriously.  */
        cc_status.flags |= CC_NOT_NEGATIVE;
        /* Fall through.  */

      default:
        break;
      }
  /* If the instruction overwrote a register the saved source expression
     mentions, the source expression no longer describes the cc's.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
      && cc_status.value2
      && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
    cc_status.value2 = 0;
  /* Results involving FP registers live in the 68881's status register
     rather than the integer ccr.  */
  if (((cc_status.value1 && FP_REG_P (cc_status.value1))
       || (cc_status.value2 && FP_REG_P (cc_status.value2))))
    cc_status.flags = CC_IN_68881;
  if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
      && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
    {
      cc_status.flags = CC_IN_68881;
      if (!FP_REG_P (XEXP (cc_status.value2, 0))
          && FP_REG_P (XEXP (cc_status.value2, 1)))
        cc_status.flags |= CC_REVERSED;
    }
}
4220
 
4221
const char *
4222
output_move_const_double (rtx *operands)
4223
{
4224
  int code = standard_68881_constant_p (operands[1]);
4225
 
4226
  if (code != 0)
4227
    {
4228
      static char buf[40];
4229
 
4230
      sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4231
      return buf;
4232
    }
4233
  return "fmove%.d %1,%0";
4234
}
4235
 
4236
const char *
4237
output_move_const_single (rtx *operands)
4238
{
4239
  int code = standard_68881_constant_p (operands[1]);
4240
 
4241
  if (code != 0)
4242
    {
4243
      static char buf[40];
4244
 
4245
      sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4246
      return buf;
4247
    }
4248
  return "fmove%.s %f1,%0";
4249
}
4250
 
4251
/* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
   from the "fmovecr" instruction.
   The value, anded with 0xff, gives the code to use in fmovecr
   to get the desired constant.  */

/* This code has been fixed for cross-compilation.  */

/* Nonzero once values_68881 has been filled in by init_68881_table.  */
static int inited_68881_table = 0;

/* Decimal spellings of the constants handled via fmovecr; kept
   parallel with codes_68881 and values_68881.  */
static const char *const strings_68881[7] = {
  "0.0",
  "1.0",
  "10.0",
  "100.0",
  "10000.0",
  "1e8",
  "1e16"
};

/* fmovecr ROM offset for each entry of strings_68881.  */
static const int codes_68881[7] = {
  0x0f,
  0x32,
  0x33,
  0x34,
  0x35,
  0x36,
  0x37
};

/* Binary (REAL_VALUE_TYPE) forms of strings_68881, computed lazily by
   init_68881_table.  */
REAL_VALUE_TYPE values_68881[7];
4281
 
4282
/* Set up values_68881 array by converting the decimal values
4283
   strings_68881 to binary.  */
4284
 
4285
void
4286
init_68881_table (void)
4287
{
4288
  int i;
4289
  REAL_VALUE_TYPE r;
4290
  enum machine_mode mode;
4291
 
4292
  mode = SFmode;
4293
  for (i = 0; i < 7; i++)
4294
    {
4295
      if (i == 6)
4296
        mode = DFmode;
4297
      r = REAL_VALUE_ATOF (strings_68881[i], mode);
4298
      values_68881[i] = r;
4299
    }
4300
  inited_68881_table = 1;
4301
}
4302
 
4303
/* Return the fmovecr code for X, a CONST_DOUBLE, or 0 if X cannot be
   loaded with fmovecr (see the table comments above).  */
int
standard_68881_constant_p (rtx x)
{
  REAL_VALUE_TYPE r;
  int i;

  /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
     used at all on those chips.  */
  if (TUNE_68040_60)
    return 0;

  if (! inited_68881_table)
    init_68881_table ();

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
     is rejected.  */
  for (i = 0; i < 6; i++)
    {
      if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
        return (codes_68881[i]);
    }

  /* Entry 6 (1e16) was converted in DFmode (see init_68881_table), so
     don't let it match a single-precision constant.  */
  if (GET_MODE (x) == SFmode)
    return 0;

  if (REAL_VALUES_EQUAL (r, values_68881[6]))
    return (codes_68881[6]);

  /* larger powers of ten in the constants ram are not used
     because they are not equal to a `double' C constant.  */
  return 0;
}
4337
 
4338
/* If X is a floating-point constant, return the logarithm of X base 2,
4339
   or 0 if X is not a power of 2.  */
4340
 
4341
int
4342
floating_exact_log2 (rtx x)
4343
{
4344
  REAL_VALUE_TYPE r, r1;
4345
  int exp;
4346
 
4347
  REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4348
 
4349
  if (REAL_VALUES_LESS (r, dconst1))
4350
    return 0;
4351
 
4352
  exp = real_exponent (&r);
4353
  real_2expN (&r1, exp, DFmode);
4354
  if (REAL_VALUES_EQUAL (r1, r))
4355
    return exp;
4356
 
4357
  return 0;
4358
}
4359
 
4360
/* A C compound statement to output to stdio stream STREAM the
   assembler syntax for an instruction operand X.  X is an RTL
   expression.

   CODE is a value that can be used to specify one of several ways
   of printing the operand.  It is used when identical operands
   must be printed differently depending on the context.  CODE
   comes from the `%' specification that was used to request
   printing of the operand.  If the specification was just `%DIGIT'
   then CODE is 0; if the specification was `%LTR DIGIT' then CODE
   is the ASCII code for LTR.

   If X is a register, this macro should print the register's name.
   The names can be found in an array `reg_names' whose type is
   `char *[]'.  `reg_names' is initialized from `REGISTER_NAMES'.

   When the machine description has a specification `%PUNCT' (a `%'
   followed by a punctuation character), this macro is called with
   a null pointer for X and the punctuation character for CODE.

   The m68k specific codes are:

   '.' for dot needed in Motorola-style opcode names.
   '-' for an operand pushing on the stack:
       sp@-, -(sp) or -(%sp) depending on the style of syntax.
   '+' for an operand pushing on the stack:
       sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
   '@' for a reference to the top word on the stack:
       sp@, (sp) or (%sp) depending on the style of syntax.
   '#' for an immediate operand prefix (# in MIT and Motorola syntax
       but & in SGS syntax).
   '!' for the cc register (used in an `and to cc' insn).
   '$' for the letter `s' in an op code, but only on the 68040.
   '&' for the letter `d' in an op code, but only on the 68040.
   '/' for register prefix needed by longlong.h.
   '?' for m68k_library_id_string

   'b' for byte insn (no effect, on the Sun; this is for the ISI).
   'd' to force memory addressing to be absolute, not relative.
   'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
   'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
       or print pair of registers as rx:ry.
   'p' print an address with @PLTPC attached, but only if the operand
       is not locally-bound.  */

void
print_operand (FILE *file, rtx op, int letter)
{
  /* Punctuation codes first: these ignore OP entirely.  */
  if (letter == '.')
    {
      if (MOTOROLA)
        fprintf (file, ".");
    }
  else if (letter == '#')
    asm_fprintf (file, "%I");
  else if (letter == '-')
    asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
  else if (letter == '+')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
  else if (letter == '@')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
  else if (letter == '!')
    asm_fprintf (file, "%Rfpcr");
  else if (letter == '$')
    {
      if (TARGET_68040)
        fprintf (file, "s");
    }
  else if (letter == '&')
    {
      if (TARGET_68040)
        fprintf (file, "d");
    }
  else if (letter == '/')
    asm_fprintf (file, "%R");
  else if (letter == '?')
    asm_fprintf (file, m68k_library_id_string);
  else if (letter == 'p')
    {
      output_addr_const (file, op);
      /* Only locally-bound symbols can skip the PLT indirection.  */
      if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
        fprintf (file, "@PLTPC");
    }
  else if (GET_CODE (op) == REG)
    {
      if (letter == 'R')
        /* Print out the second register name of a register pair.
           I.e., R (6) => 7.  */
        fputs (M68K_REGNAME(REGNO (op) + 1), file);
      else
        fputs (M68K_REGNAME(REGNO (op)), file);
    }
  else if (GET_CODE (op) == MEM)
    {
      output_address (XEXP (op, 0));
      /* 'd': force a long absolute reference unless the address is a
         small constant that already fits in a word.  */
      if (letter == 'd' && ! TARGET_68020
          && CONSTANT_ADDRESS_P (XEXP (op, 0))
          && !(GET_CODE (XEXP (op, 0)) == CONST_INT
               && INTVAL (XEXP (op, 0)) < 0x8000
               && INTVAL (XEXP (op, 0)) >= -0x8000))
        fprintf (file, MOTOROLA ? ".l" : ":l");
    }
  /* Floating constants are printed as hex images of their target
     representation, one 32-bit word at a time.  */
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
    {
      REAL_VALUE_TYPE r;
      long l;
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, l);
      asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
    {
      REAL_VALUE_TYPE r;
      long l[3];
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
      asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
                   l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
    {
      REAL_VALUE_TYPE r;
      long l[2];
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_DOUBLE (r, l);
      asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
    }
  else
    {
      /* Use `print_operand_address' instead of `output_addr_const'
         to ensure that we print relevant PIC stuff.  */
      asm_fprintf (file, "%I");
      if (TARGET_PCREL
          && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
        print_operand_address (file, op);
      else
        output_addr_const (file, op);
    }
}
4499
 
4500
/* Return string for TLS relocation RELOC.  */
4501
 
4502
static const char *
4503
m68k_get_reloc_decoration (enum m68k_reloc reloc)
4504
{
4505
  /* To my knowledge, !MOTOROLA assemblers don't support TLS.  */
4506
  gcc_assert (MOTOROLA || reloc == RELOC_GOT);
4507
 
4508
  switch (reloc)
4509
    {
4510
    case RELOC_GOT:
4511
      if (MOTOROLA)
4512
        {
4513
          if (flag_pic == 1 && TARGET_68020)
4514
            return "@GOT.w";
4515
          else
4516
            return "@GOT";
4517
        }
4518
      else
4519
        {
4520
          if (TARGET_68020)
4521
            {
4522
              switch (flag_pic)
4523
                {
4524
                case 1:
4525
                  return ":w";
4526
                case 2:
4527
                  return ":l";
4528
                default:
4529
                  return "";
4530
                }
4531
            }
4532
        }
4533
 
4534
    case RELOC_TLSGD:
4535
      return "@TLSGD";
4536
 
4537
    case RELOC_TLSLDM:
4538
      return "@TLSLDM";
4539
 
4540
    case RELOC_TLSLDO:
4541
      return "@TLSLDO";
4542
 
4543
    case RELOC_TLSIE:
4544
      return "@TLSIE";
4545
 
4546
    case RELOC_TLSLE:
4547
      return "@TLSLE";
4548
 
4549
    default:
4550
      gcc_unreachable ();
4551
    }
4552
}
4553
 
4554
/* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA.  */
4555
 
4556
static bool
4557
m68k_output_addr_const_extra (FILE *file, rtx x)
4558
{
4559
  if (GET_CODE (x) == UNSPEC)
4560
    {
4561
      switch (XINT (x, 1))
4562
        {
4563
        case UNSPEC_RELOC16:
4564
        case UNSPEC_RELOC32:
4565
          output_addr_const (file, XVECEXP (x, 0, 0));
4566
          fputs (m68k_get_reloc_decoration
4567
                 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
4568
          return true;
4569
 
4570
        default:
4571
          break;
4572
        }
4573
    }
4574
 
4575
  return false;
4576
}
4577
 
4578
/* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL.  */
4579
 
4580
static void
4581
m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
4582
{
4583
  gcc_assert (size == 4);
4584
  fputs ("\t.long\t", file);
4585
  output_addr_const (file, x);
4586
  fputs ("@TLSLDO+0x8000", file);
4587
}
4588
 
4589
/* In the name of slightly smaller debug output, and to cater to
4590
   general assembler lossage, recognize various UNSPEC sequences
4591
   and turn them back into a direct symbol reference.  */
4592
 
4593
static rtx
m68k_delegitimize_address (rtx orig_x)
{
  rtx x;
  struct m68k_address addr;
  rtx unspec;

  /* First strip any decoration recorded in MEM attributes.  */
  orig_x = delegitimize_mem_from_attrs (orig_x);
  x = orig_x;
  if (MEM_P (x))
    x = XEXP (x, 0);

  /* Only PLUS addresses in Pmode can hide a reloc UNSPEC.  */
  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;

  if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
      || addr.offset == NULL_RTX
      || GET_CODE (addr.offset) != CONST)
    return orig_x;

  /* The UNSPEC may be wrapped as (const (plus (unspec ...) (const_int)));
     peel off the addend if so.  */
  unspec = XEXP (addr.offset, 0);
  if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
    unspec = XEXP (unspec, 0);
  if (GET_CODE (unspec) != UNSPEC
      || (XINT (unspec, 1) != UNSPEC_RELOC16
          && XINT (unspec, 1) != UNSPEC_RELOC32))
    return orig_x;
  /* The symbol the UNSPEC relocates.  */
  x = XVECEXP (unspec, 0, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
  /* Re-attach any constant addend that was peeled off above.  */
  if (unspec != XEXP (addr.offset, 0))
    x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
  /* Rebuild the rest of the decomposed address (index*scale + base)
     around the bare symbol.  */
  if (addr.index)
    {
      rtx idx = addr.index;
      if (addr.scale != 1)
        idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
      x = gen_rtx_PLUS (Pmode, idx, x);
    }
  if (addr.base)
    x = gen_rtx_PLUS (Pmode, addr.base, x);
  if (MEM_P (orig_x))
    x = replace_equiv_address_nv (orig_x, x);
  return x;
}
4637
 
4638
 
4639
/* A C compound statement to output to stdio stream STREAM the
4640
   assembler syntax for an instruction operand that is a memory
4641
   reference whose address is ADDR.  ADDR is an RTL expression.
4642
 
4643
   Note that this contains a kludge that knows that the only reason
4644
   we have an address (plus (label_ref...) (reg...)) when not generating
4645
   PIC code is in the insn before a tablejump, and we know that m68k.md
4646
   generates a label LInnn: on such an insn.
4647
 
4648
   It is possible for PIC to generate a (plus (label_ref...) (reg...))
4649
   and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4650
 
4651
   This routine is responsible for distinguishing between -fpic and -fPIC
4652
   style relocations in an address.  When generating -fpic code the
4653
   offset is output in word mode (e.g. movel a5@(_foo:w), a0).  When generating
4654
   -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */
4655
 
4656
void
print_operand_address (FILE *file, rtx addr)
{
  struct m68k_address address;

  /* QImode is used so that any valid address decomposes; the mode only
     affects offset-range checks, which output does not depend on.  */
  if (!m68k_decompose_address (QImode, addr, true, &address))
    gcc_unreachable ();

  if (address.code == PRE_DEC)
    fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
             M68K_REGNAME (REGNO (address.base)));
  else if (address.code == POST_INC)
    fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
             M68K_REGNAME (REGNO (address.base)));
  else if (!address.base && !address.index)
    {
      /* A constant address.  */
      gcc_assert (address.offset == addr);
      if (GET_CODE (addr) == CONST_INT)
        {
          /* (xxx).w or (xxx).l.  */
          if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
            fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
          else
            fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
        }
      else if (TARGET_PCREL)
        {
          /* (d16,PC) or (bd,PC,Xn) (with suppressed index register).
             -fpic gets a word offset, -fPIC a long offset.  */
          fputc ('(', file);
          output_addr_const (file, addr);
          asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
        }
      else
        {
          /* (xxx).l.  We need a special case for SYMBOL_REF if the symbol
             name ends in `.<letter>', as the last 2 characters can be
             mistaken as a size suffix.  Put the name in parentheses.  */
          if (GET_CODE (addr) == SYMBOL_REF
              && strlen (XSTR (addr, 0)) > 2
              && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
            {
              putc ('(', file);
              output_addr_const (file, addr);
              putc (')', file);
            }
          else
            output_addr_const (file, addr);
        }
    }
  else
    {
      int labelno;

      /* If ADDR is a (d8,pc,Xn) address, this is the number of the
         label being accessed, otherwise it is -1.  */
      labelno = (address.offset
                 && !address.base
                 && GET_CODE (address.offset) == LABEL_REF
                 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
                 : -1);
      if (MOTOROLA)
        {
          /* Print the "offset(base" component.  */
          if (labelno >= 0)
            asm_fprintf (file, "%LL%d(%Rpc,", labelno);
          else
            {
              if (address.offset)
                output_addr_const (file, address.offset);

              putc ('(', file);
              if (address.base)
                fputs (M68K_REGNAME (REGNO (address.base)), file);
            }
          /* Print the ",index" component, if any.  */
          if (address.index)
            {
              if (address.base)
                putc (',', file);
              fprintf (file, "%s.%c",
                       M68K_REGNAME (REGNO (address.index)),
                       GET_MODE (address.index) == HImode ? 'w' : 'l');
              if (address.scale != 1)
                fprintf (file, "*%d", address.scale);
            }
          putc (')', file);
        }
      else /* !MOTOROLA */
        {
          if (!address.offset && !address.index)
            fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
          else
            {
              /* Print the "base@(offset" component.  */
              if (labelno >= 0)
                asm_fprintf (file, "%Rpc@(%LL%d", labelno);
              else
                {
                  if (address.base)
                    fputs (M68K_REGNAME (REGNO (address.base)), file);
                  fprintf (file, "@(");
                  if (address.offset)
                    output_addr_const (file, address.offset);
                }
              /* Print the ",index" component, if any.  */
              if (address.index)
                {
                  fprintf (file, ",%s:%c",
                           M68K_REGNAME (REGNO (address.index)),
                           GET_MODE (address.index) == HImode ? 'w' : 'l');
                  if (address.scale != 1)
                    fprintf (file, ":%d", address.scale);
                }
              putc (')', file);
            }
        }
    }
}
4775
 
4776
/* Check for cases where a clr insns can be omitted from code using
4777
   strict_low_part sets.  For example, the second clrl here is not needed:
4778
   clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...
4779
 
4780
   MODE is the mode of this STRICT_LOW_PART set.  FIRST_INSN is the clear
4781
   insn we are checking for redundancy.  TARGET is the register set by the
4782
   clear insn.  */
4783
 
4784
bool
strict_low_part_peephole_ok (enum machine_mode mode, rtx first_insn,
                             rtx target)
{
  rtx p = first_insn;

  /* Scan backwards from FIRST_INSN for an earlier write that proves
     the clear is redundant.  */
  while ((p = PREV_INSN (p)))
    {
      /* Stop at the start of the basic block: nothing before it is
         guaranteed to reach FIRST_INSN.  */
      if (NOTE_INSN_BASIC_BLOCK_P (p))
        return false;

      if (NOTE_P (p))
        continue;

      /* If it isn't an insn, then give up.  */
      if (!INSN_P (p))
        return false;

      if (reg_set_p (target, p))
        {
          rtx set = single_set (p);
          rtx dest;

          /* If it isn't an easy to recognize insn, then give up.  */
          if (! set)
            return false;

          dest = SET_DEST (set);

          /* If this sets the entire target register to zero, then our
             first_insn is redundant.  */
          if (rtx_equal_p (dest, target)
              && SET_SRC (set) == const0_rtx)
            return true;
          else if (GET_CODE (dest) == STRICT_LOW_PART
                   && GET_CODE (XEXP (dest, 0)) == REG
                   && REGNO (XEXP (dest, 0)) == REGNO (target)
                   && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
                       <= GET_MODE_SIZE (mode)))
            /* This is a strict low part set which modifies less than
               we are using, so it is safe.  */
            ;
          else
            return false;
        }
    }

  /* Ran off the start of the insn chain without finding proof.  */
  return false;
}
4833
 
4834
/* Operand predicates for implementing asymmetric pc-relative addressing
4835
   on m68k.  The m68k supports pc-relative addressing (mode 7, register 2)
4836
   when used as a source operand, but not as a destination operand.
4837
 
4838
   We model this by restricting the meaning of the basic predicates
4839
   (general_operand, memory_operand, etc) to forbid the use of this
4840
   addressing mode, and then define the following predicates that permit
4841
   this addressing mode.  These predicates can then be used for the
4842
   source operands of the appropriate instructions.
4843
 
4844
   n.b.  While it is theoretically possible to change all machine patterns
4845
   to use this addressing mode where permitted by the architecture,
4846
   it has only been implemented for "common" cases: SImode, HImode, and
4847
   QImode operands, and only for the principal operations that would
4848
   require this addressing mode: data movement and simple integer operations.
4849
 
4850
   In parallel with these new predicates, two new constraint letters
4851
   were defined: 'S' and 'T'.  'S' is the -mpcrel analog of 'm'.
4852
   'T' replaces 's' in the non-pcrel case.  It is a no-op in the pcrel case.
4853
   In the pcrel case 's' is only valid in combination with 'a' registers.
4854
   See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4855
   of how these constraints are used.
4856
 
4857
   The use of these predicates is strictly optional, though patterns that
4858
   don't will cause an extra reload register to be allocated where one
4859
   was not necessary:
4860
 
4861
        lea (abc:w,%pc),%a0     ; need to reload address
4862
        moveq &1,%d1            ; since write to pc-relative space
4863
        movel %d1,%a0@          ; is not allowed
4864
        ...
4865
        lea (abc:w,%pc),%a1     ; no need to reload address here
4866
        movel %a1@,%d0          ; since "movel (abc:w,%pc),%d0" is ok
4867
 
4868
   For more info, consult tiemann@cygnus.com.
4869
 
4870
 
4871
   All of the ugliness with predicates and constraints is due to the
4872
   simple fact that the m68k does not allow a pc-relative addressing
4873
   mode as a destination.  gcc does not distinguish between source and
4874
   destination addresses.  Hence, if we claim that pc-relative address
4875
   modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
4876
   end up with invalid code.  To get around this problem, we left
4877
   pc-relative modes as invalid addresses, and then added special
4878
   predicates and constraints to accept them.
4879
 
4880
   A cleaner way to handle this is to modify gcc to distinguish
4881
   between source and destination addresses.  We can then say that
4882
   pc-relative is a valid source address but not a valid destination
4883
   address, and hopefully avoid a lot of the predicate and constraint
4884
   hackery.  Unfortunately, this would be a pretty big change.  It would
4885
   be a useful change for a number of ports, but there aren't any current
4886
   plans to undertake this.
4887
 
4888
   ***************************************************************************/
4889
 
4890
 
4891
/* Return the assembler template for an SImode AND of OPERANDS.
   Uses a word AND when the mask keeps the upper 16 bits intact
   ((val | 0xffff) == -1), a single BCLR when the mask clears exactly
   one bit, and falls back to a full 32-bit AND otherwise.  May modify
   OPERANDS in place to match the chosen template.  */
const char *
output_andsi3 (rtx *operands)
{
  int logval;
  if (GET_CODE (operands[2]) == CONST_INT
      && (INTVAL (operands[2]) | 0xffff) == -1
      && (DATA_REG_P (operands[0])
          || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      /* Operate on the low word only; for memory, address it directly.  */
      if (GET_CODE (operands[0]) != REG)
        operands[0] = adjust_address (operands[0], HImode, 2);
      operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      if (operands[2] == const0_rtx)
        return "clr%.w %0";
      return "and%.w %2,%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
          || offsettable_memref_p (operands[0])))
    {
      /* Exactly one bit is cleared: use BCLR.  For memory operands the
         bit number is relative to the addressed byte.  */
      if (DATA_REG_P (operands[0]))
        operands[1] = GEN_INT (logval);
      else
        {
          operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
          operands[1] = GEN_INT (logval % 8);
        }
      /* This does not set condition codes in a standard way.  */
      CC_STATUS_INIT;
      return "bclr %1,%0";
    }
  return "and%.l %2,%0";
}
4928
 
4929
/* Return the assembler template for an SImode inclusive OR of OPERANDS.
   Uses a word OR when the constant fits in 16 bits, a single BSET when
   exactly one bit is set, and a full 32-bit OR otherwise.  May modify
   OPERANDS in place to match the chosen template.  */
const char *
output_iorsi3 (rtx *operands)
{
  register int logval;
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) >> 16 == 0
      && (DATA_REG_P (operands[0])
          || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      /* Only the low word changes; for memory, address it directly.  */
      if (GET_CODE (operands[0]) != REG)
        operands[0] = adjust_address (operands[0], HImode, 2);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      /* OR with 0xffff sets the whole low word: a move is shorter.  */
      if (INTVAL (operands[2]) == 0xffff)
        return "mov%.w %2,%0";
      return "or%.w %2,%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
          || offsettable_memref_p (operands[0])))
    {
      /* Exactly one bit is set: use BSET.  For memory operands the
         bit number is relative to the addressed byte.  */
      if (DATA_REG_P (operands[0]))
        operands[1] = GEN_INT (logval);
      else
        {
          operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
          operands[1] = GEN_INT (logval % 8);
        }
      CC_STATUS_INIT;
      return "bset %1,%0";
    }
  return "or%.l %2,%0";
}
4964
 
4965
/* Return the assembler template for an SImode XOR of OPERANDS.
   Uses a word EOR when the constant fits in 16 bits (NOT when it is
   0xffff), a single BCHG when exactly one bit is toggled, and a full
   32-bit EOR otherwise.  May modify OPERANDS in place to match the
   chosen template.  */
const char *
output_xorsi3 (rtx *operands)
{
  register int logval;
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) >> 16 == 0
      && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
      && !TARGET_COLDFIRE)
    {
      /* Only the low word changes; for memory, address it directly.  */
      if (! DATA_REG_P (operands[0]))
        operands[0] = adjust_address (operands[0], HImode, 2);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      /* XOR with 0xffff flips the whole low word: NOT is shorter.  */
      if (INTVAL (operands[2]) == 0xffff)
        return "not%.w %0";
      return "eor%.w %2,%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
          || offsettable_memref_p (operands[0])))
    {
      /* Exactly one bit is toggled: use BCHG.  For memory operands the
         bit number is relative to the addressed byte.  */
      if (DATA_REG_P (operands[0]))
        operands[1] = GEN_INT (logval);
      else
        {
          operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
          operands[1] = GEN_INT (logval % 8);
        }
      CC_STATUS_INIT;
      return "bchg %1,%0";
    }
  return "eor%.l %2,%0";
}
4999
 
5000
/* Return the instruction that should be used for a call to address X,
5001
   which is known to be in operand 0.  */
5002
 
5003
const char *
5004
output_call (rtx x)
5005
{
5006
  if (symbolic_operand (x, VOIDmode))
5007
    return m68k_symbolic_call;
5008
  else
5009
    return "jsr %a0";
5010
}
5011
 
5012
/* Likewise sibling calls.  */
5013
 
5014
const char *
5015
output_sibcall (rtx x)
5016
{
5017
  if (symbolic_operand (x, VOIDmode))
5018
    return m68k_symbolic_jump;
5019
  else
5020
    return "jmp %a0";
5021
}
5022
 
5023
/* Implement TARGET_ASM_OUTPUT_MI_THUNK: emit the assembly for a thunk
   that adds DELTA (and optionally *(*this + VCALL_OFFSET)) to the
   incoming `this' pointer, then tail-calls FUNCTION.  */
static void
m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
                      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                      tree function)
{
  rtx this_slot, offset, addr, mem, insn, tmp;

  /* Avoid clobbering the struct value reg by using the
     static chain reg as a temporary.  */
  tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);

  /* Pretend to be a post-reload pass while generating rtl.  */
  reload_completed = 1;

  /* The "this" pointer is stored at 4(%sp).  */
  this_slot = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, 4));

  /* Add DELTA to THIS.  */
  if (delta != 0)
    {
      /* Make the offset a legitimate operand for memory addition.  */
      offset = GEN_INT (delta);
      if ((delta < -8 || delta > 8)
          && (TARGET_COLDFIRE || USE_MOVQ (delta)))
        {
          emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
          offset = gen_rtx_REG (Pmode, D0_REG);
        }
      /* Add directly into the stack slot holding `this'.  */
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
                                copy_rtx (this_slot), offset));
    }

  /* If needed, add *(*THIS + VCALL_OFFSET) to THIS.  */
  if (vcall_offset != 0)
    {
      /* Set the static chain register to *THIS.  */
      emit_move_insn (tmp, this_slot);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));

      /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET.  */
      addr = plus_constant (tmp, vcall_offset);
      if (!m68k_legitimate_address_p (Pmode, addr, true))
        {
          emit_insn (gen_rtx_SET (VOIDmode, tmp, addr));
          addr = tmp;
        }

      /* Load the offset into %d0 and add it to THIS.  */
      emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
                      gen_rtx_MEM (Pmode, addr));
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
                                copy_rtx (this_slot),
                                gen_rtx_REG (Pmode, D0_REG)));
    }

  /* Jump to the target function.  Use a sibcall if direct jumps are
     allowed, otherwise load the address into a register first.  */
  mem = DECL_RTL (function);
  if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
    {
      gcc_assert (flag_pic);

      if (!TARGET_SEP_DATA)
        {
          /* Use the static chain register as a temporary (call-clobbered)
             GOT pointer for this function.  We can use the static chain
             register because it isn't live on entry to the thunk.  */
          SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
          emit_insn (gen_load_got (pic_offset_table_rtx));
        }
      legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
      mem = replace_equiv_address (mem, tmp);
    }
  insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation.  */
  insn = get_insns ();
  split_all_insns_noflow ();
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  /* Clean up the vars set above.  */
  reload_completed = 0;

  /* Restore the original PIC register.  */
  if (flag_pic)
    SET_REGNO (pic_offset_table_rtx, PIC_REG);
}
5113
 
5114
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */
5115
 
5116
static rtx
m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
                       int incoming ATTRIBUTE_UNUSED)
{
  /* The hidden aggregate-return pointer always lives in the same fixed
     register, regardless of function type or call direction.  */
  return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
}
5122
 
5123
/* Return nonzero if register old_reg can be renamed to register new_reg.  */
5124
int
5125
m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5126
                           unsigned int new_reg)
5127
{
5128
 
5129
  /* Interrupt functions can only use registers that have already been
5130
     saved by the prologue, even if they would normally be
5131
     call-clobbered.  */
5132
 
5133
  if ((m68k_get_function_kind (current_function_decl)
5134
       == m68k_fk_interrupt_handler)
5135
      && !df_regs_ever_live_p (new_reg))
5136
    return 0;
5137
 
5138
  return 1;
5139
}
5140
 
5141
/* Value is true if hard register REGNO can hold a value of machine-mode
5142
   MODE.  On the 68000, we let the cpu registers can hold any mode, but
5143
   restrict the 68881 registers to floating-point modes.  */
5144
 
5145
bool
5146
m68k_regno_mode_ok (int regno, enum machine_mode mode)
5147
{
5148
  if (DATA_REGNO_P (regno))
5149
    {
5150
      /* Data Registers, can hold aggregate if fits in.  */
5151
      if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5152
        return true;
5153
    }
5154
  else if (ADDRESS_REGNO_P (regno))
5155
    {
5156
      if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5157
        return true;
5158
    }
5159
  else if (FP_REGNO_P (regno))
5160
    {
5161
      /* FPU registers, hold float or complex float of long double or
5162
         smaller.  */
5163
      if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5164
           || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5165
          && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
5166
        return true;
5167
    }
5168
  return false;
5169
}
5170
 
5171
/* Implement SECONDARY_RELOAD_CLASS.  */
5172
 
5173
enum reg_class
5174
m68k_secondary_reload_class (enum reg_class rclass,
5175
                             enum machine_mode mode, rtx x)
5176
{
5177
  int regno;
5178
 
5179
  regno = true_regnum (x);
5180
 
5181
  /* If one operand of a movqi is an address register, the other
5182
     operand must be a general register or constant.  Other types
5183
     of operand must be reloaded through a data register.  */
5184
  if (GET_MODE_SIZE (mode) == 1
5185
      && reg_classes_intersect_p (rclass, ADDR_REGS)
5186
      && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5187
    return DATA_REGS;
5188
 
5189
  /* PC-relative addresses must be loaded into an address register first.  */
5190
  if (TARGET_PCREL
5191
      && !reg_class_subset_p (rclass, ADDR_REGS)
5192
      && symbolic_operand (x, VOIDmode))
5193
    return ADDR_REGS;
5194
 
5195
  return NO_REGS;
5196
}
5197
 
5198
/* Implement PREFERRED_RELOAD_CLASS.  */
5199
 
5200
enum reg_class
m68k_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class secondary_class;

  /* If RCLASS might need a secondary reload, try restricting it to
     a class that doesn't.  */
  secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
  if (secondary_class != NO_REGS
      && reg_class_subset_p (secondary_class, rclass))
    return secondary_class;

  /* Prefer to use moveq for in-range constants (signed 8-bit).  */
  if (GET_CODE (x) == CONST_INT
      && reg_class_subset_p (DATA_REGS, rclass)
      && IN_RANGE (INTVAL (x), -0x80, 0x7f))
    return DATA_REGS;

  /* ??? Do we really need this now?  */
  if (GET_CODE (x) == CONST_DOUBLE
      && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    {
      /* Float constants belong in FP registers when there is hardware
         float; otherwise no register class is preferable.  */
      if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
        return FP_REGS;

      return NO_REGS;
    }

  return rclass;
}
5230
 
5231
/* Return floating point values in a 68881 register.  This makes 68881 code
5232
   a little bit faster.  It also makes -msoft-float code incompatible with
5233
   hard-float code, so people have to be careful not to mix the two.
5234
   For ColdFire it was decided the ABI incompatibility is undesirable.
5235
   If there is need for a hard-float ABI it is probably worth doing it
5236
   properly and also passing function arguments in FP registers.  */
5237
rtx
5238
m68k_libcall_value (enum machine_mode mode)
5239
{
5240
  switch (mode) {
5241
  case SFmode:
5242
  case DFmode:
5243
  case XFmode:
5244
    if (TARGET_68881)
5245
      return gen_rtx_REG (mode, FP0_REG);
5246
    break;
5247
  default:
5248
    break;
5249
  }
5250
 
5251
  return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
5252
}
5253
 
5254
/* Location in which function value is returned.
5255
   NOTE: Due to differences in ABIs, don't call this function directly,
5256
   use FUNCTION_VALUE instead.  */
5257
rtx
m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;

  mode = TYPE_MODE (valtype);
  /* Floating-point results go in %fp0 when the 68881 is in use.  */
  switch (mode) {
  case SFmode:
  case DFmode:
  case XFmode:
    if (TARGET_68881)
      return gen_rtx_REG (mode, FP0_REG);
    break;
  default:
    break;
  }

  /* If the function returns a pointer, push that into %a0.  */
  if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
    /* For compatibility with the large body of existing code which
       does not always properly declare external functions returning
       pointer types, the m68k/SVR4 convention is to copy the value
       returned for pointer functions from a0 to d0 in the function
       epilogue, so that callers that have neglected to properly
       declare the callee can still find the correct return value in
       d0.  */
    return gen_rtx_PARALLEL
      (mode,
       gen_rtvec (2,
                  gen_rtx_EXPR_LIST (VOIDmode,
                                     gen_rtx_REG (mode, A0_REG),
                                     const0_rtx),
                  gen_rtx_EXPR_LIST (VOIDmode,
                                     gen_rtx_REG (mode, D0_REG),
                                     const0_rtx)));
  else if (POINTER_TYPE_P (valtype))
    /* No FUNC to inspect: a pointer-typed value still goes in %a0.  */
    return gen_rtx_REG (mode, A0_REG);
  else
    return gen_rtx_REG (mode, D0_REG);
}
5297
 
5298
/* Worker function for TARGET_RETURN_IN_MEMORY.  */
5299
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
5300
static bool
5301
m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5302
{
5303
  enum machine_mode mode = TYPE_MODE (type);
5304
 
5305
  if (mode == BLKmode)
5306
    return true;
5307
 
5308
  /* If TYPE's known alignment is less than the alignment of MODE that
5309
     would contain the structure, then return in memory.  We need to
5310
     do so to maintain the compatibility between code compiled with
5311
     -mstrict-align and that compiled with -mno-strict-align.  */
5312
  if (AGGREGATE_TYPE_P (type)
5313
      && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
5314
    return true;
5315
 
5316
  return false;
5317
}
5318
#endif
5319
 
5320
/* CPU to schedule the program for (set from command-line options).  */
enum attr_cpu m68k_sched_cpu;

/* MAC unit to schedule the program for (set from command-line options).  */
enum attr_mac m68k_sched_mac;
5325
 
5326
/* Operand type, as classified by the scheduling helpers below
   (see sched_attr_op_type).  The OP_TYPE_MEMn values correspond to
   m68k effective-address modes.  */
enum attr_op_type
  {
    /* No operand.  */
    OP_TYPE_NONE,

    /* Integer register.  */
    OP_TYPE_RN,

    /* FP register.  */
    OP_TYPE_FPN,

    /* Implicit mem reference (e.g. stack).  */
    OP_TYPE_MEM1,

    /* Memory without offset or indexing.  EA modes 2, 3 and 4.  */
    OP_TYPE_MEM234,

    /* Memory with offset but without indexing.  EA mode 5.  */
    OP_TYPE_MEM5,

    /* Memory with indexing.  EA mode 6.  */
    OP_TYPE_MEM6,

    /* Memory referenced by absolute address.  EA mode 7.  */
    OP_TYPE_MEM7,

    /* Immediate operand that doesn't require extension word.  */
    OP_TYPE_IMM_Q,

    /* Immediate 16 bit operand.  */
    OP_TYPE_IMM_W,

    /* Immediate 32 bit operand.  */
    OP_TYPE_IMM_L
  };
5362
 
5363
/* Return type of memory ADDR_RTX refers to.
   MODE is the mode of the access; the result is one of the OP_TYPE_MEM*
   classifications above.  */
static enum attr_op_type
sched_address_type (enum machine_mode mode, rtx addr_rtx)
{
  struct m68k_address address;

  /* Symbolic references are absolute addresses (EA mode 7).  */
  if (symbolic_operand (addr_rtx, VOIDmode))
    return OP_TYPE_MEM7;

  if (!m68k_decompose_address (mode, addr_rtx,
			       reload_completed, &address))
    {
      gcc_assert (!reload_completed);
      /* Reload will likely fix the address to be in the register.  */
      return OP_TYPE_MEM234;
    }

  /* A non-zero scale implies an index register (EA mode 6).  */
  if (address.scale != 0)
    return OP_TYPE_MEM6;

  if (address.base != NULL_RTX)
    {
      if (address.offset == NULL_RTX)
	return OP_TYPE_MEM234;

      return OP_TYPE_MEM5;
    }

  /* No base and no index: must be an absolute offset.  */
  gcc_assert (address.offset != NULL_RTX);

  return OP_TYPE_MEM7;
}
5395
 
5396
/* Return X or Y (depending on OPX_P) operand of INSN.  */
5397
static rtx
5398
sched_get_operand (rtx insn, bool opx_p)
5399
{
5400
  int i;
5401
 
5402
  if (recog_memoized (insn) < 0)
5403
    gcc_unreachable ();
5404
 
5405
  extract_constrain_insn_cached (insn);
5406
 
5407
  if (opx_p)
5408
    i = get_attr_opx (insn);
5409
  else
5410
    i = get_attr_opy (insn);
5411
 
5412
  if (i >= recog_data.n_operands)
5413
    return NULL;
5414
 
5415
  return recog_data.operand[i];
5416
}
5417
 
5418
/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
   If ADDRESS_P is true, return type of memory location operand refers to.  */
static enum attr_op_type
sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
{
  rtx op;

  op = sched_get_operand (insn, opx_p);

  if (op == NULL)
    {
      gcc_assert (!reload_completed);
      return OP_TYPE_RN;
    }

  if (address_p)
    /* OP itself is an address here; the access mode is irrelevant for
       classifying the addressing mode, so QImode will do.  */
    return sched_address_type (QImode, op);

  if (memory_operand (op, VOIDmode))
    return sched_address_type (GET_MODE (op), XEXP (op, 0));

  if (register_operand (op, VOIDmode))
    {
      /* Before reload judge by the mode; after reload the hard register
	 class is known.  */
      if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
	  || (reload_completed && FP_REG_P (op)))
	return OP_TYPE_FPN;

      return OP_TYPE_RN;
    }

  if (GET_CODE (op) == CONST_INT)
    {
      int ival;

      ival = INTVAL (op);

      /* Check for quick constants.  */
      switch (get_attr_type (insn))
	{
	case TYPE_ALUQ_L:
	  /* addq/subq take an immediate in 1..8 (or its negation).  */
	  if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOVEQ_L:
	  if (USE_MOVQ (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOV3Q_L:
	  if (valid_mov3q_const (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	default:
	  break;
	}

      /* Constants that fit in 16 bits need only one extension word.  */
      if (IN_RANGE (ival, -0x8000, 0x7fff))
	return OP_TYPE_IMM_W;

      return OP_TYPE_IMM_L;
    }

  if (GET_CODE (op) == CONST_DOUBLE)
    {
      switch (GET_MODE (op))
	{
	case SFmode:
	  return OP_TYPE_IMM_W;

	case VOIDmode:
	case DFmode:
	  return OP_TYPE_IMM_L;

	default:
	  gcc_unreachable ();
	}
    }

  if (GET_CODE (op) == CONST
      || symbolic_operand (op, VOIDmode)
      || LABEL_P (op))
    {
      switch (GET_MODE (op))
	{
	case QImode:
	  return OP_TYPE_IMM_Q;

	case HImode:
	  return OP_TYPE_IMM_W;

	case SImode:
	  return OP_TYPE_IMM_L;

	default:
	  if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
	    /* Just a guess.  */
	    return OP_TYPE_IMM_W;

	  return OP_TYPE_IMM_L;
	}
    }

  /* Anything unrecognized should only occur before reload; treat it as
     a plain register operand.  */
  gcc_assert (!reload_completed);

  if (FLOAT_MODE_P (GET_MODE (op)))
    return OP_TYPE_FPN;

  return OP_TYPE_RN;
}
5535
 
5536
/* Implement opx_type attribute.
   Return type of INSN's operand X.
   If ADDRESS_P is true, return type of memory location operand refers to.
   This is a one-to-one translation of the internal OP_TYPE_* values into
   the generated OPX_TYPE_* attribute enum.  */
enum attr_opx_type
m68k_sched_attr_opx_type (rtx insn, int address_p)
{
  switch (sched_attr_op_type (insn, true, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPX_TYPE_RN;

    case OP_TYPE_FPN:
      return OPX_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPX_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPX_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPX_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPX_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPX_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPX_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPX_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPX_TYPE_IMM_L;

    default:
      /* OP_TYPE_NONE should never be produced for an X operand.  */
      gcc_unreachable ();
    }
}
5578
 
5579
/* Implement opy_type attribute.
   Return type of INSN's operand Y.
   If ADDRESS_P is true, return type of memory location operand refers to.
   Mirror of m68k_sched_attr_opx_type for the Y operand.  */
enum attr_opy_type
m68k_sched_attr_opy_type (rtx insn, int address_p)
{
  switch (sched_attr_op_type (insn, false, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPY_TYPE_RN;

    case OP_TYPE_FPN:
      return OPY_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPY_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPY_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPY_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPY_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPY_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPY_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPY_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPY_TYPE_IMM_L;

    default:
      /* OP_TYPE_NONE should never be produced for a Y operand.  */
      gcc_unreachable ();
    }
}
5621
 
5622
/* Return size of INSN as int.  */
5623
static int
5624
sched_get_attr_size_int (rtx insn)
5625
{
5626
  int size;
5627
 
5628
  switch (get_attr_type (insn))
5629
    {
5630
    case TYPE_IGNORE:
5631
      /* There should be no references to m68k_sched_attr_size for 'ignore'
5632
         instructions.  */
5633
      gcc_unreachable ();
5634
      return 0;
5635
 
5636
    case TYPE_MUL_L:
5637
      size = 2;
5638
      break;
5639
 
5640
    default:
5641
      size = 1;
5642
      break;
5643
    }
5644
 
5645
  switch (get_attr_opx_type (insn))
5646
    {
5647
    case OPX_TYPE_NONE:
5648
    case OPX_TYPE_RN:
5649
    case OPX_TYPE_FPN:
5650
    case OPX_TYPE_MEM1:
5651
    case OPX_TYPE_MEM234:
5652
    case OPY_TYPE_IMM_Q:
5653
      break;
5654
 
5655
    case OPX_TYPE_MEM5:
5656
    case OPX_TYPE_MEM6:
5657
      /* Here we assume that most absolute references are short.  */
5658
    case OPX_TYPE_MEM7:
5659
    case OPY_TYPE_IMM_W:
5660
      ++size;
5661
      break;
5662
 
5663
    case OPY_TYPE_IMM_L:
5664
      size += 2;
5665
      break;
5666
 
5667
    default:
5668
      gcc_unreachable ();
5669
    }
5670
 
5671
  switch (get_attr_opy_type (insn))
5672
    {
5673
    case OPY_TYPE_NONE:
5674
    case OPY_TYPE_RN:
5675
    case OPY_TYPE_FPN:
5676
    case OPY_TYPE_MEM1:
5677
    case OPY_TYPE_MEM234:
5678
    case OPY_TYPE_IMM_Q:
5679
      break;
5680
 
5681
    case OPY_TYPE_MEM5:
5682
    case OPY_TYPE_MEM6:
5683
      /* Here we assume that most absolute references are short.  */
5684
    case OPY_TYPE_MEM7:
5685
    case OPY_TYPE_IMM_W:
5686
      ++size;
5687
      break;
5688
 
5689
    case OPY_TYPE_IMM_L:
5690
      size += 2;
5691
      break;
5692
 
5693
    default:
5694
      gcc_unreachable ();
5695
    }
5696
 
5697
  if (size > 3)
5698
    {
5699
      gcc_assert (!reload_completed);
5700
 
5701
      size = 3;
5702
    }
5703
 
5704
  return size;
5705
}
5706
 
5707
/* Return size of INSN as attribute enum value.  */
5708
enum attr_size
5709
m68k_sched_attr_size (rtx insn)
5710
{
5711
  switch (sched_get_attr_size_int (insn))
5712
    {
5713
    case 1:
5714
      return SIZE_1;
5715
 
5716
    case 2:
5717
      return SIZE_2;
5718
 
5719
    case 3:
5720
      return SIZE_3;
5721
 
5722
    default:
5723
      gcc_unreachable ();
5724
    }
5725
}
5726
 
5727
/* Return a coarse classification of operand X or Y (depending on OPX_P)
   of INSN: OP_TYPE_RN for non-memory operands, OP_TYPE_MEM6 for indexed
   memory, OP_TYPE_MEM1 for any other memory reference.
   (Note: despite the historic comment, this returns an enum class, not
   the operand rtx.)  */
static enum attr_op_type
sched_get_opxy_mem_type (rtx insn, bool opx_p)
{
  if (opx_p)
    {
      switch (get_attr_opx_type (insn))
	{
	case OPX_TYPE_NONE:
	case OPX_TYPE_RN:
	case OPX_TYPE_FPN:
	case OPX_TYPE_IMM_Q:
	case OPX_TYPE_IMM_W:
	case OPX_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPX_TYPE_MEM1:
	case OPX_TYPE_MEM234:
	case OPX_TYPE_MEM5:
	case OPX_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPX_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      switch (get_attr_opy_type (insn))
	{
	case OPY_TYPE_NONE:
	case OPY_TYPE_RN:
	case OPY_TYPE_FPN:
	case OPY_TYPE_IMM_Q:
	case OPY_TYPE_IMM_W:
	case OPY_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPY_TYPE_MEM1:
	case OPY_TYPE_MEM234:
	case OPY_TYPE_MEM5:
	case OPY_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPY_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
}
5783
 
5784
/* Implement op_mem attribute.
   Classify INSN's combined memory usage.  Judging by the mapping below,
   the value encodes the Y-then-X memory behavior: '0' — no access,
   '1' — plain access, 'I' — indexed access; e.g. OP_MEM_10 appears to
   mean "Y reads memory, X does not" — confirm against the cf*.md
   reservations before relying on this reading.  */
enum attr_op_mem
m68k_sched_attr_op_mem (rtx insn)
{
  enum attr_op_type opx;
  enum attr_op_type opy;

  opx = sched_get_opxy_mem_type (insn, true);
  opy = sched_get_opxy_mem_type (insn, false);

  if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
    return OP_MEM_00;

  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_10;

	case OPX_ACCESS_W:
	  return OP_MEM_01;

	case OPX_ACCESS_RW:
	  return OP_MEM_11;

	default:
	  gcc_unreachable ();
	}
    }

  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_I0;

	case OPX_ACCESS_W:
	  return OP_MEM_0I;

	case OPX_ACCESS_RW:
	  return OP_MEM_I1;

	default:
	  gcc_unreachable ();
	}
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
    return OP_MEM_10;

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_11;

	default:
	  /* Only a writing X operand is expected after reload.  */
	  gcc_assert (!reload_completed);
	  return OP_MEM_11;
	}
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_1I;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_1I;
	}
    }

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
    return OP_MEM_I0;

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_I1;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_I1;
	}
    }

  /* Only the doubly-indexed combination remains.  */
  gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
  gcc_assert (!reload_completed);
  return OP_MEM_I1;
}
5882
 
5883
/* Jump instructions types.  Indexed by INSN_UID.
   The same rtl insn can be expanded into different asm instructions
   depending on the cc0_status.  To properly determine type of jump
   instructions we scan instruction stream and map jumps types to this
   array.  Allocated in m68k_sched_md_init_global, freed in
   m68k_sched_md_finish_global.  */
static enum attr_type *sched_branch_type;

/* Return the type of the jump insn.
   INSN must have been recorded in sched_branch_type during
   init_global, otherwise the assert below fires.  */
enum attr_type
m68k_sched_branch_type (rtx insn)
{
  enum attr_type type;

  type = sched_branch_type[INSN_UID (insn)];

  /* XCNEWVEC zero-initializes the array, so 0 means "never scanned".  */
  gcc_assert (type != 0);

  return type;
}
5902
 
5903
/* Data for ColdFire V4 index bypass.
   Producer modifies register that is used as index in consumer with
   specified scale.  Filled in by m68k_sched_indexed_address_bypass_p
   and consumed (then cleared) by m68k_sched_adjust_cost.  */
static struct
{
  /* Producer instruction.  */
  rtx pro;

  /* Consumer instruction.  */
  rtx con;

  /* Scale of indexed memory access within consumer.
     Or zero if bypass should not be effective at the moment.  */
  int scale;
} sched_cfv4_bypass_data;

/* An empty state that is used in m68k_sched_adjust_cost.  */
static state_t sched_adjust_cost_state;
5921
 
5922
/* Implement adjust_cost scheduler hook.
   Return adjusted COST of dependency LINK between DEF_INSN and INSN.  */
static int
m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
			int cost)
{
  int delay;

  /* Unrecognized insns have no attributes to base an adjustment on.  */
  if (recog_memoized (def_insn) < 0
      || recog_memoized (insn) < 0)
    return cost;

  if (sched_cfv4_bypass_data.scale == 1)
    /* Handle ColdFire V4 bypass for indexed address with 1x scale.  */
    {
      /* haifa-sched.c: insn_cost () calls bypass_p () just before
	 targetm.sched.adjust_cost ().  Hence, we can be relatively sure
	 that the data in sched_cfv4_bypass_data is up to date.  */
      gcc_assert (sched_cfv4_bypass_data.pro == def_insn
		  && sched_cfv4_bypass_data.con == insn);

      if (cost < 3)
	cost = 3;

      /* Clear the hand-off data so the next bypass query starts clean.  */
      sched_cfv4_bypass_data.pro = NULL;
      sched_cfv4_bypass_data.con = NULL;
      sched_cfv4_bypass_data.scale = 0;
    }
  else
    gcc_assert (sched_cfv4_bypass_data.pro == NULL
		&& sched_cfv4_bypass_data.con == NULL
		&& sched_cfv4_bypass_data.scale == 0);

  /* Don't try to issue INSN earlier than DFA permits.
     This is especially useful for instructions that write to memory,
     as their true dependence (default) latency is better to be set to 0
     to workaround alias analysis limitations.
     This is, in fact, a machine independent tweak, so, probably,
     it should be moved to haifa-sched.c: insn_cost ().  */
  delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
  if (delay > cost)
    cost = delay;

  return cost;
}
5967
 
5968
/* Return maximal number of insns that can be scheduled on a single cycle.  */
5969
static int
5970
m68k_sched_issue_rate (void)
5971
{
5972
  switch (m68k_sched_cpu)
5973
    {
5974
    case CPU_CFV1:
5975
    case CPU_CFV2:
5976
    case CPU_CFV3:
5977
      return 1;
5978
 
5979
    case CPU_CFV4:
5980
      return 2;
5981
 
5982
    default:
5983
      gcc_unreachable ();
5984
      return 0;
5985
    }
5986
}
5987
 
5988
/* Maximal length of instruction for current CPU.
   E.g. it is 3 for any ColdFire core.  */
static int max_insn_size;

/* Data to model instruction buffer of CPU.
   The buffer state is maintained by the variable_issue and
   dfa_{pre,post}_advance_cycle hooks below.  */
struct _sched_ib
{
  /* True if instruction buffer model is modeled for current CPU.  */
  bool enabled_p;

  /* Size of the instruction buffer in words.  */
  int size;

  /* Number of filled words in the instruction buffer.  */
  int filled;

  /* Additional information about instruction buffer for CPUs that have
     a buffer of instruction records, rather then a plain buffer
     of instruction words.  */
  struct _sched_ib_records
  {
    /* Size of buffer in records.  */
    int n_insns;

    /* Array to hold data on adjustements made to the size of the buffer.  */
    int *adjust;

    /* Index of the above array.  */
    int adjust_index;
  } records;

  /* An insn that reserves (marks empty) one word in the instruction buffer.  */
  rtx insn;
};

static struct _sched_ib sched_ib;

/* ID of memory unit.  */
static int sched_mem_unit_code;
6027
 
6028
/* Implementation of the targetm.sched.variable_issue () hook.
   It is called after INSN was issued.  It returns the number of insns
   that can possibly get scheduled on the current cycle.
   It is used here to determine the effect of INSN on the instruction
   buffer.  */
static int
m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   rtx insn, int can_issue_more)
{
  int insn_size;

  if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
    {
      switch (m68k_sched_cpu)
	{
	case CPU_CFV1:
	case CPU_CFV2:
	  insn_size = sched_get_attr_size_int (insn);
	  break;

	case CPU_CFV3:
	  insn_size = sched_get_attr_size_int (insn);

	  /* ColdFire V3 and V4 cores have instruction buffers that can
	     accumulate up to 8 instructions regardless of instructions'
	     sizes.  So we should take care not to "prefetch" 24 one-word
	     or 12 two-words instructions.
	     To model this behavior we temporarily decrease size of the
	     buffer by (max_insn_size - insn_size) for next 7 instructions.  */
	  {
	    int adjust;

	    adjust = max_insn_size - insn_size;
	    sched_ib.size -= adjust;

	    if (sched_ib.filled > sched_ib.size)
	      sched_ib.filled = sched_ib.size;

	    /* Remember the adjustment so it can be undone later.  */
	    sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
	  }

	  /* Advance the circular index over the adjustment records.  */
	  ++sched_ib.records.adjust_index;
	  if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
	    sched_ib.records.adjust_index = 0;

	  /* Undo adjustement we did 7 instructions ago.  */
	  sched_ib.size
	    += sched_ib.records.adjust[sched_ib.records.adjust_index];

	  break;

	case CPU_CFV4:
	  /* V4 has no buffer model (see m68k_sched_md_init_global).  */
	  gcc_assert (!sched_ib.enabled_p);
	  insn_size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (insn_size > sched_ib.filled)
	/* Scheduling for register pressure does not always take DFA into
	   account.  Workaround instruction buffer not being filled enough.  */
	{
	  gcc_assert (sched_pressure_p);
	  insn_size = sched_ib.filled;
	}

      --can_issue_more;
    }
  else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
	   || asm_noperands (PATTERN (insn)) >= 0)
    /* Inline asm has unknown size; conservatively drain the buffer.  */
    insn_size = sched_ib.filled;
  else
    insn_size = 0;

  sched_ib.filled -= insn_size;

  return can_issue_more;
}
6109
 
6110
/* Return how many instructions should scheduler lookahead to choose the
   best one.  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  /* One less than the issue rate: lookahead only matters on
     multiple-issue cores.  */
  int rate = m68k_sched_issue_rate ();

  return rate - 1;
}
6117
 
6118
/* Implementation of targetm.sched.init_global () hook.
   It is invoked once per scheduling pass and is used here
   to initialize scheduler constants.  */
static void
m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   int n_insns ATTRIBUTE_UNUSED)
{
  /* Init branch types.  */
  {
    rtx insn;

    sched_branch_type = XCNEWVEC (enum attr_type, get_max_uid () + 1);

    for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
      {
	if (JUMP_P (insn))
	  /* !!! FIXME: Implement real scan here.  */
	  sched_branch_type[INSN_UID (insn)] = TYPE_BCC;
      }
  }

#ifdef ENABLE_CHECKING
  /* Check that all instructions have DFA reservations and
     that all instructions can be issued from a clean state.  */
  {
    rtx insn;
    state_t state;

    state = alloca (state_size ());

    for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
      {
	if (INSN_P (insn) && recog_memoized (insn) >= 0)
	  {
	    gcc_assert (insn_has_dfa_reservation_p (insn));

	    state_reset (state);
	    if (state_transition (state, insn) >= 0)
	      gcc_unreachable ();
	  }
      }
  }
#endif

  /* Setup target cpu.  */

  /* ColdFire V4 has a set of features to keep its instruction buffer full
     (e.g., a separate memory bus for instructions) and, hence, we do not model
     buffer for this CPU.  */
  sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);

  switch (m68k_sched_cpu)
    {
    case CPU_CFV4:
      sched_ib.filled = 0;

      /* FALLTHRU */

    case CPU_CFV1:
    case CPU_CFV2:
      max_insn_size = 3;
      sched_ib.records.n_insns = 0;
      sched_ib.records.adjust = NULL;
      break;

    case CPU_CFV3:
      max_insn_size = 3;
      /* V3 buffers up to 8 instruction records (see variable_issue).  */
      sched_ib.records.n_insns = 8;
      sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
      break;

    default:
      gcc_unreachable ();
    }

  sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");

  /* Allocate the scratch DFA state used by m68k_sched_adjust_cost.  */
  sched_adjust_cost_state = xmalloc (state_size ());
  state_reset (sched_adjust_cost_state);

  /* Build the one-word buffer-reservation insn used by
     m68k_sched_dfa_post_advance_cycle.  */
  start_sequence ();
  emit_insn (gen_ib ());
  sched_ib.insn = get_insns ();
  end_sequence ();
}
6204
 
6205
/* Scheduling pass is now finished.  Free/reset static variables.
   Releases everything allocated by m68k_sched_md_init_global.  */
static void
m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
			     int verbose ATTRIBUTE_UNUSED)
{
  /* The reservation insn was emitted into a sequence; just drop the
     reference.  */
  sched_ib.insn = NULL;

  free (sched_adjust_cost_state);
  sched_adjust_cost_state = NULL;

  sched_mem_unit_code = 0;

  /* Tear down the instruction-buffer model.  */
  free (sched_ib.records.adjust);
  sched_ib.records.adjust = NULL;
  sched_ib.records.n_insns = 0;
  max_insn_size = 0;

  /* Release the per-insn branch-type map.  */
  free (sched_branch_type);
  sched_branch_type = NULL;
}
6225
 
6226
/* Implementation of targetm.sched.init () hook.
   It is invoked each time scheduler starts on the new block (basic block or
   extended basic block).  Resets the instruction-buffer model.  */
static void
m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
		    int sched_verbose ATTRIBUTE_UNUSED,
		    int n_insns ATTRIBUTE_UNUSED)
{
  switch (m68k_sched_cpu)
    {
    case CPU_CFV1:
    case CPU_CFV2:
      /* V1/V2 model a plain 6-word buffer.  */
      sched_ib.size = 6;
      break;

    case CPU_CFV3:
      sched_ib.size = sched_ib.records.n_insns * max_insn_size;

      /* Forget any size adjustments left over from the previous block.  */
      memset (sched_ib.records.adjust, 0,
	      sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
      sched_ib.records.adjust_index = 0;
      break;

    case CPU_CFV4:
      gcc_assert (!sched_ib.enabled_p);
      sched_ib.size = 0;
      break;

    default:
      gcc_unreachable ();
    }

  if (sched_ib.enabled_p)
    /* haifa-sched.c: schedule_block () calls advance_cycle () just before
       the first cycle.  Workaround that.  */
    sched_ib.filled = -2;
}
6263
 
6264
/* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6265
   It is invoked just before current cycle finishes and is used here
6266
   to track if instruction buffer got its two words this cycle.  */
6267
static void
6268
m68k_sched_dfa_pre_advance_cycle (void)
6269
{
6270
  if (!sched_ib.enabled_p)
6271
    return;
6272
 
6273
  if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6274
    {
6275
      sched_ib.filled += 2;
6276
 
6277
      if (sched_ib.filled > sched_ib.size)
6278
        sched_ib.filled = sched_ib.size;
6279
    }
6280
}
6281
 
6282
/* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6283
   It is invoked just after new cycle begins and is used here
6284
   to setup number of filled words in the instruction buffer so that
6285
   instructions which won't have all their words prefetched would be
6286
   stalled for a cycle.  */
6287
static void
6288
m68k_sched_dfa_post_advance_cycle (void)
6289
{
6290
  int i;
6291
 
6292
  if (!sched_ib.enabled_p)
6293
    return;
6294
 
6295
  /* Setup number of prefetched instruction words in the instruction
6296
     buffer.  */
6297
  i = max_insn_size - sched_ib.filled;
6298
 
6299
  while (--i >= 0)
6300
    {
6301
      if (state_transition (curr_state, sched_ib.insn) >= 0)
6302
        gcc_unreachable ();
6303
    }
6304
}
6305
 
6306
/* Return X or Y (depending on OPX_P) operand of INSN,
6307
   if it is an integer register, or NULL overwise.  */
6308
static rtx
6309
sched_get_reg_operand (rtx insn, bool opx_p)
6310
{
6311
  rtx op = NULL;
6312
 
6313
  if (opx_p)
6314
    {
6315
      if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6316
        {
6317
          op = sched_get_operand (insn, true);
6318
          gcc_assert (op != NULL);
6319
 
6320
          if (!reload_completed && !REG_P (op))
6321
            return NULL;
6322
        }
6323
    }
6324
  else
6325
    {
6326
      if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6327
        {
6328
          op = sched_get_operand (insn, false);
6329
          gcc_assert (op != NULL);
6330
 
6331
          if (!reload_completed && !REG_P (op))
6332
            return NULL;
6333
        }
6334
    }
6335
 
6336
  return op;
6337
}
6338
 
6339
/* Return true, if X or Y (depending on OPX_P) operand of INSN
6340
   is a MEM.  */
6341
static bool
6342
sched_mem_operand_p (rtx insn, bool opx_p)
6343
{
6344
  switch (sched_get_opxy_mem_type (insn, opx_p))
6345
    {
6346
    case OP_TYPE_MEM1:
6347
    case OP_TYPE_MEM6:
6348
      return true;
6349
 
6350
    default:
6351
      return false;
6352
    }
6353
}
6354
 
6355
/* Return X or Y (depending on OPX_P) operand of INSN,
6356
   if it is a MEM, or NULL overwise.  */
6357
static rtx
6358
sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p)
6359
{
6360
  bool opx_p;
6361
  bool opy_p;
6362
 
6363
  opx_p = false;
6364
  opy_p = false;
6365
 
6366
  if (must_read_p)
6367
    {
6368
      opx_p = true;
6369
      opy_p = true;
6370
    }
6371
 
6372
  if (must_write_p)
6373
    {
6374
      opx_p = true;
6375
      opy_p = false;
6376
    }
6377
 
6378
  if (opy_p && sched_mem_operand_p (insn, false))
6379
    return sched_get_operand (insn, false);
6380
 
6381
  if (opx_p && sched_mem_operand_p (insn, true))
6382
    return sched_get_operand (insn, true);
6383
 
6384
  gcc_unreachable ();
6385
  return NULL;
6386
}
6387
 
6388
/* Return non-zero if PRO modifies register used as part of
6389
   address in CON.  */
6390
int
6391
m68k_sched_address_bypass_p (rtx pro, rtx con)
6392
{
6393
  rtx pro_x;
6394
  rtx con_mem_read;
6395
 
6396
  pro_x = sched_get_reg_operand (pro, true);
6397
  if (pro_x == NULL)
6398
    return 0;
6399
 
6400
  con_mem_read = sched_get_mem_operand (con, true, false);
6401
  gcc_assert (con_mem_read != NULL);
6402
 
6403
  if (reg_mentioned_p (pro_x, con_mem_read))
6404
    return 1;
6405
 
6406
  return 0;
6407
}
6408
 
6409
/* Helper function for m68k_sched_indexed_address_bypass_p.
   if PRO modifies register used as index in CON,
   return scale of indexed memory access in CON.  Return zero overwise.  */
static int
sched_get_indexed_address_scale (rtx pro, rtx con)
{
  rtx reg;
  rtx mem;
  struct m68k_address address;

  reg = sched_get_reg_operand (pro, true);
  if (reg == NULL)
    return 0;

  mem = sched_get_mem_operand (con, true, false);
  gcc_assert (mem != NULL && MEM_P (mem));

  if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
			       &address))
    gcc_unreachable ();

  /* NOTE(review): this dereferences address.index unconditionally;
     presumably the bypass is only queried for insns whose address has
     an index register — confirm against the cf*.md bypass conditions.  */
  if (REGNO (reg) == REGNO (address.index))
    {
      gcc_assert (address.scale != 0);
      return address.scale;
    }

  return 0;
}
6438
 
6439
/* Return non-zero if PRO modifies register used
   as index with scale 2 or 4 in CON.
   A 1x scale cannot be expressed as a fixed-latency bypass, so it is
   recorded in sched_cfv4_bypass_data and handled later by
   m68k_sched_adjust_cost.  */
int
m68k_sched_indexed_address_bypass_p (rtx pro, rtx con)
{
  /* The hand-off slot must be empty at this point (see adjust_cost).  */
  gcc_assert (sched_cfv4_bypass_data.pro == NULL
	      && sched_cfv4_bypass_data.con == NULL
	      && sched_cfv4_bypass_data.scale == 0);

  switch (sched_get_indexed_address_scale (pro, con))
    {
    case 1:
      /* We can't have a variable latency bypass, so
	 remember to adjust the insn cost in adjust_cost hook.  */
      sched_cfv4_bypass_data.pro = pro;
      sched_cfv4_bypass_data.con = con;
      sched_cfv4_bypass_data.scale = 1;
      return 0;

    case 2:
    case 4:
      return 1;

    default:
      return 0;
    }
}
6466
 
6467
/* We generate a two-instructions program at M_TRAMP :
	movea.l &CHAIN_VALUE,%a0
	jmp FNADDR
   where %a0 can be modified by changing STATIC_CHAIN_REGNUM.  */

static void
m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx mem;

  gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));

  /* 0x207C is the "movea.l #imm32,%a0" opcode; the register field
     (bits 11-9) is patched for the actual static chain register.  */
  mem = adjust_address (m_tramp, HImode, 0);
  emit_move_insn (mem, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM-8) << 9)));
  mem = adjust_address (m_tramp, SImode, 2);
  emit_move_insn (mem, chain_value);

  /* 0x4EF9 is the "jmp <abs32>" opcode, followed by the target.  */
  mem = adjust_address (m_tramp, HImode, 6);
  emit_move_insn (mem, GEN_INT(0x4EF9));
  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, fnaddr);

  /* Flush the instruction cache over the freshly written code.  */
  FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
}
6492
 
6493
/* On the 68000, the RTS insn cannot pop anything.
6494
   On the 68010, the RTD insn may be used to pop them if the number
6495
     of args is fixed, but if the number is variable then the caller
6496
     must pop them all.  RTD can't be used for library calls now
6497
     because the library is compiled with the Unix compiler.
6498
   Use of RTD is a selectable option, since it is incompatible with
6499
   standard Unix calling sequences.  If the option is not selected,
6500
   the caller must always pop the args.  */
6501
 
6502
static int
6503
m68k_return_pops_args (tree fundecl, tree funtype, int size)
6504
{
6505
  return ((TARGET_RTD
6506
           && (!fundecl
6507
               || TREE_CODE (fundecl) != IDENTIFIER_NODE)
6508
           && (!stdarg_p (funtype)))
6509
          ? size : 0);
6510
}
6511
 
6512
/* Make sure everything's fine if we *don't* have a given processor.
6513
   This assumes that putting a register in fixed_regs will keep the
6514
   compiler's mitts completely off it.  We don't bother to zero it out
6515
   of register classes.  */
6516
 
6517
static void
6518
m68k_conditional_register_usage (void)
6519
{
6520
  int i;
6521
  HARD_REG_SET x;
6522
  if (!TARGET_HARD_FLOAT)
6523
    {
6524
      COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
6525
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6526
        if (TEST_HARD_REG_BIT (x, i))
6527
          fixed_regs[i] = call_used_regs[i] = 1;
6528
    }
6529
  if (flag_pic)
6530
    fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
6531
}
6532
 
6533
/* Set up out-of-line library fallbacks for the synchronization
   primitives, for operand sizes up to the word size.  */
static void
m68k_init_sync_libfuncs (void)
{
  init_sync_libfuncs (UNITS_PER_WORD);
}
6538
 
6539
#include "gt-m68k.h"

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.