/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "obstack.h"
#include "except.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"
#include <splay-tree.h>
#include "cfglayout.h"
#include "gimple.h"
#include "tree-flow.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"
#include "df.h"
#include "libfuncs.h"

/* Specify which cpu to schedule for.  */
enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */
enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};

/* Specify how accurate floating-point traps need to be.  */

enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */

enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */

enum alpha_fp_trap_mode alpha_fptm;

/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */

static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */

int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */

static int alpha_function_needs_gp;

/* The alias set for prologue/epilogue register save/restore.  */

static GTY(()) alias_set_type alpha_sr_alias_set;

/* The assembler name of the current function.  */

static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;

/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};

static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),          /* fp_add */
    COSTS_N_INSNS (6),          /* fp_mult */
    COSTS_N_INSNS (34),         /* fp_div_sf */
    COSTS_N_INSNS (63),         /* fp_div_df */
    COSTS_N_INSNS (23),         /* int_mult_si */
    COSTS_N_INSNS (23),         /* int_mult_di */
    COSTS_N_INSNS (2),          /* int_shift */
    COSTS_N_INSNS (2),          /* int_cmov */
    COSTS_N_INSNS (97),         /* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),          /* fp_add */
    COSTS_N_INSNS (4),          /* fp_mult */
    COSTS_N_INSNS (15),         /* fp_div_sf */
    COSTS_N_INSNS (22),         /* fp_div_df */
    COSTS_N_INSNS (8),          /* int_mult_si */
    COSTS_N_INSNS (12),         /* int_mult_di */
    COSTS_N_INSNS (1) + 1,      /* int_shift */
    COSTS_N_INSNS (1),          /* int_cmov */
    COSTS_N_INSNS (83),         /* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),          /* fp_add */
    COSTS_N_INSNS (4),          /* fp_mult */
    COSTS_N_INSNS (12),         /* fp_div_sf */
    COSTS_N_INSNS (15),         /* fp_div_df */
    COSTS_N_INSNS (7),          /* int_mult_si */
    COSTS_N_INSNS (7),          /* int_mult_di */
    COSTS_N_INSNS (1),          /* int_shift */
    COSTS_N_INSNS (2),          /* int_cmov */
    COSTS_N_INSNS (86),         /* int_div */
  },
};

/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one of them.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),            /* fp_add */
  COSTS_N_INSNS (1),            /* fp_mult */
  COSTS_N_INSNS (1),            /* fp_div_sf */
  COSTS_N_INSNS (1) + 1,        /* fp_div_df */
  COSTS_N_INSNS (1) + 1,        /* int_mult_si */
  COSTS_N_INSNS (1) + 2,        /* int_mult_di */
  COSTS_N_INSNS (1),            /* int_shift */
  COSTS_N_INSNS (1),            /* int_cmov */
  COSTS_N_INSNS (6),            /* int_div */
};
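
/* Editor's note, not from the original source: COSTS_N_INSNS (n) in GCC
   of this era scales by a fixed factor per instruction (rtl.h defines it
   as (n) * 4), so the "+ 1" and "+ 2" terms in the tables above act as
   quarter- and half-insn fractional costs, not whole extra insns.  */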
 
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
#define NUM_ARGS crtl->args.info.num_args
#else
#define NUM_ARGS crtl->args.info
#endif

#define REG_PV 27
#define REG_RA 26

/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *, tree);
static bool vms_valid_pointer_mode (enum machine_mode);
#endif

static void unicosmk_output_deferred_case_vectors (FILE *);
static void unicosmk_gen_dsib (unsigned long *);
static void unicosmk_output_ssib (FILE *, const char *);
static int unicosmk_need_dex (rtx);

/* Implement TARGET_HANDLE_OPTION.  */

static bool
alpha_handle_option (size_t code, const char *arg, int value)
{
  switch (code)
    {
    case OPT_mfp_regs:
      if (value == 0)
        target_flags |= MASK_SOFT_FP;
      break;

    case OPT_mieee:
    case OPT_mieee_with_inexact:
      target_flags |= MASK_IEEE_CONFORMANT;
      break;

    case OPT_mtls_size_:
      if (value != 16 && value != 32 && value != 64)
        error ("bad value %qs for -mtls-size switch", arg);
      break;
    }

  return true;
}

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
alpha_mangle_type (const_tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif

/* Parse target option strings.  */

void
override_options (void)
{
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
  } cpu_table[] = {
    { "ev4",    PROCESSOR_EV4, 0 },
    { "ev45",   PROCESSOR_EV4, 0 },
    { "21064",  PROCESSOR_EV4, 0 },
    { "ev5",    PROCESSOR_EV5, 0 },
    { "21164",  PROCESSOR_EV5, 0 },
    { "ev56",   PROCESSOR_EV5, MASK_BWX },
    { "21164a", PROCESSOR_EV5, MASK_BWX },
    { "pca56",  PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "ev6",    PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "21264",  PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "ev67",   PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
    { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
  };

  int const ct_size = ARRAY_SIZE (cpu_table);
  int i;

  /* Unicos/Mk doesn't have shared libraries.  */
  if (TARGET_ABI_UNICOSMK && flag_pic)
    {
      warning (0, "-f%s ignored for Unicos/Mk (not supported)",
               (flag_pic > 1) ? "PIC" : "pic");
      flag_pic = 0;
    }

  /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
     floating-point instructions.  Make that the default for this target.  */
  if (TARGET_ABI_UNICOSMK)
    alpha_fprm = ALPHA_FPRM_DYN;
  else
    alpha_fprm = ALPHA_FPRM_NORM;

  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  /* We cannot use su and sui qualifiers for conversion instructions on
     Unicos/Mk.  I'm not sure if this is due to assembler or hardware
     limitations.  Right now, we issue a warning if -mieee is specified
     and then ignore it; eventually, we should either get it right or
     disable the option altogether.  */

  if (TARGET_IEEE)
    {
      if (TARGET_ABI_UNICOSMK)
        warning (0, "-mieee not supported on Unicos/Mk");
      else
        {
          alpha_tp = ALPHA_TP_INSN;
          alpha_fptm = ALPHA_FPTM_SU;
        }
    }

  if (TARGET_IEEE_WITH_INEXACT)
    {
      if (TARGET_ABI_UNICOSMK)
        warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
      else
        {
          alpha_tp = ALPHA_TP_INSN;
          alpha_fptm = ALPHA_FPTM_SUI;
        }
    }

  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
        alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
        alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
        alpha_tp = ALPHA_TP_INSN;
      else
        error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
    }

  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
        alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
        alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
        alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
        alpha_fprm = ALPHA_FPRM_DYN;
      else
        error ("bad value %qs for -mfp-rounding-mode switch",
               alpha_fprm_string);
    }

  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
        alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
        alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
        alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
        alpha_fptm = ALPHA_FPTM_SUI;
      else
        error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
    }

  if (alpha_cpu_string)
    {
      for (i = 0; i < ct_size; i++)
        if (! strcmp (alpha_cpu_string, cpu_table [i].name))
          {
            alpha_tune = alpha_cpu = cpu_table [i].processor;
            target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
            target_flags |= cpu_table [i].flags;
            break;
          }
      if (i == ct_size)
        error ("bad value %qs for -mcpu switch", alpha_cpu_string);
    }

  if (alpha_tune_string)
    {
      for (i = 0; i < ct_size; i++)
        if (! strcmp (alpha_tune_string, cpu_table [i].name))
          {
            alpha_tune = cpu_table [i].processor;
            break;
          }
      if (i == ct_size)
        error ("bad value %qs for -mtune switch", alpha_tune_string);
    }

  /* Do some sanity checks on the above options.  */

  if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
    {
      warning (0, "trap mode not supported on Unicos/Mk");
      alpha_fptm = ALPHA_FPTM_N;
    }

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
    {
      warning (0, "fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (alpha_cpu == PROCESSOR_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
         arithmetic traps, which means we can do software completion
         without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }

  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
        {
          warning (0, "rounding mode not supported for VAX floats");
          alpha_fprm = ALPHA_FPRM_NORM;
        }
      if (alpha_fptm == ALPHA_FPTM_SUI)
        {
          warning (0, "trap mode not supported for VAX floats");
          alpha_fptm = ALPHA_FPTM_SU;
        }
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
        warning (0, "128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }

  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
        && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
             && ISDIGIT ((unsigned char)alpha_mlat_string[1])
             && alpha_mlat_string[2] == '\0')
      {
        static int const cache_latency[][4] =
        {
          { 3, 30, -1 },        /* ev4 -- Bcache is a guess */
          { 2, 12, 38 },        /* ev5 -- Bcache from PC164 LMbench numbers */
          { 3, 12, 30 },        /* ev6 -- Bcache from DS20 LMbench.  */
        };

        lat = alpha_mlat_string[1] - '0';
        if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
          {
            warning (0, "L%d cache latency unknown for %s",
                     lat, alpha_cpu_name[alpha_tune]);
            lat = 3;
          }
        else
          lat = cache_latency[alpha_tune][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
        /* Most current memories have about 370ns latency.  This is
           a reasonable guess for a fast cpu.  */
        lat = 150;
      }
    else
      {
        warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
        lat = 3;
      }

    alpha_memory_latency = lat;
  }

  /* Default the definition of "small data" to 8 bytes.  */
  if (!g_switch_set)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize and also if
     we are writing ECOFF symbols to work around a bug in DEC's assembler.  */
  if (optimize > 0 && write_symbols != SDB_DEBUG)
    {
      if (align_loops <= 0)
        align_loops = 16;
      if (align_jumps <= 0)
        align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;

  /* Acquire a unique set number for our register saves and restores.  */
  alpha_sr_alias_set = new_alias_set ();

  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
     can be optimized to ap = __builtin_next_arg (0).  */
  if (TARGET_ABI_UNICOSMK)
    targetm.expand_builtin_va_start = NULL;
}

/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
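
/* Editor's illustrative examples, not from the original source:
   zap_mask (0x00ff00ff) returns 1, since every byte is 0x00 or 0xff;
   zap_mask (0x0fff) returns 0, because its second-lowest byte is 0x0f.  */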

/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
    default:
      gcc_unreachable ();
    }
}

/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (REG_P (tmp)
          && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
        {
          op = reg_equiv_memory_loc[REGNO (tmp)];
          if (op == 0)
            return 0;
        }
    }
  return op;
}

/* The scalar modes supported differ from the default check-what-c-supports
   version in that sometimes TFmode is available even when long double
   indicates only DFmode.  On Unicos/Mk, we have the situation that HImode
   doesn't map to any C type, but of course we still support that.  */

static bool
alpha_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode: /* via optabs.c */
      return true;

    case SFmode:
    case DFmode:
      return true;

    case TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}

/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (enum machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}

/* Return 1 if this function can directly return via $26.  */

int
direct_return (void)
{
  return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
          && reload_completed
          && alpha_sa_size () == 0
          && get_frame_size () == 0
          && crtl->outgoing_args_size == 0
          && crtl->args.pretend_args_size == 0);
}

/* Return the ADDR_VEC associated with a tablejump insn.  */

rtx
alpha_tablejump_addr_vec (rtx insn)
{
  rtx tmp;

  tmp = JUMP_LABEL (insn);
  if (!tmp)
    return NULL_RTX;
  tmp = NEXT_INSN (tmp);
  if (!tmp)
    return NULL_RTX;
  if (JUMP_P (tmp)
      && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
    return PATTERN (tmp);
  return NULL_RTX;
}

/* Return the label of the predicted edge, or CONST0_RTX if we don't know.  */

rtx
alpha_tablejump_best_label (rtx insn)
{
  rtx jump_table = alpha_tablejump_addr_vec (insn);
  rtx best_label = NULL_RTX;

  /* ??? Once the CFG doesn't keep getting completely rebuilt, look
     there for edge frequency counts from profile data.  */

  if (jump_table)
    {
      int n_labels = XVECLEN (jump_table, 1);
      int best_count = -1;
      int i, j;

      for (i = 0; i < n_labels; i++)
        {
          int count = 1;

          for (j = i + 1; j < n_labels; j++)
            if (XEXP (XVECEXP (jump_table, 1, i), 0)
                == XEXP (XVECEXP (jump_table, 1, j), 0))
              count++;

          if (count > best_count)
            best_count = count, best_label = XVECEXP (jump_table, 1, i);
        }
    }

  return best_label ? best_label : const0_rtx;
}

/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return TLS_MODEL_NONE;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}

/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (const_tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}

/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (const_tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".sdata") == 0
          || strcmp (section, ".sbss") == 0)
        return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
         in sdata because it might be too big when completed.  */
      if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
        return true;
    }

  return false;
}

#if TARGET_ABI_OPEN_VMS
static bool
vms_valid_pointer_mode (enum machine_mode mode)
{
  return (mode == SImode || mode == DImode);
}

static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname [symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF   \
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST                 \
       && GET_CODE (XEXP (X, 0)) == PLUS     \
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif

/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */

static bool
alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && CONST_INT_P (XEXP (x, 1))
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
          < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x)
      && (strict
          ? STRICT_REG_OK_FOR_BASE_P (x)
          : NONSTRICT_REG_OK_FOR_BASE_P (x)))
    return true;

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
          && (GET_MODE_SIZE (GET_MODE (x))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
        x = SUBREG_REG (x);

      if (REG_P (x))
        {
          if (! strict
              && NONSTRICT_REG_OK_FP_BASE_P (x)
              && CONST_INT_P (ofs))
            return true;
          if ((strict
               ? STRICT_REG_OK_FOR_BASE_P (x)
               : NONSTRICT_REG_OK_FOR_BASE_P (x))
              && CONSTANT_ADDRESS_P (ofs))
            return true;
        }
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as are small
     data symbols.  Avoid explicit relocations of modes larger than word
     mode since e.g. $LC0+8($1) can fold around +/- 32k offset.  */
  else if (TARGET_EXPLICIT_RELOCS
           && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (small_symbolic_operand (x, Pmode))
        return true;

      if (GET_CODE (x) == LO_SUM)
        {
          rtx ofs = XEXP (x, 1);
          x = XEXP (x, 0);

          /* Discard non-paradoxical subregs.  */
          if (GET_CODE (x) == SUBREG
              && (GET_MODE_SIZE (GET_MODE (x))
                  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
            x = SUBREG_REG (x);

          /* Must have a valid base register.  */
          if (! (REG_P (x)
                 && (strict
                     ? STRICT_REG_OK_FOR_BASE_P (x)
                     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
            return false;

          /* The symbol must be local.  */
          if (local_symbolic_operand (ofs, Pmode)
              || dtp32_symbolic_operand (ofs, Pmode)
              || tp32_symbolic_operand (ofs, Pmode))
            return true;
        }
    }

  return false;
}
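
/* Editor's illustrative note, not from the original source: under the
   rules above, (reg $16), (plus (reg $16) (const_int 8)), and, for
   DImode, the ldq_u-style (and (plus (reg $16) (const_int 11))
   (const_int -8)) would all be accepted, while (plus (reg $16)
   (const_int 65536)) would not, since 65536 exceeds the signed 16-bit
   displacement range checked by CONSTANT_ADDRESS_P.  */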

/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

static rtx
alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1))
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
                               XEXP (XEXP (XEXP (x, 1), 0), 0),
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }

  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
     Avoid modes larger than word mode since e.g. $LC0+8($1) can fold
     around +/- 32k offset.  */
  if (TARGET_EXPLICIT_RELOCS
      && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
      && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, insn, dest, seq;

      switch (tls_symbolic_operand_type (x))
        {
        case TLS_MODEL_NONE:
          break;

        case TLS_MODEL_GLOBAL_DYNAMIC:
          start_sequence ();

          r0 = gen_rtx_REG (Pmode, 0);
          r16 = gen_rtx_REG (Pmode, 16);
          tga = get_tls_get_addr ();
          dest = gen_reg_rtx (Pmode);
          seq = GEN_INT (alpha_next_sequence_number++);

          emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
          insn = gen_call_value_osf_tlsgd (r0, tga, seq);
          insn = emit_call_insn (insn);
          RTL_CONST_CALL_P (insn) = 1;
          use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

          insn = get_insns ();
          end_sequence ();

          emit_libcall_block (insn, dest, r0, x);
          return dest;

        case TLS_MODEL_LOCAL_DYNAMIC:
          start_sequence ();

          r0 = gen_rtx_REG (Pmode, 0);
          r16 = gen_rtx_REG (Pmode, 16);
          tga = get_tls_get_addr ();
          scratch = gen_reg_rtx (Pmode);
          seq = GEN_INT (alpha_next_sequence_number++);

          emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
          insn = gen_call_value_osf_tlsldm (r0, tga, seq);
          insn = emit_call_insn (insn);
          RTL_CONST_CALL_P (insn) = 1;
          use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

          insn = get_insns ();
          end_sequence ();

          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                UNSPEC_TLSLDM_CALL);
          emit_libcall_block (insn, scratch, r0, eqv);

          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);

          if (alpha_tls_size == 64)
            {
              dest = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
              emit_insn (gen_adddi3 (dest, dest, scratch));
              return dest;
            }
          if (alpha_tls_size == 32)
            {
              insn = gen_rtx_HIGH (Pmode, eqv);
              insn = gen_rtx_PLUS (Pmode, scratch, insn);
              scratch = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
            }
          return gen_rtx_LO_SUM (Pmode, scratch, eqv);

        case TLS_MODEL_INITIAL_EXEC:
          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);
          tp = gen_reg_rtx (Pmode);
          scratch = gen_reg_rtx (Pmode);
          dest = gen_reg_rtx (Pmode);

          emit_insn (gen_load_tp (tp));
          emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
          emit_insn (gen_adddi3 (dest, tp, scratch));
          return dest;

        case TLS_MODEL_LOCAL_EXEC:
          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);
          tp = gen_reg_rtx (Pmode);

          emit_insn (gen_load_tp (tp));
          if (alpha_tls_size == 32)
            {
              insn = gen_rtx_HIGH (Pmode, eqv);
              insn = gen_rtx_PLUS (Pmode, tp, insn);
              tp = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
            }
          return gen_rtx_LO_SUM (Pmode, tp, eqv);

        default:
          gcc_unreachable ();
        }

      if (local_symbolic_operand (x, Pmode))
        {
          if (small_symbolic_operand (x, Pmode))
            return x;
          else
            {
              if (can_create_pseudo_p ())
                scratch = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, scratch,
                                      gen_rtx_HIGH (Pmode, x)));
              return gen_rtx_LO_SUM (Pmode, scratch, x);
            }
        }
    }

  return NULL;

 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
                               (!can_create_pseudo_p () ? scratch : NULL_RTX),
                               1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
                               (!can_create_pseudo_p () ? scratch : NULL_RTX),
                               1, OPTAB_LIB_WIDEN);

    return plus_constant (x, low);
  }
}
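
/* Editor's worked example of the split_addend arithmetic above, not
   from the original source: for addend 0x9000, low comes out as
   ((0x9000 ^ 0x8000) - 0x8000) == -0x7000 and high as 0x10000, so the
   address is rebuilt as (x + 0x10000) + (-0x7000) == x + 0x9000; the
   high part fits an ldah and the low part a signed 16-bit offset.  */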


/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  Return X or the new, valid address.  */

static rtx
alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                          enum machine_mode mode)
{
  rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
  return new_x ? new_x : x;
}

/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}

/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}

int
some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  return small_symbolic_operand (x, Pmode) != 0;
}

static int
split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  if (small_symbolic_operand (x, Pmode))
    {
      x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
      *px = x;
      return -1;
    }

  return 0;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
  return x;
}

/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp, which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}


/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
                                 enum machine_mode mode ATTRIBUTE_UNUSED,
                                 int opnum, int type,
                                 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && REG_P (XEXP (XEXP (x, 0), 0))
      && CONST_INT_P (XEXP (XEXP (x, 0), 1))
      && CONST_INT_P (XEXP (x, 1)))
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts the number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
        = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
        return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
         in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
                        gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
                                      GEN_INT (high)),
                        GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      return x;
    }

  return NULL_RTX;
}
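
/* Editor's illustrative example, not from the original source: for a
   displacement of 0x14000, low == 0x4000 and high == 0x10000, so the
   high part is merged into the base register by a single ldah
   (immediate 1) while 0x4000 remains as the in-insn displacement.  */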

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, int code, int outer_code, int *total,
                 bool speed)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (!speed)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_tune];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
         nearly anywhere with no cost.  If it is a valid operand for an
         ADD or AND, likewise return 0 if we know it will be used in that
         context.  Otherwise, return 2 since it might be used there later.
         All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
        {
          *total = 0;
          return true;
        }
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
        *total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
               || (outer_code == AND && and_operand (x, VOIDmode)))
        *total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
        *total = 2;
      else
        *total = COSTS_N_INSNS (2);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
        *total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
        *total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
        /* Estimate of cost for call_pal rduniq.  */
        /* ??? How many insns do we emit here?  More than one...  */
        *total = COSTS_N_INSNS (15);
      else
        /* Otherwise we do a load from the GOT.  */
        *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
        *total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
               && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
        {
          *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
                              (enum rtx_code) outer_code, speed)
                    + rtx_cost (XEXP (x, 1),
                                (enum rtx_code) outer_code, speed)
                    + COSTS_N_INSNS (1));
          return true;
        }
      return false;

    case MULT:
      if (float_mode_p)
        *total = cost_data->fp_mult;
      else if (mode == DImode)
        *total = cost_data->int_mult_di;
      else
        *total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) <= 3)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
        *total = cost_data->fp_add;
      else
        *total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
        *total = cost_data->int_div;
      else if (mode == SFmode)
        *total = cost_data->fp_div_sf;
      else
        *total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
          return false;
        }
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    case FLOAT_EXTEND:
      if (MEM_P (XEXP (x, 0)))
        *total = 0;
      else
        *total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}

/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT disp, offset;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  else
    disp = 0;

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
  if (MEM_ALIGN (ref) >= 32)
    offset = 0;
  else
    offset = disp & 3;

  /* The location should not cross an aligned word boundary.  */
  gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
              <= GET_MODE_SIZE (SImode));

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  if (WORDS_BIG_ENDIAN)
    offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
  else
    offset *= 8;
  *pbitnum = GEN_INT (offset);
}
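
/* Editor's illustrative example, not from the original source: for a
   little-endian HImode reference at displacement 6 from an aligned
   base, offset == 2, so *PALIGNED_MEM covers bytes 4..7 and *PBITNUM
   is 16: the halfword starts 16 bits into the aligned SImode word.  */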

/* Similar, but just get the address.  Handle the two reload cases.  */

rtx
get_unaligned_address (rtx ref)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset);
}

/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
   X is always returned in a register.  */

rtx
get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
{
  if (GET_CODE (addr) == PLUS)
    {
      ofs += INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
}
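
/* Editor's illustrative example, not from the original source: for
   ADDR == (plus R (const_int 13)) and OFS == 2, the folded offset is
   15 and the result is R + (15 & 7) == R + 7, which satisfies
   (R + 7) & 7 == (R + 15) & 7.  */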
1535
 
1536
/* On the Alpha, all (non-symbolic) constants except zero go into
1537
   a floating-point register via memory.  Note that we cannot
1538
   return anything that is not a subset of RCLASS, and that some
1539
   symbolic constants cannot be dropped to memory.  */
1540
 
1541
enum reg_class
1542
alpha_preferred_reload_class(rtx x, enum reg_class rclass)
1543
{
1544
  /* Zero is present in any register class.  */
1545
  if (x == CONST0_RTX (GET_MODE (x)))
1546
    return rclass;
1547
 
1548
  /* These sorts of constants we can easily drop to memory.  */
1549
  if (CONST_INT_P (x)
1550
      || GET_CODE (x) == CONST_DOUBLE
1551
      || GET_CODE (x) == CONST_VECTOR)
1552
    {
1553
      if (rclass == FLOAT_REGS)
1554
        return NO_REGS;
1555
      if (rclass == ALL_REGS)
1556
        return GENERAL_REGS;
1557
      return rclass;
1558
    }
1559
 
1560
  /* All other kinds of constants should not (and in the case of HIGH
1561
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
1562
     secondary reload.  */
1563
  if (CONSTANT_P (x))
1564
    return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
1565
 
1566
  return rclass;
1567
}
1568
 
1569
/* Inform reload about cases where moving X with a mode MODE to a register in
1570
   RCLASS requires an extra scratch or immediate register.  Return the class
1571
   needed for the immediate register.  */
1572
 
1573
static enum reg_class
1574
alpha_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
1575
                        enum machine_mode mode, secondary_reload_info *sri)
1576
{
1577
  /* Loading and storing HImode or QImode values to and from memory
1578
     usually requires a scratch register.  */
1579
  if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1580
    {
1581
      if (any_memory_operand (x, mode))
1582
        {
1583
          if (in_p)
1584
            {
1585
              if (!aligned_memory_operand (x, mode))
1586
                sri->icode = reload_in_optab[mode];
1587
            }
1588
          else
1589
            sri->icode = reload_out_optab[mode];
1590
          return NO_REGS;
1591
        }
1592
    }
1593
 
1594
  /* We also cannot do integral arithmetic into FP regs, as might result
1595
     from register elimination into a DImode fp register.  */
1596
  if (rclass == FLOAT_REGS)
1597
    {
1598
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1599
        return GENERAL_REGS;
1600
      if (in_p && INTEGRAL_MODE_P (mode)
1601
          && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1602
        return GENERAL_REGS;
1603
    }
1604
 
1605
  return NO_REGS;
1606
}

/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static int
alpha_set_memflags_1 (rtx *xp, void *data)
{
  rtx x = *xp, orig = (rtx) data;

  if (!MEM_P (x))
    return 0;

  MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
  MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  MEM_READONLY_P (x) = MEM_READONLY_P (orig);

  /* Sadly, we cannot use alias sets because the extra aliasing
     produced by the AND interferes.  Given that two-byte quantities
     are the only thing we would be able to differentiate anyway,
     there does not seem to be any point in convoluting the early
     out of the alias check.  */

  return -1;
}

/* Given SEQ, which is an INSN list, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx seq, rtx ref)
{
  rtx insn;

  if (!MEM_P (ref))
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_IN_STRUCT_P (ref)
      && !MEM_SCALAR_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  for (insn = seq; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
    else
      gcc_unreachable ();
}

static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
                                 int, bool);

/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */

static rtx
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
                        HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new_const;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
  rtx temp, insn;

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */

  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
         positive, we must adjust it to do two ldah insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
        {
          extra = 0x4000;
          tmp1 -= 0x40000000;
          high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
        }
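
      /* Illustrative examples, ours rather than the original author's:
         c = 0x2345678a decomposes as low = 0x678a, high = 0x2345, giving
         "ldah t,0x2345($31); lda t,0x678a(t)".  For c = 0x7fff8123 the
         sign-extended low is -0x7edd, which would leave high = -0x8000;
         the adjustment above instead yields extra = 0x4000 and
         high = 0x4000, so three insns are emitted:
         "ldah t,0x4000($31); ldah t,0x4000(t); lda t,-0x7edd(t)".  */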

      if (c == low || (low == 0 && extra == 0))
        {
          /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
             but that meant that we can't handle INT_MIN on 32-bit machines
             (like NT/Alpha), because we recurse indefinitely through
             emit_move_insn to gen_movdi.  So instead, since we know exactly
             what we want, create it explicitly.  */

          if (no_output)
            return pc_rtx;
          if (target == NULL)
            target = gen_reg_rtx (mode);
          emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
          return target;
        }
      else if (n >= 2 + (extra != 0))
        {
          if (no_output)
            return pc_rtx;
          if (!can_create_pseudo_p ())
            {
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
              temp = target;
            }
          else
            temp = copy_to_suggested_reg (GEN_INT (high << 16),
                                          subtarget, mode);

          /* As of 2002-02-23, addsi3 is only available when not optimizing.
             This means that if we go through expand_binop, we'll try to
             generate extensions, etc, which will require new pseudos, which
             will fail during some split phases.  The SImode add patterns
             still exist, but are not named.  So build the insns by hand.  */

          if (extra != 0)
            {
              if (! subtarget)
                subtarget = gen_reg_rtx (mode);
              insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
              insn = gen_rtx_SET (VOIDmode, subtarget, insn);
              emit_insn (insn);
              temp = subtarget;
            }

          if (target == NULL)
            target = gen_reg_rtx (mode);
          insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
          insn = gen_rtx_SET (VOIDmode, target, insn);
          emit_insn (insn);
          return target;
        }
    }

  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
    return 0;

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once for each
     increasing number of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if, minus some low bits, we have an easy load of
         the high bits.  */

      new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new_const != 0)
        {
          temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
          if (temp)
            {
              if (no_output)
                return temp;
              return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
                                   target, 0, OPTAB_WIDEN);
            }
        }

      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
        {
          if (no_output)
            return temp;
          return expand_unop (mode, one_cmpl_optab, temp, target, 0);
        }

      /* Next try to form a constant and do a left shift.  We can do this
         if some low-order bits are zero; the exact_log2 call below tells
         us that information.  The bits we are shifting out could be any
         value, but here we'll just try the 0- and sign-extended forms of
         the constant.  To try to increase the chance of having the same
         constant in more than one insn, start at the highest number of
         bits to shift, but try all possibilities in case a ZAPNOT will
         be useful.  */
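
      /* For example (an illustration of ours): c = 0x12340000 has 18
         low-order zero bits, and c >> 18 == 0x48d loads in a single lda,
         so "lda t,0x48d($31); sll t,18,t" builds the constant in two
         insns.  */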

      bits = exact_log2 (c & -c);
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c >> bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
            if (!temp && c < 0)
              {
                new_const = (unsigned HOST_WIDE_INT)c >> bits;
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
                                     target, 0, OPTAB_WIDEN);
              }
          }

      /* Now try high-order zero bits.  Here we try the shifted-in bits as
         all zero and all ones.  Be careful to avoid shifting outside the
         mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
         confuse the recursive call and set all of the high 32 bits.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
              - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c << bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
            if (!temp)
              {
                new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
                                     target, 1, OPTAB_WIDEN);
              }
          }

      /* Now try high-order 1 bits.  We get that with a sign-extension.
         But one bit isn't enough here.  Be careful to avoid shifting outside
         the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
              - floor_log2 (~ c) - 2);
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c << bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
            if (!temp)
              {
                new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
                                     target, 0, OPTAB_WIDEN);
              }
          }
    }

#if HOST_BITS_PER_WIDE_INT == 64
  /* Finally, see if we can load a value into the target that is the same as
     the constant except that all bytes that are 0 are changed to be 0xff.
     If we can, then we can do a ZAPNOT to obtain the desired constant.  */
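
  /* For example (an illustration of ours): c = 0x000000ffffff0000 has its
     zero bytes filled in to give new_const = -1, which loads in one insn;
     the mask (c | ~new_const) keeps exactly the bytes that were nonzero,
     so "lda t,-1($31); zapnot t,0x1c,t" rebuilds c in two insns.  */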

  new_const = c;
  for (i = 0; i < 64; i += 8)
    if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new_const |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new_const != c)
    {
      temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
      if (temp)
        {
          if (no_output)
            return temp;
          return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
                               target, 0, OPTAB_WIDEN);
        }
    }
#endif

  return 0;
}

/* Try to output insns to set TARGET equal to the constant C if it can be
   done in at most N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */

static rtx
alpha_emit_set_const (rtx target, enum machine_mode mode,
                      HOST_WIDE_INT c, int n, bool no_output)
{
  enum machine_mode orig_mode = mode;
  rtx orig_target = target;
  rtx result = 0;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, and we
     can't load this constant in one insn, do this in DImode.  */
  if (!can_create_pseudo_p () && mode == SImode
      && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
    {
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
      if (result)
        return result;

      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
    {
      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
      if (result)
        {
          rtx insn, set;

          if (no_output)
            return result;

          insn = get_last_insn ();
          set = single_set (insn);
          if (! CONSTANT_P (SET_SRC (set)))
            set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
          break;
        }
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result)
    {
      if (result == target)
        result = orig_target;
      else if (mode != orig_mode)
        result = gen_lowpart (orig_mode, result);
    }

  return result;
}

/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

static rtx
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word */
#if HOST_BITS_PER_WIDE_INT >= 64
  gcc_assert (c2 == -(c1 < 0));
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d4);
#else
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d2);
  c2 += (d2 < 0);
  d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
  c2 -= d3;
  d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c2 == d4);
#endif
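
  /* Illustrative trace of ours, assuming a 64-bit host:
     c1 = 0x1234567890abcdef decomposes as d1 = -0x3211,
     d2 = -0x6f540000, d3 = 0x5679, and d4 = 0x12340000, so the code
     below emits t = 0x12340000; t += 0x5679; t <<= 32;
     t += -0x6f540000; t += -0x3211.  */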

  /* Construct the high word */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
        emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}

/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
   the low 64 bits.  */

static void
alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
{
  HOST_WIDE_INT i0, i1;

  if (GET_CODE (x) == CONST_VECTOR)
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);

  if (CONST_INT_P (x))
    {
      i0 = INTVAL (x);
      i1 = -(i0 < 0);
    }
  else if (HOST_BITS_PER_WIDE_INT >= 64)
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = -(i0 < 0);
    }
  else
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = CONST_DOUBLE_HIGH (x);
    }

  *p0 = i0;
  *p1 = i1;
}

/* Implement LEGITIMATE_CONSTANT_P.  This is all constants for which we
   are willing to load the value into a register via a move pattern.
   Normally this is all symbolic constants, integral constants that
   take three or fewer instructions, and floating-point zero.  */

bool
alpha_legitimate_constant_p (rtx x)
{
  enum machine_mode mode = GET_MODE (x);
  HOST_WIDE_INT i0, i1;

  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case HIGH:
      return true;

    case CONST:
      if (GET_CODE (XEXP (x, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
        x = XEXP (XEXP (x, 0), 0);
      else
        return true;

      if (GET_CODE (x) != SYMBOL_REF)
        return true;

      /* FALLTHRU */

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      return SYMBOL_REF_TLS_MODEL (x) == 0;

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
        return true;
      if (FLOAT_MODE_P (mode))
        return false;
      goto do_integer;

    case CONST_VECTOR:
      if (x == CONST0_RTX (mode))
        return true;
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
        return false;
      if (GET_MODE_SIZE (mode) != 8)
        return false;
      goto do_integer;

    case CONST_INT:
    do_integer:
      if (TARGET_BUILD_CONSTANTS)
        return true;
      alpha_extract_integer (x, &i0, &i1);
      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
        return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
      return false;

    default:
      return false;
    }
}

/* Operand 1 is known to be a constant, and should require more than one
   instruction to load.  Emit that multi-part load.  */

bool
alpha_split_const_mov (enum machine_mode mode, rtx *operands)
{
  HOST_WIDE_INT i0, i1;
  rtx temp = NULL_RTX;

  alpha_extract_integer (operands[1], &i0, &i1);

  if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
    temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);

  if (!temp && TARGET_BUILD_CONSTANTS)
    temp = alpha_emit_set_long_const (operands[0], i0, i1);

  if (temp)
    {
      if (!rtx_equal_p (operands[0], temp))
        emit_move_insn (operands[0], temp);
      return true;
    }

  return false;
}

/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

bool
alpha_expand_mov (enum machine_mode mode, rtx *operands)
{
  rtx tmp;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0])
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Allow legitimize_address to perform some simplifications.  */
  if (mode == Pmode && symbolic_operand (operands[1], mode))
    {
      tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
      if (tmp)
        {
          if (tmp == operands[0])
            return true;
          operands[1] = tmp;
          return false;
        }
    }

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
    return false;

  /* Split large integers.  */
  if (CONST_INT_P (operands[1])
      || GET_CODE (operands[1]) == CONST_DOUBLE
      || GET_CODE (operands[1]) == CONST_VECTOR)
    {
      if (alpha_split_const_mov (mode, operands))
        return true;
    }

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  tmp = force_const_mem (mode, operands[1]);

  if (tmp == NULL_RTX)
    return false;

  if (reload_in_progress)
    {
      emit_move_insn (operands[0], XEXP (tmp, 0));
      operands[1] = replace_equiv_address (tmp, operands[0]);
    }
  else
    operands[1] = validize_mem (tmp);
  return false;
}

/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

bool
alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
{
  rtx seq;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0]))
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (any_memory_operand (operands[1], mode))
    {
      if (aligned_memory_operand (operands[1], mode))
        {
          if (reload_in_progress)
            {
              if (mode == QImode)
                seq = gen_reload_inqi_aligned (operands[0], operands[1]);
              else
                seq = gen_reload_inhi_aligned (operands[0], operands[1]);
              emit_insn (seq);
            }
          else
            {
              rtx aligned_mem, bitnum;
              rtx scratch = gen_reg_rtx (SImode);
              rtx subtarget;
              bool copyout;

              get_aligned_mem (operands[1], &aligned_mem, &bitnum);

              subtarget = operands[0];
              if (REG_P (subtarget))
                subtarget = gen_lowpart (DImode, subtarget), copyout = false;
              else
                subtarget = gen_reg_rtx (DImode), copyout = true;

              if (mode == QImode)
                seq = gen_aligned_loadqi (subtarget, aligned_mem,
                                          bitnum, scratch);
              else
                seq = gen_aligned_loadhi (subtarget, aligned_mem,
                                          bitnum, scratch);
              emit_insn (seq);

              if (copyout)
                emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
            }
        }
      else
        {
          /* Don't pass these as parameters since that makes the generated
             code depend on parameter evaluation order which will cause
             bootstrap failures.  */

          rtx temp1, temp2, subtarget, ua;
          bool copyout;

          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);

          subtarget = operands[0];
          if (REG_P (subtarget))
            subtarget = gen_lowpart (DImode, subtarget), copyout = false;
          else
            subtarget = gen_reg_rtx (DImode), copyout = true;

          ua = get_unaligned_address (operands[1]);
          if (mode == QImode)
            seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
          else
            seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);

          alpha_set_memflags (seq, operands[1]);
          emit_insn (seq);

          if (copyout)
            emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
        }
      return true;
    }

  if (any_memory_operand (operands[0], mode))
    {
      if (aligned_memory_operand (operands[0], mode))
        {
          rtx aligned_mem, bitnum;
          rtx temp1 = gen_reg_rtx (SImode);
          rtx temp2 = gen_reg_rtx (SImode);

          get_aligned_mem (operands[0], &aligned_mem, &bitnum);

          emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
                                        temp1, temp2));
        }
      else
        {
          rtx temp1 = gen_reg_rtx (DImode);
          rtx temp2 = gen_reg_rtx (DImode);
          rtx temp3 = gen_reg_rtx (DImode);
          rtx ua = get_unaligned_address (operands[0]);

          if (mode == QImode)
            seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
          else
            seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);

          alpha_set_memflags (seq, operands[0]);
          emit_insn (seq);
        }
      return true;
    }

  return false;
}

/* Implement the movmisalign patterns.  One of the operands is a memory
   that is not naturally aligned.  Emit instructions to load it.  */

void
alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
{
  /* Honor misaligned loads, since we promised to handle them.  */
  if (MEM_P (operands[1]))
    {
      rtx tmp;

      if (register_operand (operands[0], mode))
        tmp = operands[0];
      else
        tmp = gen_reg_rtx (mode);

      alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
      if (tmp != operands[0])
        emit_move_insn (operands[0], tmp);
    }
  else if (MEM_P (operands[0]))
    {
      if (!reg_or_0_operand (operands[1], mode))
        operands[1] = force_reg (mode, operands[1]);
      alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
    }
  else
    gcc_unreachable ();
}

/* Generate an unsigned DImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.

   For SFmode, this is the only construction I've found that can pass
   gcc.c-torture/execute/ieee/rbug.c.  No scenario that uses DFmode
   intermediates will work, because you'll get intermediate rounding
   that ruins the end result.  Some of this could be fixed by turning
   on round-to-positive-infinity, but that requires diddling the fpsr,
   which kills performance.  I tried turning this around and converting
   to a negative number, so that I could turn on /m, but either I did
   it wrong or there's something else, because I wound up with the exact
   same single-bit error.  There is a branch-less form of this same code:

        srl     $16,1,$1
        and     $16,1,$2
        cmplt   $16,0,$3
        or      $1,$2,$2
        cmovge  $16,$16,$2
        itoft   $3,$f10
        itoft   $2,$f11
        cvtqs   $f11,$f11
        adds    $f11,$f11,$f0
        fcmoveq $f10,$f11,$f0

   I'm not using it because it's the same number of instructions as
   this branch-full form, and it has more serialized long latency
   instructions on the critical path.

   For DFmode, we can avoid rounding errors by breaking up the word
   into two pieces, converting them separately, and adding them back:

   LC0: .long 0,0x5f800000

        itoft   $16,$f11
        lda     $2,LC0
        cmplt   $16,0,$1
        cpyse   $f11,$f31,$f10
        cpyse   $f31,$f11,$f11
        s4addq  $1,$2,$1
        lds     $f12,0($1)
        cvtqt   $f10,$f10
        cvtqt   $f11,$f11
        addt    $f12,$f10,$f0
        addt    $f0,$f11,$f0

   This doesn't seem to be a clear-cut win over the optabs form.
   It probably all depends on the distribution of numbers being
   converted -- in the optabs form, all but high-bit-set has a
   much lower minimum execution time.  */
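
/* A gloss of ours on the branchy code below: the negative path computes
   ((in >> 1) | (in & 1)), converts that, and doubles the result.  OR-ing
   the shifted-out bit back in is a round-to-odd step; it preserves the
   sticky information the shift would otherwise discard, so the final
   rounding of f0 + f0 matches a direct conversion of the full value.  */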

void
alpha_emit_floatuns (rtx operands[2])
{
  rtx neglab, donelab, i0, i1, f0, in, out;
  enum machine_mode mode;

  out = operands[0];
  in = force_reg (DImode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  emit_insn (gen_anddi3 (i1, in, const1_rtx));
  emit_insn (gen_iordi3 (i0, i0, i1));
  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}

/* Generate the comparison for a conditional branch.  */

void
alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
{
  enum rtx_code cmp_code, branch_code;
  enum machine_mode branch_mode = VOIDmode;
  enum rtx_code code = GET_CODE (operands[0]);
  rtx op0 = operands[1], op1 = operands[2];
  rtx tem;

  if (cmp_mode == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      cmp_mode = DImode;
    }

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares: */
      cmp_code = code, branch_code = NE;
      break;

    case NE:
    case ORDERED:
      /* These must be reversed.  */
      cmp_code = reverse_condition (code), branch_code = EQ;
      break;

    case GE:  case GT: case GEU:  case GTU:
      /* For FP, we swap them, for INT, we reverse them.  */
      if (cmp_mode == DFmode)
        {
          cmp_code = swap_condition (code);
          branch_code = NE;
          tem = op0, op0 = op1, op1 = tem;
        }
      else
        {
          cmp_code = reverse_condition (code);
          branch_code = EQ;
        }
      break;

    default:
      gcc_unreachable ();
    }

  if (cmp_mode == DFmode)
    {
      if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
        {
          /* When we are not as concerned about non-finite values, and we
             are comparing against zero, we can branch directly.  */
          if (op1 == CONST0_RTX (DFmode))
            cmp_code = UNKNOWN, branch_code = code;
          else if (op0 == CONST0_RTX (DFmode))
            {
              /* Undo the swap we probably did just above.  */
              tem = op0, op0 = op1, op1 = tem;
              branch_code = swap_condition (cmp_code);
              cmp_code = UNKNOWN;
            }
        }
      else
        {
          /* ??? We mark the branch mode to be CCmode to prevent the
             compare and branch from being combined, since the compare
             insn follows IEEE rules that the branch does not.  */
          branch_mode = CCmode;
        }
    }
  else
    {
      /* The following optimizations are only for signed compares.  */
      if (code != LEU && code != LTU && code != GEU && code != GTU)
        {
          /* Whee.  Compare and branch against 0 directly.  */
          if (op1 == const0_rtx)
            cmp_code = UNKNOWN, branch_code = code;

          /* If the constant doesn't fit into an immediate, but can
             be generated by lda/ldah, we adjust the argument and
             compare against zero, so we can use beq/bne directly.  */
          /* ??? Don't do this when comparing against symbols, otherwise
             we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
             be declared false out of hand (at least for non-weak).  */
          else if (CONST_INT_P (op1)
                   && (code == EQ || code == NE)
                   && !(symbolic_operand (op0, VOIDmode)
                        || (REG_P (op0) && REG_POINTER (op0))))
            {
              rtx n_op1 = GEN_INT (-INTVAL (op1));

              if (! satisfies_constraint_I (op1)
                  && (satisfies_constraint_K (n_op1)
                      || satisfies_constraint_L (n_op1)))
                cmp_code = PLUS, branch_code = code, op1 = n_op1;
            }
        }
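
      /* For example (an illustration of ours): for "x == 0x4000", 0x4000
         is not an 8-bit literal, but -0x4000 fits lda's signed 16-bit
         field, so this folds to "lda t,-0x4000(x); beq t,<label>" rather
         than materializing the constant in a register first.  */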

      if (!reg_or_0_operand (op0, DImode))
        op0 = force_reg (DImode, op0);
      if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
        op1 = force_reg (DImode, op1);
    }

  /* Emit an initial compare instruction, if necessary.  */
  tem = op0;
  if (cmp_code != UNKNOWN)
    {
      tem = gen_reg_rtx (cmp_mode);
      emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
    }

  /* Emit the branch instruction.  */
  tem = gen_rtx_SET (VOIDmode, pc_rtx,
                     gen_rtx_IF_THEN_ELSE (VOIDmode,
                                           gen_rtx_fmt_ee (branch_code,
                                                           branch_mode, tem,
                                                           CONST0_RTX (cmp_mode)),
                                           gen_rtx_LABEL_REF (VOIDmode,
                                                              operands[3]),
                                           pc_rtx));
  emit_jump_insn (tem);
}

/* Certain simplifications can be done to make invalid setcc operations
   valid.  Return true if we emitted the setcc, or false if this
   combination cannot be handled.  */

bool
alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
{
  enum rtx_code cmp_code;
  enum rtx_code code = GET_CODE (operands[1]);
  rtx op0 = operands[2], op1 = operands[3];
  rtx tmp;

  if (cmp_mode == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      cmp_mode = DImode;
    }

  if (cmp_mode == DFmode && !TARGET_FIX)
    return false;

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */

  cmp_code = UNKNOWN;
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares.  */
      if (cmp_mode == DFmode)
        cmp_code = code, code = NE;
      break;

    case NE:
      if (cmp_mode == DImode && op1 == const0_rtx)
        break;
      /* FALLTHRU */

    case ORDERED:
      cmp_code = reverse_condition (code);
      code = EQ;
      break;

    case GE:  case GT: case GEU:  case GTU:
      /* These normally need swapping, but for integer zero we have
         special patterns that recognize swapped operands.  */
      if (cmp_mode == DImode && op1 == const0_rtx)
        break;
      code = swap_condition (code);
      if (cmp_mode == DFmode)
        cmp_code = code, code = NE;
      tmp = op0, op0 = op1, op1 = tmp;
      break;

    default:
      gcc_unreachable ();
    }

  if (cmp_mode == DImode)
    {
      if (!register_operand (op0, DImode))
        op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
        op1 = force_reg (DImode, op1);
    }

  /* Emit an initial compare instruction, if necessary.  */
  if (cmp_code != UNKNOWN)
    {
      tmp = gen_reg_rtx (cmp_mode);
      emit_insn (gen_rtx_SET (VOIDmode, tmp,
                              gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));

      op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
      op1 = const0_rtx;
    }

  /* Emit the setcc instruction.  */
  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
                          gen_rtx_fmt_ee (code, DImode, op0, op1)));
  return true;
}

/* Rewrite a comparison against zero CMP of the form
   (CODE (cc0) (const_int 0)) so it can be written validly in
   a conditional move (if_then_else CMP ...).
   If both of the operands that set cc0 are nonzero we must emit
   an insn to perform the compare (it can't be done within
   the conditional move).  */

rtx
alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (cmp);
  enum rtx_code cmov_code = NE;
  rtx op0 = XEXP (cmp, 0);
  rtx op1 = XEXP (cmp, 1);
  enum machine_mode cmp_mode
    = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
  enum machine_mode cmov_mode = VOIDmode;
  int local_fast_math = flag_unsafe_math_optimizations;
  rtx tem;

  if (cmp_mode == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      cmp_mode = DImode;
    }

  gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);

  if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
    {
      enum rtx_code cmp_code;

      if (! TARGET_FIX)
        return 0;

      /* If we have fp<->int register move instructions, do a cmov by
         performing the comparison in fp registers, and move the
         zero/nonzero value to integer registers, where we can then
         use a normal cmov, or vice-versa.  */

      switch (code)
        {
        case EQ: case LE: case LT: case LEU: case LTU:
          /* We have these compares.  */
          cmp_code = code, code = NE;
          break;

        case NE:
          /* This must be reversed.  */
          cmp_code = EQ, code = EQ;
          break;

        case GE: case GT: case GEU: case GTU:
          /* These normally need swapping, but for integer zero we have
             special patterns that recognize swapped operands.  */
          if (cmp_mode == DImode && op1 == const0_rtx)
            cmp_code = code, code = NE;
          else
            {
              cmp_code = swap_condition (code);
              code = NE;
              tem = op0, op0 = op1, op1 = tem;
            }
          break;

        default:
          gcc_unreachable ();
        }

      tem = gen_reg_rtx (cmp_mode);
      emit_insn (gen_rtx_SET (VOIDmode, tem,
                              gen_rtx_fmt_ee (cmp_code, cmp_mode,
                                              op0, op1)));

      cmp_mode = cmp_mode == DImode ? DFmode : DImode;
      op0 = gen_lowpart (cmp_mode, tem);
      op1 = CONST0_RTX (cmp_mode);
      local_fast_math = 1;
    }

  /* We may be able to use a conditional move directly.
     This avoids emitting spurious compares.  */
  if (signed_comparison_operator (cmp, VOIDmode)
      && (cmp_mode == DImode || local_fast_math)
      && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
    return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);

  /* We can't put the comparison inside the conditional move;
     emit a compare instruction and put that inside the
     conditional move.  Make sure we emit only comparisons we have;
     swap or reverse as necessary.  */

  if (!can_create_pseudo_p ())
    return NULL_RTX;

  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
      /* We have these compares: */
      break;

    case NE:
      /* This must be reversed.  */
      code = reverse_condition (code);
      cmov_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* These must be swapped.  */
      if (op1 != CONST0_RTX (cmp_mode))
        {
          code = swap_condition (code);
          tem = op0, op0 = op1, op1 = tem;
        }
      break;

    default:
      gcc_unreachable ();
    }

  if (cmp_mode == DImode)
    {
      if (!reg_or_0_operand (op0, DImode))
        op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
        op1 = force_reg (DImode, op1);
    }

  /* ??? We mark the branch mode to be CCmode to prevent the compare
     and cmov from being combined, since the compare insn follows IEEE
     rules that the cmov does not.  */
  if (cmp_mode == DFmode && !local_fast_math)
    cmov_mode = CCmode;

  tem = gen_reg_rtx (cmp_mode);
  emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
  return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
}

/* Simplify a conditional move of two constants into a setcc with
   arithmetic.  This is done with a splitter since combine would
   just undo the work if done during code generation.  It also catches
   cases we wouldn't have before cse.  */

int
alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
                              rtx t_rtx, rtx f_rtx)
{
  HOST_WIDE_INT t, f, diff;
  enum machine_mode mode;
  rtx target, subtarget, tmp;

  mode = GET_MODE (dest);
  t = INTVAL (t_rtx);
  f = INTVAL (f_rtx);
  diff = t - f;

  if (((code == NE || code == EQ) && diff < 0)
      || (code == GE || code == GT))
    {
      code = reverse_condition (code);
      diff = t, t = f, f = diff;
      diff = t - f;
    }

  subtarget = target = dest;
  if (mode != DImode)
    {
      target = gen_lowpart (DImode, dest);
      if (can_create_pseudo_p ())
        subtarget = gen_reg_rtx (DImode);
      else
        subtarget = target;
    }
  /* Below, we must be careful to use copy_rtx on target and subtarget
     in intermediate insns, as they may be a subreg rtx, which may not
     be shared.  */
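
  /* Illustrative cases of ours: with t = 4, f = 0 under NE, the
     exact_log2 path below turns the cmov into "cmp; sll 2"; with t = 5,
     f = 1, diff = 4 takes the scaled-add path and emits
     "cmp; s4addq tmp,1,target".  */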

  if (f == 0 && exact_log2 (diff) > 0
      /* On EV6, we've got enough shifters to make non-arithmetic shifts
         viable over a longer latency cmove.  On EV5, the E0 slot is a
         scarce resource, and on EV4 shift has the same latency as a cmove.  */
      && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
    {
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));

      tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
                            GEN_INT (exact_log2 (t)));
      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
    }
  else if (f == 0 && t == -1)
    {
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));

      emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
    }
  else if (diff == 1 || diff == 4 || diff == 8)
    {
      rtx add_op;

      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));

      if (diff == 1)
        emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
      else
        {
          add_op = GEN_INT (f);
          if (sext_add_operand (add_op, mode))
            {
              tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
                                  GEN_INT (diff));
              tmp = gen_rtx_PLUS (DImode, tmp, add_op);
              emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
            }
          else
            return 0;
        }
    }
  else
    return 0;

  return 1;
}

/* Look up the X_floating library function name for the
   given operation.  */

struct GTY(()) xfloating_op
{
  const enum rtx_code code;
  const char *const GTY((skip)) osf_func;
  const char *const GTY((skip)) vms_func;
  rtx libcall;
};

static GTY(()) struct xfloating_op xfloating_ops[] =
{
  { PLUS,               "_OtsAddX", "OTS$ADD_X", 0 },
  { MINUS,              "_OtsSubX", "OTS$SUB_X", 0 },
  { MULT,               "_OtsMulX", "OTS$MUL_X", 0 },
  { DIV,                "_OtsDivX", "OTS$DIV_X", 0 },
  { EQ,                 "_OtsEqlX", "OTS$EQL_X", 0 },
  { NE,                 "_OtsNeqX", "OTS$NEQ_X", 0 },
  { LT,                 "_OtsLssX", "OTS$LSS_X", 0 },
  { LE,                 "_OtsLeqX", "OTS$LEQ_X", 0 },
  { GT,                 "_OtsGtrX", "OTS$GTR_X", 0 },
  { GE,                 "_OtsGeqX", "OTS$GEQ_X", 0 },
  { FIX,                "_OtsCvtXQ", "OTS$CVTXQ", 0 },
  { FLOAT,              "_OtsCvtQX", "OTS$CVTQX", 0 },
  { UNSIGNED_FLOAT,     "_OtsCvtQUX", "OTS$CVTQUX", 0 },
  { FLOAT_EXTEND,       "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
  { FLOAT_TRUNCATE,     "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
};

static GTY(()) struct xfloating_op vax_cvt_ops[] =
{
  { FLOAT_EXTEND,       "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
  { FLOAT_TRUNCATE,     "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
};

static rtx
alpha_lookup_xfloating_lib_func (enum rtx_code code)
{
  struct xfloating_op *ops = xfloating_ops;
  long n = ARRAY_SIZE (xfloating_ops);
  long i;

  gcc_assert (TARGET_HAS_XFLOATING_LIBS);

  /* How irritating.  Nothing to key off for the main table.  */
  if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
    {
      ops = vax_cvt_ops;
      n = ARRAY_SIZE (vax_cvt_ops);
    }

  for (i = 0; i < n; ++i, ++ops)
    if (ops->code == code)
      {
        rtx func = ops->libcall;
        if (!func)
          {
            func = init_one_libfunc (TARGET_ABI_OPEN_VMS
                                     ? ops->vms_func : ops->osf_func);
            ops->libcall = func;
          }
        return func;
      }

  gcc_unreachable ();
}

/* Most X_floating operations take the rounding mode as an argument.
   Compute that here.  */

static int
alpha_compute_xfloating_mode_arg (enum rtx_code code,
                                  enum alpha_fp_rounding_mode round)
{
  int mode;

  switch (round)
    {
    case ALPHA_FPRM_NORM:
      mode = 2;
      break;
    case ALPHA_FPRM_MINF:
      mode = 1;
      break;
    case ALPHA_FPRM_CHOP:
      mode = 0;
      break;
    case ALPHA_FPRM_DYN:
      mode = 4;
      break;
    default:
      gcc_unreachable ();

    /* XXX For reference, round to +inf is mode = 3.  */
    }

  if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
    mode |= 0x10000;

  return mode;
}

/* Emit an X_floating library function call.

   Note that these functions do not follow normal calling conventions:
   TFmode arguments are passed in two integer registers (as opposed to
   indirect); TFmode return values appear in R16+R17.

   FUNC is the function to call.
   TARGET is where the output belongs.
   OPERANDS are the inputs.
   NOPERANDS is the count of inputs.
   EQUIV is the expression equivalent for the function.  */

static void
alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
                              int noperands, rtx equiv)
{
  rtx usage = NULL_RTX, tmp, reg;
  int regno = 16, i;

  start_sequence ();

  for (i = 0; i < noperands; ++i)
    {
      switch (GET_MODE (operands[i]))
        {
        case TFmode:
          reg = gen_rtx_REG (TFmode, regno);
          regno += 2;
          break;

        case DFmode:
          reg = gen_rtx_REG (DFmode, regno + 32);
          regno += 1;
          break;

        case VOIDmode:
          gcc_assert (CONST_INT_P (operands[i]));
          /* FALLTHRU */
        case DImode:
          reg = gen_rtx_REG (DImode, regno);
          regno += 1;
          break;

        default:
          gcc_unreachable ();
        }

      emit_move_insn (reg, operands[i]);
      usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
    }

  switch (GET_MODE (target))
    {
    case TFmode:
      reg = gen_rtx_REG (TFmode, 16);
      break;
    case DFmode:
      reg = gen_rtx_REG (DFmode, 32);
      break;
    case DImode:
      reg = gen_rtx_REG (DImode, 0);
      break;
    default:
      gcc_unreachable ();
    }

  tmp = gen_rtx_MEM (QImode, func);
  tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
                                        const0_rtx, const0_rtx));
  CALL_INSN_FUNCTION_USAGE (tmp) = usage;
  RTL_CONST_CALL_P (tmp) = 1;

  tmp = get_insns ();
  end_sequence ();

  emit_libcall_block (tmp, target, reg, equiv);
}

/* Emit an X_floating library function call for arithmetic (+,-,*,/).  */

void
alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
{
  rtx func;
  int mode;
  rtx out_operands[3];

  func = alpha_lookup_xfloating_lib_func (code);
  mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);

  out_operands[0] = operands[1];
  out_operands[1] = operands[2];
  out_operands[2] = GEN_INT (mode);
  alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
                                gen_rtx_fmt_ee (code, TFmode, operands[1],
                                                operands[2]));
}

/* Emit an X_floating library function call for a comparison.  */

static rtx
alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
{
  enum rtx_code cmp_code, res_code;
  rtx func, out, operands[2], note;

  /* X_floating library comparison functions return
           -1  unordered
            0  false
            1  true
     Convert the compare against the raw return value.  */

  cmp_code = *pcode;
  switch (cmp_code)
    {
    case UNORDERED:
      cmp_code = EQ;
      res_code = LT;
      break;
    case ORDERED:
      cmp_code = EQ;
      res_code = GE;
      break;
    case NE:
      res_code = NE;
      break;
    case EQ:
    case LT:
    case GT:
    case LE:
    case GE:
      res_code = GT;
      break;
    default:
      gcc_unreachable ();
    }
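
  /* For example, our gloss: UNORDERED is implemented by calling the EQ
     routine and testing the raw -1/0/1 result with LT, since only the
     unordered case returns -1; ORDERED is the same call tested with GE.  */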
  *pcode = res_code;

  func = alpha_lookup_xfloating_lib_func (cmp_code);

  operands[0] = op0;
  operands[1] = op1;
  out = gen_reg_rtx (DImode);

  /* What's actually returned is -1,0,1, not a proper boolean value,
     so use an EXPR_LIST as with a generic libcall instead of a
     comparison type expression.  */
  note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
  note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
  note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
  alpha_emit_xfloating_libcall (func, out, operands, 2, note);

  return out;
}

/* Emit an X_floating library function call for a conversion.  */

void
alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
{
  int noperands = 1, mode;
  rtx out_operands[2];
  rtx func;
  enum rtx_code code = orig_code;

  if (code == UNSIGNED_FIX)
    code = FIX;

  func = alpha_lookup_xfloating_lib_func (code);

  out_operands[0] = operands[1];

  switch (code)
    {
    case FIX:
      mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
      out_operands[1] = GEN_INT (mode);
      noperands = 2;
      break;
    case FLOAT_TRUNCATE:
      mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
      out_operands[1] = GEN_INT (mode);
      noperands = 2;
      break;
    default:
      break;
    }

  alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
                                gen_rtx_fmt_e (orig_code,
                                               GET_MODE (operands[0]),
                                               operands[1]));
}

/* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
   DImode moves from OP[2,3] to OP[0,1].  If FIXUP_OVERLAP is true,
   guarantee that the sequence
     set (OP[0] OP[2])
     set (OP[1] OP[3])
   is valid.  Naturally, output operand ordering is little-endian.
   This is used by *movtf_internal and *movti_internal.  */

void
alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
                        bool fixup_overlap)
{
  switch (GET_CODE (operands[1]))
    {
    case REG:
      operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
      operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
      break;

    case MEM:
      operands[3] = adjust_address (operands[1], DImode, 8);
      operands[2] = adjust_address (operands[1], DImode, 0);
      break;

    case CONST_INT:
    case CONST_DOUBLE:
      gcc_assert (operands[1] == CONST0_RTX (mode));
      operands[2] = operands[3] = const0_rtx;
      break;

    default:
      gcc_unreachable ();
    }

  switch (GET_CODE (operands[0]))
    {
    case REG:
      operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
      operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
      break;

    case MEM:
      operands[1] = adjust_address (operands[0], DImode, 8);
      operands[0] = adjust_address (operands[0], DImode, 0);
      break;

    default:
      gcc_unreachable ();
    }

  if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
    {
      rtx tmp;
      tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
      tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
    }
}

/* Implement negtf2 or abstf2.  Op0 is destination, op1 is source,
   op2 is a register containing the sign bit, operation is the
   logical operation to be performed.  */

void
alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
{
  rtx high_bit = operands[2];
  rtx scratch;
  int move;

  alpha_split_tmode_pair (operands, TFmode, false);

  /* Detect three flavors of operand overlap.  */
  move = 1;
  if (rtx_equal_p (operands[0], operands[2]))
    move = 0;
  else if (rtx_equal_p (operands[1], operands[2]))
    {
      if (rtx_equal_p (operands[0], high_bit))
        move = 2;
      else
        move = -1;
    }

  if (move < 0)
    emit_move_insn (operands[0], operands[2]);

  /* ??? If the destination overlaps both source tf and high_bit, then
     assume source tf is dead in its entirety and use the other half
     for a scratch register.  Otherwise "scratch" is just the proper
     destination register.  */
  scratch = operands[move < 2 ? 1 : 3];

  emit_insn ((*operation) (scratch, high_bit, operands[3]));

  if (move > 0)
    {
      emit_move_insn (operands[0], operands[2]);
      if (move > 1)
        emit_move_insn (operands[1], scratch);
    }
}
3287
 
3288
/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3289
   unaligned data:
3290
 
3291
           unsigned:                       signed:
3292
   word:   ldq_u  r1,X(r11)                ldq_u  r1,X(r11)
3293
           ldq_u  r2,X+1(r11)              ldq_u  r2,X+1(r11)
3294
           lda    r3,X(r11)                lda    r3,X+2(r11)
3295
           extwl  r1,r3,r1                 extql  r1,r3,r1
3296
           extwh  r2,r3,r2                 extqh  r2,r3,r2
3297
           or     r1,r2,r1                 or     r1,r2,r1

3298
                                           sra    r1,48,r1
3299
 
3300
   long:   ldq_u  r1,X(r11)                ldq_u  r1,X(r11)
3301
           ldq_u  r2,X+3(r11)              ldq_u  r2,X+3(r11)
3302
           lda    r3,X(r11)                lda    r3,X(r11)
3303
           extll  r1,r3,r1                 extll  r1,r3,r1
3304
           extlh  r2,r3,r2                 extlh  r2,r3,r2
3305
           or     r1,r2,r1                 addl   r1,r2,r1
3306
 
3307
   quad:   ldq_u  r1,X(r11)
3308
           ldq_u  r2,X+7(r11)
3309
           lda    r3,X(r11)
3310
           extql  r1,r3,r1
3311
           extqh  r2,r3,r2
3312
           or     r1,r2,r1
3313
*/
3314
 
3315
void
3316
alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3317
                             HOST_WIDE_INT ofs, int sign)
3318
{
3319
  rtx meml, memh, addr, extl, exth, tmp, mema;
3320
  enum machine_mode mode;
3321
 
3322
  if (TARGET_BWX && size == 2)
3323
    {
3324
      meml = adjust_address (mem, QImode, ofs);
3325
      memh = adjust_address (mem, QImode, ofs+1);
3326
      if (BYTES_BIG_ENDIAN)
3327
        tmp = meml, meml = memh, memh = tmp;
3328
      extl = gen_reg_rtx (DImode);
3329
      exth = gen_reg_rtx (DImode);
3330
      emit_insn (gen_zero_extendqidi2 (extl, meml));
3331
      emit_insn (gen_zero_extendqidi2 (exth, memh));
3332
      exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3333
                                  NULL, 1, OPTAB_LIB_WIDEN);
3334
      addr = expand_simple_binop (DImode, IOR, extl, exth,
3335
                                  NULL, 1, OPTAB_LIB_WIDEN);
3336
 
3337
      if (sign && GET_MODE (tgt) != HImode)
3338
        {
3339
          addr = gen_lowpart (HImode, addr);
3340
          emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3341
        }
3342
      else
3343
        {
3344
          if (GET_MODE (tgt) != DImode)
3345
            addr = gen_lowpart (GET_MODE (tgt), addr);
3346
          emit_move_insn (tgt, addr);
3347
        }
3348
      return;
3349
    }
3350
 
3351
  meml = gen_reg_rtx (DImode);
3352
  memh = gen_reg_rtx (DImode);
3353
  addr = gen_reg_rtx (DImode);
3354
  extl = gen_reg_rtx (DImode);
3355
  exth = gen_reg_rtx (DImode);
3356
 
3357
  mema = XEXP (mem, 0);
3358
  if (GET_CODE (mema) == LO_SUM)
3359
    mema = force_reg (Pmode, mema);
3360
 
3361
  /* AND addresses cannot be in any alias set, since they may implicitly
3362
     alias surrounding code.  Ideally we'd have some alias set that
3363
     covered all types except those with alignment 8 or higher.  */
3364
 
3365
  tmp = change_address (mem, DImode,
3366
                        gen_rtx_AND (DImode,
3367
                                     plus_constant (mema, ofs),
3368
                                     GEN_INT (-8)));
3369
  set_mem_alias_set (tmp, 0);
3370
  emit_move_insn (meml, tmp);
3371
 
3372
  tmp = change_address (mem, DImode,
3373
                        gen_rtx_AND (DImode,
3374
                                     plus_constant (mema, ofs + size - 1),
3375
                                     GEN_INT (-8)));
3376
  set_mem_alias_set (tmp, 0);
3377
  emit_move_insn (memh, tmp);
3378
 
3379
  if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3380
    {
3381
      emit_move_insn (addr, plus_constant (mema, -1));
3382
 
3383
      emit_insn (gen_extqh_be (extl, meml, addr));
3384
      emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3385
 
3386
      addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3387
      addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3388
                           addr, 1, OPTAB_WIDEN);
3389
    }
3390
  else if (sign && size == 2)
3391
    {
3392
      emit_move_insn (addr, plus_constant (mema, ofs+2));
3393
 
3394
      emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3395
      emit_insn (gen_extqh_le (exth, memh, addr));
3396
 
3397
      /* We must use tgt here for the target.  Alpha-vms port fails if we use
3398
         addr for the target, because addr is marked as a pointer and combine
3399
         knows that pointers are always sign-extended 32-bit values.  */
3400
      addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3401
      addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3402
                           addr, 1, OPTAB_WIDEN);
3403
    }
3404
  else
3405
    {
3406
      if (WORDS_BIG_ENDIAN)
3407
        {
3408
          emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3409
          switch ((int) size)
3410
            {
3411
            case 2:
3412
              emit_insn (gen_extwh_be (extl, meml, addr));
3413
              mode = HImode;
3414
              break;
3415
 
3416
            case 4:
3417
              emit_insn (gen_extlh_be (extl, meml, addr));
3418
              mode = SImode;
3419
              break;
3420
 
3421
            case 8:
3422
              emit_insn (gen_extqh_be (extl, meml, addr));
3423
              mode = DImode;
3424
              break;
3425
 
3426
            default:
3427
              gcc_unreachable ();
3428
            }
3429
          emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3430
        }
3431
      else
3432
        {
3433
          emit_move_insn (addr, plus_constant (mema, ofs));
3434
          emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3435
          switch ((int) size)
3436
            {
3437
            case 2:
3438
              emit_insn (gen_extwh_le (exth, memh, addr));
3439
              mode = HImode;
3440
              break;
3441
 
3442
            case 4:
3443
              emit_insn (gen_extlh_le (exth, memh, addr));
3444
              mode = SImode;
3445
              break;
3446
 
3447
            case 8:
3448
              emit_insn (gen_extqh_le (exth, memh, addr));
3449
              mode = DImode;
3450
              break;
3451
 
3452
            default:
3453
              gcc_unreachable ();
3454
            }
3455
        }
3456
 
3457
      addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3458
                           gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3459
                           sign, OPTAB_WIDEN);
3460
    }
3461
 
3462
  if (addr != tgt)
3463
    emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3464
}
3465
 
3466
/* Similarly, use ins and msk instructions to perform unaligned stores.  */
3467
 
3468
void
3469
alpha_expand_unaligned_store (rtx dst, rtx src,
3470
                              HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3471
{
3472
  rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3473
 
3474
  if (TARGET_BWX && size == 2)
3475
    {
3476
      if (src != const0_rtx)
3477
        {
3478
          dstl = gen_lowpart (QImode, src);
3479
          dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3480
                                      NULL, 1, OPTAB_LIB_WIDEN);
3481
          dsth = gen_lowpart (QImode, dsth);
3482
        }
3483
      else
3484
        dstl = dsth = const0_rtx;
3485
 
3486
      meml = adjust_address (dst, QImode, ofs);
3487
      memh = adjust_address (dst, QImode, ofs+1);
3488
      if (BYTES_BIG_ENDIAN)
3489
        addr = meml, meml = memh, memh = addr;
3490
 
3491
      emit_move_insn (meml, dstl);
3492
      emit_move_insn (memh, dsth);
3493
      return;
3494
    }
3495
 
3496
  dstl = gen_reg_rtx (DImode);
3497
  dsth = gen_reg_rtx (DImode);
3498
  insl = gen_reg_rtx (DImode);
3499
  insh = gen_reg_rtx (DImode);
3500
 
3501
  dsta = XEXP (dst, 0);
3502
  if (GET_CODE (dsta) == LO_SUM)
3503
    dsta = force_reg (Pmode, dsta);
3504
 
3505
  /* AND addresses cannot be in any alias set, since they may implicitly
3506
     alias surrounding code.  Ideally we'd have some alias set that
3507
     covered all types except those with alignment 8 or higher.  */
3508
 
3509
  meml = change_address (dst, DImode,
3510
                         gen_rtx_AND (DImode,
3511
                                      plus_constant (dsta, ofs),
3512
                                      GEN_INT (-8)));
3513
  set_mem_alias_set (meml, 0);
3514
 
3515
  memh = change_address (dst, DImode,
3516
                         gen_rtx_AND (DImode,
3517
                                      plus_constant (dsta, ofs + size - 1),
3518
                                      GEN_INT (-8)));
3519
  set_mem_alias_set (memh, 0);
3520
 
3521
  emit_move_insn (dsth, memh);
3522
  emit_move_insn (dstl, meml);
3523
  if (WORDS_BIG_ENDIAN)
3524
    {
3525
      addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3526
 
3527
      if (src != const0_rtx)
3528
        {
3529
          switch ((int) size)
3530
            {
3531
            case 2:
3532
              emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3533
              break;
3534
            case 4:
3535
              emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3536
              break;
3537
            case 8:
3538
              emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3539
              break;
3540
            }
3541
          emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3542
                                GEN_INT (size*8), addr));
3543
        }
3544
 
3545
      switch ((int) size)
3546
        {
3547
        case 2:
3548
          emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3549
          break;
3550
        case 4:
3551
          {
3552
            rtx msk = immed_double_const (0xffffffff, 0, DImode);
3553
            emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3554
            break;
3555
          }
3556
        case 8:
3557
          emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3558
          break;
3559
        }
3560
 
3561
      emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3562
    }
3563
  else
3564
    {
3565
      addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3566
 
3567
      if (src != CONST0_RTX (GET_MODE (src)))
3568
        {
3569
          emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3570
                                GEN_INT (size*8), addr));
3571
 
3572
          switch ((int) size)
3573
            {
3574
            case 2:
3575
              emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3576
              break;
3577
            case 4:
3578
              emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3579
              break;
3580
            case 8:
3581
              emit_insn (gen_insql_le (insl, gen_lowpart (DImode, src), addr));
3582
              break;
3583
            }
3584
        }
3585
 
3586
      emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3587
 
3588
      switch ((int) size)
3589
        {
3590
        case 2:
3591
          emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3592
          break;
3593
        case 4:
3594
          {
3595
            rtx msk = immed_double_const (0xffffffff, 0, DImode);
3596
            emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3597
            break;
3598
          }
3599
        case 8:
3600
          emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3601
          break;
3602
        }
3603
    }
3604
 
3605
  if (src != CONST0_RTX (GET_MODE (src)))
3606
    {
3607
      dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3608
      dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3609
    }
3610
 
3611
  if (WORDS_BIG_ENDIAN)
3612
    {
3613
      emit_move_insn (meml, dstl);
3614
      emit_move_insn (memh, dsth);
3615
    }
3616
  else
3617
    {
3618
      /* Must store high before low for the degenerate case of an
         aligned address.  */
3619
      emit_move_insn (memh, dsth);
3620
      emit_move_insn (meml, dstl);
3621
    }
3622
}
3623
 
3624
/* The block move code tries to maximize speed by separating loads and
3625
   stores at the expense of register pressure: we load all of the data
3626
   before we store it back out.  Two secondary effects are worth
3627
   mentioning: this speeds copying to/from both aligned and unaligned
3628
   buffers, and it makes the code significantly easier to write.  */
3629
 
3630
#define MAX_MOVE_WORDS  8
3631
 
3632
/* Load an integral number of consecutive unaligned quadwords.  */
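
/* Note that reading WORDS unaligned quadwords may touch WORDS + 1
   aligned quadwords of memory, which is why data_regs[] below carries
   one element more than ext_tmps[].  */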
3633
 
3634
static void
3635
alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3636
                                   HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3637
{
3638
  rtx const im8 = GEN_INT (-8);
3639
  rtx const i64 = GEN_INT (64);
3640
  rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3641
  rtx sreg, areg, tmp, smema;
3642
  HOST_WIDE_INT i;
3643
 
3644
  smema = XEXP (smem, 0);
3645
  if (GET_CODE (smema) == LO_SUM)
3646
    smema = force_reg (Pmode, smema);
3647
 
3648
  /* Generate all the tmp registers we need.  */
3649
  for (i = 0; i < words; ++i)
3650
    {
3651
      data_regs[i] = out_regs[i];
3652
      ext_tmps[i] = gen_reg_rtx (DImode);
3653
    }
3654
  data_regs[words] = gen_reg_rtx (DImode);
3655
 
3656
  if (ofs != 0)
3657
    smem = adjust_address (smem, GET_MODE (smem), ofs);
3658
 
3659
  /* Load up all of the source data.  */
3660
  for (i = 0; i < words; ++i)
3661
    {
3662
      tmp = change_address (smem, DImode,
3663
                            gen_rtx_AND (DImode,
3664
                                         plus_constant (smema, 8*i),
3665
                                         im8));
3666
      set_mem_alias_set (tmp, 0);
3667
      emit_move_insn (data_regs[i], tmp);
3668
    }
3669
 
3670
  tmp = change_address (smem, DImode,
3671
                        gen_rtx_AND (DImode,
3672
                                     plus_constant (smema, 8*words - 1),
3673
                                     im8));
3674
  set_mem_alias_set (tmp, 0);
3675
  emit_move_insn (data_regs[words], tmp);
3676
 
3677
  /* Extract the half-word fragments.  Unfortunately DEC decided to make
3678
     extxh with offset zero a noop instead of zeroing the register, so
3679
     we must take care of that edge condition ourselves with cmov.  */
3680
 
3681
  sreg = copy_addr_to_reg (smema);
3682
  areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3683
                       1, OPTAB_WIDEN);
3684
  if (WORDS_BIG_ENDIAN)
3685
    emit_move_insn (sreg, plus_constant (sreg, 7));
3686
  for (i = 0; i < words; ++i)
3687
    {
3688
      if (WORDS_BIG_ENDIAN)
3689
        {
3690
          emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3691
          emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3692
        }
3693
      else
3694
        {
3695
          emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3696
          emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3697
        }
3698
      emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3699
                              gen_rtx_IF_THEN_ELSE (DImode,
3700
                                                    gen_rtx_EQ (DImode, areg,
3701
                                                                const0_rtx),
3702
                                                    const0_rtx, ext_tmps[i])));
3703
    }
3704
 
3705
  /* Merge the half-words into whole words.  */
3706
  for (i = 0; i < words; ++i)
3707
    {
3708
      out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3709
                                  ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3710
    }
3711
}
3712
 
3713
/* Store an integral number of consecutive unaligned quadwords.  DATA_REGS
3714
   may be NULL to store zeros.  */
3715
 
3716
static void
3717
alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3718
                                    HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3719
{
3720
  rtx const im8 = GEN_INT (-8);
3721
  rtx const i64 = GEN_INT (64);
3722
  rtx ins_tmps[MAX_MOVE_WORDS];
3723
  rtx st_tmp_1, st_tmp_2, dreg;
3724
  rtx st_addr_1, st_addr_2, dmema;
3725
  HOST_WIDE_INT i;
3726
 
3727
  dmema = XEXP (dmem, 0);
3728
  if (GET_CODE (dmema) == LO_SUM)
3729
    dmema = force_reg (Pmode, dmema);
3730
 
3731
  /* Generate all the tmp registers we need.  */
3732
  if (data_regs != NULL)
3733
    for (i = 0; i < words; ++i)
3734
      ins_tmps[i] = gen_reg_rtx(DImode);
3735
  st_tmp_1 = gen_reg_rtx(DImode);
3736
  st_tmp_2 = gen_reg_rtx(DImode);
3737
 
3738
  if (ofs != 0)
3739
    dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3740
 
3741
  st_addr_2 = change_address (dmem, DImode,
3742
                              gen_rtx_AND (DImode,
3743
                                           plus_constant (dmema, words*8 - 1),
3744
                                       im8));
3745
  set_mem_alias_set (st_addr_2, 0);
3746
 
3747
  st_addr_1 = change_address (dmem, DImode,
3748
                              gen_rtx_AND (DImode, dmema, im8));
3749
  set_mem_alias_set (st_addr_1, 0);
3750
 
3751
  /* Load up the destination end bits.  */
3752
  emit_move_insn (st_tmp_2, st_addr_2);
3753
  emit_move_insn (st_tmp_1, st_addr_1);
3754
 
3755
  /* Shift the input data into place.  */
3756
  dreg = copy_addr_to_reg (dmema);
3757
  if (WORDS_BIG_ENDIAN)
3758
    emit_move_insn (dreg, plus_constant (dreg, 7));
3759
  if (data_regs != NULL)
3760
    {
3761
      for (i = words-1; i >= 0; --i)
3762
        {
3763
          if (WORDS_BIG_ENDIAN)
3764
            {
3765
              emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3766
              emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3767
            }
3768
          else
3769
            {
3770
              emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3771
              emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3772
            }
3773
        }
3774
      for (i = words-1; i > 0; --i)
3775
        {
3776
          ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3777
                                        ins_tmps[i-1], ins_tmps[i-1], 1,
3778
                                        OPTAB_WIDEN);
3779
        }
3780
    }
3781
 
3782
  /* Split and merge the ends with the destination data.  */
3783
  if (WORDS_BIG_ENDIAN)
3784
    {
3785
      emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3786
      emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3787
    }
3788
  else
3789
    {
3790
      emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3791
      emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3792
    }
3793
 
3794
  if (data_regs != NULL)
3795
    {
3796
      st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3797
                               st_tmp_2, 1, OPTAB_WIDEN);
3798
      st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3799
                               st_tmp_1, 1, OPTAB_WIDEN);
3800
    }
3801
 
3802
  /* Store it all.  */
3803
  if (WORDS_BIG_ENDIAN)
3804
    emit_move_insn (st_addr_1, st_tmp_1);
3805
  else
3806
    emit_move_insn (st_addr_2, st_tmp_2);
3807
  for (i = words-1; i > 0; --i)
3808
    {
3809
      rtx tmp = change_address (dmem, DImode,
3810
                                gen_rtx_AND (DImode,
3811
                                             plus_constant(dmema,
3812
                                             WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3813
                                             im8));
3814
      set_mem_alias_set (tmp, 0);
3815
      emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3816
    }
3817
  if (WORDS_BIG_ENDIAN)
3818
    emit_move_insn (st_addr_2, st_tmp_2);
3819
  else
3820
    emit_move_insn (st_addr_1, st_tmp_1);
3821
}
3822
 
3823
 
3824
/* Expand string/block move operations.
3825
 
3826
   operands[0] is the pointer to the destination.
3827
   operands[1] is the pointer to the source.
3828
   operands[2] is the number of bytes to move.
3829
   operands[3] is the alignment.  */
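
/* A nonzero return means the move was expanded inline; a zero return
   makes the movmem expander FAIL, so the caller falls back on a
   library call.  Moves larger than MAX_MOVE_WORDS quadwords take that
   path.  */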
3830
 
3831
int
3832
alpha_expand_block_move (rtx operands[])
3833
{
3834
  rtx bytes_rtx = operands[2];
3835
  rtx align_rtx = operands[3];
3836
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3837
  HOST_WIDE_INT bytes = orig_bytes;
3838
  HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3839
  HOST_WIDE_INT dst_align = src_align;
3840
  rtx orig_src = operands[1];
3841
  rtx orig_dst = operands[0];
3842
  rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3843
  rtx tmp;
3844
  unsigned int i, words, ofs, nregs = 0;
3845
 
3846
  if (orig_bytes <= 0)
3847
    return 1;
3848
  else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3849
    return 0;
3850
 
3851
  /* Look for additional alignment information from recorded register info.  */
3852
 
3853
  tmp = XEXP (orig_src, 0);
3854
  if (REG_P (tmp))
3855
    src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3856
  else if (GET_CODE (tmp) == PLUS
3857
           && REG_P (XEXP (tmp, 0))
3858
           && CONST_INT_P (XEXP (tmp, 1)))
3859
    {
3860
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3861
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3862
 
3863
      if (a > src_align)
3864
        {
3865
          if (a >= 64 && c % 8 == 0)
3866
            src_align = 64;
3867
          else if (a >= 32 && c % 4 == 0)
3868
            src_align = 32;
3869
          else if (a >= 16 && c % 2 == 0)
3870
            src_align = 16;
3871
        }
3872
    }
3873
 
3874
  tmp = XEXP (orig_dst, 0);
3875
  if (REG_P (tmp))
3876
    dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3877
  else if (GET_CODE (tmp) == PLUS
3878
           && REG_P (XEXP (tmp, 0))
3879
           && CONST_INT_P (XEXP (tmp, 1)))
3880
    {
3881
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3882
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3883
 
3884
      if (a > dst_align)
3885
        {
3886
          if (a >= 64 && c % 8 == 0)
3887
            dst_align = 64;
3888
          else if (a >= 32 && c % 4 == 0)
3889
            dst_align = 32;
3890
          else if (a >= 16 && c % 2 == 0)
3891
            dst_align = 16;
3892
        }
3893
    }
3894
 
3895
  ofs = 0;
3896
  if (src_align >= 64 && bytes >= 8)
3897
    {
3898
      words = bytes / 8;
3899
 
3900
      for (i = 0; i < words; ++i)
3901
        data_regs[nregs + i] = gen_reg_rtx (DImode);
3902
 
3903
      for (i = 0; i < words; ++i)
3904
        emit_move_insn (data_regs[nregs + i],
3905
                        adjust_address (orig_src, DImode, ofs + i * 8));
3906
 
3907
      nregs += words;
3908
      bytes -= words * 8;
3909
      ofs += words * 8;
3910
    }
3911
 
3912
  if (src_align >= 32 && bytes >= 4)
3913
    {
3914
      words = bytes / 4;
3915
 
3916
      for (i = 0; i < words; ++i)
3917
        data_regs[nregs + i] = gen_reg_rtx (SImode);
3918
 
3919
      for (i = 0; i < words; ++i)
3920
        emit_move_insn (data_regs[nregs + i],
3921
                        adjust_address (orig_src, SImode, ofs + i * 4));
3922
 
3923
      nregs += words;
3924
      bytes -= words * 4;
3925
      ofs += words * 4;
3926
    }
3927
 
3928
  if (bytes >= 8)
3929
    {
3930
      words = bytes / 8;
3931
 
3932
      for (i = 0; i < words+1; ++i)
3933
        data_regs[nregs + i] = gen_reg_rtx (DImode);
3934
 
3935
      alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3936
                                         words, ofs);
3937
 
3938
      nregs += words;
3939
      bytes -= words * 8;
3940
      ofs += words * 8;
3941
    }
3942
 
3943
  if (! TARGET_BWX && bytes >= 4)
3944
    {
3945
      data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3946
      alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3947
      bytes -= 4;
3948
      ofs += 4;
3949
    }
3950
 
3951
  if (bytes >= 2)
3952
    {
3953
      if (src_align >= 16)
3954
        {
3955
          do {
3956
            data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3957
            emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3958
            bytes -= 2;
3959
            ofs += 2;
3960
          } while (bytes >= 2);
3961
        }
3962
      else if (! TARGET_BWX)
3963
        {
3964
          data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3965
          alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3966
          bytes -= 2;
3967
          ofs += 2;
3968
        }
3969
    }
3970
 
3971
  while (bytes > 0)
3972
    {
3973
      data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3974
      emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3975
      bytes -= 1;
3976
      ofs += 1;
3977
    }
3978
 
3979
  gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3980
 
3981
  /* Now save it back out again.  */
3982
 
3983
  i = 0, ofs = 0;
3984
 
3985
  /* Write out the data in whatever chunks reading the source allowed.  */
3986
  if (dst_align >= 64)
3987
    {
3988
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3989
        {
3990
          emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3991
                          data_regs[i]);
3992
          ofs += 8;
3993
          i++;
3994
        }
3995
    }
3996
 
3997
  if (dst_align >= 32)
3998
    {
3999
      /* If the source has remaining DImode regs, write them out in
4000
         two pieces.  */
4001
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4002
        {
4003
          tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4004
                              NULL_RTX, 1, OPTAB_WIDEN);
4005
 
4006
          emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4007
                          gen_lowpart (SImode, data_regs[i]));
4008
          emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4009
                          gen_lowpart (SImode, tmp));
4010
          ofs += 8;
4011
          i++;
4012
        }
4013
 
4014
      while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4015
        {
4016
          emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4017
                          data_regs[i]);
4018
          ofs += 4;
4019
          i++;
4020
        }
4021
    }
4022
 
4023
  if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4024
    {
4025
      /* Write out a remaining block of words using unaligned methods.  */
4026
 
4027
      for (words = 1; i + words < nregs; words++)
4028
        if (GET_MODE (data_regs[i + words]) != DImode)
4029
          break;
4030
 
4031
      if (words == 1)
4032
        alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4033
      else
4034
        alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4035
                                            words, ofs);
4036
 
4037
      i += words;
4038
      ofs += words * 8;
4039
    }
4040
 
4041
  /* Due to the above, this won't be aligned.  */
4042
  /* ??? If we have more than one of these, consider constructing full
4043
     words in registers and using alpha_expand_unaligned_store_words.  */
4044
  while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4045
    {
4046
      alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4047
      ofs += 4;
4048
      i++;
4049
    }
4050
 
4051
  if (dst_align >= 16)
4052
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4053
      {
4054
        emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4055
        i++;
4056
        ofs += 2;
4057
      }
4058
  else
4059
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4060
      {
4061
        alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4062
        i++;
4063
        ofs += 2;
4064
      }
4065
 
4066
  /* The remainder must be byte copies.  */
4067
  while (i < nregs)
4068
    {
4069
      gcc_assert (GET_MODE (data_regs[i]) == QImode);
4070
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4071
      i++;
4072
      ofs += 1;
4073
    }
4074
 
4075
  return 1;
4076
}
4077
 
4078
int
4079
alpha_expand_block_clear (rtx operands[])
4080
{
4081
  rtx bytes_rtx = operands[1];
4082
  rtx align_rtx = operands[3];
4083
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4084
  HOST_WIDE_INT bytes = orig_bytes;
4085
  HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4086
  HOST_WIDE_INT alignofs = 0;
4087
  rtx orig_dst = operands[0];
4088
  rtx tmp;
4089
  int i, words, ofs = 0;
4090
 
4091
  if (orig_bytes <= 0)
4092
    return 1;
4093
  if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4094
    return 0;
4095
 
4096
  /* Look for stricter alignment.  */
4097
  tmp = XEXP (orig_dst, 0);
4098
  if (REG_P (tmp))
4099
    align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4100
  else if (GET_CODE (tmp) == PLUS
4101
           && REG_P (XEXP (tmp, 0))
4102
           && CONST_INT_P (XEXP (tmp, 1)))
4103
    {
4104
      HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4105
      int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4106
 
4107
      if (a > align)
4108
        {
4109
          if (a >= 64)
4110
            align = a, alignofs = 8 - c % 8;
4111
          else if (a >= 32)
4112
            align = a, alignofs = 4 - c % 4;
4113
          else if (a >= 16)
4114
            align = a, alignofs = 2 - c % 2;
4115
        }
4116
    }
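
  /* A worked example: clearing memory at reg+5, where REG is known to
     be 64-bit aligned, gives align = 64 and alignofs = 8 - 5 % 8 = 3,
     i.e. three leading bytes to clear before the first naturally
     aligned quadword.  */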
4117
 
4118
  /* Handle an unaligned prefix first.  */
4119
 
4120
  if (alignofs > 0)
4121
    {
4122
#if HOST_BITS_PER_WIDE_INT >= 64
4123
      /* Given that alignofs is bounded by align, the only time BWX could
4124
         generate three stores is for a 7 byte fill.  Prefer two individual
4125
         stores over a load/mask/store sequence.  */
4126
      if ((!TARGET_BWX || alignofs == 7)
4127
               && align >= 32
4128
               && !(alignofs == 4 && bytes >= 4))
4129
        {
4130
          enum machine_mode mode = (align >= 64 ? DImode : SImode);
4131
          int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4132
          rtx mem, tmp;
4133
          HOST_WIDE_INT mask;
4134
 
4135
          mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4136
          set_mem_alias_set (mem, 0);
4137
 
4138
          mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4139
          if (bytes < alignofs)
4140
            {
4141
              mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4142
              ofs += bytes;
4143
              bytes = 0;
4144
            }
4145
          else
4146
            {
4147
              bytes -= alignofs;
4148
              ofs += alignofs;
4149
            }
4150
          alignofs = 0;
4151
 
4152
          tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4153
                              NULL_RTX, 1, OPTAB_WIDEN);
4154
 
4155
          emit_move_insn (mem, tmp);
4156
        }
4157
#endif
4158
 
4159
      if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4160
        {
4161
          emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4162
          bytes -= 1;
4163
          ofs += 1;
4164
          alignofs -= 1;
4165
        }
4166
      if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4167
        {
4168
          emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4169
          bytes -= 2;
4170
          ofs += 2;
4171
          alignofs -= 2;
4172
        }
4173
      if (alignofs == 4 && bytes >= 4)
4174
        {
4175
          emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4176
          bytes -= 4;
4177
          ofs += 4;
4178
          alignofs = 0;
4179
        }
4180
 
4181
      /* If we've not used the extra lead alignment information by now,
4182
         we won't be able to.  Downgrade align to match what's left over.  */
4183
      if (alignofs > 0)
4184
        {
4185
          alignofs = alignofs & -alignofs;
4186
          align = MIN (align, alignofs * BITS_PER_UNIT);
4187
        }
4188
    }
4189
 
4190
  /* Handle a block of contiguous long-words.  */
4191
 
4192
  if (align >= 64 && bytes >= 8)
4193
    {
4194
      words = bytes / 8;
4195
 
4196
      for (i = 0; i < words; ++i)
4197
        emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4198
                        const0_rtx);
4199
 
4200
      bytes -= words * 8;
4201
      ofs += words * 8;
4202
    }
4203
 
4204
  /* If the block is large and appropriately aligned, emit a single
4205
     store followed by a sequence of stq_u insns.  */
4206
 
4207
  if (align >= 32 && bytes > 16)
4208
    {
4209
      rtx orig_dsta;
4210
 
4211
      emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4212
      bytes -= 4;
4213
      ofs += 4;
4214
 
4215
      orig_dsta = XEXP (orig_dst, 0);
4216
      if (GET_CODE (orig_dsta) == LO_SUM)
4217
        orig_dsta = force_reg (Pmode, orig_dsta);
4218
 
4219
      words = bytes / 8;
4220
      for (i = 0; i < words; ++i)
4221
        {
4222
          rtx mem
4223
            = change_address (orig_dst, DImode,
4224
                              gen_rtx_AND (DImode,
4225
                                           plus_constant (orig_dsta, ofs + i*8),
4226
                                           GEN_INT (-8)));
4227
          set_mem_alias_set (mem, 0);
4228
          emit_move_insn (mem, const0_rtx);
4229
        }
4230
 
4231
      /* Depending on the alignment, the first stq_u may have overlapped
4232
         with the initial stl, which means that the last stq_u didn't
4233
         write as much as it would appear.  Leave those questionable bytes
4234
         unaccounted for.  */
4235
      bytes -= words * 8 - 4;
4236
      ofs += words * 8 - 4;
4237
    }
4238
 
4239
  /* Handle a smaller block of aligned words.  */
4240
 
4241
  if ((align >= 64 && bytes == 4)
4242
      || (align == 32 && bytes >= 4))
4243
    {
4244
      words = bytes / 4;
4245
 
4246
      for (i = 0; i < words; ++i)
4247
        emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4248
                        const0_rtx);
4249
 
4250
      bytes -= words * 4;
4251
      ofs += words * 4;
4252
    }
4253
 
4254
  /* An unaligned block uses stq_u stores for as many as possible.  */
4255
 
4256
  if (bytes >= 8)
4257
    {
4258
      words = bytes / 8;
4259
 
4260
      alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4261
 
4262
      bytes -= words * 8;
4263
      ofs += words * 8;
4264
    }
4265
 
4266
  /* Next clean up any trailing pieces.  */
4267
 
4268
#if HOST_BITS_PER_WIDE_INT >= 64
4269
  /* Count the number of bits in BYTES for which aligned stores could
4270
     be emitted.  */
4271
  words = 0;
4272
  for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4273
    if (bytes & i)
4274
      words += 1;
4275
 
4276
  /* If we have appropriate alignment (and it wouldn't take too many
4277
     instructions otherwise), mask out the bytes we need.  */
4278
  if (TARGET_BWX ? words > 2 : bytes > 0)
4279
    {
4280
      if (align >= 64)
4281
        {
4282
          rtx mem, tmp;
4283
          HOST_WIDE_INT mask;
4284
 
4285
          mem = adjust_address (orig_dst, DImode, ofs);
4286
          set_mem_alias_set (mem, 0);
4287
 
4288
          mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4289
 
4290
          tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4291
                              NULL_RTX, 1, OPTAB_WIDEN);
4292
 
4293
          emit_move_insn (mem, tmp);
4294
          return 1;
4295
        }
4296
      else if (align >= 32 && bytes < 4)
4297
        {
4298
          rtx mem, tmp;
4299
          HOST_WIDE_INT mask;
4300
 
4301
          mem = adjust_address (orig_dst, SImode, ofs);
4302
          set_mem_alias_set (mem, 0);
4303
 
4304
          mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4305
 
4306
          tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4307
                              NULL_RTX, 1, OPTAB_WIDEN);
4308
 
4309
          emit_move_insn (mem, tmp);
4310
          return 1;
4311
        }
4312
    }
4313
#endif
4314
 
4315
  if (!TARGET_BWX && bytes >= 4)
4316
    {
4317
      alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4318
      bytes -= 4;
4319
      ofs += 4;
4320
    }
4321
 
4322
  if (bytes >= 2)
4323
    {
4324
      if (align >= 16)
4325
        {
4326
          do {
4327
            emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4328
                            const0_rtx);
4329
            bytes -= 2;
4330
            ofs += 2;
4331
          } while (bytes >= 2);
4332
        }
4333
      else if (! TARGET_BWX)
4334
        {
4335
          alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4336
          bytes -= 2;
4337
          ofs += 2;
4338
        }
4339
    }
4340
 
4341
  while (bytes > 0)
4342
    {
4343
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4344
      bytes -= 1;
4345
      ofs += 1;
4346
    }
4347
 
4348
  return 1;
4349
}
4350
 
4351
/* Returns a mask so that zap(x, value) == x & mask.  */
4352
 
4353
rtx
4354
alpha_expand_zap_mask (HOST_WIDE_INT value)
4355
{
4356
  rtx result;
4357
  int i;
4358
 
4359
  if (HOST_BITS_PER_WIDE_INT >= 64)
4360
    {
4361
      HOST_WIDE_INT mask = 0;
4362
 
4363
      for (i = 7; i >= 0; --i)
4364
        {
4365
          mask <<= 8;
4366
          if (!((value >> i) & 1))
4367
            mask |= 0xff;
4368
        }
4369
 
4370
      result = gen_int_mode (mask, DImode);
4371
    }
4372
  else
4373
    {
4374
      HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4375
 
4376
      gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4377
 
4378
      for (i = 7; i >= 4; --i)
4379
        {
4380
          mask_hi <<= 8;
4381
          if (!((value >> i) & 1))
4382
            mask_hi |= 0xff;
4383
        }
4384
 
4385
      for (i = 3; i >= 0; --i)
4386
        {
4387
          mask_lo <<= 8;
4388
          if (!((value >> i) & 1))
4389
            mask_lo |= 0xff;
4390
        }
4391
 
4392
      result = immed_double_const (mask_lo, mask_hi, DImode);
4393
    }
4394
 
4395
  return result;
4396
}
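
/* For example, VALUE == 0x81 selects bytes 0 and 7 for zapping, so the
   mask constructed above is 0x00ffffffffffff00 and
   zap (x, 0x81) == x & 0x00ffffffffffff00.  */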
4397
 
4398
void
4399
alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4400
                                   enum machine_mode mode,
4401
                                   rtx op0, rtx op1, rtx op2)
4402
{
4403
  op0 = gen_lowpart (mode, op0);
4404
 
4405
  if (op1 == const0_rtx)
4406
    op1 = CONST0_RTX (mode);
4407
  else
4408
    op1 = gen_lowpart (mode, op1);
4409
 
4410
  if (op2 == const0_rtx)
4411
    op2 = CONST0_RTX (mode);
4412
  else
4413
    op2 = gen_lowpart (mode, op2);
4414
 
4415
  emit_insn ((*gen) (op0, op1, op2));
4416
}
4417
 
4418
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
4419
   COND is true.  Mark the jump as unlikely to be taken.  */
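
/* The probability attached below is REG_BR_PROB_BASE / 100 - 1, i.e.
   just under 1%, which keeps the retry branch off the hot path without
   claiming it can never be taken.  */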
4420
 
4421
static void
4422
emit_unlikely_jump (rtx cond, rtx label)
4423
{
4424
  rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4425
  rtx x;
4426
 
4427
  x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4428
  x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4429
  add_reg_note (x, REG_BR_PROB, very_unlikely);
4430
}
4431
 
4432
/* A subroutine of the atomic operation splitters.  Emit a load-locked
4433
   instruction in MODE.  */
4434
 
4435
static void
4436
emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4437
{
4438
  rtx (*fn) (rtx, rtx) = NULL;
4439
  if (mode == SImode)
4440
    fn = gen_load_locked_si;
4441
  else if (mode == DImode)
4442
    fn = gen_load_locked_di;
4443
  emit_insn (fn (reg, mem));
4444
}
4445
 
4446
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
4447
   instruction in MODE.  */
4448
 
4449
static void
4450
emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4451
{
4452
  rtx (*fn) (rtx, rtx, rtx) = NULL;
4453
  if (mode == SImode)
4454
    fn = gen_store_conditional_si;
4455
  else if (mode == DImode)
4456
    fn = gen_store_conditional_di;
4457
  emit_insn (fn (res, mem, val));
4458
}
4459
 
4460
/* A subroutine of the atomic operation splitters.  Emit an insxl
4461
   instruction in MODE.  */
4462
 
4463
static rtx
4464
emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4465
{
4466
  rtx ret = gen_reg_rtx (DImode);
4467
  rtx (*fn) (rtx, rtx, rtx);
4468
 
4469
  if (WORDS_BIG_ENDIAN)
4470
    {
4471
      if (mode == QImode)
4472
        fn = gen_insbl_be;
4473
      else
4474
        fn = gen_inswl_be;
4475
    }
4476
  else
4477
    {
4478
      if (mode == QImode)
4479
        fn = gen_insbl_le;
4480
      else
4481
        fn = gen_inswl_le;
4482
    }
4483
  /* The insbl and inswl patterns require a register operand.  */
4484
  op1 = force_reg (mode, op1);
4485
  emit_insn (fn (ret, op1, op2));
4486
 
4487
  return ret;
4488
}
4489
 
4490
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
4491
   to perform.  MEM is the memory on which to operate.  VAL is the second
4492
   operand of the binary operator.  BEFORE and AFTER are optional locations to
4493
   return the value of MEM either before or after the operation.  SCRATCH is
4494
   a scratch register.  */
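
/* A sketch of the sequence emitted below, for a DImode PLUS with
   neither BEFORE nor AFTER requested (illustrative pseudo-assembly;
   COND shares SCRATCH's register):

        mb
     1: ldq_l   scratch,0(mem)
        addq    scratch,val,scratch
        stq_c   scratch,0(mem)
        beq     scratch,1b
        mb  */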
4495
 
4496
void
4497
alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4498
                       rtx before, rtx after, rtx scratch)
4499
{
4500
  enum machine_mode mode = GET_MODE (mem);
4501
  rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4502
 
4503
  emit_insn (gen_memory_barrier ());
4504
 
4505
  label = gen_label_rtx ();
4506
  emit_label (label);
4507
  label = gen_rtx_LABEL_REF (DImode, label);
4508
 
4509
  if (before == NULL)
4510
    before = scratch;
4511
  emit_load_locked (mode, before, mem);
4512
 
4513
  if (code == NOT)
4514
    {
4515
      x = gen_rtx_AND (mode, before, val);
4516
      emit_insn (gen_rtx_SET (VOIDmode, val, x));
4517
 
4518
      x = gen_rtx_NOT (mode, val);
4519
    }
4520
  else
4521
    x = gen_rtx_fmt_ee (code, mode, before, val);
4522
  if (after)
4523
    emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4524
  emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4525
 
4526
  emit_store_conditional (mode, cond, mem, scratch);
4527
 
4528
  x = gen_rtx_EQ (DImode, cond, const0_rtx);
4529
  emit_unlikely_jump (x, label);
4530
 
4531
  emit_insn (gen_memory_barrier ());
4532
}
4533
 
4534
/* Expand a compare and swap operation.  */
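
/* A sketch of the loop emitted below, for DImode with a nonzero OLDVAL
   (illustrative pseudo-assembly; COND shares SCRATCH's register):

        mb
     1: ldq_l   retval,0(mem)
        cmpeq   retval,oldval,cond
        beq     cond,2f
        mov     newval,scratch
        stq_c   scratch,0(mem)
        beq     scratch,1b
        mb
     2:  */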
4535
 
4536
void
4537
alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4538
                              rtx scratch)
4539
{
4540
  enum machine_mode mode = GET_MODE (mem);
4541
  rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4542
 
4543
  emit_insn (gen_memory_barrier ());
4544
 
4545
  label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4546
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4547
  emit_label (XEXP (label1, 0));
4548
 
4549
  emit_load_locked (mode, retval, mem);
4550
 
4551
  x = gen_lowpart (DImode, retval);
4552
  if (oldval == const0_rtx)
4553
    x = gen_rtx_NE (DImode, x, const0_rtx);
4554
  else
4555
    {
4556
      x = gen_rtx_EQ (DImode, x, oldval);
4557
      emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4558
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
4559
    }
4560
  emit_unlikely_jump (x, label2);
4561
 
4562
  emit_move_insn (scratch, newval);
4563
  emit_store_conditional (mode, cond, mem, scratch);
4564
 
4565
  x = gen_rtx_EQ (DImode, cond, const0_rtx);
4566
  emit_unlikely_jump (x, label1);
4567
 
4568
  emit_insn (gen_memory_barrier ());
4569
  emit_label (XEXP (label2, 0));
4570
}
4571
 
4572
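/* Expand a compare and swap on a one- or two-byte value in terms of
   the quadword load-locked/store-conditional primitives: align the
   address down to a quadword boundary, position NEWVAL within that
   quadword, and let the mode-specific pattern below split the rest.  */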
void
4573
alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4574
{
4575
  enum machine_mode mode = GET_MODE (mem);
4576
  rtx addr, align, wdst;
4577
  rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4578
 
4579
  addr = force_reg (DImode, XEXP (mem, 0));
4580
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4581
                               NULL_RTX, 1, OPTAB_DIRECT);
4582
 
4583
  oldval = convert_modes (DImode, mode, oldval, 1);
4584
  newval = emit_insxl (mode, newval, addr);
4585
 
4586
  wdst = gen_reg_rtx (DImode);
4587
  if (mode == QImode)
4588
    fn5 = gen_sync_compare_and_swapqi_1;
4589
  else
4590
    fn5 = gen_sync_compare_and_swaphi_1;
4591
  emit_insn (fn5 (wdst, addr, oldval, newval, align));
4592
 
4593
  emit_move_insn (dst, gen_lowpart (mode, wdst));
4594
}
4595
 
4596
void
4597
alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4598
                                 rtx oldval, rtx newval, rtx align,
4599
                                 rtx scratch, rtx cond)
4600
{
4601
  rtx label1, label2, mem, width, mask, x;
4602
 
4603
  mem = gen_rtx_MEM (DImode, align);
4604
  MEM_VOLATILE_P (mem) = 1;
4605
 
4606
  emit_insn (gen_memory_barrier ());
4607
  label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4608
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4609
  emit_label (XEXP (label1, 0));
4610
 
4611
  emit_load_locked (DImode, scratch, mem);
4612
 
4613
  width = GEN_INT (GET_MODE_BITSIZE (mode));
4614
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4615
  if (WORDS_BIG_ENDIAN)
4616
    emit_insn (gen_extxl_be (dest, scratch, width, addr));
4617
  else
4618
    emit_insn (gen_extxl_le (dest, scratch, width, addr));
4619
 
4620
  if (oldval == const0_rtx)
4621
    x = gen_rtx_NE (DImode, dest, const0_rtx);
4622
  else
4623
    {
4624
      x = gen_rtx_EQ (DImode, dest, oldval);
4625
      emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4626
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
4627
    }
4628
  emit_unlikely_jump (x, label2);
4629
 
4630
  if (WORDS_BIG_ENDIAN)
4631
    emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4632
  else
4633
    emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4634
  emit_insn (gen_iordi3 (scratch, scratch, newval));
4635
 
4636
  emit_store_conditional (DImode, scratch, mem, scratch);
4637
 
4638
  x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4639
  emit_unlikely_jump (x, label1);
4640
 
4641
  emit_insn (gen_memory_barrier ());
4642
  emit_label (XEXP (label2, 0));
4643
}
4644
 
4645
/* Expand an atomic exchange operation.  */
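
/* A sketch of the loop emitted below for DImode (illustrative
   pseudo-assembly; note that the barrier follows the exchange):

     1: ldq_l   retval,0(mem)
        mov     val,scratch
        stq_c   scratch,0(mem)
        beq     scratch,1b
        mb  */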
4646
 
4647
void
4648
alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4649
{
4650
  enum machine_mode mode = GET_MODE (mem);
4651
  rtx label, x, cond = gen_lowpart (DImode, scratch);
4652
 
4653
  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4654
  emit_label (XEXP (label, 0));
4655
 
4656
  emit_load_locked (mode, retval, mem);
4657
  emit_move_insn (scratch, val);
4658
  emit_store_conditional (mode, cond, mem, scratch);
4659
 
4660
  x = gen_rtx_EQ (DImode, cond, const0_rtx);
4661
  emit_unlikely_jump (x, label);
4662
 
4663
  emit_insn (gen_memory_barrier ());
4664
}
4665
 
4666
void
4667
alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4668
{
4669
  enum machine_mode mode = GET_MODE (mem);
4670
  rtx addr, align, wdst;
4671
  rtx (*fn4) (rtx, rtx, rtx, rtx);
4672
 
4673
  /* Force the address into a register.  */
4674
  addr = force_reg (DImode, XEXP (mem, 0));
4675
 
4676
  /* Align it to a multiple of 8.  */
4677
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4678
                               NULL_RTX, 1, OPTAB_DIRECT);
4679
 
4680
  /* Insert val into the correct byte location within the word.  */
4681
  val = emit_insxl (mode, val, addr);
4682
 
4683
  wdst = gen_reg_rtx (DImode);
4684
  if (mode == QImode)
4685
    fn4 = gen_sync_lock_test_and_setqi_1;
4686
  else
4687
    fn4 = gen_sync_lock_test_and_sethi_1;
4688
  emit_insn (fn4 (wdst, addr, val, align));
4689
 
4690
  emit_move_insn (dst, gen_lowpart (mode, wdst));
4691
}
4692
 
4693
void
4694
alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4695
                                  rtx val, rtx align, rtx scratch)
4696
{
4697
  rtx label, mem, width, mask, x;
4698
 
4699
  mem = gen_rtx_MEM (DImode, align);
4700
  MEM_VOLATILE_P (mem) = 1;
4701
 
4702
  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4703
  emit_label (XEXP (label, 0));
4704
 
4705
  emit_load_locked (DImode, scratch, mem);
4706
 
4707
  width = GEN_INT (GET_MODE_BITSIZE (mode));
4708
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4709
  if (WORDS_BIG_ENDIAN)
4710
    {
4711
      emit_insn (gen_extxl_be (dest, scratch, width, addr));
4712
      emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4713
    }
4714
  else
4715
    {
4716
      emit_insn (gen_extxl_le (dest, scratch, width, addr));
4717
      emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4718
    }
4719
  emit_insn (gen_iordi3 (scratch, scratch, val));
4720
 
4721
  emit_store_conditional (DImode, scratch, mem, scratch);
4722
 
4723
  x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4724
  emit_unlikely_jump (x, label);
4725
 
4726
  emit_insn (gen_memory_barrier ());
4727
}
4728
 
4729
/* Adjust the cost of a scheduling dependency.  Return the new cost of
4730
   the dependency LINK of INSN on DEP_INSN.  COST is the current cost.  */
4731
 
4732
static int
4733
alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4734
{
4735
  enum attr_type insn_type, dep_insn_type;
4736
 
4737
  /* If the dependence is an anti-dependence, there is no cost.  For an
4738
     output dependence, there is sometimes a cost, but it doesn't seem
4739
     worth handling those few cases.  */
4740
  if (REG_NOTE_KIND (link) != 0)
4741
    return cost;
4742
 
4743
  /* If we can't recognize the insns, we can't really do anything.  */
4744
  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4745
    return cost;
4746
 
4747
  insn_type = get_attr_type (insn);
4748
  dep_insn_type = get_attr_type (dep_insn);
4749
 
4750
  /* Bring in the user-defined memory latency.  */
4751
  if (dep_insn_type == TYPE_ILD
4752
      || dep_insn_type == TYPE_FLD
4753
      || dep_insn_type == TYPE_LDSYM)
4754
    cost += alpha_memory_latency-1;
4755
 
4756
  /* Everything else handled in DFA bypasses now.  */
4757
 
4758
  return cost;
4759
}
4760
 
4761
/* The number of instructions that can be issued per cycle.  */
4762
 
4763
static int
4764
alpha_issue_rate (void)
4765
{
4766
  return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4767
}
4768
 
4769
/* How many alternative schedules to try.  This should be as wide as the
4770
   scheduling freedom in the DFA, but no wider.  Making this value too
4771
   large results in extra work for the scheduler.
4772
 
4773
   For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4774
   alternative schedules.  For EV5, we can choose between E0/E1 and
4775
   FA/FM.  For EV6, an arithmetic insn can be issued to U0/U1/L0/L1.  */
4776
 
4777
static int
4778
alpha_multipass_dfa_lookahead (void)
4779
{
4780
  return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4781
}
4782
 
4783
/* Machine-specific function data.  */
4784
 
4785
struct GTY(()) machine_function
4786
{
4787
  /* For unicosmk.  */
4788
  /* List of call information words for calls from this function.  */
4789
  struct rtx_def *first_ciw;
4790
  struct rtx_def *last_ciw;
4791
  int ciw_count;
4792
 
4793
  /* List of deferred case vectors.  */
4794
  struct rtx_def *addr_list;
4795
 
4796
  /* For OSF.  */
4797
  const char *some_ld_name;
4798
 
4799
  /* For TARGET_LD_BUGGY_LDGP.  */
4800
  struct rtx_def *gp_save_rtx;
4801
 
4802
  /* For VMS condition handlers.  */
4803
  bool uses_condition_handler;
4804
};
4805
 
4806
/* How to allocate a 'struct machine_function'.  */
4807
 
4808
static struct machine_function *
4809
alpha_init_machine_status (void)
4810
{
4811
  return ((struct machine_function *)
4812
                ggc_alloc_cleared (sizeof (struct machine_function)));
4813
}
4814
 
4815
/* Support for frame based VMS condition handlers.  */
4816
 
4817
/* A VMS condition handler may be established for a function with a call to
4818
   __builtin_establish_vms_condition_handler, and cancelled with a call to
4819
   __builtin_revert_vms_condition_handler.
4820
 
4821
   The VMS Condition Handling Facility knows about the existence of a handler
4822
   from the procedure descriptor .handler field.  Like the VMS native compilers,
4823
   we store the user-specified handler's address at a fixed location in the
4824
   stack frame and point the procedure descriptor at a common wrapper which
4825
   fetches the real handler's address and issues an indirect call.
4826
 
4827
   The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4828
 
4829
   We force the procedure kind to PT_STACK, and the fixed frame location is
4830
   fp+8, just before the register save area. We use the handler_data field in
4831
   the procedure descriptor to state the fp offset at which the installed
4832
   handler address can be found.  */
4833
 
4834
#define VMS_COND_HANDLER_FP_OFFSET 8
4835
 
4836
/* Expand code to store the currently installed user VMS condition handler
4837
   into TARGET and install HANDLER as the new condition handler.  */
4838
 
4839
void
4840
alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4841
{
4842
  rtx handler_slot_address
4843
    = plus_constant (hard_frame_pointer_rtx, VMS_COND_HANDLER_FP_OFFSET);
4844
 
4845
  rtx handler_slot
4846
    = gen_rtx_MEM (DImode, handler_slot_address);
4847
 
4848
  emit_move_insn (target, handler_slot);
4849
  emit_move_insn (handler_slot, handler);
4850
 
4851
  /* Notify the start/prologue/epilogue emitters that the condition handler
4852
     slot is needed.  In addition to reserving the slot space, this will force
4853
     the procedure kind to PT_STACK, ensuring that the hard_frame_pointer_rtx
4854
     use above is correct.  */
4855
  cfun->machine->uses_condition_handler = true;
4856
}
4857
 
4858
/* Expand code to store the current VMS condition handler into TARGET and
4859
   nullify it.  */
4860
 
4861
void
4862
alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4863
{
4864
  /* We implement this by establishing a null condition handler, with the tiny
4865
     side effect of setting uses_condition_handler.  This is a little bit
4866
     pessimistic if no actual builtin_establish call is ever issued, which is
4867
     not a real problem and is expected never to happen anyway.  */
4868
 
4869
  alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4870
}

/* Functions to save and restore alpha_return_addr_rtx.  */

/* Start the ball rolling with RETURN_ADDR_RTX.  */

rtx
alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
{
  if (count != 0)
    return const0_rtx;

  return get_hard_reg_initial_val (Pmode, REG_RA);
}

/* Return or create a memory slot containing the gp value for the current
   function.  Needed only if TARGET_LD_BUGGY_LDGP.  */

rtx
alpha_gp_save_rtx (void)
{
  rtx seq, m = cfun->machine->gp_save_rtx;

  if (m == NULL)
    {
      start_sequence ();

      m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
      m = validize_mem (m);
      emit_move_insn (m, pic_offset_table_rtx);

      seq = get_insns ();
      end_sequence ();

      /* We used to simply emit the sequence after entry_of_function.
         However this breaks the CFG if the first instruction in the
         first block is not the NOTE_INSN_BASIC_BLOCK, for example a
         label.  Emit the sequence properly on the edge.  We are only
         invoked from dw2_build_landing_pads and finish_eh_generation
         will call commit_edge_insertions thanks to a kludge.  */
      insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));

      cfun->machine->gp_save_rtx = m;
    }

  return m;
}

static int
alpha_ra_ever_killed (void)
{
  rtx top;

  if (!has_hard_reg_initial_val (Pmode, REG_RA))
    return (int)df_regs_ever_live_p (REG_RA);

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();

  return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
}


/* Return the trap mode suffix applicable to the current
   instruction, or NULL.  */

static const char *
get_trap_mode_suffix (void)
{
  enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);

  switch (s)
    {
    case TRAP_SUFFIX_NONE:
      return NULL;

    case TRAP_SUFFIX_SU:
      if (alpha_fptm >= ALPHA_FPTM_SU)
        return "su";
      return NULL;

    case TRAP_SUFFIX_SUI:
      if (alpha_fptm >= ALPHA_FPTM_SUI)
        return "sui";
      return NULL;

    case TRAP_SUFFIX_V_SV:
      switch (alpha_fptm)
        {
        case ALPHA_FPTM_N:
          return NULL;
        case ALPHA_FPTM_U:
          return "v";
        case ALPHA_FPTM_SU:
        case ALPHA_FPTM_SUI:
          return "sv";
        default:
          gcc_unreachable ();
        }

    case TRAP_SUFFIX_V_SV_SVI:
      switch (alpha_fptm)
        {
        case ALPHA_FPTM_N:
          return NULL;
        case ALPHA_FPTM_U:
          return "v";
        case ALPHA_FPTM_SU:
          return "sv";
        case ALPHA_FPTM_SUI:
          return "svi";
        default:
          gcc_unreachable ();
        }
      break;

    case TRAP_SUFFIX_U_SU_SUI:
      switch (alpha_fptm)
        {
        case ALPHA_FPTM_N:
          return NULL;
        case ALPHA_FPTM_U:
          return "u";
        case ALPHA_FPTM_SU:
          return "su";
        case ALPHA_FPTM_SUI:
          return "sui";
        default:
          gcc_unreachable ();
        }
      break;

    default:
      gcc_unreachable ();
    }
  gcc_unreachable ();
}

/* Return the rounding mode suffix applicable to the current
   instruction, or NULL.  */

static const char *
get_round_mode_suffix (void)
{
  enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);

  switch (s)
    {
    case ROUND_SUFFIX_NONE:
      return NULL;
    case ROUND_SUFFIX_NORMAL:
      switch (alpha_fprm)
        {
        case ALPHA_FPRM_NORM:
          return NULL;
        case ALPHA_FPRM_MINF:
          return "m";
        case ALPHA_FPRM_CHOP:
          return "c";
        case ALPHA_FPRM_DYN:
          return "d";
        default:
          gcc_unreachable ();
        }
      break;

    case ROUND_SUFFIX_C:
      return "c";

    default:
      gcc_unreachable ();
    }
  gcc_unreachable ();
}
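
/* A standalone sketch (not part of this file) of how the two suffixes
   returned above are glued together by the %/ operand code in
   print_operand below: either suffix may be absent, and at most one
   slash is printed.  */

static void
emit_fp_mnemonic_sketch (const char *op, const char *trap, const char *round)
{
  if (trap || round)
    printf ("%s/%s%s", op, trap ? trap : "", round ? round : "");
  else
    printf ("%s", op);
}

/* emit_fp_mnemonic_sketch ("addt", "sui", "d") prints "addt/suid",
   the form produced under -mfp-trap-mode=sui -mfp-rounding-mode=d.  */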

/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in some movdi_er_tlsldm pattern.  */

static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
    {
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}

static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
        && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}

/* Print an operand.  Recognize special options, documented below.  */

void
print_operand (FILE *file, rtx x, int code)
{
  int i;

  switch (code)
    {
    case '~':
      /* Print the assembler name of the current function.  */
      assemble_name (file, alpha_fnname);
      break;

    case '&':
      assemble_name (file, get_some_local_dynamic_name ());
      break;

    case '/':
      {
        const char *trap = get_trap_mode_suffix ();
        const char *round = get_round_mode_suffix ();

        if (trap || round)
          fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
                   (trap ? trap : ""), (round ? round : ""));
        break;
      }

    case ',':
      /* Write the single precision instruction suffix.  */
      fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
      break;

    case '-':
      /* Write the double precision instruction suffix.  */
      fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
      break;

    case '#':
      if (alpha_this_literal_sequence_number == 0)
        alpha_this_literal_sequence_number = alpha_next_sequence_number++;
      fprintf (file, "%d", alpha_this_literal_sequence_number);
      break;

    case '*':
      if (alpha_this_gpdisp_sequence_number == 0)
        alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
      fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
      break;

    case 'H':
      if (GET_CODE (x) == HIGH)
        output_addr_const (file, XEXP (x, 0));
      else
        output_operand_lossage ("invalid %%H value");
      break;

    case 'J':
      {
        const char *lituse;

        if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
          {
            x = XVECEXP (x, 0, 0);
            lituse = "lituse_tlsgd";
          }
        else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
          {
            x = XVECEXP (x, 0, 0);
            lituse = "lituse_tlsldm";
          }
        else if (CONST_INT_P (x))
          lituse = "lituse_jsr";
        else
          {
            output_operand_lossage ("invalid %%J value");
            break;
          }

        if (x != const0_rtx)
          fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
      }
      break;

    case 'j':
      {
        const char *lituse;

#ifdef HAVE_AS_JSRDIRECT_RELOCS
        lituse = "lituse_jsrdirect";
#else
        lituse = "lituse_jsr";
#endif

        gcc_assert (INTVAL (x) != 0);
        fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
      }
      break;

    case 'r':
      /* If this operand is the constant zero, write it as "$31".  */
      if (REG_P (x))
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$31");
      else
        output_operand_lossage ("invalid %%r value");
      break;

    case 'R':
      /* Similar, but for floating-point.  */
      if (REG_P (x))
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$f31");
      else
        output_operand_lossage ("invalid %%R value");
      break;

    case 'N':
      /* Write the 1's complement of a constant.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%N value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
      break;

    case 'P':
      /* Write 1 << C, for a constant C.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%P value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
      break;

    case 'h':
      /* Write the high-order 16 bits of a constant, sign-extended.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%h value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
      break;

    case 'L':
      /* Write the low-order 16 bits of a constant, sign-extended.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%L value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
               (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
      break;

    case 'm':
      /* Write mask for ZAP insn.  */
      if (GET_CODE (x) == CONST_DOUBLE)
        {
          HOST_WIDE_INT mask = 0;
          HOST_WIDE_INT value;

          value = CONST_DOUBLE_LOW (x);
          for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
               i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << i);

          value = CONST_DOUBLE_HIGH (x);
          for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
               i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << (i + sizeof (int)));

          fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
        }

      else if (CONST_INT_P (x))
        {
          HOST_WIDE_INT mask = 0, value = INTVAL (x);

          for (i = 0; i < 8; i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << i);

          fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
        }
      else
        output_operand_lossage ("invalid %%m value");
      break;

    case 'M':
      /* 'b', 'w', 'l', or 'q' as the value of the constant.  */
      if (!CONST_INT_P (x)
          || (INTVAL (x) != 8 && INTVAL (x) != 16
              && INTVAL (x) != 32 && INTVAL (x) != 64))
        output_operand_lossage ("invalid %%M value");

      fprintf (file, "%s",
               (INTVAL (x) == 8 ? "b"
                : INTVAL (x) == 16 ? "w"
                : INTVAL (x) == 32 ? "l"
                : "q"));
      break;

    case 'U':
      /* Similar, except do it from the mask.  */
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT value = INTVAL (x);

          if (value == 0xff)
            {
              fputc ('b', file);
              break;
            }
          if (value == 0xffff)
            {
              fputc ('w', file);
              break;
            }
          if (value == 0xffffffff)
            {
              fputc ('l', file);
              break;
            }
          if (value == -1)
            {
              fputc ('q', file);
              break;
            }
        }
      else if (HOST_BITS_PER_WIDE_INT == 32
               && GET_CODE (x) == CONST_DOUBLE
               && CONST_DOUBLE_LOW (x) == 0xffffffff
               && CONST_DOUBLE_HIGH (x) == 0)
        {
          fputc ('l', file);
          break;
        }
      output_operand_lossage ("invalid %%U value");
      break;

    case 's':
      /* Write the constant value divided by 8 for little-endian mode or
         (56 - value) / 8 for big-endian mode.  */

      if (!CONST_INT_P (x)
          || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
                                                     ? 56
                                                     : 64)
          || (INTVAL (x) & 7) != 0)
        output_operand_lossage ("invalid %%s value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
               WORDS_BIG_ENDIAN
               ? (56 - INTVAL (x)) / 8
               : INTVAL (x) / 8);
      break;

    case 'S':
      /* Same, except compute (64 - c) / 8.  */

      if (!CONST_INT_P (x)
          || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
          || (INTVAL (x) & 7) != 0)
        output_operand_lossage ("invalid %%S value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
      break;

    case 't':
      {
        /* On Unicos/Mk systems: use a DEX expression if the symbol
           clashes with a register name.  */
        int dex = unicosmk_need_dex (x);
        if (dex)
          fprintf (file, "DEX(%d)", dex);
        else
          output_addr_const (file, x);
      }
      break;

    case 'C': case 'D': case 'c': case 'd':
      /* Write out comparison name.  */
      {
        enum rtx_code c = GET_CODE (x);

        if (!COMPARISON_P (x))
          output_operand_lossage ("invalid %%C value");

        else if (code == 'D')
          c = reverse_condition (c);
        else if (code == 'c')
          c = swap_condition (c);
        else if (code == 'd')
          c = swap_condition (reverse_condition (c));

        if (c == LEU)
          fprintf (file, "ule");
        else if (c == LTU)
          fprintf (file, "ult");
        else if (c == UNORDERED)
          fprintf (file, "un");
        else
          fprintf (file, "%s", GET_RTX_NAME (c));
      }
      break;

    case 'E':
      /* Write the divide or modulus operator.  */
      switch (GET_CODE (x))
        {
        case DIV:
          fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
          break;
        case UDIV:
          fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
          break;
        case MOD:
          fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
          break;
        case UMOD:
          fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
          break;
        default:
          output_operand_lossage ("invalid %%E value");
          break;
        }
      break;

    case 'A':
      /* Write "_u" for unaligned access.  */
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
        fprintf (file, "_u");
      break;

    case 0:
      if (REG_P (x))
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (MEM_P (x))
        output_address (XEXP (x, 0));
      else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
        {
          switch (XINT (XEXP (x, 0), 1))
            {
            case UNSPEC_DTPREL:
            case UNSPEC_TPREL:
              output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
              break;
            default:
              output_operand_lossage ("unknown relocation unspec");
              break;
            }
        }
      else
        output_addr_const (file, x);
      break;

    default:
      output_operand_lossage ("invalid %%xn code");
    }
}
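
/* A standalone sketch (not part of this file) of the %m computation
   above for the CONST_INT case: each of the eight bytes of VALUE that
   is nonzero sets the corresponding bit in the zap/zapnot byte mask.  */

static int
zap_mask_sketch (unsigned long value)
{
  int i, mask = 0;

  for (i = 0; i < 8; i++, value >>= 8)
    if (value & 0xff)
      mask |= 1 << i;
  return mask;
}

/* zap_mask_sketch (0xffffffffUL) is 0x0f -- the low four bytes are
   nonzero -- which the %U code above would render as the "l"
   (longword) suffix.  */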

void
print_operand_address (FILE *file, rtx addr)
{
  int basereg = 31;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (addr) == AND)
    addr = XEXP (addr, 0);

  if (GET_CODE (addr) == PLUS
      && CONST_INT_P (XEXP (addr, 1)))
    {
      offset = INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    {
      const char *reloc16, *reloclo;
      rtx op1 = XEXP (addr, 1);

      if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
        {
          op1 = XEXP (op1, 0);
          switch (XINT (op1, 1))
            {
            case UNSPEC_DTPREL:
              reloc16 = NULL;
              reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
              break;
            case UNSPEC_TPREL:
              reloc16 = NULL;
              reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
              break;
            default:
              output_operand_lossage ("unknown relocation unspec");
              return;
            }

          output_addr_const (file, XVECEXP (op1, 0, 0));
        }
      else
        {
          reloc16 = "gprel";
          reloclo = "gprellow";
          output_addr_const (file, op1);
        }

      if (offset)
        fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);

      addr = XEXP (addr, 0);
      switch (GET_CODE (addr))
        {
        case REG:
          basereg = REGNO (addr);
          break;

        case SUBREG:
          basereg = subreg_regno (addr);
          break;

        default:
          gcc_unreachable ();
        }

      fprintf (file, "($%d)\t\t!%s", basereg,
               (basereg == 29 ? reloc16 : reloclo));
      return;
    }

  switch (GET_CODE (addr))
    {
    case REG:
      basereg = REGNO (addr);
      break;

    case SUBREG:
      basereg = subreg_regno (addr);
      break;

    case CONST_INT:
      offset = INTVAL (addr);
      break;

#if TARGET_ABI_OPEN_VMS
    case SYMBOL_REF:
      fprintf (file, "%s", XSTR (addr, 0));
      return;

    case CONST:
      gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
                  && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
      fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
               XSTR (XEXP (XEXP (addr, 0), 0), 0),
               INTVAL (XEXP (XEXP (addr, 0), 1)));
      return;

#endif
    default:
      gcc_unreachable ();
    }

  fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
}

/* Emit RTL insns to initialize the variable parts of a trampoline at
   M_TRAMP.  FNDECL is target function's decl.  CHAIN_VALUE is an rtx
   for the static chain value for the function.  */

static void
alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr, mem, word1, word2;

  fnaddr = XEXP (DECL_RTL (fndecl), 0);

#ifdef POINTERS_EXTEND_UNSIGNED
  fnaddr = convert_memory_address (Pmode, fnaddr);
  chain_value = convert_memory_address (Pmode, chain_value);
#endif

  if (TARGET_ABI_OPEN_VMS)
    {
      const char *fnname;
      char *trname;

      /* Construct the name of the trampoline entry point.  */
      fnname = XSTR (fnaddr, 0);
      trname = (char *) alloca (strlen (fnname) + 5);
      strcpy (trname, fnname);
      strcat (trname, "..tr");
      fnname = ggc_alloc_string (trname, strlen (trname) + 1);
      word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);

      /* The trampoline (or "bounded") procedure descriptor is constructed
         from the function's procedure descriptor with certain fields zeroed
         in accordance with the VMS calling standard.  This is stored in the
         first quadword.  */
      word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
      word1 = expand_and (DImode, word1, GEN_INT (0xffff0fff0000fff0), NULL);
    }
  else
    {
      /* These 4 instructions are:
            ldq $1,24($27)
            ldq $27,16($27)
            jmp $31,($27),0
            nop
         We don't bother setting the HINT field of the jump; the nop
         is merely there for padding.  */
      word1 = GEN_INT (0xa77b0010a43b0018);
      word2 = GEN_INT (0x47ff041f6bfb0000);
    }

  /* Store the first two words, as computed above.  */
  mem = adjust_address (m_tramp, DImode, 0);
  emit_move_insn (mem, word1);
  mem = adjust_address (m_tramp, DImode, 8);
  emit_move_insn (mem, word2);

  /* Store function address and static chain value.  */
  mem = adjust_address (m_tramp, Pmode, 16);
  emit_move_insn (mem, fnaddr);
  mem = adjust_address (m_tramp, Pmode, 24);
  emit_move_insn (mem, chain_value);

  if (!TARGET_ABI_OPEN_VMS)
    {
      emit_insn (gen_imb ());
#ifdef ENABLE_EXECUTE_STACK
      emit_library_call (init_one_libfunc ("__enable_execute_stack"),
                         LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
    }
}
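
/* An illustrative layout sketch (not part of this file) of the 32-byte
   non-VMS trampoline initialized above; the two instruction quadwords
   load the target address and static chain from offsets 16 and 24.  */

struct alpha_osf_trampoline_sketch
{
  unsigned long insns0;        /* ldq $1,24($27); ldq $27,16($27) */
  unsigned long insns1;        /* jmp $31,($27),0; nop            */
  unsigned long target_fnaddr; /* offset 16: loaded into $27      */
  unsigned long static_chain;  /* offset 24: loaded into $1       */
};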

/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On Alpha the first 6 words of args are normally in registers
   and the rest are pushed.  */

rtx
function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
              int named ATTRIBUTE_UNUSED)
{
  int basereg;
  int num_args;

  /* Don't get confused and pass small structures in FP registers.  */
  if (type && AGGREGATE_TYPE_P (type))
    basereg = 16;
  else
    {
#ifdef ENABLE_CHECKING
      /* With alpha_split_complex_arg, we shouldn't see any raw complex
         values here.  */
      gcc_assert (!COMPLEX_MODE_P (mode));
#endif

      /* Set up defaults for FP operands passed in FP registers, and
         integral operands passed in integer registers.  */
      if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
        basereg = 32 + 16;
      else
        basereg = 16;
    }

  /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
     the three platforms, so we can't avoid conditional compilation.  */
#if TARGET_ABI_OPEN_VMS
    {
      if (mode == VOIDmode)
        return alpha_arg_info_reg_val (cum);

      num_args = cum.num_args;
      if (num_args >= 6
          || targetm.calls.must_pass_in_stack (mode, type))
        return NULL_RTX;
    }
#elif TARGET_ABI_UNICOSMK
    {
      int size;

      /* If this is the last argument, generate the call info word (CIW).  */
      /* ??? We don't include the caller's line number in the CIW because
         I don't know how to determine it if debug infos are turned off.  */
      if (mode == VOIDmode)
        {
          int i;
          HOST_WIDE_INT lo;
          HOST_WIDE_INT hi;
          rtx ciw;

          lo = 0;

          for (i = 0; i < cum.num_reg_words && i < 5; i++)
            if (cum.reg_args_type[i])
              lo |= (1 << (7 - i));

          if (cum.num_reg_words == 6 && cum.reg_args_type[5])
            lo |= 7;
          else
            lo |= cum.num_reg_words;

#if HOST_BITS_PER_WIDE_INT == 32
          hi = (cum.num_args << 20) | cum.num_arg_words;
#else
          lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
            | ((HOST_WIDE_INT) cum.num_arg_words << 32);
          hi = 0;
#endif
          ciw = immed_double_const (lo, hi, DImode);

          return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
                                 UNSPEC_UMK_LOAD_CIW);
        }

      size = ALPHA_ARG_SIZE (mode, type, named);
      num_args = cum.num_reg_words;
      if (cum.force_stack
          || cum.num_reg_words + size > 6
          || targetm.calls.must_pass_in_stack (mode, type))
        return NULL_RTX;
      else if (type && TYPE_MODE (type) == BLKmode)
        {
          rtx reg1, reg2;

          reg1 = gen_rtx_REG (DImode, num_args + 16);
          reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);

          /* The argument fits in at most two registers.  Note that we
             still need to reserve a register for empty structures.  */
          if (size == 0)
            return NULL_RTX;
          else if (size == 1)
            return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
          else
            {
              reg2 = gen_rtx_REG (DImode, num_args + 17);
              reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
              return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
            }
        }
    }
#elif TARGET_ABI_OSF
    {
      if (cum >= 6)
        return NULL_RTX;
      num_args = cum;

      /* VOID is passed as a special flag for "last argument".  */
      if (type == void_type_node)
        basereg = 16;
      else if (targetm.calls.must_pass_in_stack (mode, type))
        return NULL_RTX;
    }
#else
#error Unhandled ABI
#endif

  return gen_rtx_REG (mode, num_args + basereg);
}
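
/* A host-side sketch (not part of this file) of the OSF register choice
   made above: slot N of the first six argument slots goes to $(16+N)
   for integer arguments or $f(16+N) for FP arguments, and anything past
   slot 5 goes to the stack.  */

static const char *
osf_arg_location_sketch (int slot, int is_fp, char buf[8])
{
  if (slot >= 6)
    return "stack";
  /* basereg is 16 for integer args, 32 + 16 for FP args.  */
  sprintf (buf, "$%s%d", is_fp ? "f" : "", 16 + slot);
  return buf;
}

/* For extern double f (int, double, long), the three arguments occupy
   slots 0, 1 and 2:
     osf_arg_location_sketch (0, 0, buf) -> "$16"
     osf_arg_location_sketch (1, 1, buf) -> "$f17"
     osf_arg_location_sketch (2, 0, buf) -> "$18"  */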

static int
alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
                         enum machine_mode mode ATTRIBUTE_UNUSED,
                         tree type ATTRIBUTE_UNUSED,
                         bool named ATTRIBUTE_UNUSED)
{
  int words = 0;

#if TARGET_ABI_OPEN_VMS
  if (cum->num_args < 6
      && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
    words = 6 - cum->num_args;
#elif TARGET_ABI_UNICOSMK
  /* Never any split arguments.  */
#elif TARGET_ABI_OSF
  if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
    words = 6 - *cum;
#else
#error Unhandled ABI
#endif

  return words * UNITS_PER_WORD;
}


/* Return true if TYPE must be returned in memory, instead of in registers.  */

static bool
alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = VOIDmode;
  int size;

  if (type)
    {
      mode = TYPE_MODE (type);

      /* All aggregates are returned in memory, except on OpenVMS where
         records that fit in 64 bits should be returned by immediate value
         as required by section 3.8.7.1 of the OpenVMS Calling Standard.  */
      if (TARGET_ABI_OPEN_VMS
          && TREE_CODE (type) != ARRAY_TYPE
          && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 8)
        return false;

      if (AGGREGATE_TYPE_P (type))
        return true;
    }

  size = GET_MODE_SIZE (mode);
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_VECTOR_FLOAT:
      /* Pass all float vectors in memory, like an aggregate.  */
      return true;

    case MODE_COMPLEX_FLOAT:
      /* We judge complex floats on the size of their element,
         not the size of the whole type.  */
      size = GET_MODE_UNIT_SIZE (mode);
      break;

    case MODE_INT:
    case MODE_FLOAT:
    case MODE_COMPLEX_INT:
    case MODE_VECTOR_INT:
      break;

    default:
      /* ??? We get called on all sorts of random stuff from
         aggregate_value_p.  We must return something, but it's not
         clear what's safe to return.  Pretend it's a struct I
         guess.  */
      return true;
    }

  /* Otherwise types must fit in one register.  */
  return size > UNITS_PER_WORD;
}
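
/* Illustrative classifications under the rules above, on the OSF ABI
   (a sketch, not part of this file; VMS differs for small records, as
   noted in the comment):  */

struct two_ints_sketch { int a, b; };  /* aggregate -> memory            */
typedef _Complex double zd_sketch;     /* judged per 8-byte element
                                          -> registers                   */
typedef long v2di_sketch __attribute__ ((vector_size (16)));
                                       /* 16-byte integer vector > 8
                                          bytes -> memory                */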

/* Return true if TYPE should be passed by invisible reference.  */

static bool
alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
                         enum machine_mode mode,
                         const_tree type ATTRIBUTE_UNUSED,
                         bool named ATTRIBUTE_UNUSED)
{
  return mode == TFmode || mode == TCmode;
}

/* Define how to find the value returned by a function.  VALTYPE is the
   data type of the value (as a tree).  If the precise function being
   called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
   MODE is set instead of VALTYPE for libcalls.

   On Alpha the value is found in $0 for integer functions and
   $f0 for floating-point functions.  */

rtx
function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
                enum machine_mode mode)
{
  unsigned int regnum, dummy;
  enum mode_class mclass;

  gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));

  if (valtype)
    mode = TYPE_MODE (valtype);

  mclass = GET_MODE_CLASS (mode);
  switch (mclass)
    {
    case MODE_INT:
      /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
         where we have them returning both SImode and DImode.  */
      if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
        PROMOTE_MODE (mode, dummy, valtype);
      /* FALLTHRU */

    case MODE_COMPLEX_INT:
    case MODE_VECTOR_INT:
      regnum = 0;
      break;

    case MODE_FLOAT:
      regnum = 32;
      break;

    case MODE_COMPLEX_FLOAT:
      {
        enum machine_mode cmode = GET_MODE_INNER (mode);

        return gen_rtx_PARALLEL
          (VOIDmode,
           gen_rtvec (2,
                      gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
                                         const0_rtx),
                      gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
                                         GEN_INT (GET_MODE_SIZE (cmode)))));
      }

    case MODE_RANDOM:
      /* We should only reach here for BLKmode on VMS.  */
      gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
      regnum = 0;
      break;

    default:
      gcc_unreachable ();
    }

  return gen_rtx_REG (mode, regnum);
}

/* TCmode complex values are passed by invisible reference.  We
   should not split these values.  */

static bool
alpha_split_complex_arg (const_tree type)
{
  return TYPE_MODE (type) != TCmode;
}

static tree
alpha_build_builtin_va_list (void)
{
  tree base, ofs, space, record, type_decl;

  if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
    return ptr_type_node;

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION,
                          TYPE_DECL, get_identifier ("__va_list_tag"), record);
  TREE_CHAIN (record) = type_decl;
  TYPE_NAME (record) = type_decl;

  /* C++? SET_IS_AGGR_TYPE (record, 1); */

  /* Dummy field to prevent alignment warnings.  */
  space = build_decl (BUILTINS_LOCATION,
                      FIELD_DECL, NULL_TREE, integer_type_node);
  DECL_FIELD_CONTEXT (space) = record;
  DECL_ARTIFICIAL (space) = 1;
  DECL_IGNORED_P (space) = 1;

  ofs = build_decl (BUILTINS_LOCATION,
                    FIELD_DECL, get_identifier ("__offset"),
                    integer_type_node);
  DECL_FIELD_CONTEXT (ofs) = record;
  TREE_CHAIN (ofs) = space;

  base = build_decl (BUILTINS_LOCATION,
                     FIELD_DECL, get_identifier ("__base"),
                     ptr_type_node);
  DECL_FIELD_CONTEXT (base) = record;
  TREE_CHAIN (base) = ofs;

  TYPE_FIELDS (record) = base;
  layout_type (record);

  va_list_gpr_counter_field = ofs;
  return record;
}
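
/* The record built above corresponds roughly to this C declaration (a
   sketch, not part of this file; field semantics are given by
   alpha_va_start and alpha_gimplify_va_arg below):  */

typedef struct __va_list_tag_sketch
{
  void *__base;   /* base address from which argument offsets count */
  int __offset;   /* byte offset of the next argument               */
  int __space;    /* the unnamed dummy alignment field above        */
} __sketch_va_list;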

#if TARGET_ABI_OSF
/* Helper function for alpha_stdarg_optimize_hook.  Skip over casts
   and constant additions.  */

static gimple
va_list_skip_additions (tree lhs)
{
  gimple stmt;

  for (;;)
    {
      enum tree_code code;

      stmt = SSA_NAME_DEF_STMT (lhs);

      if (gimple_code (stmt) == GIMPLE_PHI)
        return stmt;

      if (!is_gimple_assign (stmt)
          || gimple_assign_lhs (stmt) != lhs)
        return NULL;

      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
        return stmt;
      code = gimple_assign_rhs_code (stmt);
      if (!CONVERT_EXPR_CODE_P (code)
          && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
              || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
              || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
        return stmt;

      lhs = gimple_assign_rhs1 (stmt);
    }
}

/* Check if LHS = RHS statement is
   LHS = *(ap.__base + ap.__offset + cst)
   or
   LHS = *(ap.__base
           + ((ap.__offset + cst <= 47)
              ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
   If the former, indicate that GPR registers are needed,
   if the latter, indicate that FPR registers are needed.

   Also look for LHS = (*ptr).field, where ptr is one of the forms
   listed above.

   On alpha, cfun->va_list_gpr_size is used as the size of the needed
   regs, and cfun->va_list_fpr_size is a bitmask: bit 0 is set if GPR
   registers are needed and bit 1 is set if FPR registers are needed.
   Return true if va_list references should not be scanned for the
   current statement.  */

static bool
alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
{
  tree base, offset, rhs;
  int offset_arg = 1;
  gimple base_stmt;

  if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
      != GIMPLE_SINGLE_RHS)
    return false;

  rhs = gimple_assign_rhs1 (stmt);
  while (handled_component_p (rhs))
    rhs = TREE_OPERAND (rhs, 0);
  if (TREE_CODE (rhs) != INDIRECT_REF
      || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
    return false;

  stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
  if (stmt == NULL
      || !is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
    return false;

  base = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (base) == SSA_NAME)
    {
      base_stmt = va_list_skip_additions (base);
      if (base_stmt
          && is_gimple_assign (base_stmt)
          && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
        base = gimple_assign_rhs1 (base_stmt);
    }

  if (TREE_CODE (base) != COMPONENT_REF
      || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
    {
      base = gimple_assign_rhs2 (stmt);
      if (TREE_CODE (base) == SSA_NAME)
        {
          base_stmt = va_list_skip_additions (base);
          if (base_stmt
              && is_gimple_assign (base_stmt)
              && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
            base = gimple_assign_rhs1 (base_stmt);
        }

      if (TREE_CODE (base) != COMPONENT_REF
          || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
        return false;

      offset_arg = 0;
    }

  base = get_base_address (base);
  if (TREE_CODE (base) != VAR_DECL
      || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
    return false;

  offset = gimple_op (stmt, 1 + offset_arg);
  if (TREE_CODE (offset) == SSA_NAME)
    {
      gimple offset_stmt = va_list_skip_additions (offset);

      if (offset_stmt
          && gimple_code (offset_stmt) == GIMPLE_PHI)
        {
          HOST_WIDE_INT sub;
          gimple arg1_stmt, arg2_stmt;
          tree arg1, arg2;
          enum tree_code code1, code2;

          if (gimple_phi_num_args (offset_stmt) != 2)
            goto escapes;

          arg1_stmt
            = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
          arg2_stmt
            = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
          if (arg1_stmt == NULL
              || !is_gimple_assign (arg1_stmt)
              || arg2_stmt == NULL
              || !is_gimple_assign (arg2_stmt))
            goto escapes;

          code1 = gimple_assign_rhs_code (arg1_stmt);
          code2 = gimple_assign_rhs_code (arg2_stmt);
          if (code1 == COMPONENT_REF
              && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
            /* Do nothing.  */;
          else if (code2 == COMPONENT_REF
                   && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
            {
              gimple tem = arg1_stmt;
              code2 = code1;
              arg1_stmt = arg2_stmt;
              arg2_stmt = tem;
            }
          else
            goto escapes;

          if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
            goto escapes;

          sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
          if (code2 == MINUS_EXPR)
            sub = -sub;
          if (sub < -48 || sub > -32)
            goto escapes;

          arg1 = gimple_assign_rhs1 (arg1_stmt);
          arg2 = gimple_assign_rhs1 (arg2_stmt);
          if (TREE_CODE (arg2) == SSA_NAME)
            {
              arg2_stmt = va_list_skip_additions (arg2);
              if (arg2_stmt == NULL
                  || !is_gimple_assign (arg2_stmt)
                  || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
                goto escapes;
              arg2 = gimple_assign_rhs1 (arg2_stmt);
            }
          if (arg1 != arg2)
            goto escapes;

          if (TREE_CODE (arg1) != COMPONENT_REF
              || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
              || get_base_address (arg1) != base)
            goto escapes;

          /* Need floating point regs.  */
          cfun->va_list_fpr_size |= 2;
          return false;
        }
      if (offset_stmt
          && is_gimple_assign (offset_stmt)
          && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
        offset = gimple_assign_rhs1 (offset_stmt);
    }
  if (TREE_CODE (offset) != COMPONENT_REF
      || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
      || get_base_address (offset) != base)
    goto escapes;
  else
    /* Need general regs.  */
    cfun->va_list_fpr_size |= 1;
  return false;

escapes:
  si->va_list_escapes = true;
  return false;
}
#endif
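
/* For reference, a sketch of the source-level shapes being matched
   (not part of this file; compare alpha_gimplify_va_arg_1 below):

     x = va_arg (ap, int)     gimplifies to roughly
         x = *(int *) (ap.__base + ap.__offset);              // bit 0

     d = va_arg (ap, double)  produces the PHI recognized above:
         tmp = ap.__offset < 48 ? ap.__offset - 48 : ap.__offset;
         d = *(double *) (ap.__base + tmp);                   // bit 1

   so prologues can omit whichever register save area goes unused.  */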

/* Perform any actions needed for a function that is receiving a
   variable number of arguments.  */

static void
alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
                              tree type, int *pretend_size, int no_rtl)
{
  CUMULATIVE_ARGS cum = *pcum;

  /* Skip the current argument.  */
  FUNCTION_ARG_ADVANCE (cum, mode, type, 1);

#if TARGET_ABI_UNICOSMK
  /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
     arguments on the stack.  Unfortunately, it doesn't always store the first
     one (i.e. the one that arrives in $16 or $f16).  This is not a problem
     with stdargs as we always have at least one named argument there.  */
  if (cum.num_reg_words < 6)
    {
      if (!no_rtl)
        {
          emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
          emit_insn (gen_arg_home_umk ());
        }
      *pretend_size = 0;
    }
#elif TARGET_ABI_OPEN_VMS
  /* For VMS, we allocate space for all 6 arg registers plus a count.

     However, if NO registers need to be saved, don't allocate any space.
     This is not only because we won't need the space, but because AP
     includes the current_pretend_args_size and we don't want to mess up
     any ap-relative addresses already made.  */
  if (cum.num_args < 6)
    {
      if (!no_rtl)
        {
          emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
          emit_insn (gen_arg_home ());
        }
      *pretend_size = 7 * UNITS_PER_WORD;
    }
#else
  /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
     only push those that are remaining.  However, if NO registers need to
     be saved, don't allocate any space.  This is not only because we won't
     need the space, but because AP includes the current_pretend_args_size
     and we don't want to mess up any ap-relative addresses already made.

     If we are not to use the floating-point registers, save the integer
     registers where we would put the floating-point registers.  This is
     not the most efficient way to implement varargs with just one register
     class, but it isn't worth doing anything more efficient in this rare
     case.  */
  if (cum >= 6)
    return;

  if (!no_rtl)
    {
      int count;
      alias_set_type set = get_varargs_alias_set ();
      rtx tmp;

      count = cfun->va_list_gpr_size / UNITS_PER_WORD;
      if (count > 6 - cum)
        count = 6 - cum;

      /* Detect whether integer registers or floating-point registers
         are needed by the detected va_arg statements.  See above for
         how these values are computed.  Note that the "escape" value
         is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
         these bits set.  */
      gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);

      if (cfun->va_list_fpr_size & 1)
        {
          tmp = gen_rtx_MEM (BLKmode,
                             plus_constant (virtual_incoming_args_rtx,
                                            (cum + 6) * UNITS_PER_WORD));
          MEM_NOTRAP_P (tmp) = 1;
          set_mem_alias_set (tmp, set);
          move_block_from_reg (16 + cum, tmp, count);
        }

      if (cfun->va_list_fpr_size & 2)
        {
          tmp = gen_rtx_MEM (BLKmode,
                             plus_constant (virtual_incoming_args_rtx,
                                            cum * UNITS_PER_WORD));
          MEM_NOTRAP_P (tmp) = 1;
          set_mem_alias_set (tmp, set);
          move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
        }
    }
  *pretend_size = 12 * UNITS_PER_WORD;
#endif
}
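
/* The resulting save-area layout on OSF, relative to the incoming
   virtual args pointer AP, for a function whose named arguments occupy
   CUM register slots (a sketch, not part of this file):

     AP + CUM*8     ... AP + 47 : FP arg registers, if bit 1 was set
     AP + (CUM+6)*8 ... AP + 95 : integer arg registers, if bit 0 was set
     AP + 96        ...         : caller-pushed stack arguments          */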

static void
alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT offset;
  tree t, offset_field, base_field;

  if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
    return;

  if (TARGET_ABI_UNICOSMK)
    std_expand_builtin_va_start (valist, nextarg);

  /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
     up by 48, storing fp arg registers in the first 48 bytes, and the
     integer arg registers in the next 48 bytes.  This is only done,
     however, if any integer registers need to be stored.

     If no integer registers need be stored, then we must subtract 48
     in order to account for the integer arg registers which are counted
     in argsize above, but which are not actually stored on the stack.
     Must further be careful here about structures straddling the last
     integer argument register; that futzes with pretend_args_size,
     which changes the meaning of AP.  */

  if (NUM_ARGS < 6)
    offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
  else
    offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;

  if (TARGET_ABI_OPEN_VMS)
    {
      t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
      t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
                  size_int (offset + NUM_ARGS * UNITS_PER_WORD));
      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
  else
    {
      base_field = TYPE_FIELDS (TREE_TYPE (valist));
      offset_field = TREE_CHAIN (base_field);

      base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
                           valist, base_field, NULL_TREE);
      offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
                             valist, offset_field, NULL_TREE);

      t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
      t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
                  size_int (offset));
      t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
      t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
}

static tree
alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
                         gimple_seq *pre_p)
{
  tree type_size, ptr_type, addend, t, addr;
  gimple_seq internal_post;

  /* If the type could not be passed in registers, skip the block
     reserved for the registers.  */
  if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
    {
      t = build_int_cst (TREE_TYPE (offset), 6*8);
      gimplify_assign (offset,
                       build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
                       pre_p);
    }

  addend = offset;
  ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);

  if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      tree real_part, imag_part, real_temp;

      real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
                                           offset, pre_p);

      /* Copy the value into a new temporary, lest the formal temporary
         be reused out from under us.  */
      real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);

      imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
                                           offset, pre_p);

      return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
      tree fpaddend, cond, fourtyeight;

      fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
      fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
                              addend, fourtyeight);
      cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
      addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
                            fpaddend, addend);
    }

  /* Build the final address and force that value into a temporary.  */
  addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
                 fold_convert (sizetype, addend));
  internal_post = NULL;
  gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
  gimple_seq_add_seq (pre_p, internal_post);

  /* Update the offset field.  */
  type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
  if (type_size == NULL || TREE_OVERFLOW (type_size))
    t = size_zero_node;
  else
    {
      t = size_binop (PLUS_EXPR, type_size, size_int (7));
      t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
      t = size_binop (MULT_EXPR, t, size_int (8));
    }
  t = fold_convert (TREE_TYPE (offset), t);
  gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
                   pre_p);

  return build_va_arg_indirect_ref (addr);
}

static tree
alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
                       gimple_seq *post_p)
{
  tree offset_field, base_field, offset, base, t, r;
  bool indirect;

  if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
    return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);

  base_field = TYPE_FIELDS (va_list_type_node);
  offset_field = TREE_CHAIN (base_field);
  base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
                       valist, base_field, NULL_TREE);
  offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
                         valist, offset_field, NULL_TREE);

  /* Pull the fields of the structure out into temporaries.  Since we never
     modify the base field, we can use a formal temporary.  Sign-extend the
     offset field so that it's the proper width for pointer arithmetic.  */
  base = get_formal_tmp_var (base_field, pre_p);

  t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
  offset = get_initialized_tmp_var (t, pre_p, NULL);

  indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
  if (indirect)
    type = build_pointer_type_for_mode (type, ptr_mode, true);

  /* Find the value.  Note that this will be a stable indirection, or
     a composite of stable indirections in the case of complex.  */
  r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);

  /* Stuff the offset temporary back into its field.  */
  gimplify_assign (unshare_expr (offset_field),
                   fold_convert (TREE_TYPE (offset_field), offset), pre_p);

  if (indirect)
    r = build_va_arg_indirect_ref (r);

  return r;
}
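
/* A rough C equivalent (a sketch, not part of this file) of the
   sequence built above for fetching one double from a va_list: FP
   register slots sit 48 bytes below the integer slots, so register-area
   reads (offset < 48) are redirected downward, and the offset always
   advances in 8-byte units.  */

static double
sketch_va_arg_double (char *base, long *offset)
{
  long addend = *offset < 48 ? *offset - 48 : *offset;
  double r = *(double *) (base + addend);

  *offset += 8;  /* sizes are rounded up to a multiple of 8 */
  return r;
}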

/* Builtins.  */

enum alpha_builtin
{
  ALPHA_BUILTIN_CMPBGE,
  ALPHA_BUILTIN_EXTBL,
  ALPHA_BUILTIN_EXTWL,
  ALPHA_BUILTIN_EXTLL,
  ALPHA_BUILTIN_EXTQL,
  ALPHA_BUILTIN_EXTWH,
  ALPHA_BUILTIN_EXTLH,
  ALPHA_BUILTIN_EXTQH,
  ALPHA_BUILTIN_INSBL,
  ALPHA_BUILTIN_INSWL,
  ALPHA_BUILTIN_INSLL,
  ALPHA_BUILTIN_INSQL,
  ALPHA_BUILTIN_INSWH,
  ALPHA_BUILTIN_INSLH,
  ALPHA_BUILTIN_INSQH,
  ALPHA_BUILTIN_MSKBL,
  ALPHA_BUILTIN_MSKWL,
  ALPHA_BUILTIN_MSKLL,
  ALPHA_BUILTIN_MSKQL,
  ALPHA_BUILTIN_MSKWH,
  ALPHA_BUILTIN_MSKLH,
  ALPHA_BUILTIN_MSKQH,
  ALPHA_BUILTIN_UMULH,
  ALPHA_BUILTIN_ZAP,
  ALPHA_BUILTIN_ZAPNOT,
  ALPHA_BUILTIN_AMASK,
  ALPHA_BUILTIN_IMPLVER,
  ALPHA_BUILTIN_RPCC,
  ALPHA_BUILTIN_THREAD_POINTER,
  ALPHA_BUILTIN_SET_THREAD_POINTER,
  ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
  ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,

  /* TARGET_MAX */
  ALPHA_BUILTIN_MINUB8,
  ALPHA_BUILTIN_MINSB8,
  ALPHA_BUILTIN_MINUW4,
  ALPHA_BUILTIN_MINSW4,
  ALPHA_BUILTIN_MAXUB8,
  ALPHA_BUILTIN_MAXSB8,
  ALPHA_BUILTIN_MAXUW4,
  ALPHA_BUILTIN_MAXSW4,
  ALPHA_BUILTIN_PERR,
  ALPHA_BUILTIN_PKLB,
  ALPHA_BUILTIN_PKWB,
  ALPHA_BUILTIN_UNPKBL,
  ALPHA_BUILTIN_UNPKBW,

  /* TARGET_CIX */
  ALPHA_BUILTIN_CTTZ,
  ALPHA_BUILTIN_CTLZ,
  ALPHA_BUILTIN_CTPOP,

  ALPHA_BUILTIN_max
};

static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
  CODE_FOR_builtin_cmpbge,
  CODE_FOR_builtin_extbl,
  CODE_FOR_builtin_extwl,
  CODE_FOR_builtin_extll,
  CODE_FOR_builtin_extql,
  CODE_FOR_builtin_extwh,
  CODE_FOR_builtin_extlh,
  CODE_FOR_builtin_extqh,
  CODE_FOR_builtin_insbl,
  CODE_FOR_builtin_inswl,
  CODE_FOR_builtin_insll,
  CODE_FOR_builtin_insql,
  CODE_FOR_builtin_inswh,
  CODE_FOR_builtin_inslh,
  CODE_FOR_builtin_insqh,
  CODE_FOR_builtin_mskbl,
  CODE_FOR_builtin_mskwl,
  CODE_FOR_builtin_mskll,
  CODE_FOR_builtin_mskql,
  CODE_FOR_builtin_mskwh,
  CODE_FOR_builtin_msklh,
  CODE_FOR_builtin_mskqh,
  CODE_FOR_umuldi3_highpart,
  CODE_FOR_builtin_zap,
  CODE_FOR_builtin_zapnot,
  CODE_FOR_builtin_amask,
  CODE_FOR_builtin_implver,
  CODE_FOR_builtin_rpcc,
  CODE_FOR_load_tp,
  CODE_FOR_set_tp,
  CODE_FOR_builtin_establish_vms_condition_handler,
  CODE_FOR_builtin_revert_vms_condition_handler,

  /* TARGET_MAX */
  CODE_FOR_builtin_minub8,
  CODE_FOR_builtin_minsb8,
  CODE_FOR_builtin_minuw4,
  CODE_FOR_builtin_minsw4,
  CODE_FOR_builtin_maxub8,
  CODE_FOR_builtin_maxsb8,
  CODE_FOR_builtin_maxuw4,
  CODE_FOR_builtin_maxsw4,
  CODE_FOR_builtin_perr,
  CODE_FOR_builtin_pklb,
  CODE_FOR_builtin_pkwb,
  CODE_FOR_builtin_unpkbl,
  CODE_FOR_builtin_unpkbw,

  /* TARGET_CIX */
  CODE_FOR_ctzdi2,
  CODE_FOR_clzdi2,
  CODE_FOR_popcountdi2
};
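
/* A usage sketch (not part of this file) of two of the builtins above;
   each expands to a single Alpha instruction.  The unsigned long
   argument and return types are an assumption of the sketch.  */

unsigned long
zero_extend_low32_sketch (unsigned long x)
{
  /* zapnot with mask 0x0f keeps bytes 0-3 and clears bytes 4-7.  */
  return __builtin_alpha_zapnot (x, 0x0f);
}

unsigned long
umulh_sketch (unsigned long a, unsigned long b)
{
  /* High 64 bits of the unsigned 128-bit product a * b.  */
  return __builtin_alpha_umulh (a, b);
}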

struct alpha_builtin_def
{
  const char *name;
  enum alpha_builtin code;
  unsigned int target_mask;
  bool is_const;
};

static struct alpha_builtin_def const zero_arg_builtins[] = {
  { "__builtin_alpha_implver",  ALPHA_BUILTIN_IMPLVER,  0, true },
  { "__builtin_alpha_rpcc",     ALPHA_BUILTIN_RPCC,     0, false }
};

static struct alpha_builtin_def const one_arg_builtins[] = {
  { "__builtin_alpha_amask",    ALPHA_BUILTIN_AMASK,    0, true },
  { "__builtin_alpha_pklb",     ALPHA_BUILTIN_PKLB,     MASK_MAX, true },
  { "__builtin_alpha_pkwb",     ALPHA_BUILTIN_PKWB,     MASK_MAX, true },
  { "__builtin_alpha_unpkbl",   ALPHA_BUILTIN_UNPKBL,   MASK_MAX, true },
  { "__builtin_alpha_unpkbw",   ALPHA_BUILTIN_UNPKBW,   MASK_MAX, true },
  { "__builtin_alpha_cttz",     ALPHA_BUILTIN_CTTZ,     MASK_CIX, true },
  { "__builtin_alpha_ctlz",     ALPHA_BUILTIN_CTLZ,     MASK_CIX, true },
  { "__builtin_alpha_ctpop",    ALPHA_BUILTIN_CTPOP,    MASK_CIX, true }
};

static struct alpha_builtin_def const two_arg_builtins[] = {
  { "__builtin_alpha_cmpbge",   ALPHA_BUILTIN_CMPBGE,   0, true },
  { "__builtin_alpha_extbl",    ALPHA_BUILTIN_EXTBL,    0, true },
  { "__builtin_alpha_extwl",    ALPHA_BUILTIN_EXTWL,    0, true },
  { "__builtin_alpha_extll",    ALPHA_BUILTIN_EXTLL,    0, true },
  { "__builtin_alpha_extql",    ALPHA_BUILTIN_EXTQL,    0, true },
  { "__builtin_alpha_extwh",    ALPHA_BUILTIN_EXTWH,    0, true },
  { "__builtin_alpha_extlh",    ALPHA_BUILTIN_EXTLH,    0, true },
  { "__builtin_alpha_extqh",    ALPHA_BUILTIN_EXTQH,    0, true },
  { "__builtin_alpha_insbl",    ALPHA_BUILTIN_INSBL,    0, true },
  { "__builtin_alpha_inswl",    ALPHA_BUILTIN_INSWL,    0, true },
  { "__builtin_alpha_insll",    ALPHA_BUILTIN_INSLL,    0, true },
  { "__builtin_alpha_insql",    ALPHA_BUILTIN_INSQL,    0, true },
  { "__builtin_alpha_inswh",    ALPHA_BUILTIN_INSWH,    0, true },
  { "__builtin_alpha_inslh",    ALPHA_BUILTIN_INSLH,    0, true },
  { "__builtin_alpha_insqh",    ALPHA_BUILTIN_INSQH,    0, true },
  { "__builtin_alpha_mskbl",    ALPHA_BUILTIN_MSKBL,    0, true },
  { "__builtin_alpha_mskwl",    ALPHA_BUILTIN_MSKWL,    0, true },
  { "__builtin_alpha_mskll",    ALPHA_BUILTIN_MSKLL,    0, true },
  { "__builtin_alpha_mskql",    ALPHA_BUILTIN_MSKQL,    0, true },
  { "__builtin_alpha_mskwh",    ALPHA_BUILTIN_MSKWH,    0, true },
  { "__builtin_alpha_msklh",    ALPHA_BUILTIN_MSKLH,    0, true },
  { "__builtin_alpha_mskqh",    ALPHA_BUILTIN_MSKQH,    0, true },
  { "__builtin_alpha_umulh",    ALPHA_BUILTIN_UMULH,    0, true },
  { "__builtin_alpha_zap",      ALPHA_BUILTIN_ZAP,      0, true },
  { "__builtin_alpha_zapnot",   ALPHA_BUILTIN_ZAPNOT,   0, true },
  { "__builtin_alpha_minub8",   ALPHA_BUILTIN_MINUB8,   MASK_MAX, true },
  { "__builtin_alpha_minsb8",   ALPHA_BUILTIN_MINSB8,   MASK_MAX, true },
  { "__builtin_alpha_minuw4",   ALPHA_BUILTIN_MINUW4,   MASK_MAX, true },
  { "__builtin_alpha_minsw4",   ALPHA_BUILTIN_MINSW4,   MASK_MAX, true },
  { "__builtin_alpha_maxub8",   ALPHA_BUILTIN_MAXUB8,   MASK_MAX, true },
  { "__builtin_alpha_maxsb8",   ALPHA_BUILTIN_MAXSB8,   MASK_MAX, true },
  { "__builtin_alpha_maxuw4",   ALPHA_BUILTIN_MAXUW4,   MASK_MAX, true },
  { "__builtin_alpha_maxsw4",   ALPHA_BUILTIN_MAXSW4,   MASK_MAX, true },
  { "__builtin_alpha_perr",     ALPHA_BUILTIN_PERR,     MASK_MAX, true }
};
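
/* For illustration, user code reaches these tables with calls such as
   __builtin_alpha_zapnot (x, 0x0f) (keep bytes 0-3 of a hypothetical X)
   or __builtin_alpha_cmpbge (x, y) (bytewise unsigned comparison).
   Entries with a MASK_MAX or MASK_CIX target_mask are only registered
   when the corresponding -mmax or -mcix flag is in effect; see
   alpha_add_builtins below.  */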

static GTY(()) tree alpha_v8qi_u;
static GTY(()) tree alpha_v8qi_s;
static GTY(()) tree alpha_v4hi_u;
static GTY(()) tree alpha_v4hi_s;

/* Helper function of alpha_init_builtins.  Add the COUNT built-in
   functions pointed to by P, with function type FTYPE.  */

static void
alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
                    tree ftype)
{
  tree decl;
  size_t i;

  for (i = 0; i < count; ++i, ++p)
    if ((target_flags & p->target_mask) == p->target_mask)
      {
        decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
                                     NULL, NULL);
        if (p->is_const)
          TREE_READONLY (decl) = 1;
        TREE_NOTHROW (decl) = 1;
      }
}


static void
alpha_init_builtins (void)
{
  tree dimode_integer_type_node;
  tree ftype, decl;

  dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);

  /* Fwrite on VMS is non-standard.  */
#if TARGET_ABI_OPEN_VMS
  implicit_built_in_decls[(int) BUILT_IN_FWRITE] = NULL_TREE;
  implicit_built_in_decls[(int) BUILT_IN_FWRITE_UNLOCKED] = NULL_TREE;
#endif

  ftype = build_function_type (dimode_integer_type_node, void_list_node);
  alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
                      ftype);

  ftype = build_function_type_list (dimode_integer_type_node,
                                    dimode_integer_type_node, NULL_TREE);
  alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
                      ftype);

  ftype = build_function_type_list (dimode_integer_type_node,
                                    dimode_integer_type_node,
                                    dimode_integer_type_node, NULL_TREE);
  alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
                      ftype);

  ftype = build_function_type (ptr_type_node, void_list_node);
  decl = add_builtin_function ("__builtin_thread_pointer", ftype,
                               ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
                               NULL, NULL);
  TREE_NOTHROW (decl) = 1;

  ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
  decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
                               ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
                               NULL, NULL);
  TREE_NOTHROW (decl) = 1;

  if (TARGET_ABI_OPEN_VMS)
    {
      ftype = build_function_type_list (ptr_type_node, ptr_type_node,
                                        NULL_TREE);
      add_builtin_function ("__builtin_establish_vms_condition_handler", ftype,
                            ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
                            BUILT_IN_MD, NULL, NULL_TREE);

      ftype = build_function_type_list (ptr_type_node, void_type_node,
                                        NULL_TREE);
      add_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
                            ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
                            BUILT_IN_MD, NULL, NULL_TREE);
    }

  alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
  alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
  alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
  alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
}

/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
alpha_expand_builtin (tree exp, rtx target,
                      rtx subtarget ATTRIBUTE_UNUSED,
                      enum machine_mode mode ATTRIBUTE_UNUSED,
                      int ignore ATTRIBUTE_UNUSED)
{
#define MAX_ARGS 2

  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg;
  call_expr_arg_iterator iter;
  enum insn_code icode;
  rtx op[MAX_ARGS], pat;
  int arity;
  bool nonvoid;

  if (fcode >= ALPHA_BUILTIN_max)
    internal_error ("bad builtin fcode");
  icode = code_for_builtin[fcode];
  if (icode == 0)
    internal_error ("bad builtin fcode");

  nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;

  arity = 0;
  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
    {
      const struct insn_operand_data *insn_op;

      if (arg == error_mark_node)
        return NULL_RTX;
      if (arity >= MAX_ARGS)
        return NULL_RTX;

      insn_op = &insn_data[icode].operand[arity + nonvoid];

      op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);

      if (!(*insn_op->predicate) (op[arity], insn_op->mode))
        op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
      arity++;
    }

  if (nonvoid)
    {
      enum machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
          || GET_MODE (target) != tmode
          || !(*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);
    }

  switch (arity)
    {
    case 0:
      pat = GEN_FCN (icode) (target);
      break;
    case 1:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0]);
      else
        pat = GEN_FCN (icode) (op[0]);
      break;
    case 2:
      pat = GEN_FCN (icode) (target, op[0], op[1]);
      break;
    default:
      gcc_unreachable ();
    }
  if (!pat)
    return NULL_RTX;
  emit_insn (pat);

  if (nonvoid)
    return target;
  else
    return const0_rtx;
}


/* Several bits below assume HWI >= 64 bits.  This should be enforced
   by config.gcc.  */
#if HOST_BITS_PER_WIDE_INT < 64
# error "HOST_WIDE_INT too small"
#endif

/* Fold the builtin for the CMPBGE instruction.  This is a vector comparison
   with an 8-bit output vector.  OPINT contains the integer operands; bit N
   of OP_CONST is set if OPINT[N] is valid.  */

static tree
alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
{
  if (op_const == 3)
    {
      int i, val;
      for (i = 0, val = 0; i < 8; ++i)
        {
          unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
          unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
          if (c0 >= c1)
            val |= 1 << i;
        }
      return build_int_cst (long_integer_type_node, val);
    }
  else if (op_const == 2 && opint[1] == 0)
    return build_int_cst (long_integer_type_node, 0xff);
  return NULL;
}
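
/* For example, __builtin_alpha_cmpbge (x, x) with X constant folds to
   0xff, since every byte compares greater than or equal to itself.  */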

/* Fold the builtin for the ZAPNOT instruction.  This is essentially a
   specialized form of an AND operation.  Other byte manipulation instructions
   are defined in terms of this instruction, so this is also used as a
   subroutine for other builtins.

   OP contains the tree operands; OPINT contains the extracted integer values.
   Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
   OPINT may be considered.  */

static tree
alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
                           long op_const)
{
  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT mask = 0;
      int i;

      for (i = 0; i < 8; ++i)
        if ((opint[1] >> i) & 1)
          mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);

      if (op_const & 1)
        return build_int_cst (long_integer_type_node, opint[0] & mask);

      if (op)
        return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
                            build_int_cst (long_integer_type_node, mask));
    }
  else if ((op_const & 1) && opint[0] == 0)
    return build_int_cst (long_integer_type_node, 0);
  return NULL;
}
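
/* For example, with a constant mask this reduces to a plain AND:
   __builtin_alpha_zapnot (x, 0x0f) folds to x & 0xffffffff, while
   __builtin_alpha_zap (x, 0x0f) (whose mask is complemented by the
   caller) folds to x & 0xffffffff00000000.  */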

/* Fold the builtins for the EXT family of instructions.  */

static tree
alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
                          bool is_high)
{
  long zap_const = 2;
  tree *zap_op = NULL;

  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT loc;

      loc = opint[1] & 7;
      if (BYTES_BIG_ENDIAN)
        loc ^= 7;
      loc *= 8;

      if (loc != 0)
        {
          if (op_const & 1)
            {
              unsigned HOST_WIDE_INT temp = opint[0];
              if (is_high)
                temp <<= loc;
              else
                temp >>= loc;
              opint[0] = temp;
              zap_const = 3;
            }
        }
      else
        zap_op = op;
    }

  opint[1] = bytemask;
  return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
}

/* Fold the builtins for the INS family of instructions.  */

static tree
alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
                          bool is_high)
{
  if ((op_const & 1) && opint[0] == 0)
    return build_int_cst (long_integer_type_node, 0);

  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT temp, loc, byteloc;
      tree *zap_op = NULL;

      loc = opint[1] & 7;
      if (BYTES_BIG_ENDIAN)
        loc ^= 7;
      bytemask <<= loc;

      temp = opint[0];
      if (is_high)
        {
          byteloc = (64 - (loc * 8)) & 0x3f;
          if (byteloc == 0)
            zap_op = op;
          else
            temp >>= byteloc;
          bytemask >>= 8;
        }
      else
        {
          byteloc = loc * 8;
          if (byteloc == 0)
            zap_op = op;
          else
            temp <<= byteloc;
        }

      opint[0] = temp;
      opint[1] = bytemask;
      return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
    }

  return NULL;
}

/* Fold the builtins for the MSK family of instructions.  */

static tree
alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
                          bool is_high)
{
  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT loc;

      loc = opint[1] & 7;
      if (BYTES_BIG_ENDIAN)
        loc ^= 7;
      bytemask <<= loc;

      if (is_high)
        bytemask >>= 8;

      opint[1] = bytemask ^ 0xff;
    }

  return alpha_fold_builtin_zapnot (op, opint, op_const);
}

/* Fold the builtin for the UMULH instruction.  */

static tree
alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
{
  switch (op_const)
    {
    case 3:
      {
        unsigned HOST_WIDE_INT l;
        HOST_WIDE_INT h;

        mul_double (opint[0], 0, opint[1], 0, &l, &h);

#if HOST_BITS_PER_WIDE_INT > 64
# error fixme
#endif

        return build_int_cst (long_integer_type_node, h);
      }

    case 1:
      opint[1] = opint[0];
      /* FALLTHRU */
    case 2:
      /* Note that (X*1) >> 64 == 0.  */
      if (opint[1] == 0 || opint[1] == 1)
        return build_int_cst (long_integer_type_node, 0);
      break;
    }
  return NULL;
}
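
/* For example, __builtin_alpha_umulh (1UL << 32, 1UL << 32) folds to 1,
   since the full 128-bit product is 2^64.  */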

static tree
alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
{
  tree op0 = fold_convert (vtype, op[0]);
  tree op1 = fold_convert (vtype, op[1]);
  tree val = fold_build2 (code, vtype, op0, op1);
  return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
}

static tree
alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp = 0;
  int i;

  if (op_const != 3)
    return NULL;

  for (i = 0; i < 8; ++i)
    {
      unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
      unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
      if (a >= b)
        temp += a - b;
      else
        temp += b - a;
    }

  return build_int_cst (long_integer_type_node, temp);
}

static tree
alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] >> 24) & 0xff00;

  return build_int_cst (long_integer_type_node, temp);
}

static tree
alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] >>  8) & 0xff00;
  temp |= (opint[0] >> 16) & 0xff0000;
  temp |= (opint[0] >> 24) & 0xff000000;

  return build_int_cst (long_integer_type_node, temp);
}

static tree
alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] & 0xff00) << 24;

  return build_int_cst (long_integer_type_node, temp);
}

static tree
alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] & 0x0000ff00) << 8;
  temp |= (opint[0] & 0x00ff0000) << 16;
  temp |= (opint[0] & 0xff000000) << 24;

  return build_int_cst (long_integer_type_node, temp);
}

static tree
alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  if (opint[0] == 0)
    temp = 64;
  else
    temp = exact_log2 (opint[0] & -opint[0]);

  return build_int_cst (long_integer_type_node, temp);
}

static tree
alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  if (opint[0] == 0)
    temp = 64;
  else
    temp = 64 - floor_log2 (opint[0]) - 1;

  return build_int_cst (long_integer_type_node, temp);
}

static tree
alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp, op;

  if (op_const == 0)
    return NULL;

  op = opint[0];
  temp = 0;
  while (op)
    temp++, op &= op - 1;

  return build_int_cst (long_integer_type_node, temp);
}
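
/* E.g. __builtin_alpha_cttz (0x80) folds to 7, __builtin_alpha_ctlz (0x80)
   to 56, and __builtin_alpha_ctpop (0xff00ff00ff00ff00) to 32.  */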

/* Fold one of our builtin functions.  */

static tree
alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
{
  tree op[MAX_ARGS], t;
  unsigned HOST_WIDE_INT opint[MAX_ARGS];
  long op_const = 0, arity = 0;

  for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
    {
      tree arg = TREE_VALUE (t);
      if (arg == error_mark_node)
        return NULL;
      if (arity >= MAX_ARGS)
        return NULL;

      op[arity] = arg;
      opint[arity] = 0;
      if (TREE_CODE (arg) == INTEGER_CST)
        {
          op_const |= 1L << arity;
          opint[arity] = int_cst_value (arg);
        }
    }

  switch (DECL_FUNCTION_CODE (fndecl))
    {
    case ALPHA_BUILTIN_CMPBGE:
      return alpha_fold_builtin_cmpbge (opint, op_const);

    case ALPHA_BUILTIN_EXTBL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_EXTWL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_EXTLL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_EXTQL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_EXTWH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_EXTLH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_EXTQH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_INSBL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_INSWL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_INSLL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_INSQL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_INSWH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_INSLH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_INSQH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_MSKBL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_MSKWL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_MSKLL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_MSKQL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_MSKWH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_MSKLH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_MSKQH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_UMULH:
      return alpha_fold_builtin_umulh (opint, op_const);

    case ALPHA_BUILTIN_ZAP:
      opint[1] ^= 0xff;
      /* FALLTHRU */
    case ALPHA_BUILTIN_ZAPNOT:
      return alpha_fold_builtin_zapnot (op, opint, op_const);

    case ALPHA_BUILTIN_MINUB8:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
    case ALPHA_BUILTIN_MINSB8:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
    case ALPHA_BUILTIN_MINUW4:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
    case ALPHA_BUILTIN_MINSW4:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
    case ALPHA_BUILTIN_MAXUB8:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
    case ALPHA_BUILTIN_MAXSB8:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
    case ALPHA_BUILTIN_MAXUW4:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
    case ALPHA_BUILTIN_MAXSW4:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);

    case ALPHA_BUILTIN_PERR:
      return alpha_fold_builtin_perr (opint, op_const);
    case ALPHA_BUILTIN_PKLB:
      return alpha_fold_builtin_pklb (opint, op_const);
    case ALPHA_BUILTIN_PKWB:
      return alpha_fold_builtin_pkwb (opint, op_const);
    case ALPHA_BUILTIN_UNPKBL:
      return alpha_fold_builtin_unpkbl (opint, op_const);
    case ALPHA_BUILTIN_UNPKBW:
      return alpha_fold_builtin_unpkbw (opint, op_const);

    case ALPHA_BUILTIN_CTTZ:
      return alpha_fold_builtin_cttz (opint, op_const);
    case ALPHA_BUILTIN_CTLZ:
      return alpha_fold_builtin_ctlz (opint, op_const);
    case ALPHA_BUILTIN_CTPOP:
      return alpha_fold_builtin_ctpop (opint, op_const);

    case ALPHA_BUILTIN_AMASK:
    case ALPHA_BUILTIN_IMPLVER:
    case ALPHA_BUILTIN_RPCC:
    case ALPHA_BUILTIN_THREAD_POINTER:
    case ALPHA_BUILTIN_SET_THREAD_POINTER:
      /* None of these are foldable at compile-time.  */
    default:
      return NULL;
    }
}

/* This page contains routines that are used to determine what the function
   prologue and epilogue code will do and write them out.  */

/* Compute the size of the save area in the stack.  */

/* These variables are used for communication between the following functions.
   They indicate various things about the current function being compiled
   that are used to tell what kind of prologue, epilogue and procedure
   descriptor to generate.  */

/* Nonzero if we need a stack procedure.  */
enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
static enum alpha_procedure_types alpha_procedure_type;

/* Register number (either FP or SP) that is used to unwind the frame.  */
static int vms_unwind_regno;

/* Register number used to save FP.  We need not have one for RA since
   we don't modify it for register procedures.  This is only defined
   for register frame procedures.  */
static int vms_save_fp_regno;

/* Register number used to reference objects off our PV.  */
static int vms_base_regno;

/* Compute register masks for saved registers.  */

static void
alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
{
  unsigned long imask = 0;
  unsigned long fmask = 0;
  unsigned int i;

  /* When outputting a thunk, we don't have valid register life info,
     but assemble_start_function wants to output .frame and .mask
     directives.  */
  if (cfun->is_thunk)
    {
      *imaskP = 0;
      *fmaskP = 0;
      return;
    }

  if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
    imask |= (1UL << HARD_FRAME_POINTER_REGNUM);

  /* One for every register we have to save.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (! fixed_regs[i] && ! call_used_regs[i]
        && df_regs_ever_live_p (i) && i != REG_RA
        && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
      {
        if (i < 32)
          imask |= (1UL << i);
        else
          fmask |= (1UL << (i - 32));
      }

  /* We need to restore these for the handler.  */
  if (crtl->calls_eh_return)
    {
      for (i = 0; ; ++i)
        {
          unsigned regno = EH_RETURN_DATA_REGNO (i);
          if (regno == INVALID_REGNUM)
            break;
          imask |= 1UL << regno;
        }
    }

  /* If any register spilled, then spill the return address also.  */
  /* ??? This is required by the Digital stack unwind specification
     and isn't needed if we're doing Dwarf2 unwinding.  */
  if (imask || fmask || alpha_ra_ever_killed ())
    imask |= (1UL << REG_RA);

  *imaskP = imask;
  *fmaskP = fmask;
}

int
alpha_sa_size (void)
{
  unsigned long mask[2];
  int sa_size = 0;
  int i, j;

  alpha_sa_mask (&mask[0], &mask[1]);

  if (TARGET_ABI_UNICOSMK)
    {
      if (mask[0] || mask[1])
        sa_size = 14;
    }
  else
    {
      for (j = 0; j < 2; ++j)
        for (i = 0; i < 32; ++i)
          if ((mask[j] >> i) & 1)
            sa_size++;
    }

  if (TARGET_ABI_UNICOSMK)
    {
      /* We might not need to generate a frame if we don't make any calls
         (including calls to __T3E_MISMATCH if this is a vararg function),
         don't have any local variables which require stack slots, don't
         use alloca and have not determined that we need a frame for other
         reasons.  */

      alpha_procedure_type
        = (sa_size || get_frame_size () != 0
           || crtl->outgoing_args_size
           || cfun->stdarg || cfun->calls_alloca
           || frame_pointer_needed)
          ? PT_STACK : PT_REGISTER;

      /* Always reserve space for saving callee-saved registers if we
         need a frame as required by the calling convention.  */
      if (alpha_procedure_type == PT_STACK)
        sa_size = 14;
    }
  else if (TARGET_ABI_OPEN_VMS)
    {
      /* Start with a stack procedure if we make any calls (REG_RA used), or
         need a frame pointer, with a register procedure if we otherwise need
         at least a slot, and with a null procedure in other cases.  */
      if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
        alpha_procedure_type = PT_STACK;
      else if (get_frame_size () != 0)
        alpha_procedure_type = PT_REGISTER;
      else
        alpha_procedure_type = PT_NULL;

      /* Don't reserve space for saving FP & RA yet.  Do that later after we've
         made the final decision on stack procedure vs register procedure.  */
      if (alpha_procedure_type == PT_STACK)
        sa_size -= 2;

      /* Decide whether to refer to objects off our PV via FP or PV.
         If we need FP for something else or if we receive a nonlocal
         goto (which expects PV to contain the value), we must use PV.
         Otherwise, start by assuming we can use FP.  */

      vms_base_regno
        = (frame_pointer_needed
           || cfun->has_nonlocal_label
           || alpha_procedure_type == PT_STACK
           || crtl->outgoing_args_size)
          ? REG_PV : HARD_FRAME_POINTER_REGNUM;

      /* If we want to copy PV into FP, we need to find some register
         in which to save FP.  */

      vms_save_fp_regno = -1;
      if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
        for (i = 0; i < 32; i++)
          if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
            vms_save_fp_regno = i;

      /* A VMS condition handler requires a stack procedure in our
         implementation (not required by the calling standard).  */
      if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
          || cfun->machine->uses_condition_handler)
        vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
      else if (alpha_procedure_type == PT_NULL)
        vms_base_regno = REG_PV;

      /* Stack unwinding should be done via FP unless we use it for PV.  */
      vms_unwind_regno = (vms_base_regno == REG_PV
                          ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);

      /* If this is a stack procedure, allow space for saving FP, RA and
         a condition handler slot if needed.  */
      if (alpha_procedure_type == PT_STACK)
        sa_size += 2 + cfun->machine->uses_condition_handler;
    }
  else
    {
      /* Our size must be even (multiple of 16 bytes).  */
      if (sa_size & 1)
        sa_size++;
    }

  return sa_size * 8;
}
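
/* For example, on OSF a function that saves $9, $10 and the return
   address needs three 8-byte slots, padded to four to keep the save
   area a multiple of 16 bytes, so alpha_sa_size returns 32.  */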

/* Define the offset between two registers, one to be eliminated,
   and the other its replacement, at the start of a routine.  */

HOST_WIDE_INT
alpha_initial_elimination_offset (unsigned int from,
                                  unsigned int to ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT ret;

  ret = alpha_sa_size ();
  ret += ALPHA_ROUND (crtl->outgoing_args_size);

  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      break;

    case ARG_POINTER_REGNUM:
      ret += (ALPHA_ROUND (get_frame_size ()
                           + crtl->args.pretend_args_size)
              - crtl->args.pretend_args_size);
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
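
/* Example (assuming no pretended args): with a 32-byte save area,
   16 bytes of outgoing args and a 48-byte local frame, eliminating
   ARG_POINTER_REGNUM yields 32 + 16 + 48 = 96.  */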

#if TARGET_ABI_OPEN_VMS

/* Worker function for TARGET_CAN_ELIMINATE.  */

static bool
alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  /* We need the alpha_procedure_type to decide.  Evaluate it now.  */
  alpha_sa_size ();

  switch (alpha_procedure_type)
    {
    case PT_NULL:
      /* NULL procedures have no frame of their own and we only
         know how to resolve from the current stack pointer.  */
      return to == STACK_POINTER_REGNUM;

    case PT_REGISTER:
    case PT_STACK:
      /* We always eliminate except to the stack pointer if there is no
         usable frame pointer at hand.  */
      return (to != STACK_POINTER_REGNUM
              || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
    }

  gcc_unreachable ();
}

/* FROM is to be eliminated for TO.  Return the offset so that TO+offset
   designates the same location as FROM.  */

HOST_WIDE_INT
alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
{
  /* The only possible attempts we ever expect are ARG or FRAME_PTR to
     HARD_FRAME or STACK_PTR.  We need the alpha_procedure_type to decide
     on the proper computations and will need the register save area size
     in most cases.  */

  HOST_WIDE_INT sa_size = alpha_sa_size ();

  /* PT_NULL procedures have no frame of their own and we only allow
     elimination to the stack pointer.  This is the argument pointer and we
     resolve the soft frame pointer to that as well.  */

  if (alpha_procedure_type == PT_NULL)
    return 0;

  /* For a PT_STACK procedure the frame layout looks as follows

                      -----> decreasing addresses

                   <             size rounded up to 16       |   likewise   >
     --------------#------------------------------+++--------------+++-------#
     incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
     --------------#---------------------------------------------------------#
                                   ^         ^              ^               ^
                              ARG_PTR FRAME_PTR HARD_FRAME_PTR       STACK_PTR


     PT_REGISTER procedures are similar in that they may have a frame of their
     own.  They have no regs-sa/pv/outgoing-args area.

     We first compute offset to HARD_FRAME_PTR, then add what we need to get
     to STACK_PTR if need be.  */

  {
    HOST_WIDE_INT offset;
    HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;

    switch (from)
      {
      case FRAME_POINTER_REGNUM:
        offset = ALPHA_ROUND (sa_size + pv_save_size);
        break;
      case ARG_POINTER_REGNUM:
        offset = (ALPHA_ROUND (sa_size + pv_save_size
                               + get_frame_size ()
                               + crtl->args.pretend_args_size)
                  - crtl->args.pretend_args_size);
        break;
      default:
        gcc_unreachable ();
      }

    if (to == STACK_POINTER_REGNUM)
      offset += ALPHA_ROUND (crtl->outgoing_args_size);

    return offset;
  }
}
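
/* Example: in a PT_STACK procedure with sa_size == 24, the soft frame
   pointer resolves to HARD_FRAME_PTR + 32: 24 bytes of register save
   area plus 8 for the PV, already a multiple of 16.  */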

#define COMMON_OBJECT "common_object"

static tree
common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
                       tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
                       bool *no_add_attrs ATTRIBUTE_UNUSED)
{
  tree decl = *node;
  gcc_assert (DECL_P (decl));

  DECL_COMMON (decl) = 1;
  return NULL_TREE;
}

static const struct attribute_spec vms_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { COMMON_OBJECT,   0, 1, true,  false, false, common_object_handler },
  { NULL,            0, 0, false, false, false, NULL }
};

void
vms_output_aligned_decl_common (FILE *file, tree decl, const char *name,
                                unsigned HOST_WIDE_INT size,
                                unsigned int align)
{
  tree attr = DECL_ATTRIBUTES (decl);
  fprintf (file, "%s", COMMON_ASM_OP);
  assemble_name (file, name);
  fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
  /* ??? Unlike on OSF/1, the alignment factor is not in log units.  */
  fprintf (file, ",%u", align / BITS_PER_UNIT);
  if (attr)
    {
      attr = lookup_attribute (COMMON_OBJECT, attr);
      if (attr)
        fprintf (file, ",%s",
                 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
    }
  fputc ('\n', file);
}
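
/* With the usual VMS definition of COMMON_ASM_OP this emits, e.g.,
   ".comm name,16,8" for a 16-byte object aligned to 64 bits, plus the
   common_object attribute operand when one is present.  */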

#undef COMMON_OBJECT

#endif

static int
find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
}

int
alpha_find_lo_sum_using_gp (rtx insn)
{
  return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
}

static int
alpha_does_function_need_gp (void)
{
  rtx insn;

  /* The GP being variable is an OSF abi thing.  */
  if (! TARGET_ABI_OSF)
    return 0;

  /* We need the gp to load the address of __mcount.  */
  if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
    return 1;

  /* The code emitted by alpha_output_mi_thunk_osf uses the gp.  */
  if (cfun->is_thunk)
    return 1;

  /* The nonlocal receiver pattern assumes that the gp is valid for
     the nested function.  Reasonable because it's almost always set
     correctly already.  For the cases where that's wrong, make sure
     the nested function loads its gp on entry.  */
  if (crtl->has_nonlocal_goto)
    return 1;

  /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
     Even if we are a static function, we still need to do this in case
     our address is taken and passed to something like qsort.  */

  push_topmost_sequence ();
  insn = get_insns ();
  pop_topmost_sequence ();

  for (; insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && ! JUMP_TABLE_DATA_P (insn)
        && GET_CODE (PATTERN (insn)) != USE
        && GET_CODE (PATTERN (insn)) != CLOBBER
        && get_attr_usegp (insn))
      return 1;

  return 0;
}


/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
   sequences.  */

static rtx
set_frame_related_p (void)
{
  rtx seq = get_insns ();
  rtx insn;

  end_sequence ();

  if (!seq)
    return NULL_RTX;

  if (INSN_P (seq))
    {
      insn = seq;
      while (insn != NULL_RTX)
        {
          RTX_FRAME_RELATED_P (insn) = 1;
          insn = NEXT_INSN (insn);
        }
      seq = emit_insn (seq);
    }
  else
    {
      seq = emit_insn (seq);
      RTX_FRAME_RELATED_P (seq) = 1;
    }
  return seq;
}

#define FRP(exp)  (start_sequence (), exp, set_frame_related_p ())
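
/* Typical use below:
     FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
   emits the move and marks the resulting insn(s) frame-related for
   unwind info.  */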

/* Generates a store with the proper unwind info attached.  VALUE is
   stored at BASE_REG+BASE_OFS.  If FRAME_BIAS is nonzero, then BASE_REG
   contains SP+FRAME_BIAS, and that is the unwind info that should be
   generated.  If FRAME_REG != VALUE, then VALUE is being stored on
   behalf of FRAME_REG, and FRAME_REG should be present in the unwind.  */

static void
emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
                    HOST_WIDE_INT base_ofs, rtx frame_reg)
{
  rtx addr, mem, insn;

  addr = plus_constant (base_reg, base_ofs);
  mem = gen_rtx_MEM (DImode, addr);
  set_mem_alias_set (mem, alpha_sr_alias_set);

  insn = emit_move_insn (mem, value);
  RTX_FRAME_RELATED_P (insn) = 1;

  if (frame_bias || value != frame_reg)
    {
      if (frame_bias)
        {
          addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
          mem = gen_rtx_MEM (DImode, addr);
        }

      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                    gen_rtx_SET (VOIDmode, mem, frame_reg));
    }
}

static void
emit_frame_store (unsigned int regno, rtx base_reg,
                  HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
{
  rtx reg = gen_rtx_REG (DImode, regno);
  emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
}

/* Write function prologue.  */

/* On vms we have two kinds of functions:

   - stack frame (PROC_STACK)
        these are 'normal' functions with local vars and which are
        calling other functions
   - register frame (PROC_REGISTER)
        keeps all data in registers, needs no stack

   We must pass this to the assembler so it can generate the
   proper pdsc (procedure descriptor).
   This is done with the '.pdesc' command.

   On not-vms, we don't really differentiate between the two, as we can
   simply allocate stack without saving registers.  */

void
alpha_expand_prologue (void)
{
  /* Registers to save.  */
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  rtx sa_reg;
  int i;

  sa_size = alpha_sa_size ();

  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
                              + (alpha_procedure_type == PT_STACK ? 8 : 0)
                              + frame_size
                              + crtl->args.pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    /* We have to allocate space for the DSIB if we generate a frame.  */
    frame_size = ALPHA_ROUND (sa_size
                              + (alpha_procedure_type == PT_STACK ? 48 : 0))
                 + ALPHA_ROUND (frame_size
                                + crtl->outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
                  + sa_size
                  + ALPHA_ROUND (frame_size
                                 + crtl->args.pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
  else
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  /* Emit an insn to reload GP, if needed.  */
  if (TARGET_ABI_OSF)
    {
      alpha_function_needs_gp = alpha_does_function_need_gp ();
      if (alpha_function_needs_gp)
        emit_insn (gen_prologue_ldgp ());
    }

  /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
     the call to mcount ourselves, rather than having the linker do it
     magically in response to -pg.  Since _mcount has special linkage,
     don't represent the call as a call.  */
  if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
    emit_insn (gen_prologue_mcount ());

  if (TARGET_ABI_UNICOSMK)
    unicosmk_gen_dsib (&imask);

  /* Adjust the stack by the frame size.  If the frame size is > 4096
     bytes, we need to be sure we probe somewhere in the first and last
     4096 bytes (we can probably get away without the latter test) and
     every 8192 bytes in between.  If the frame size is > 32768, we
     do this in a loop.  Otherwise, we generate the explicit probe
     instructions.

     Note that we are only allowed to adjust sp once in the prologue.  */
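
  /* For example, frame_size == 20000 yields explicit probes below at
     sp-4096 and sp-12288, plus a final probe at sp-20000 when no
     registers are being saved, before the single sp adjustment.  */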

  if (frame_size <= 32768)
    {
      if (frame_size > 4096)
        {
          int probed;

          for (probed = 4096; probed < frame_size; probed += 8192)
            emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
                                                 ? -probed + 64
                                                 : -probed)));

          /* We only have to do this probe if we aren't saving registers.  */
          if (sa_size == 0 && frame_size > probed - 4096)
            emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
        }

      if (frame_size != 0)
        FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
                                    GEN_INT (TARGET_ABI_UNICOSMK
                                             ? -frame_size + 64
                                             : -frame_size))));
    }
  else
    {
      /* Here we generate code to set R22 to SP + 4096 and set R23 to the
         number of 8192 byte blocks to probe.  We then probe each block
         in the loop and then set SP to the proper location.  If the
         amount remaining is > 4096, we have to do one more probe if we
         are not saving any registers.  */
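
      /* For example, frame_size == 100000 gives blocks == 12 and
         leftover == 5792: the loop probes twelve 8192-byte blocks and
         the remaining adjustment (with one more probe if no registers
         are saved) is handled afterwards.  */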

      HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
      HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
      rtx ptr = gen_rtx_REG (DImode, 22);
      rtx count = gen_rtx_REG (DImode, 23);
      rtx seq;

      emit_move_insn (count, GEN_INT (blocks));
      emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
                             GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));

      /* Because of the difficulty in emitting a new basic block this
         late in the compilation, generate the loop as a single insn.  */
      emit_insn (gen_prologue_stack_probe_loop (count, ptr));

      if (leftover > 4096 && sa_size == 0)
        {
          rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
          MEM_VOLATILE_P (last) = 1;
          emit_move_insn (last, const0_rtx);
        }

      if (TARGET_ABI_WINDOWS_NT)
        {
          /* For NT stack unwind (done by 'reverse execution'), it's
             not OK to take the result of a loop, even though the value
             is already in ptr, so we reload it via a single operation
             and subtract it from sp.

             Yes, that's correct -- we have to reload the whole constant
             into a temporary via ldah+lda then subtract from sp.  */

          HOST_WIDE_INT lo, hi;
          lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
          hi = frame_size - lo;

          emit_move_insn (ptr, GEN_INT (hi));
          emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
          seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
                                       ptr));
        }
      else
        {
          seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
                                       GEN_INT (-leftover)));
        }

      /* This alternative is special, because the DWARF code cannot
         possibly intuit through the loop above.  So we invent this
         note for it to look at instead.  */
      RTX_FRAME_RELATED_P (seq) = 1;
      add_reg_note (seq, REG_FRAME_RELATED_EXPR,
                    gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                                 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                               GEN_INT (TARGET_ABI_UNICOSMK
                                                        ? -frame_size + 64
                                                        : -frame_size))));
    }

  if (!TARGET_ABI_UNICOSMK)
    {
      HOST_WIDE_INT sa_bias = 0;

      /* Cope with very large offsets to the register save area.  */
      sa_reg = stack_pointer_rtx;
      if (reg_offset + sa_size > 0x8000)
        {
          int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
          rtx sa_bias_rtx;

          if (low + sa_size <= 0x8000)
            sa_bias = reg_offset - low, reg_offset = low;
          else
            sa_bias = reg_offset, reg_offset = 0;

          sa_reg = gen_rtx_REG (DImode, 24);
          sa_bias_rtx = GEN_INT (sa_bias);

          if (add_operand (sa_bias_rtx, DImode))
            emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
          else
            {
              emit_move_insn (sa_reg, sa_bias_rtx);
              emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
            }
        }

      /* Save regs in stack order, beginning with the VMS PV.  */
      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
        emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);

      /* Save register RA next.  */
      if (imask & (1UL << REG_RA))
        {
          emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
          imask &= ~(1UL << REG_RA);
          reg_offset += 8;
        }

      /* Now save any other registers required to be saved.  */
      for (i = 0; i < 31; i++)
        if (imask & (1UL << i))
          {
            emit_frame_store (i, sa_reg, sa_bias, reg_offset);
            reg_offset += 8;
          }

      for (i = 0; i < 31; i++)
        if (fmask & (1UL << i))
          {
            emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
            reg_offset += 8;
          }
    }
  else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
    {
      /* The standard frame on the T3E includes space for saving registers.
         We just have to use it.  We don't have to save the return address and
         the old frame pointer here - they are saved in the DSIB.  */

      reg_offset = -56;
      for (i = 9; i < 15; i++)
        if (imask & (1UL << i))
          {
            emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
            reg_offset -= 8;
          }
      for (i = 2; i < 10; i++)
        if (fmask & (1UL << i))
          {
            emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
            reg_offset -= 8;
          }
    }

  if (TARGET_ABI_OPEN_VMS)
    {
      /* Register frame procedures save the fp.  */
      if (alpha_procedure_type == PT_REGISTER)
        {
          rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
                                     hard_frame_pointer_rtx);
          add_reg_note (insn, REG_CFA_REGISTER, NULL);
          RTX_FRAME_RELATED_P (insn) = 1;
        }

      if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
        emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
                                    gen_rtx_REG (DImode, REG_PV)));

      if (alpha_procedure_type != PT_NULL
          && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
        FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));

      /* If we have to allocate space for outgoing args, do it now.  */
      if (crtl->outgoing_args_size != 0)
        {
          rtx seq
            = emit_move_insn (stack_pointer_rtx,
                              plus_constant
                              (hard_frame_pointer_rtx,
                               - (ALPHA_ROUND
                                  (crtl->outgoing_args_size))));

          /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
             if ! frame_pointer_needed.  Setting the bit will change the CFA
             computation rule to use sp again, which would be wrong if we had
             frame_pointer_needed, as this means sp might move unpredictably
             later on.

             Also, note that
               frame_pointer_needed
               => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
             and
               crtl->outgoing_args_size != 0
               => alpha_procedure_type != PT_NULL,

             so when we are not setting the bit here, we are guaranteed to
             have emitted an FRP frame pointer update just before.  */
          RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
        }
    }
  else if (!TARGET_ABI_UNICOSMK)
    {
      /* If we need a frame pointer, set it from the stack pointer.  */
      if (frame_pointer_needed)
        {
          if (TARGET_CAN_FAULT_IN_PROLOGUE)
            FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
          else
            /* This must always be the last instruction in the
               prologue, thus we emit a special move + clobber.  */
              FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
                                           stack_pointer_rtx, sa_reg)));
        }
    }

  /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
     the prologue, for exception handling reasons, we cannot do this for
     any insn that might fault.  We could prevent this for mems with a
     (clobber:BLK (scratch)), but this doesn't work for fp insns.  So we
     have to prevent all such scheduling with a blockage.

     Linux, on the other hand, never bothered to implement OSF/1's
     exception handling, and so doesn't care about such things.  Anyone
     planning to use dwarf2 frame-unwind info can also omit the blockage.  */

  if (! TARGET_CAN_FAULT_IN_PROLOGUE)
    emit_insn (gen_blockage ());
}
8081
 
8082
/* Count the number of .file directives, so that .loc is up to date.  */
8083
int num_source_filenames = 0;
8084
 
8085
/* Output the textual info surrounding the prologue.  */
8086
 
8087
void
8088
alpha_start_function (FILE *file, const char *fnname,
8089
                      tree decl ATTRIBUTE_UNUSED)
8090
{
8091
  unsigned long imask = 0;
8092
  unsigned long fmask = 0;
8093
  /* Stack space needed for pushing registers clobbered by us.  */
8094
  HOST_WIDE_INT sa_size;
8095
  /* Complete stack size needed.  */
8096
  unsigned HOST_WIDE_INT frame_size;
8097
  /* The maximum debuggable frame size (512 Kbytes using Tru64 as).  */
8098
  unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
8099
                                          ? 524288
8100
                                          : 1UL << 31;
8101
  /* Offset from base reg to register save area.  */
8102
  HOST_WIDE_INT reg_offset;
8103
  char *entry_label = (char *) alloca (strlen (fnname) + 6);
8104
  char *tramp_label = (char *) alloca (strlen (fnname) + 6);
8105
  int i;
8106
 
8107
  /* Don't emit an extern directive for functions defined in the same file.  */
8108
  if (TARGET_ABI_UNICOSMK)
8109
    {
8110
      tree name_tree;
8111
      name_tree = get_identifier (fnname);
8112
      TREE_ASM_WRITTEN (name_tree) = 1;
8113
    }
8114
 
8115
#if TARGET_ABI_OPEN_VMS
8116
  if (vms_debug_main
8117
      && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
8118
    {
8119
      targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
8120
      ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
8121
      switch_to_section (text_section);
8122
      vms_debug_main = NULL;
8123
    }
8124
#endif
8125
 
8126
  alpha_fnname = fnname;
8127
  sa_size = alpha_sa_size ();
8128
 
8129
  frame_size = get_frame_size ();
8130
  if (TARGET_ABI_OPEN_VMS)
8131
    frame_size = ALPHA_ROUND (sa_size
8132
                              + (alpha_procedure_type == PT_STACK ? 8 : 0)
8133
                              + frame_size
8134
                              + crtl->args.pretend_args_size);
8135
  else if (TARGET_ABI_UNICOSMK)
8136
    frame_size = ALPHA_ROUND (sa_size
8137
                              + (alpha_procedure_type == PT_STACK ? 48 : 0))
8138
                 + ALPHA_ROUND (frame_size
8139
                              + crtl->outgoing_args_size);
8140
  else
8141
    frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
8142
                  + sa_size
8143
                  + ALPHA_ROUND (frame_size
8144
                                 + crtl->args.pretend_args_size));
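
  /* A worked example of the OSF/1 case above (illustrative values,
     with ALPHA_ROUND rounding up to the 16-byte stack alignment):
     outgoing_args_size = 24 rounds to 32, sa_size = 48, and 40 bytes
     of locals with no pretend args round to 48, giving
     frame_size = 32 + 48 + 48 = 128.  */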

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
  else
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  /* Ecoff can handle multiple .file directives, so put out file and lineno.
     We have to do that before the .ent directive as we cannot switch
     files within procedures with native ecoff because line numbers are
     linked to procedure descriptors.
     Outputting the lineno helps debugging of one-line functions as they
     would otherwise get no line number at all. Please note that we would
     like to put out last_linenum from final.c, but it is not accessible.  */

  if (write_symbols == SDB_DEBUG)
    {
#ifdef ASM_OUTPUT_SOURCE_FILENAME
      ASM_OUTPUT_SOURCE_FILENAME (file,
                                  DECL_SOURCE_FILE (current_function_decl));
#endif
#ifdef SDB_OUTPUT_SOURCE_LINE
      if (debug_info_level != DINFO_LEVEL_TERSE)
        SDB_OUTPUT_SOURCE_LINE (file,
                                DECL_SOURCE_LINE (current_function_decl));
#endif
    }

  /* Issue function start and label.  */
  if (TARGET_ABI_OPEN_VMS
      || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
    {
      fputs ("\t.ent ", file);
      assemble_name (file, fnname);
      putc ('\n', file);

      /* If the function needs GP, we'll write the "..ng" label there.
         Otherwise, do it here.  */
      if (TARGET_ABI_OSF
          && ! alpha_function_needs_gp
          && ! cfun->is_thunk)
        {
          putc ('$', file);
          assemble_name (file, fnname);
          fputs ("..ng:\n", file);
        }
    }
  /* Nested functions on VMS that are potentially called via trampoline
     get a special transfer entry point that loads the called function's
     procedure descriptor and static chain.  */
   if (TARGET_ABI_OPEN_VMS
       && !TREE_PUBLIC (decl)
       && DECL_CONTEXT (decl)
       && !TYPE_P (DECL_CONTEXT (decl)))
     {
        strcpy (tramp_label, fnname);
        strcat (tramp_label, "..tr");
        ASM_OUTPUT_LABEL (file, tramp_label);
        fprintf (file, "\tldq $1,24($27)\n");
        fprintf (file, "\tldq $27,16($27)\n");
     }

  strcpy (entry_label, fnname);
  if (TARGET_ABI_OPEN_VMS)
    strcat (entry_label, "..en");

  /* For public functions, the label must be globalized by appending an
     additional colon.  */
  if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
    strcat (entry_label, ":");

  ASM_OUTPUT_LABEL (file, entry_label);
  inside_function = TRUE;

  if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.base $%d\n", vms_base_regno);

  if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
      && !flag_inhibit_size_directive)
    {
      /* Set flags in procedure descriptor to request IEEE-conformant
         math-library routines.  The value we set it to is PDSC_EXC_IEEE
         (/usr/include/pdsc.h).  */
      fputs ("\t.eflag 48\n", file);
    }

  /* Set up offsets to alpha virtual arg/local debugging pointer.  */
  alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
  alpha_arg_offset = -frame_size + 48;

  /* Describe our frame.  If the frame size is larger than an integer,
     print it as zero to avoid an assembler error.  We won't be
     properly describing such a frame, but that's the best we can do.  */
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
             HOST_WIDE_INT_PRINT_DEC "\n",
             vms_unwind_regno,
             frame_size >= (1UL << 31) ? 0 : frame_size,
             reg_offset);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
             (frame_pointer_needed
              ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
             frame_size >= max_frame_size ? 0 : frame_size,
             crtl->args.pretend_args_size);
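
  /* For example (illustrative values): a 128-byte OSF frame with no
     pretend args and no frame pointer would come out as
     ".frame $30,128,$26,0", $30 being the stack pointer and $26 the
     return address register.  */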

  /* Describe which registers were spilled.  */
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    {
      if (imask)
        /* ??? Does VMS care if mask contains ra?  The old code didn't
           set it, so I don't here.  */
        fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
      if (fmask)
        fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
      if (alpha_procedure_type == PT_REGISTER)
        fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
    }
  else if (!flag_inhibit_size_directive)
    {
      if (imask)
        {
          fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
                   frame_size >= max_frame_size ? 0 : reg_offset - frame_size);

          for (i = 0; i < 32; ++i)
            if (imask & (1UL << i))
              reg_offset += 8;
        }

      if (fmask)
        fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
                 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
    }
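
  /* For example (illustrative values): if $9 and $26 (ra) were saved,
     imask would be 0x4000200, and with frame_size = 112 and
     reg_offset = 16 the directive emitted would be ".mask 0x4000200,-96",
     the second field being the save-area offset relative to the CFA.  */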

#if TARGET_ABI_OPEN_VMS
  /* If a user condition handler has been installed at some point, emit
     the procedure descriptor bits to point the Condition Handling Facility
     at the indirection wrapper, and state the fp offset at which the user
     handler may be found.  */
  if (cfun->machine->uses_condition_handler)
    {
      fprintf (file, "\t.handler __gcc_shell_handler\n");
      fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
    }

  /* Ifdef'ed because link_section is only available then.  */
  switch_to_section (readonly_data_section);
  fprintf (file, "\t.align 3\n");
  assemble_name (file, fnname); fputs ("..na:\n", file);
  fputs ("\t.ascii \"", file);
  assemble_name (file, fnname);
  fputs ("\\0\"\n", file);
  alpha_need_linkage (fnname, 1);
  switch_to_section (text_section);
#endif
}

/* Emit the .prologue note at the scheduled end of the prologue.  */

static void
alpha_output_function_end_prologue (FILE *file)
{
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    fputs ("\t.prologue\n", file);
  else if (TARGET_ABI_WINDOWS_NT)
    fputs ("\t.prologue 0\n", file);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.prologue %d\n",
             alpha_function_needs_gp || cfun->is_thunk);
}

/* Write function epilogue.  */

void
alpha_expand_epilogue (void)
{
  /* Registers to save.  */
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  int fp_is_frame_pointer, fp_offset;
  rtx sa_reg, sa_reg_exp = NULL;
  rtx sp_adj1, sp_adj2, mem, reg, insn;
  rtx eh_ofs;
  rtx cfa_restores = NULL_RTX;
  int i;

  sa_size = alpha_sa_size ();

  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
                              + (alpha_procedure_type == PT_STACK ? 8 : 0)
                              + frame_size
                              + crtl->args.pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    frame_size = ALPHA_ROUND (sa_size
                              + (alpha_procedure_type == PT_STACK ? 48 : 0))
                 + ALPHA_ROUND (frame_size
                              + crtl->outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
                  + sa_size
                  + ALPHA_ROUND (frame_size
                                 + crtl->args.pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    {
       if (alpha_procedure_type == PT_STACK)
          reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
       else
          reg_offset = 0;
    }
  else
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  fp_is_frame_pointer
    = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
       || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
  fp_offset = 0;
  sa_reg = stack_pointer_rtx;

  if (crtl->calls_eh_return)
    eh_ofs = EH_RETURN_STACKADJ_RTX;
  else
    eh_ofs = NULL_RTX;

  if (!TARGET_ABI_UNICOSMK && sa_size)
    {
      /* If we have a frame pointer, restore SP from it.  */
      if ((TARGET_ABI_OPEN_VMS
           && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
          || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
        emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);

      /* Cope with very large offsets to the register save area.  */
      if (reg_offset + sa_size > 0x8000)
        {
          int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
          HOST_WIDE_INT bias;

          if (low + sa_size <= 0x8000)
            bias = reg_offset - low, reg_offset = low;
          else
            bias = reg_offset, reg_offset = 0;

          sa_reg = gen_rtx_REG (DImode, 22);
          sa_reg_exp = plus_constant (stack_pointer_rtx, bias);

          emit_move_insn (sa_reg, sa_reg_exp);
        }
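
      /* A worked example of the split above (illustrative values):
         reg_offset = 0x9008 with a small sa_size sign-extends to
         low = -0x6ff8, so bias = 0x10000 and reg_offset becomes -0x6ff8.
         $22 then holds sp + 0x10000 and every save slot is reachable
         from it with a 16-bit displacement ($22 - 0x6ff8 == sp + 0x9008).  */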

      /* Restore registers in order, excepting a true frame pointer.  */

      mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
      if (! eh_ofs)
        set_mem_alias_set (mem, alpha_sr_alias_set);
      reg = gen_rtx_REG (DImode, REG_RA);
      emit_move_insn (reg, mem);
      cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);

      reg_offset += 8;
      imask &= ~(1UL << REG_RA);

      for (i = 0; i < 31; ++i)
        if (imask & (1UL << i))
          {
            if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
              fp_offset = reg_offset;
            else
              {
                mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
                set_mem_alias_set (mem, alpha_sr_alias_set);
                reg = gen_rtx_REG (DImode, i);
                emit_move_insn (reg, mem);
                cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
                                               cfa_restores);
              }
            reg_offset += 8;
          }

      for (i = 0; i < 31; ++i)
        if (fmask & (1UL << i))
          {
            mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
            set_mem_alias_set (mem, alpha_sr_alias_set);
            reg = gen_rtx_REG (DFmode, i+32);
            emit_move_insn (reg, mem);
            cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
            reg_offset += 8;
          }
    }
  else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
    {
      /* Restore callee-saved general-purpose registers.  */

      reg_offset = -56;

      for (i = 9; i < 15; i++)
        if (imask & (1UL << i))
          {
            mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
                                                     reg_offset));
            set_mem_alias_set (mem, alpha_sr_alias_set);
            reg = gen_rtx_REG (DImode, i);
            emit_move_insn (reg, mem);
            cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
            reg_offset -= 8;
          }

      for (i = 2; i < 10; i++)
        if (fmask & (1UL << i))
          {
            mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
                                                     reg_offset));
            set_mem_alias_set (mem, alpha_sr_alias_set);
            reg = gen_rtx_REG (DFmode, i+32);
            emit_move_insn (reg, mem);
            cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
            reg_offset -= 8;
          }

      /* Restore the return address from the DSIB.  */
      mem = gen_rtx_MEM (DImode, plus_constant (hard_frame_pointer_rtx, -8));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      reg = gen_rtx_REG (DImode, REG_RA);
      emit_move_insn (reg, mem);
      cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
    }

  if (frame_size || eh_ofs)
    {
      sp_adj1 = stack_pointer_rtx;

      if (eh_ofs)
        {
          sp_adj1 = gen_rtx_REG (DImode, 23);
          emit_move_insn (sp_adj1,
                          gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
        }

      /* If the stack size is large, begin computation into a temporary
         register so as not to interfere with a potential fp restore,
         which must be consecutive with an SP restore.  */
      if (frame_size < 32768
          && ! (TARGET_ABI_UNICOSMK && cfun->calls_alloca))
        sp_adj2 = GEN_INT (frame_size);
      else if (TARGET_ABI_UNICOSMK)
        {
          sp_adj1 = gen_rtx_REG (DImode, 23);
          emit_move_insn (sp_adj1, hard_frame_pointer_rtx);
          sp_adj2 = const0_rtx;
        }
      else if (frame_size < 0x40007fffL)
        {
          int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;

          sp_adj2 = plus_constant (sp_adj1, frame_size - low);
          if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
            sp_adj1 = sa_reg;
          else
            {
              sp_adj1 = gen_rtx_REG (DImode, 23);
              emit_move_insn (sp_adj1, sp_adj2);
            }
          sp_adj2 = GEN_INT (low);
        }
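
      /* Worked example (illustrative): frame_size = 0x12340 splits into
         low = 0x2340 and a high part of 0x10000; the high part is
         computed into $23 first (sp + 0x10000), and the final SP restore
         then adds the 16-bit low part, keeping the fp and sp restores
         consecutive.  */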

      else
        {
          rtx tmp = gen_rtx_REG (DImode, 23);
          sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
          if (!sp_adj2)
            {
              /* We can't drop new things to memory this late, afaik,
                 so build it up by pieces.  */
              sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
                                                   -(frame_size < 0));
              gcc_assert (sp_adj2);
            }
        }

      /* From now on, things must be in order.  So emit blockages.  */

      /* Restore the frame pointer.  */
      if (TARGET_ABI_UNICOSMK)
        {
          emit_insn (gen_blockage ());
          mem = gen_rtx_MEM (DImode,
                             plus_constant (hard_frame_pointer_rtx, -16));
          set_mem_alias_set (mem, alpha_sr_alias_set);
          emit_move_insn (hard_frame_pointer_rtx, mem);
          cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
                                         hard_frame_pointer_rtx, cfa_restores);
        }
      else if (fp_is_frame_pointer)
        {
          emit_insn (gen_blockage ());
          mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
          set_mem_alias_set (mem, alpha_sr_alias_set);
          emit_move_insn (hard_frame_pointer_rtx, mem);
          cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
                                         hard_frame_pointer_rtx, cfa_restores);
        }
      else if (TARGET_ABI_OPEN_VMS)
        {
          emit_insn (gen_blockage ());
          emit_move_insn (hard_frame_pointer_rtx,
                          gen_rtx_REG (DImode, vms_save_fp_regno));
          cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
                                         hard_frame_pointer_rtx, cfa_restores);
        }

      /* Restore the stack pointer.  */
      emit_insn (gen_blockage ());
      if (sp_adj2 == const0_rtx)
        insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
      else
        insn = emit_move_insn (stack_pointer_rtx,
                               gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
      REG_NOTES (insn) = cfa_restores;
      add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    {
      gcc_assert (cfa_restores == NULL);

      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
        {
          emit_insn (gen_blockage ());
          insn = emit_move_insn (hard_frame_pointer_rtx,
                                 gen_rtx_REG (DImode, vms_save_fp_regno));
          add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
          RTX_FRAME_RELATED_P (insn) = 1;
        }
      else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
        {
          /* Decrement the frame pointer if the function does not have a
             frame.  */
          emit_insn (gen_blockage ());
          emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
                                 hard_frame_pointer_rtx, constm1_rtx));
        }
    }
}

/* Output the rest of the textual info surrounding the epilogue.  */

void
alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
{
  rtx insn;

  /* We output a nop after noreturn calls at the very end of the function to
     ensure that the return address always remains in the caller's code range,
     as not doing so might confuse unwinding engines.  */
  insn = get_last_insn ();
  if (!INSN_P (insn))
    insn = prev_active_insn (insn);
  if (insn && CALL_P (insn))
    output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);

#if TARGET_ABI_OPEN_VMS
  alpha_write_linkage (file, fnname, decl);
#endif

  /* End the function.  */
  if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
    {
      fputs ("\t.end ", file);
      assemble_name (file, fnname);
      putc ('\n', file);
    }
  inside_function = FALSE;

  /* Output jump tables and the static subroutine information block.  */
  if (TARGET_ABI_UNICOSMK)
    {
      unicosmk_output_ssib (file, fnname);
      unicosmk_output_deferred_case_vectors (file);
    }
}

#if TARGET_ABI_OPEN_VMS
void avms_asm_output_external (FILE *file, tree decl ATTRIBUTE_UNUSED, const char *name)
{
#ifdef DO_CRTL_NAMES
  DO_CRTL_NAMES;
#endif
}
#endif

#if TARGET_ABI_OSF
/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.

   In order to avoid the hordes of differences between generated code
   with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
   lots of code loading up large constants, generate rtl and emit it
   instead of going straight to text.

   Not sure why this idea hasn't been explored before...  */

static void
alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
                           HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                           tree function)
{
  HOST_WIDE_INT hi, lo;
  rtx this_rtx, insn, funexp;

  /* We always require a valid GP.  */
  emit_insn (gen_prologue_ldgp ());
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in $16.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, 17);
  else
    this_rtx = gen_rtx_REG (Pmode, 16);

  /* Add DELTA.  When possible we use ldah+lda.  Otherwise load the
     entire constant for the add.  */
  lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
  hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
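
  /* Worked example (illustrative): delta = 0x18008 gives
     lo = 0x8008 sign-extended to -0x7ff8 and hi = 0x20000, with
     hi + lo == delta, so the adjustment below becomes one ldah-style
     add of hi followed by one lda-style add of lo.  */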
  if (hi + lo == delta)
    {
      if (hi)
        emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
      if (lo)
        emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
    }
  else
    {
      rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
                                           delta, -(delta < 0));
      emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
    }

  /* Add a delta stored in the vtable at VCALL_OFFSET.  */
  if (vcall_offset)
    {
      rtx tmp, tmp2;

      tmp = gen_rtx_REG (Pmode, 0);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));

      lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
      hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
      if (hi + lo == vcall_offset)
        {
          if (hi)
            emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
        }
      else
        {
          tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
                                            vcall_offset, -(vcall_offset < 0));
          emit_insn (gen_adddi3 (tmp, tmp, tmp2));
          lo = 0;
        }
      if (lo)
        tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
      else
        tmp2 = tmp;
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));

      emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worthwhile.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_alloc ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();
}
#endif /* TARGET_ABI_OSF */

/* Debugging support.  */

#include "gstab.h"

/* Count the number of sdb-related labels that are generated (to find
   block start and end boundaries).  */

int sdb_label_count = 0;

/* Name of the file containing the current function.  */

static const char *current_function_file = "";

/* Offsets to alpha virtual arg/local debugging pointers.  */

long alpha_arg_offset;
long alpha_auto_offset;

/* Emit a new filename to a stream.  */

void
alpha_output_filename (FILE *stream, const char *name)
{
  static int first_time = TRUE;

  if (first_time)
    {
      first_time = FALSE;
      ++num_source_filenames;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      fprintf (stream, "\n");
      if (!TARGET_GAS && write_symbols == DBX_DEBUG)
        fprintf (stream, "\t#@stabs\n");
    }

  else if (write_symbols == DBX_DEBUG)
    /* dbxout.c will emit an appropriate .stabs directive.  */
    return;

  else if (name != current_function_file
           && strcmp (name, current_function_file) != 0)
    {
      if (inside_function && ! TARGET_GAS)
        fprintf (stream, "\t#.file\t%d ", num_source_filenames);
      else
        {
          ++num_source_filenames;
          current_function_file = name;
          fprintf (stream, "\t.file\t%d ", num_source_filenames);
        }

      output_quoted_string (stream, name);
      fprintf (stream, "\n");
    }
}

/* Structure to show the current status of registers and memory.  */

struct shadow_summary
{
  struct {
    unsigned int i     : 31;    /* Mask of int regs */
    unsigned int fp    : 31;    /* Mask of fp regs */
    unsigned int mem   :  1;    /* mem == imem | fpmem */
  } used, defd;
};

/* Summarize the effects of expression X on the machine.  Update SUM, a
   pointer to the summary structure.  SET is nonzero if the insn is setting
   the object, otherwise zero.  */

static void
summarize_insn (rtx x, struct shadow_summary *sum, int set)
{
  const char *format_ptr;
  int i, j;

  if (x == 0)
    return;

  switch (GET_CODE (x))
    {
      /* ??? Note that this case would be incorrect if the Alpha had a
         ZERO_EXTRACT in SET_DEST.  */
    case SET:
      summarize_insn (SET_SRC (x), sum, 0);
      summarize_insn (SET_DEST (x), sum, 1);
      break;

    case CLOBBER:
      summarize_insn (XEXP (x, 0), sum, 1);
      break;

    case USE:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
        summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
      break;

    case PARALLEL:
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        summarize_insn (XVECEXP (x, 0, i), sum, 0);
      break;

    case SUBREG:
      summarize_insn (SUBREG_REG (x), sum, 0);
      break;

    case REG:
      {
        int regno = REGNO (x);
        unsigned long mask = ((unsigned long) 1) << (regno % 32);

        if (regno == 31 || regno == 63)
          break;

        if (set)
          {
            if (regno < 32)
              sum->defd.i |= mask;
            else
              sum->defd.fp |= mask;
          }
        else
          {
            if (regno < 32)
              sum->used.i  |= mask;
            else
              sum->used.fp |= mask;
          }
        }
      break;

    case MEM:
      if (set)
        sum->defd.mem = 1;
      else
        sum->used.mem = 1;

      /* Find the regs used in memory address computation: */
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case CONST_INT:   case CONST_DOUBLE:
    case SYMBOL_REF:  case LABEL_REF:     case CONST:
    case SCRATCH:     case ASM_INPUT:
      break;

      /* Handle common unary and binary ops for efficiency.  */
    case COMPARE:  case PLUS:    case MINUS:   case MULT:      case DIV:
    case MOD:      case UDIV:    case UMOD:    case AND:       case IOR:
    case XOR:      case ASHIFT:  case ROTATE:  case ASHIFTRT:  case LSHIFTRT:
    case ROTATERT: case SMIN:    case SMAX:    case UMIN:      case UMAX:
    case NE:       case EQ:      case GE:      case GT:        case LE:
    case LT:       case GEU:     case GTU:     case LEU:       case LTU:
      summarize_insn (XEXP (x, 0), sum, 0);
      summarize_insn (XEXP (x, 1), sum, 0);
      break;

    case NEG:  case NOT:  case SIGN_EXTEND:  case ZERO_EXTEND:
    case TRUNCATE:  case FLOAT_EXTEND:  case FLOAT_TRUNCATE:  case FLOAT:
    case FIX:  case UNSIGNED_FLOAT:  case UNSIGNED_FIX:  case ABS:
    case SQRT:  case FFS:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    default:
      format_ptr = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
        switch (format_ptr[i])
          {
          case 'e':
            summarize_insn (XEXP (x, i), sum, 0);
            break;

          case 'E':
            for (j = XVECLEN (x, i) - 1; j >= 0; j--)
              summarize_insn (XVECEXP (x, i, j), sum, 0);
            break;

          case 'i':
            break;

          default:
            gcc_unreachable ();
          }
    }
}

/* Ensure a sufficient number of `trapb' insns are in the code when
   the user requests code with a trap precision of functions or
   instructions.

   In naive mode, when the user requests a trap-precision of
   "instruction", a trapb is needed after every instruction that may
   generate a trap.  This ensures that the code is resumption safe but
   it is also slow.

   When optimizations are turned on, we delay issuing a trapb as long
   as possible.  In this context, a trap shadow is the sequence of
   instructions that starts with a (potentially) trap generating
   instruction and extends to the next trapb or call_pal instruction
   (but GCC never generates call_pal by itself).  We can delay (and
   therefore sometimes omit) a trapb subject to the following
   conditions:

   (a) On entry to the trap shadow, if any Alpha register or memory
   location contains a value that is used as an operand value by some
   instruction in the trap shadow (live on entry), then no instruction
   in the trap shadow may modify the register or memory location.

   (b) Within the trap shadow, the computation of the base register
   for a memory load or store instruction may not involve using the
   result of an instruction that might generate an UNPREDICTABLE
   result.

   (c) Within the trap shadow, no register may be used more than once
   as a destination register.  (This is to make life easier for the
   trap-handler.)

   (d) The trap shadow may not include any branch instructions.  */
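
/* An illustrative sketch (not from the DEC documentation): in

       addt $f1,$f2,$f3    # may trap; opens the shadow
       mult $f4,$f5,$f6    # may share it: fresh destination and does
                           # not modify the addt operands
       trapb               # closes the shadow

   the two arithmetic insns satisfy (a)-(d) and share one trapb, while
   reusing $f3 as the second destination would violate (c) and force an
   earlier trapb.  */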

static void
alpha_handle_trap_shadows (void)
{
  struct shadow_summary shadow;
  int trap_pending, exception_nesting;
  rtx i, n;

  trap_pending = 0;
  exception_nesting = 0;
  shadow.used.i = 0;
  shadow.used.fp = 0;
  shadow.used.mem = 0;
  shadow.defd = shadow.used;

  for (i = get_insns (); i ; i = NEXT_INSN (i))
    {
      if (NOTE_P (i))
        {
          switch (NOTE_KIND (i))
            {
            case NOTE_INSN_EH_REGION_BEG:
              exception_nesting++;
              if (trap_pending)
                goto close_shadow;
              break;

            case NOTE_INSN_EH_REGION_END:
              exception_nesting--;
              if (trap_pending)
                goto close_shadow;
              break;

            case NOTE_INSN_EPILOGUE_BEG:
              if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
                goto close_shadow;
              break;
            }
        }
      else if (trap_pending)
        {
          if (alpha_tp == ALPHA_TP_FUNC)
            {
              if (JUMP_P (i)
                  && GET_CODE (PATTERN (i)) == RETURN)
                goto close_shadow;
            }
          else if (alpha_tp == ALPHA_TP_INSN)
            {
              if (optimize > 0)
                {
                  struct shadow_summary sum;

                  sum.used.i = 0;
                  sum.used.fp = 0;
                  sum.used.mem = 0;
                  sum.defd = sum.used;

                  switch (GET_CODE (i))
                    {
                    case INSN:
                      /* Annoyingly, get_attr_trap will die on these.  */
                      if (GET_CODE (PATTERN (i)) == USE
                          || GET_CODE (PATTERN (i)) == CLOBBER)
                        break;

                      summarize_insn (PATTERN (i), &sum, 0);

                      if ((sum.defd.i & shadow.defd.i)
                          || (sum.defd.fp & shadow.defd.fp))
                        {
                          /* (c) would be violated */
                          goto close_shadow;
                        }

                      /* Combine shadow with summary of current insn: */
                      shadow.used.i   |= sum.used.i;
                      shadow.used.fp  |= sum.used.fp;
                      shadow.used.mem |= sum.used.mem;
                      shadow.defd.i   |= sum.defd.i;
                      shadow.defd.fp  |= sum.defd.fp;
                      shadow.defd.mem |= sum.defd.mem;

                      if ((sum.defd.i & shadow.used.i)
                          || (sum.defd.fp & shadow.used.fp)
                          || (sum.defd.mem & shadow.used.mem))
                        {
                          /* (a) would be violated (also takes care of (b))  */
                          gcc_assert (get_attr_trap (i) != TRAP_YES
                                      || (!(sum.defd.i & sum.used.i)
                                          && !(sum.defd.fp & sum.used.fp)));

                          goto close_shadow;
                        }
                      break;

                    case JUMP_INSN:
                    case CALL_INSN:
                    case CODE_LABEL:
                      goto close_shadow;

                    default:
                      gcc_unreachable ();
                    }
                }
              else
                {
                close_shadow:
                  n = emit_insn_before (gen_trapb (), i);
                  PUT_MODE (n, TImode);
                  PUT_MODE (i, TImode);
                  trap_pending = 0;
                  shadow.used.i = 0;
                  shadow.used.fp = 0;
                  shadow.used.mem = 0;
                  shadow.defd = shadow.used;
                }
            }
        }

      if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
          && NONJUMP_INSN_P (i)
          && GET_CODE (PATTERN (i)) != USE
          && GET_CODE (PATTERN (i)) != CLOBBER
          && get_attr_trap (i) == TRAP_YES)
        {
          if (optimize && !trap_pending)
            summarize_insn (PATTERN (i), &shadow, 0);
          trap_pending = 1;
        }
    }
}

/* Alpha can only issue instruction groups simultaneously if they are
   suitably aligned.  This is very processor-specific.  */
/* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
   that are marked "fake".  These instructions do not exist on that target,
   but it is possible to see these insns with deranged combinations of
   command-line options, such as "-mtune=ev4 -mmax".  Instead of aborting,
   choose a result at random.  */

enum alphaev4_pipe {
  EV4_STOP = 0,
  EV4_IB0 = 1,
  EV4_IB1 = 2,
  EV4_IBX = 4
};

enum alphaev5_pipe {
  EV5_STOP = 0,
  EV5_NONE = 1,
  EV5_E01 = 2,
  EV5_E0 = 4,
  EV5_E1 = 8,
  EV5_FAM = 16,
  EV5_FA = 32,
  EV5_FM = 64
};

static enum alphaev4_pipe
alphaev4_insn_pipe (rtx insn)
{
  if (recog_memoized (insn) < 0)
    return EV4_STOP;
  if (get_attr_length (insn) != 4)
    return EV4_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_LDSYM:
    case TYPE_FLD:
    case TYPE_LD_L:
      return EV4_IBX;

    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMOV:
    case TYPE_ICMP:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_FBR:
    case TYPE_MVI:              /* fake */
      return EV4_IB0;

    case TYPE_IST:
    case TYPE_MISC:
    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
    case TYPE_FCPYS:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FMUL:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FSQRT:            /* fake */
    case TYPE_FTOI:             /* fake */
    case TYPE_ITOF:             /* fake */
      return EV4_IB1;

    default:
      gcc_unreachable ();
    }
}

static enum alphaev5_pipe
alphaev5_insn_pipe (rtx insn)
{
  if (recog_memoized (insn) < 0)
    return EV5_STOP;
  if (get_attr_length (insn) != 4)
    return EV5_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_FLD:
    case TYPE_LDSYM:
    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMOV:
    case TYPE_ICMP:
      return EV5_E01;

    case TYPE_IST:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_MISC:
    case TYPE_MVI:
    case TYPE_LD_L:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FTOI:             /* fake */
    case TYPE_ITOF:             /* fake */
      return EV5_E0;

    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
      return EV5_E1;

    case TYPE_FCPYS:
      return EV5_FAM;

    case TYPE_FBR:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FSQRT:            /* fake */
      return EV5_FA;

    case TYPE_FMUL:
      return EV5_FM;

    default:
      gcc_unreachable ();
    }
}

/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev4_pipe above.  If EV4_IBX is set, then
   the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.

   LEN is, of course, the length of the group in bytes.  */
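
/* For example (illustrative), an ILD (EV4_IBX) followed by an IADD
   (EV4_IB0) still fits in one group: the load claims IB0 and sets IBX,
   and when the add also asks for IB0 the IBX bit means the hardware can
   issue the load in IB1 instead, so the group is not ended.  */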

static rtx
alphaev4_next_group (rtx insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev4_pipe pipe;

      pipe = alphaev4_insn_pipe (insn);
      switch (pipe)
        {
        case EV4_STOP:
          /* Force complex instructions to start new groups.  */
          if (in_use)
            goto done;

          /* If this is a completely unrecognized insn, it's an asm.
             We don't know how long it is, so record length as -1 to
             signal a needed realignment.  */
          if (recog_memoized (insn) < 0)
            len = -1;
          else
            len = get_attr_length (insn);
          goto next_and_done;

        case EV4_IBX:
          if (in_use & EV4_IB0)
            {
              if (in_use & EV4_IB1)
                goto done;
              in_use |= EV4_IB1;
            }
          else
            in_use |= EV4_IB0 | EV4_IBX;
          break;

        case EV4_IB0:
          if (in_use & EV4_IB0)
            {
              if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
                goto done;
              in_use |= EV4_IB1;
            }
          in_use |= EV4_IB0;
          break;

        case EV4_IB1:
          if (in_use & EV4_IB1)
            goto done;
          in_use |= EV4_IB1;
          break;

        default:
          gcc_unreachable ();
        }
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      if (JUMP_P (insn))
        goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
        goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
        goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
        goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}

/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev5_pipe above.  If EV5_E01 is set, then
   the insn in EV5_E0 can be swapped by the hardware into EV5_E1.

   LEN is, of course, the length of the group in bytes.  */

static rtx
alphaev5_next_group (rtx insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev5_pipe pipe;

      pipe = alphaev5_insn_pipe (insn);
      switch (pipe)
        {
        case EV5_STOP:
          /* Force complex instructions to start new groups.  */
          if (in_use)
            goto done;

          /* If this is a completely unrecognized insn, it's an asm.
             We don't know how long it is, so record length as -1 to
             signal a needed realignment.  */
          if (recog_memoized (insn) < 0)
            len = -1;
          else
            len = get_attr_length (insn);
          goto next_and_done;

        /* ??? For most of the cases below we would like to assert that
           they never happen, as that would indicate an error either in
           Haifa or in the scheduling description.  Unfortunately, Haifa
           never schedules the last instruction of the BB, so we don't
           have an accurate TI bit to go off.  */
        case EV5_E01:
          if (in_use & EV5_E0)
            {
              if (in_use & EV5_E1)
                goto done;
              in_use |= EV5_E1;
            }
          else
            in_use |= EV5_E0 | EV5_E01;
          break;

        case EV5_E0:
          if (in_use & EV5_E0)
            {
              if (!(in_use & EV5_E01) || (in_use & EV5_E1))
                goto done;
              in_use |= EV5_E1;
            }
          in_use |= EV5_E0;
          break;

        case EV5_E1:
          if (in_use & EV5_E1)
            goto done;
          in_use |= EV5_E1;
          break;

        case EV5_FAM:
          if (in_use & EV5_FA)
            {
              if (in_use & EV5_FM)
                goto done;
              in_use |= EV5_FM;
            }
          else
            in_use |= EV5_FA | EV5_FAM;
          break;

        case EV5_FA:
          if (in_use & EV5_FA)
            goto done;
          in_use |= EV5_FA;
          break;

        case EV5_FM:
          if (in_use & EV5_FM)
            goto done;
          in_use |= EV5_FM;
          break;

        case EV5_NONE:
          break;

        default:
          gcc_unreachable ();
        }
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      /* ??? If this is predicted not-taken, slotting continues, except
         that no more IBR, FBR, or JSR insns may be slotted.  */
      if (JUMP_P (insn))
        goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
        goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
        goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
        goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}

static rtx
alphaev4_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV4_IB0))
    {
      in_use |= EV4_IB0;
      nop = gen_nop ();
    }
  else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
    {
      in_use |= EV4_IB1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV4_IB1))
    {
      in_use |= EV4_IB1;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}

static rtx
alphaev5_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV5_E1))
    {
      in_use |= EV5_E1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FA))
    {
      in_use |= EV5_FA;
      nop = gen_fnop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FM))
    {
      in_use |= EV5_FM;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}

/* The instruction group alignment main loop.  */

static void
alpha_align_insns (unsigned int max_align,
                   rtx (*next_group) (rtx, int *, int *),
                   rtx (*next_nop) (int *))
{
  /* ALIGN is the known alignment for the insn group.  */
  unsigned int align;
  /* OFS is the offset of the current insn in the insn group.  */
  int ofs;
  int prev_in_use, in_use, len, ldgp;
  rtx i, next;

  /* Let shorten_branches take care of assigning alignments to code labels.  */
  shorten_branches (get_insns ());

  if (align_functions < 4)
    align = 4;
  else if ((unsigned int) align_functions < max_align)
    align = align_functions;
  else
    align = max_align;

  ofs = prev_in_use = 0;
  i = get_insns ();
  if (NOTE_P (i))
    i = next_nonnote_insn (i);

  ldgp = alpha_function_needs_gp ? 8 : 0;

  while (i)
    {
      next = (*next_group) (i, &in_use, &len);

      /* When we see a label, resync alignment etc.  */
      if (LABEL_P (i))
        {
          unsigned int new_align = 1 << label_to_alignment (i);

          if (new_align >= align)
            {
              align = new_align < max_align ? new_align : max_align;
              ofs = 0;
            }

          else if (ofs & (new_align-1))
            ofs = (ofs | (new_align-1)) + 1;
          gcc_assert (!len);
        }

      /* Handle complex instructions specially.  */
      else if (in_use == 0)
        {
          /* Asms will have length < 0.  This is a signal that we have
             lost alignment knowledge.  Assume, however, that the asm
             will not mis-align instructions.  */
          if (len < 0)
            {
              ofs = 0;
              align = 4;
              len = 0;
            }
        }

      /* If the known alignment is smaller than the recognized insn group,
         realign the output.  */
      else if ((int) align < len)
        {
          unsigned int new_log_align = len > 8 ? 4 : 3;
          rtx prev, where;

          where = prev = prev_nonnote_insn (i);
          if (!where || !LABEL_P (where))
            where = i;

          /* Can't realign between a call and its gp reload.  */
          if (! (TARGET_EXPLICIT_RELOCS
                 && prev && CALL_P (prev)))
            {
              emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
              align = 1 << new_log_align;
              ofs = 0;
            }
        }

      /* We may not insert padding inside the initial ldgp sequence.  */
      else if (ldgp > 0)
        ldgp -= len;

      /* If the group won't fit in the same INT16 as the previous,
         we need to add padding to keep the group together.  Rather
         than simply leaving the insn filling to the assembler, we
         can make use of the knowledge of what sorts of instructions
         were issued in the previous group to make sure that all of
         the added nops are really free.  */
      else if (ofs + len > (int) align)
        {
          int nop_count = (align - ofs) / 4;
          rtx where;

          /* Insert nops before labels, branches, and calls to truly merge
             the execution of the nops with the previous instruction group.  */
          where = prev_nonnote_insn (i);
          if (where)
            {
              if (LABEL_P (where))
                {
                  rtx where2 = prev_nonnote_insn (where);
                  if (where2 && JUMP_P (where2))
                    where = where2;
                }
              else if (NONJUMP_INSN_P (where))
                where = i;
            }
          else
            where = i;

          do
            emit_insn_before ((*next_nop)(&prev_in_use), where);
          while (--nop_count);
          ofs = 0;
        }
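
      /* Worked example (illustrative): with align = 16, a group of
         len = 12 starting at ofs = 8 would straddle the 16-byte
         boundary, so (16 - 8) / 4 = 2 nops are emitted and the group
         starts at the next aligned boundary.  */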
9651
 
9652
      ofs = (ofs + len) & (align - 1);
9653
      prev_in_use = in_use;
9654
      i = next;
9655
    }
9656
}

/* Insert an unop between a noreturn function call and GP load.  */

static void
alpha_pad_noreturn (void)
{
  rtx insn, next;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (!CALL_P (insn)
          || !find_reg_note (insn, REG_NORETURN, NULL_RTX))
        continue;

      next = next_active_insn (insn);

      if (next)
        {
          rtx pat = PATTERN (next);

          if (GET_CODE (pat) == SET
              && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
              && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
            emit_insn_after (gen_unop (), insn);
        }
    }
}

/* Machine dependent reorg pass.  */

static void
alpha_reorg (void)
{
  /* Workaround for a linker error that triggers when an
     exception handler immediately follows a noreturn function.

     The instruction stream from an object file:

  54:   00 40 5b 6b     jsr     ra,(t12),58 <__func+0x58>
  58:   00 00 ba 27     ldah    gp,0(ra)
  5c:   00 00 bd 23     lda     gp,0(gp)
  60:   00 00 7d a7     ldq     t12,0(gp)
  64:   00 40 5b 6b     jsr     ra,(t12),68 <__func+0x68>

     was converted in the final link pass to:

   fdb24:       a0 03 40 d3     bsr     ra,fe9a8 <_called_func+0x8>
   fdb28:       00 00 fe 2f     unop
   fdb2c:       00 00 fe 2f     unop
   fdb30:       30 82 7d a7     ldq     t12,-32208(gp)
   fdb34:       00 40 5b 6b     jsr     ra,(t12),fdb38 <__func+0x68>

     GP load instructions were wrongly cleared by the linker relaxation
     pass.  This workaround prevents removal of GP loads by inserting
     an unop instruction between a noreturn function call and
     exception handler prologue.  */

  if (current_function_has_exception_handlers ())
    alpha_pad_noreturn ();

  if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
    alpha_handle_trap_shadows ();

  /* Due to the number of extra trapb insns, don't bother fixing up
     alignment when trap precision is instruction.  Moreover, we can
     only do our job when sched2 is run.  */
  if (optimize && !optimize_size
      && alpha_tp != ALPHA_TP_INSN
      && flag_schedule_insns_after_reload)
    {
      if (alpha_tune == PROCESSOR_EV4)
        alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
      else if (alpha_tune == PROCESSOR_EV5)
        alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
    }
}
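
/* The alignment units passed above track the issue width of each core:
   EV4 issues at most two instructions per cycle, hence 8-byte groups,
   while EV5 issues up to four, hence 16-byte groups.  EV6 schedules
   instructions dynamically, which presumably is why no alignment fixup
   is requested for it.  */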

#if !TARGET_ABI_UNICOSMK

#ifdef HAVE_STAMP_H
#include <stamp.h>
#endif

static void
alpha_file_start (void)
{
#ifdef OBJECT_FORMAT_ELF
  /* If emitting dwarf2 debug information, we cannot generate a .file
     directive to start the file, as it will conflict with dwarf2out
     file numbers.  So it's only useful when emitting mdebug output.  */
  targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
#endif

  default_file_start ();
#ifdef MS_STAMP
  fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
#endif

  fputs ("\t.set noreorder\n", asm_out_file);
  fputs ("\t.set volatile\n", asm_out_file);
  if (!TARGET_ABI_OPEN_VMS)
    fputs ("\t.set noat\n", asm_out_file);
  if (TARGET_EXPLICIT_RELOCS)
    fputs ("\t.set nomacro\n", asm_out_file);
  if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
    {
      const char *arch;

      if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
        arch = "ev6";
      else if (TARGET_MAX)
        arch = "pca56";
      else if (TARGET_BWX)
        arch = "ev56";
      else if (alpha_cpu == PROCESSOR_EV5)
        arch = "ev5";
      else
        arch = "ev4";

      fprintf (asm_out_file, "\t.arch %s\n", arch);
    }
}
#endif

#ifdef OBJECT_FORMAT_ELF
/* Since we don't have a .dynbss section, we should not allow global
   relocations in the .rodata section.  */

static int
alpha_elf_reloc_rw_mask (void)
{
  return flag_pic ? 3 : 2;
}
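
/* A note on the mask, assuming the convention used by the generic
   categorize_decl_for_section code (bit 0 covering position-relative
   relocations, bit 1 covering absolute ones): the PIC value 3 keeps any
   relocated datum out of read-only sections, while the non-PIC value 2
   only evicts data carrying absolute (global) relocations, matching the
   comment above.  */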

/* Return a section for X.  The only special thing we do here is to
   honor small data.  */

static section *
alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
                              unsigned HOST_WIDE_INT align)
{
  if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
    /* ??? Consider using mergeable sdata sections.  */
    return sdata_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}

static unsigned int
alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = 0;

  if (strcmp (name, ".sdata") == 0
      || strncmp (name, ".sdata.", 7) == 0
      || strncmp (name, ".gnu.linkonce.s.", 16) == 0
      || strcmp (name, ".sbss") == 0
      || strncmp (name, ".sbss.", 6) == 0
      || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
    flags = SECTION_SMALL;

  flags |= default_section_type_flags (decl, name, reloc);
  return flags;
}
#endif /* OBJECT_FORMAT_ELF */

/* Structure to collect function names for final output in link section.  */
/* Note that items marked with GTY can't be ifdef'ed out.  */

enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};

struct GTY(()) alpha_links
{
  int num;
  const char *target;
  rtx linkage;
  enum links_kind lkind;
  enum reloc_kind rkind;
};

struct GTY(()) alpha_funcs
{
  int num;
  splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
    links;
};

static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
  splay_tree alpha_links_tree;
static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
  splay_tree alpha_funcs_tree;

static GTY(()) int alpha_funcs_num;

#if TARGET_ABI_OPEN_VMS

/* Return the VMS argument type corresponding to MODE.  */

enum avms_arg_type
alpha_arg_type (enum machine_mode mode)
{
  switch (mode)
    {
    case SFmode:
      return TARGET_FLOAT_VAX ? FF : FS;
    case DFmode:
      return TARGET_FLOAT_VAX ? FD : FT;
    default:
      return I64;
    }
}

/* Return an rtx for an integer representing the VMS Argument Information
   register value.  */

rtx
alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
{
  unsigned HOST_WIDE_INT regval = cum.num_args;
  int i;

  for (i = 0; i < 6; i++)
    regval |= ((int) cum.atypes[i]) << (i * 3 + 8);

  return GEN_INT (regval);
}
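
/* A worked example of the encoding above, assuming a call that passes
   one IEEE single-float and one integer argument: cum.num_args == 2,
   cum.atypes[0] == FS and cum.atypes[1] == I64 (per alpha_arg_type), so
   the loop computes

     regval = 2 | (FS << 8) | (I64 << 11);

   i.e. the argument count sits in bits 0-7 and each of the six 3-bit
   per-argument type fields starts at bit 8.  */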

/* Register the need for a (fake) .linkage entry for calls to function NAME.
   IS_LOCAL is 1 if this is for a definition, 0 if this is for a real call.
   Return a SYMBOL_REF suited to the call instruction.  */

rtx
alpha_need_linkage (const char *name, int is_local)
{
  splay_tree_node node;
  struct alpha_links *al;
  const char *target;
  tree id;

  if (name[0] == '*')
    name++;

  if (is_local)
    {
      struct alpha_funcs *cfaf;

      if (!alpha_funcs_tree)
        alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
                                               splay_tree_compare_pointers);

      cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));

      cfaf->links = 0;
      cfaf->num = ++alpha_funcs_num;

      splay_tree_insert (alpha_funcs_tree,
                         (splay_tree_key) current_function_decl,
                         (splay_tree_value) cfaf);
    }

  if (alpha_links_tree)
    {
      /* Is this name already defined?  */

      node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
      if (node)
        {
          al = (struct alpha_links *) node->value;
          if (is_local)
            {
              /* Defined here but external assumed.  */
              if (al->lkind == KIND_EXTERN)
                al->lkind = KIND_LOCAL;
            }
          else
            {
              /* Used here but unused assumed.  */
              if (al->lkind == KIND_UNUSED)
                al->lkind = KIND_LOCAL;
            }
          return al->linkage;
        }
    }
  else
    alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);

  al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
  name = ggc_strdup (name);

  /* Assume external if no definition.  */
  al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);

  /* Ensure we have an IDENTIFIER so assemble_name can mark it used
     and find the ultimate alias target like assemble_name.  */
  id = get_identifier (name);
  target = NULL;
  while (IDENTIFIER_TRANSPARENT_ALIAS (id))
    {
      id = TREE_CHAIN (id);
      target = IDENTIFIER_POINTER (id);
    }

  al->target = target ? target : name;
  al->linkage = gen_rtx_SYMBOL_REF (Pmode, name);

  splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
                     (splay_tree_value) al);

  return al->linkage;
}

/* Return a SYMBOL_REF representing the reference to the .linkage entry
   of function FUNC built for calls made from CFUNDECL.  LFLAG is 1 if
   this is the reference to the linkage pointer value, 0 if this is the
   reference to the function entry value.  RFLAG is 1 if this is a reduced
   reference (code address only), 0 if this is a full reference.  */

rtx
alpha_use_linkage (rtx func, tree cfundecl, int lflag, int rflag)
{
  splay_tree_node cfunnode;
  struct alpha_funcs *cfaf;
  struct alpha_links *al;
  const char *name = XSTR (func, 0);

  cfaf = (struct alpha_funcs *) 0;
  al = (struct alpha_links *) 0;

  cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
  cfaf = (struct alpha_funcs *) cfunnode->value;

  if (cfaf->links)
    {
      splay_tree_node lnode;

      /* Is this name already defined?  */

      lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
      if (lnode)
        al = (struct alpha_links *) lnode->value;
    }
  else
    cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);

  if (!al)
    {
      size_t name_len;
      size_t buflen;
      char *linksym;
      splay_tree_node node = 0;
      struct alpha_links *anl;

      if (name[0] == '*')
        name++;

      name_len = strlen (name);
      linksym = (char *) alloca (name_len + 50);

      al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
      al->num = cfaf->num;

      node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
      if (node)
        {
          anl = (struct alpha_links *) node->value;
          al->lkind = anl->lkind;
          name = anl->target;
        }

      sprintf (linksym, "$%d..%s..lk", cfaf->num, name);
      buflen = strlen (linksym);

      al->linkage = gen_rtx_SYMBOL_REF
        (Pmode, ggc_alloc_string (linksym, buflen + 1));

      splay_tree_insert (cfaf->links, (splay_tree_key) name,
                         (splay_tree_value) al);
    }

  if (rflag)
    al->rkind = KIND_CODEADDR;
  else
    al->rkind = KIND_LINKAGE;

  if (lflag)
    return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
  else
    return al->linkage;
}
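
/* A sketch of the naming scheme implemented above: a call to `foo' from
   the third function in the file uses an entry named `$3..foo..lk'.
   With LFLAG set, the caller receives a MEM 8 bytes past that symbol,
   which selects the second quadword of the linkage pair emitted by
   alpha_write_one_linkage below; with LFLAG clear it receives the symbol
   itself, i.e. the function entry slot.  */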

static int
alpha_write_one_linkage (splay_tree_node node, void *data)
{
  const char *const name = (const char *) node->key;
  struct alpha_links *link = (struct alpha_links *) node->value;
  FILE *stream = (FILE *) data;

  fprintf (stream, "$%d..%s..lk:\n", link->num, name);
  if (link->rkind == KIND_CODEADDR)
    {
      if (link->lkind == KIND_LOCAL)
        {
          /* Local and used */
          fprintf (stream, "\t.quad %s..en\n", name);
        }
      else
        {
          /* External and used, request code address.  */
          fprintf (stream, "\t.code_address %s\n", name);
        }
    }
  else
    {
      if (link->lkind == KIND_LOCAL)
        {
          /* Local and used, build linkage pair.  */
          fprintf (stream, "\t.quad %s..en\n", name);
          fprintf (stream, "\t.quad %s\n", name);
        }
      else
        {
          /* External and used, request linkage pair.  */
          fprintf (stream, "\t.linkage %s\n", name);
        }
    }

  return 0;
}
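
/* For reference, the four shapes the routine above can emit for an
   entry `$2..foo..lk' (read directly off the fprintf calls):

     local, full linkage:          .quad foo..en
                                   .quad foo
     external, full linkage:       .linkage foo
     local, code address only:     .quad foo..en
     external, code address only:  .code_address foo  */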

static void
alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
{
  splay_tree_node node;
  struct alpha_funcs *func;

  fprintf (stream, "\t.link\n");
  fprintf (stream, "\t.align 3\n");
  in_section = NULL;

  node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
  func = (struct alpha_funcs *) node->value;

  fputs ("\t.name ", stream);
  assemble_name (stream, funname);
  fputs ("..na\n", stream);
  ASM_OUTPUT_LABEL (stream, funname);
  fprintf (stream, "\t.pdesc ");
  assemble_name (stream, funname);
  fprintf (stream, "..en,%s\n",
           alpha_procedure_type == PT_STACK ? "stack"
           : alpha_procedure_type == PT_REGISTER ? "reg" : "null");

  if (func->links)
    {
      splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
      /* splay_tree_delete (func->links); */
    }
}

/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
vms_asm_named_section (const char *name, unsigned int flags,
                       tree decl ATTRIBUTE_UNUSED)
{
  fputc ('\n', asm_out_file);
  fprintf (asm_out_file, ".section\t%s", name);

  if (flags & SECTION_DEBUG)
    fprintf (asm_out_file, ",NOWRT");

  fputc ('\n', asm_out_file);
}

/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   Differs from default_ctors_section_asm_out_constructor in that the
   width of the .ctors entry is always 64 bits, rather than the 32 bits
   used by a normal pointer.  */

static void
vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (ctors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

static void
vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (dtors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}
#else

rtx
alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
                    int is_local ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

rtx
alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
                   tree cfundecl ATTRIBUTE_UNUSED,
                   int lflag ATTRIBUTE_UNUSED,
                   int rflag ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

#endif /* TARGET_ABI_OPEN_VMS */

#if TARGET_ABI_UNICOSMK

/* This evaluates to true if we do not know how to pass TYPE solely in
   registers.  This is the case for all arguments that do not fit in two
   registers.  */

static bool
unicosmk_must_pass_in_stack (enum machine_mode mode, const_tree type)
{
  if (type == NULL)
    return false;

  if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    return true;
  if (TREE_ADDRESSABLE (type))
    return true;

  return ALPHA_ARG_SIZE (mode, type, 0) > 2;
}

/* Define the offset between two registers, one to be eliminated, and the
   other its replacement, at the start of a routine.  */

int
unicosmk_initial_elimination_offset (int from, int to)
{
  int fixed_size;

  fixed_size = alpha_sa_size();
  if (fixed_size != 0)
    fixed_size += 48;

  if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return -fixed_size;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return 0;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return (ALPHA_ROUND (crtl->outgoing_args_size)
            + ALPHA_ROUND (get_frame_size()));
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return (ALPHA_ROUND (fixed_size)
            + ALPHA_ROUND (get_frame_size()
                           + crtl->outgoing_args_size));
  else
    gcc_unreachable ();
}
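
/* A worked example of the offsets above: with alpha_sa_size () == 16,
   fixed_size becomes 16 + 48 == 64.  Assuming a 32-byte frame and 16
   bytes of outgoing arguments, eliminating the frame pointer to the
   stack pointer yields 16 + 32 == 48, and eliminating the argument
   pointer to the stack pointer yields 64 + 48 == 112 (ALPHA_ROUND
   leaves these already-aligned values unchanged).  */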

/* Output the module name for .ident and .end directives. We have to strip
   directories and make sure that the module name starts with a letter
   or '$'.  */

static void
unicosmk_output_module_name (FILE *file)
{
  const char *name = lbasename (main_input_filename);
  unsigned len = strlen (name);
  char *clean_name = alloca (len + 2);
  char *ptr = clean_name;

  /* CAM only accepts module names that start with a letter or '$'. We
     prefix the module name with a '$' if necessary.  */

  if (!ISALPHA (*name))
    *ptr++ = '$';
  memcpy (ptr, name, len + 1);
  clean_symbol_name (clean_name);
  fputs (clean_name, file);
}

/* Output the definition of a common variable.  */

void
unicosmk_output_common (FILE *file, const char *name, int size, int align)
{
  tree name_tree;
  printf ("T3E__: common %s\n", name);

  in_section = NULL;
  fputs("\t.endp\n\n\t.psect ", file);
  assemble_name(file, name);
  fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
  fprintf(file, "\t.byte\t0:%d\n", size);

  /* Mark the symbol as defined in this module.  */
  name_tree = get_identifier (name);
  TREE_ASM_WRITTEN (name_tree) = 1;
}

#define SECTION_PUBLIC SECTION_MACH_DEP
#define SECTION_MAIN (SECTION_PUBLIC << 1)
static int current_section_align;

/* A get_unnamed_section callback for switching to the text section.  */

static void
unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  static int count = 0;
  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
}

/* A get_unnamed_section callback for switching to the data section.  */

static void
unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  static int count = 1;
  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
}

/* Implement TARGET_ASM_INIT_SECTIONS.

   The Cray assembler is really weird with respect to sections. It has only
   named sections and you can't reopen a section once it has been closed.
   This means that we have to generate unique names whenever we want to
   reenter the text or the data section.  */

static void
unicosmk_init_sections (void)
{
  text_section = get_unnamed_section (SECTION_CODE,
                                      unicosmk_output_text_section_asm_op,
                                      NULL);
  data_section = get_unnamed_section (SECTION_WRITE,
                                      unicosmk_output_data_section_asm_op,
                                      NULL);
  readonly_data_section = data_section;
}

static unsigned int
unicosmk_section_type_flags (tree decl, const char *name,
                             int reloc ATTRIBUTE_UNUSED)
{
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  if (!decl)
    return flags;

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      if (align_functions_log > current_section_align)
        current_section_align = align_functions_log;

      if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
        flags |= SECTION_MAIN;
    }
  else
    current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);

  if (TREE_PUBLIC (decl))
    flags |= SECTION_PUBLIC;

  return flags;
}

/* Generate a section name for decl and associate it with the
   declaration.  */

static void
unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;
  int len;

  gcc_assert (decl);

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = default_strip_name_encoding (name);
  len = strlen (name);

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      char *string;

      /* It is essential that we prefix the section name here because
         otherwise the section names generated for constructors and
         destructors confuse collect2.  */

      string = alloca (len + 6);
      sprintf (string, "code@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
  else if (TREE_PUBLIC (decl))
    DECL_SECTION_NAME (decl) = build_string (len, name);
  else
    {
      char *string;

      string = alloca (len + 6);
      sprintf (string, "data@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
}

/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
unicosmk_asm_named_section (const char *name, unsigned int flags,
                            tree decl ATTRIBUTE_UNUSED)
{
  const char *kind;

  /* Close the previous section.  */

  fputs ("\t.endp\n\n", asm_out_file);

  /* Find out what kind of section we are opening.  */

  if (flags & SECTION_MAIN)
    fputs ("\t.start\tmain\n", asm_out_file);

  if (flags & SECTION_CODE)
    kind = "code";
  else if (flags & SECTION_PUBLIC)
    kind = "common";
  else
    kind = "data";

  if (current_section_align != 0)
    fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
             current_section_align, kind);
  else
    fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
}

static void
unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
{
  if (DECL_P (decl)
      && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
    unicosmk_unique_section (decl, 0);
}

/* Output an alignment directive. We have to use the macro 'gcc@code@align'
   in code sections because .align fills unused space with zeroes.  */

void
unicosmk_output_align (FILE *file, int align)
{
  if (inside_function)
    fprintf (file, "\tgcc@code@align\t%d\n", align);
  else
    fprintf (file, "\t.align\t%d\n", align);
}

/* Add a case vector to the current function's list of deferred case
   vectors. Case vectors have to be put into a separate section because CAM
   does not allow data definitions in code sections.  */

void
unicosmk_defer_case_vector (rtx lab, rtx vec)
{
  struct machine_function *machine = cfun->machine;

  vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
  machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
                                          machine->addr_list);
}

/* Output a case vector.  */

static void
unicosmk_output_addr_vec (FILE *file, rtx vec)
{
  rtx lab  = XEXP (vec, 0);
  rtx body = XEXP (vec, 1);
  int vlen = XVECLEN (body, 0);
  int idx;

  (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));

  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_VEC_ELT
        (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
    }
}

/* Output current function's deferred case vectors.  */

static void
unicosmk_output_deferred_case_vectors (FILE *file)
{
  struct machine_function *machine = cfun->machine;
  rtx t;

  if (machine->addr_list == NULL_RTX)
    return;

  switch_to_section (data_section);
  for (t = machine->addr_list; t; t = XEXP (t, 1))
    unicosmk_output_addr_vec (file, XEXP (t, 0));
}

/* Generate the name of the SSIB section for the current function.  */

#define SSIB_PREFIX "__SSIB_"
#define SSIB_PREFIX_LEN 7

static const char *
unicosmk_ssib_name (void)
{
  /* This is ok since CAM won't be able to deal with names longer than that
     anyway.  */

  static char name[256];

  rtx x;
  const char *fnname;
  int len;

  x = DECL_RTL (cfun->decl);
  gcc_assert (MEM_P (x));
  x = XEXP (x, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF);
  fnname = XSTR (x, 0);

  len = strlen (fnname);
  if (len + SSIB_PREFIX_LEN > 255)
    len = 255 - SSIB_PREFIX_LEN;

  strcpy (name, SSIB_PREFIX);
  strncpy (name + SSIB_PREFIX_LEN, fnname, len);
  name[len + SSIB_PREFIX_LEN] = 0;

  return name;
}

/* Set up the dynamic subprogram information block (DSIB) and update the
   frame pointer register ($15) for subroutines which have a frame. If the
   subroutine doesn't have a frame, simply increment $15.  */

static void
unicosmk_gen_dsib (unsigned long *imaskP)
{
  if (alpha_procedure_type == PT_STACK)
    {
      const char *ssib_name;
      rtx mem;

      /* Allocate 64 bytes for the DSIB.  */

      FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
                                  GEN_INT (-64))));
      emit_insn (gen_blockage ());

      /* Save the return address.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
      (*imaskP) &= ~(1UL << REG_RA);

      /* Save the old frame pointer.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
      (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);

      emit_insn (gen_blockage ());

      /* Store the SSIB pointer.  */

      ssib_name = ggc_strdup (unicosmk_ssib_name ());
      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
      set_mem_alias_set (mem, alpha_sr_alias_set);

      FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
                           gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));

      /* Save the CIW index.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));

      emit_insn (gen_blockage ());

      /* Set the new frame pointer.  */
      FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
                                  stack_pointer_rtx, GEN_INT (64))));
    }
  else
    {
      /* Increment the frame pointer register to indicate that we do not
         have a frame.  */
      emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
                             hard_frame_pointer_rtx, const1_rtx));
    }
}
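
/* For reference, the DSIB layout built above, relative to the new $sp
   after the 64-byte adjustment:

     +56  return address ($26)
     +48  previous frame pointer ($15)
     +32  pointer to this function's SSIB
     +24  CIW index ($25)

   $15 is then set to $sp + 64, i.e. the original stack pointer, so the
   frame pointer addresses the byte just above the block.  */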

/* Output the static subroutine information block for the current
   function.  */

static void
unicosmk_output_ssib (FILE *file, const char *fnname)
{
  int len;
  int i;
  rtx x;
  rtx ciw;
  struct machine_function *machine = cfun->machine;

  in_section = NULL;
  fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
           unicosmk_ssib_name ());

  /* Some required stuff and the function name length.  */

  len = strlen (fnname);
  fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);

  /* Saved registers
     ??? We don't do that yet.  */

  fputs ("\t.quad\t0\n", file);

  /* Function address.  */

  fputs ("\t.quad\t", file);
  assemble_name (file, fnname);
  putc ('\n', file);

  fputs ("\t.quad\t0\n", file);
  fputs ("\t.quad\t0\n", file);

  /* Function name.
     ??? We do it the same way Cray CC does it but this could be
     simplified.  */

  for( i = 0; i < len; i++ )
    fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
  if( (len % 8) == 0 )
    fputs ("\t.quad\t0\n", file);
  else
    fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);

  /* All call information words used in the function.  */

  for (x = machine->first_ciw; x; x = XEXP (x, 1))
    {
      ciw = XEXP (x, 0);
#if HOST_BITS_PER_WIDE_INT == 32
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
               CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
#else
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
#endif
    }
}

/* Add a call information word (CIW) to the list of the current function's
   CIWs and return its index.

   X is a CONST_INT or CONST_DOUBLE representing the CIW.  */

rtx
unicosmk_add_call_info_word (rtx x)
{
  rtx node;
  struct machine_function *machine = cfun->machine;

  node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
  if (machine->first_ciw == NULL_RTX)
    machine->first_ciw = node;
  else
    XEXP (machine->last_ciw, 1) = node;

  machine->last_ciw = node;
  ++machine->ciw_count;

  return GEN_INT (machine->ciw_count
                  + strlen (current_function_name ())/8 + 5);
}
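
/* A worked example of the index above, matching the SSIB layout emitted
   by unicosmk_output_ssib: the block starts with five quadwords (header,
   saved-register word, function address and two zero words) followed by
   strlen (name) / 8 + 1 quadwords of name, so for a 3-character function
   name the first CIW (ciw_count == 1) gets index 1 + 3/8 + 5 == 6, the
   quadword offset at which it is written.  */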

/* The Cray assembler doesn't accept extern declarations for symbols which
   are defined in the same file. We have to keep track of all global
   symbols which are referenced and/or defined in a source file and output
   extern declarations for those which are referenced but not defined at
   the end of file.  */

/* List of identifiers for which an extern declaration might have to be
   emitted.  */
/* FIXME: needs to use GC, so it can be saved and restored for PCH.  */

struct unicosmk_extern_list
{
  struct unicosmk_extern_list *next;
  const char *name;
};

static struct unicosmk_extern_list *unicosmk_extern_head = 0;

/* Output extern declarations which are required for every asm file.  */

static void
unicosmk_output_default_externs (FILE *file)
{
  static const char *const externs[] =
    { "__T3E_MISMATCH" };

  int i;
  int n;

  n = ARRAY_SIZE (externs);

  for (i = 0; i < n; i++)
    fprintf (file, "\t.extern\t%s\n", externs[i]);
}

/* Output extern declarations for global symbols which have been
   referenced but not defined.  */

static void
unicosmk_output_externs (FILE *file)
{
  struct unicosmk_extern_list *p;
  const char *real_name;
  int len;
  tree name_tree;

  len = strlen (user_label_prefix);
  for (p = unicosmk_extern_head; p != 0; p = p->next)
    {
      /* We have to strip the encoding and possibly remove user_label_prefix
         from the identifier in order to handle -fleading-underscore and
         explicit asm names correctly (cf. gcc.dg/asm-names-1.c).  */
      real_name = default_strip_name_encoding (p->name);
      if (len && p->name[0] == '*'
          && !memcmp (real_name, user_label_prefix, len))
        real_name += len;

      name_tree = get_identifier (real_name);
      if (! TREE_ASM_WRITTEN (name_tree))
        {
          TREE_ASM_WRITTEN (name_tree) = 1;
          fputs ("\t.extern\t", file);
          assemble_name (file, p->name);
          putc ('\n', file);
        }
    }
}

/* Record an extern.  */

void
unicosmk_add_extern (const char *name)
{
  struct unicosmk_extern_list *p;

  p = (struct unicosmk_extern_list *)
       xmalloc (sizeof (struct unicosmk_extern_list));
  p->next = unicosmk_extern_head;
  p->name = name;
  unicosmk_extern_head = p;
}

/* The Cray assembler generates incorrect code if identifiers which
   conflict with register names are used as instruction operands. We have
   to replace such identifiers with DEX expressions.  */

/* Structure to collect identifiers which have been replaced by DEX
   expressions.  */
/* FIXME: needs to use GC, so it can be saved and restored for PCH.  */

struct unicosmk_dex {
  struct unicosmk_dex *next;
  const char *name;
};

/* List of identifiers which have been replaced by DEX expressions. The DEX
   number is determined by the position in the list.  */

static struct unicosmk_dex *unicosmk_dex_list = NULL;

/* The number of elements in the DEX list.  */

static int unicosmk_dex_count = 0;

/* Check if NAME must be replaced by a DEX expression.  */

static int
unicosmk_special_name (const char *name)
{
  if (name[0] == '*')
    ++name;

  if (name[0] == '$')
    ++name;

  if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
    return 0;

  switch (name[1])
    {
    case '1':  case '2':
      return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));

    case '3':
      return (name[2] == '\0'
               || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));

    default:
      return (ISDIGIT (name[1]) && name[2] == '\0');
    }
}
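
/* Examples of what the test above accepts and rejects: "r5", "F19",
   "$f31" and "r30" all collide with CAM register names and must be
   DEXed, while "r32" (a second digit after '3' must be 0 or 1), "f"
   (no digit at all) and "x10" (wrong leading letter) are left alone.  */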

/* Return the DEX number if X must be replaced by a DEX expression and 0
   otherwise.  */

static int
unicosmk_need_dex (rtx x)
{
  struct unicosmk_dex *dex;
  const char *name;
  int i;

  if (GET_CODE (x) != SYMBOL_REF)
    return 0;

  name = XSTR (x,0);
  if (! unicosmk_special_name (name))
    return 0;

  i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      if (! strcmp (name, dex->name))
        return i;
      --i;
    }

  dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
  dex->name = name;
  dex->next = unicosmk_dex_list;
  unicosmk_dex_list = dex;

  ++unicosmk_dex_count;
  return unicosmk_dex_count;
}

/* Output the DEX definitions for this file.  */

static void
unicosmk_output_dex (FILE *file)
{
  struct unicosmk_dex *dex;
  int i;

  if (unicosmk_dex_list == NULL)
    return;

  fprintf (file, "\t.dexstart\n");

  i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      fprintf (file, "\tDEX (%d) = ", i);
      assemble_name (file, dex->name);
      putc ('\n', file);
      --i;
    }

  fprintf (file, "\t.dexend\n");
}

/* Output text to appear at the beginning of an assembler file.  */

static void
unicosmk_file_start (void)
{
  int i;

  fputs ("\t.ident\t", asm_out_file);
  unicosmk_output_module_name (asm_out_file);
  fputs ("\n\n", asm_out_file);

  /* The Unicos/Mk assembler uses different register names. Instead of trying
     to support them, we simply use micro definitions.  */

  /* CAM has different register names: rN for the integer register N and fN
     for the floating-point register N. Instead of trying to use these in
     alpha.md, we define the symbols $N and $fN to refer to the appropriate
     register.  */

  for (i = 0; i < 32; ++i)
    fprintf (asm_out_file, "$%d <- r%d\n", i, i);

  for (i = 0; i < 32; ++i)
    fprintf (asm_out_file, "$f%d <- f%d\n", i, i);

  putc ('\n', asm_out_file);

  /* The .align directive fills unused space with zeroes, which does not work
     in code sections. We define the macro 'gcc@code@align' which uses nops
     instead. Note that it assumes that code sections always have the
     biggest possible alignment since . refers to the current offset from
     the beginning of the section.  */

  fputs ("\t.macro gcc@code@align n\n", asm_out_file);
  fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
  fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
  fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
  fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
  fputs ("\tbis r31,r31,r31\n", asm_out_file);
  fputs ("\t.endr\n", asm_out_file);
  fputs ("\t.endif\n", asm_out_file);
  fputs ("\t.endm gcc@code@align\n\n", asm_out_file);

  /* Output extern declarations which should always be visible.  */
  unicosmk_output_default_externs (asm_out_file);

  /* Open a dummy section. We always need to be inside a section for the
     section-switching code to work correctly.
     ??? This should be a module id or something like that. I still have to
     figure out what the rules for those are.  */
  fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
}
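
/* A worked example of the macro emitted above: `gcc@code@align 4' at an
   offset 8 bytes into the section computes gcc@n@bytes == 16 and
   gcc@here == 8, so the .repeat loop emits (16 - 8) / 4 == 2 copies of
   `bis r31,r31,r31' (the canonical Alpha nop) to reach the next 16-byte
   boundary.  */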

/* Output text to appear at the end of an assembler file. This includes all
   pending extern declarations and DEX expressions.  */

static void
unicosmk_file_end (void)
{
  fputs ("\t.endp\n\n", asm_out_file);

  /* Output all pending externs.  */

  unicosmk_output_externs (asm_out_file);

  /* Output dex definitions used for functions whose names conflict with
     register names.  */

  unicosmk_output_dex (asm_out_file);

  fputs ("\t.end\t", asm_out_file);
  unicosmk_output_module_name (asm_out_file);
  putc ('\n', asm_out_file);
}

#else

static void
unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
{}

static void
unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
{}

static void
unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
                      const char * fnname ATTRIBUTE_UNUSED)
{}

rtx
unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

static int
unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
{
  return 0;
}

#endif /* TARGET_ABI_UNICOSMK */

static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_UNICOSMK)
    {
      /* Prevent gcc from generating calls to __divsi3.  */
      set_optab_libfunc (sdiv_optab, SImode, 0);
      set_optab_libfunc (udiv_optab, SImode, 0);

      /* Use the functions provided by the system library
         for DImode integer division.  */
      set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
      set_optab_libfunc (udiv_optab, DImode, "$uldiv");
    }
  else if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
         remainder.  */
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
      abort_libfunc = init_one_libfunc ("decc$abort");
      memcmp_libfunc = init_one_libfunc ("decc$memcmp");
#ifdef MEM_LIBFUNCS_INIT
      MEM_LIBFUNCS_INIT;
#endif
    }
}


/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_CAN_ELIMINATE
# define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
#endif

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p

#if TARGET_ABI_UNICOSMK
# undef TARGET_INSERT_ATTRIBUTES
# define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
# undef TARGET_ASM_UNIQUE_SECTION
# define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
#undef TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
# undef TARGET_ASM_GLOBALIZE_LABEL
# define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
# undef TARGET_MUST_PASS_IN_STACK
# define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
#endif

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#if !defined (OBJECT_FORMAT_ELF) || TARGET_ABI_OPEN_VMS
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif

#ifdef OBJECT_FORMAT_ELF
#undef  TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK  alpha_elf_reloc_rw_mask
#undef  TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION  alpha_elf_select_rtx_section
#undef  TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS  alpha_elf_section_type_flags
#endif

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address

#if TARGET_ABI_UNICOSMK
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START unicosmk_file_start
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END unicosmk_file_end
#else
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
#endif

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef  TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem

#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT alpha_trampoline_init

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD alpha_secondary_reload

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start

/* The Alpha architecture does not require sequential consistency.  See
   http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
   for an example of how it can be violated in practice.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION alpha_handle_option

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE alpha_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;


#include "gt-alpha.h"
