/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "obstack.h"
#include "except.h"
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "debug.h"
#include "langhooks.h"
#include "splay-tree.h"
#include "cfglayout.h"
#include "gimple.h"
#include "tree-flow.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"
#include "df.h"
#include "libfuncs.h"
#include "opts.h"

/* Specify which cpu to schedule for.  */
enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */
enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};

/* Specify how accurate floating-point traps need to be.  */

enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */

enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */

enum alpha_fp_trap_mode alpha_fptm;

/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */

static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */

int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */

static int alpha_function_needs_gp;

/* The assembler name of the current function.  */

static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;

/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};

static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),          /* fp_add */
    COSTS_N_INSNS (6),          /* fp_mult */
    COSTS_N_INSNS (34),         /* fp_div_sf */
    COSTS_N_INSNS (63),         /* fp_div_df */
    COSTS_N_INSNS (23),         /* int_mult_si */
    COSTS_N_INSNS (23),         /* int_mult_di */
    COSTS_N_INSNS (2),          /* int_shift */
    COSTS_N_INSNS (2),          /* int_cmov */
    COSTS_N_INSNS (97),         /* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),          /* fp_add */
    COSTS_N_INSNS (4),          /* fp_mult */
    COSTS_N_INSNS (15),         /* fp_div_sf */
    COSTS_N_INSNS (22),         /* fp_div_df */
    COSTS_N_INSNS (8),          /* int_mult_si */
    COSTS_N_INSNS (12),         /* int_mult_di */
    COSTS_N_INSNS (1) + 1,      /* int_shift */
    COSTS_N_INSNS (1),          /* int_cmov */
    COSTS_N_INSNS (83),         /* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),          /* fp_add */
    COSTS_N_INSNS (4),          /* fp_mult */
    COSTS_N_INSNS (12),         /* fp_div_sf */
    COSTS_N_INSNS (15),         /* fp_div_df */
    COSTS_N_INSNS (7),          /* int_mult_si */
    COSTS_N_INSNS (7),          /* int_mult_di */
    COSTS_N_INSNS (1),          /* int_shift */
    COSTS_N_INSNS (2),          /* int_cmov */
    COSTS_N_INSNS (86),         /* int_div */
  },
};

/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one of them.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),            /* fp_add */
  COSTS_N_INSNS (1),            /* fp_mult */
  COSTS_N_INSNS (1),            /* fp_div_sf */
  COSTS_N_INSNS (1) + 1,        /* fp_div_df */
  COSTS_N_INSNS (1) + 1,        /* int_mult_si */
  COSTS_N_INSNS (1) + 2,        /* int_mult_di */
  COSTS_N_INSNS (1),            /* int_shift */
  COSTS_N_INSNS (1),            /* int_cmov */
  COSTS_N_INSNS (6),            /* int_div */
};

/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS
#define NUM_ARGS crtl->args.info.num_args
#else
#define NUM_ARGS crtl->args.info
#endif

#define REG_PV 27
#define REG_RA 26

/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *);
static bool vms_valid_pointer_mode (enum machine_mode);
#else
#define vms_patch_builtins()  gcc_unreachable()
#endif

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
alpha_mangle_type (const_tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif

/* Parse target option strings.  */

static void
alpha_option_override (void)
{
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
  } cpu_table[] = {
    { "ev4",    PROCESSOR_EV4, 0 },
    { "ev45",   PROCESSOR_EV4, 0 },
    { "21064",  PROCESSOR_EV4, 0 },
    { "ev5",    PROCESSOR_EV5, 0 },
    { "21164",  PROCESSOR_EV5, 0 },
    { "ev56",   PROCESSOR_EV5, MASK_BWX },
    { "21164a", PROCESSOR_EV5, MASK_BWX },
    { "pca56",  PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "ev6",    PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "21264",  PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "ev67",   PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
    { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
  };

  int const ct_size = ARRAY_SIZE (cpu_table);
  int i;

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

  /* Default to full IEEE compliance mode for Go language.  */
  if (strcmp (lang_hooks.name, "GNU Go") == 0
      && !(target_flags_explicit & MASK_IEEE))
    target_flags |= MASK_IEEE;

  alpha_fprm = ALPHA_FPRM_NORM;
  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  if (TARGET_IEEE)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SU;
    }
  if (TARGET_IEEE_WITH_INEXACT)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SUI;
    }

  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
        alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
        alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
        alpha_tp = ALPHA_TP_INSN;
      else
        error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
    }

  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
        alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
        alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
        alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string,"d"))
        alpha_fprm = ALPHA_FPRM_DYN;
      else
        error ("bad value %qs for -mfp-rounding-mode switch",
               alpha_fprm_string);
    }

  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
        alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
        alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
        alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
        alpha_fptm = ALPHA_FPTM_SUI;
      else
        error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
    }

  if (alpha_cpu_string)
    {
      for (i = 0; i < ct_size; i++)
        if (! strcmp (alpha_cpu_string, cpu_table [i].name))
          {
            alpha_tune = alpha_cpu = cpu_table [i].processor;
            target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
            target_flags |= cpu_table [i].flags;
            break;
          }
      if (i == ct_size)
        error ("bad value %qs for -mcpu switch", alpha_cpu_string);
    }

  if (alpha_tune_string)
    {
      for (i = 0; i < ct_size; i++)
        if (! strcmp (alpha_tune_string, cpu_table [i].name))
          {
            alpha_tune = cpu_table [i].processor;
            break;
          }
      if (i == ct_size)
        error ("bad value %qs for -mtune switch", alpha_tune_string);
    }

  /* Do some sanity checks on the above options.  */

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
    {
      warning (0, "fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (alpha_cpu == PROCESSOR_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
         arithmetic traps.  Which means we can do software completion
         without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }

  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
        {
          warning (0, "rounding mode not supported for VAX floats");
          alpha_fprm = ALPHA_FPRM_NORM;
        }
      if (alpha_fptm == ALPHA_FPTM_SUI)
        {
          warning (0, "trap mode not supported for VAX floats");
          alpha_fptm = ALPHA_FPTM_SU;
        }
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
        warning (0, "128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }

  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
        && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
             && ISDIGIT ((unsigned char)alpha_mlat_string[1])
             && alpha_mlat_string[2] == '\0')
      {
        static int const cache_latency[][4] =
        {
          { 3, 30, -1 },        /* ev4 -- Bcache is a guess */
          { 2, 12, 38 },        /* ev5 -- Bcache from PC164 LMbench numbers */
          { 3, 12, 30 },        /* ev6 -- Bcache from DS20 LMbench.  */
        };

        lat = alpha_mlat_string[1] - '0';
        if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
          {
            warning (0, "L%d cache latency unknown for %s",
                     lat, alpha_cpu_name[alpha_tune]);
            lat = 3;
          }
        else
          lat = cache_latency[alpha_tune][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
        /* Most current memories have about 370ns latency.  This is
           a reasonable guess for a fast cpu.  */
        lat = 150;
      }
    else
      {
        warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
        lat = 3;
      }

    alpha_memory_latency = lat;
  }

  /* Default the definition of "small data" to 8 bytes.  */
  if (!global_options_set.x_g_switch_value)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize and also if
     we are writing ECOFF symbols to work around a bug in DEC's assembler.  */
  if (optimize > 0 && write_symbols != SDB_DEBUG)
    {
      if (align_loops <= 0)
        align_loops = 16;
      if (align_jumps <= 0)
        align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;

  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
}

/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
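
/* For example, 0x00000000ffffffff and 0xff00ff0000000000 are zap masks
   (every byte is either 0x00 or 0xff), so zap_mask returns 1 for them,
   while it returns 0 for something like 0x0000000000000f00.  */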

/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
    default:
      gcc_unreachable ();
    }
}

/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (REG_P (tmp)
          && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
        {
          op = reg_equiv_memory_loc (REGNO (tmp));
          if (op == 0)
            return 0;
        }
    }
  return op;
}

/* The scalar modes supported differ from the default check-what-c-supports
   version in that sometimes TFmode is available even when long double
   indicates only DFmode.  */

static bool
alpha_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode: /* via optabs.c */
      return true;

    case SFmode:
    case DFmode:
      return true;

    case TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}

/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (enum machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}

/* Return 1 if this function can directly return via $26.  */

int
direct_return (void)
{
  return (TARGET_ABI_OSF
          && reload_completed
          && alpha_sa_size () == 0
          && get_frame_size () == 0
          && crtl->outgoing_args_size == 0
          && crtl->args.pretend_args_size == 0);
}

/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return TLS_MODEL_NONE;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}

/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (const_tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}

/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (const_tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".sdata") == 0
          || strcmp (section, ".sbss") == 0)
        return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
         in sdata because it might be too big when completed.  */
      if (size > 0 && size <= g_switch_value)
        return true;
    }

  return false;
}
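
/* Combined with the 8-byte default for g_switch_value established in
   alpha_option_override, this puts variables explicitly placed in .sdata
   or .sbss, and other non-string, non-function objects with a known size
   of at most 8 bytes and no explicit section, into the small data area.  */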

#if TARGET_ABI_OPEN_VMS
static bool
vms_valid_pointer_mode (enum machine_mode mode)
{
  return (mode == SImode || mode == DImode);
}

static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname [symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF   \
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST                 \
       && GET_CODE (XEXP (X, 0)) == PLUS     \
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif

/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */

static bool
alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && CONST_INT_P (XEXP (x, 1))
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
          < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x)
      && (strict
          ? STRICT_REG_OK_FOR_BASE_P (x)
          : NONSTRICT_REG_OK_FOR_BASE_P (x)))
    return true;

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
          && (GET_MODE_SIZE (GET_MODE (x))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
        x = SUBREG_REG (x);

      if (REG_P (x))
        {
          if (! strict
              && NONSTRICT_REG_OK_FP_BASE_P (x)
              && CONST_INT_P (ofs))
            return true;
          if ((strict
               ? STRICT_REG_OK_FOR_BASE_P (x)
               : NONSTRICT_REG_OK_FOR_BASE_P (x))
              && CONSTANT_ADDRESS_P (ofs))
            return true;
        }
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as are small
     data symbols.  Avoid explicit relocations of modes larger than word
     mode since i.e. $LC0+8($1) can fold around +/- 32k offset.  */
  else if (TARGET_EXPLICIT_RELOCS
           && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (small_symbolic_operand (x, Pmode))
        return true;

      if (GET_CODE (x) == LO_SUM)
        {
          rtx ofs = XEXP (x, 1);
          x = XEXP (x, 0);

          /* Discard non-paradoxical subregs.  */
          if (GET_CODE (x) == SUBREG
              && (GET_MODE_SIZE (GET_MODE (x))
                  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
            x = SUBREG_REG (x);

          /* Must have a valid base register.  */
          if (! (REG_P (x)
                 && (strict
                     ? STRICT_REG_OK_FOR_BASE_P (x)
                     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
            return false;

          /* The symbol must be local.  */
          if (local_symbolic_operand (ofs, Pmode)
              || dtp32_symbolic_operand (ofs, Pmode)
              || tp32_symbolic_operand (ofs, Pmode))
            return true;
        }
    }

  return false;
}

/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

static rtx
alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1))
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
                               XEXP (XEXP (XEXP (x, 1), 0), 0),
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }

  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
     Avoid modes larger than word mode since i.e. $LC0+8($1) can fold
     around +/- 32k offset.  */
  if (TARGET_EXPLICIT_RELOCS
      && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
      && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, insn, dest, seq;

      switch (tls_symbolic_operand_type (x))
        {
        case TLS_MODEL_NONE:
          break;

        case TLS_MODEL_GLOBAL_DYNAMIC:
          start_sequence ();

          r0 = gen_rtx_REG (Pmode, 0);
          r16 = gen_rtx_REG (Pmode, 16);
          tga = get_tls_get_addr ();
          dest = gen_reg_rtx (Pmode);
          seq = GEN_INT (alpha_next_sequence_number++);

          emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
          insn = gen_call_value_osf_tlsgd (r0, tga, seq);
          insn = emit_call_insn (insn);
          RTL_CONST_CALL_P (insn) = 1;
          use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

          insn = get_insns ();
          end_sequence ();

          emit_libcall_block (insn, dest, r0, x);
          return dest;

        case TLS_MODEL_LOCAL_DYNAMIC:
          start_sequence ();

          r0 = gen_rtx_REG (Pmode, 0);
          r16 = gen_rtx_REG (Pmode, 16);
          tga = get_tls_get_addr ();
          scratch = gen_reg_rtx (Pmode);
          seq = GEN_INT (alpha_next_sequence_number++);

          emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
          insn = gen_call_value_osf_tlsldm (r0, tga, seq);
          insn = emit_call_insn (insn);
          RTL_CONST_CALL_P (insn) = 1;
          use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

          insn = get_insns ();
          end_sequence ();

          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                UNSPEC_TLSLDM_CALL);
          emit_libcall_block (insn, scratch, r0, eqv);

          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);

          if (alpha_tls_size == 64)
            {
              dest = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
              emit_insn (gen_adddi3 (dest, dest, scratch));
              return dest;
            }
          if (alpha_tls_size == 32)
            {
              insn = gen_rtx_HIGH (Pmode, eqv);
              insn = gen_rtx_PLUS (Pmode, scratch, insn);
              scratch = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
            }
          return gen_rtx_LO_SUM (Pmode, scratch, eqv);

        case TLS_MODEL_INITIAL_EXEC:
          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);
          tp = gen_reg_rtx (Pmode);
          scratch = gen_reg_rtx (Pmode);
          dest = gen_reg_rtx (Pmode);

          emit_insn (gen_load_tp (tp));
          emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
          emit_insn (gen_adddi3 (dest, tp, scratch));
          return dest;

        case TLS_MODEL_LOCAL_EXEC:
          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);
          tp = gen_reg_rtx (Pmode);

          emit_insn (gen_load_tp (tp));
          if (alpha_tls_size == 32)
            {
              insn = gen_rtx_HIGH (Pmode, eqv);
              insn = gen_rtx_PLUS (Pmode, tp, insn);
              tp = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
            }
          return gen_rtx_LO_SUM (Pmode, tp, eqv);

        default:
          gcc_unreachable ();
        }

      if (local_symbolic_operand (x, Pmode))
        {
          if (small_symbolic_operand (x, Pmode))
            return x;
          else
            {
              if (can_create_pseudo_p ())
                scratch = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, scratch,
                                      gen_rtx_HIGH (Pmode, x)));
              return gen_rtx_LO_SUM (Pmode, scratch, x);
            }
        }
    }

  return NULL;

 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
                               (!can_create_pseudo_p () ? scratch : NULL_RTX),
                               1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
                               (!can_create_pseudo_p () ? scratch : NULL_RTX),
                               1, OPTAB_LIB_WIDEN);

    return plus_constant (x, low);
  }
}
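
/* The split_addend code above peels a signed 16-bit low part and a signed
   32-bit high part off ADDEND so that each piece fits the displacement
   fields of lda and ldah.  For example, with an addend of 0x12348000 the
   low part is -0x8000 and the high part is 0x12350000, giving the address
   (plus (plus reg 0x12350000) -0x8000).  */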


/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  Return X or the new, valid address.  */

static rtx
alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                          enum machine_mode mode)
{
  rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
  return new_x ? new_x : x;
}

/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}

/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}

int
some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  return small_symbolic_operand (x, Pmode) != 0;
}

static int
split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  if (small_symbolic_operand (x, Pmode))
    {
      x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
      *px = x;
      return -1;
    }

  return 0;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
  return x;
}

/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}


/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
                                 enum machine_mode mode ATTRIBUTE_UNUSED,
                                 int opnum, int type,
                                 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && REG_P (XEXP (XEXP (x, 0), 0))
      && CONST_INT_P (XEXP (XEXP (x, 0), 1))
      && CONST_INT_P (XEXP (x, 1)))
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
        = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
        return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
         in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
                        gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
                                      GEN_INT (high)),
                        GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      return x;
    }

  return NULL_RTX;
}
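
/* For instance, an address of the form reg+0x12345 is split into a high
   part of 0x10000, which is reloaded into a base register with a single
   ldah, and a low part of 0x2345, which stays as the in-range displacement
   of the memory reference.  */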

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
                 bool speed)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (!speed)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_tune];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
         nearly anywhere with no cost.  If it is a valid operand for an
         ADD or AND, likewise return 0 if we know it will be used in that
         context.  Otherwise, return 2 since it might be used there later.
         All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
        {
          *total = 0;
          return true;
        }
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
        *total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
               || (outer_code == AND && and_operand (x, VOIDmode)))
        *total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
        *total = 2;
      else
        *total = COSTS_N_INSNS (2);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
        *total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
        *total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
        /* Estimate of cost for call_pal rduniq.  */
        /* ??? How many insns do we emit here?  More than one...  */
        *total = COSTS_N_INSNS (15);
      else
        /* Otherwise we do a load from the GOT.  */
        *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
        *total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
               && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
        {
          *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
                              (enum rtx_code) outer_code, opno, speed)
                    + rtx_cost (XEXP (x, 1),
                                (enum rtx_code) outer_code, opno, speed)
                    + COSTS_N_INSNS (1));
          return true;
        }
      return false;

    case MULT:
      if (float_mode_p)
        *total = cost_data->fp_mult;
      else if (mode == DImode)
        *total = cost_data->int_mult_di;
      else
        *total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) <= 3)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
        *total = cost_data->fp_add;
      else
        *total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
        *total = cost_data->int_div;
      else if (mode == SFmode)
        *total = cost_data->fp_div_sf;
      else
        *total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
          return false;
        }
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    case FLOAT_EXTEND:
      if (MEM_P (XEXP (x, 0)))
        *total = 0;
      else
        *total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}

/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  SCRATCH is a free register for use in reloading out
   of range stack slots.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT disp, offset;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  else
    disp = 0;

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
  if (MEM_ALIGN (ref) >= 32)
    offset = 0;
  else
    offset = disp & 3;

  /* The location should not cross aligned word boundary.  */
  gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
              <= GET_MODE_SIZE (SImode));

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  offset *= BITS_PER_UNIT;
  *pbitnum = GEN_INT (offset);
}
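
/* For example, an HImode reference at base+6 with no alignment information
   has a byte offset of 2 within its aligned word: *PALIGNED_MEM becomes
   the SImode word at base+4 and *PBITNUM is 16, the bit position of the
   halfword within that (little-endian) word.  */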

/* Similar, but just get the address.  Handle the two reload cases.  */

rtx
get_unaligned_address (rtx ref)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset);
}

/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
   X is always returned in a register.  */

rtx
get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
{
  if (GET_CODE (addr) == PLUS)
    {
      ofs += INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
}
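
/* E.g. for ADDR == (plus r1 5) and OFS == 6 this folds the displacement
   and returns r1 + ((5 + 6) & 7) == r1 + 3, which has the same low three
   address bits as r1 + 11.  */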

/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of RCLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class(rtx x, enum reg_class rclass)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return rclass;

  /* These sorts of constants we can easily drop to memory.  */
  if (CONST_INT_P (x)
      || GET_CODE (x) == CONST_DOUBLE
      || GET_CODE (x) == CONST_VECTOR)
    {
      if (rclass == FLOAT_REGS)
        return NO_REGS;
      if (rclass == ALL_REGS)
        return GENERAL_REGS;
      return rclass;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (rclass == ALL_REGS ? GENERAL_REGS : rclass);

  return rclass;
}

/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.  */

static reg_class_t
alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                        enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Loading and storing HImode or QImode values to and from memory
     usually requires a scratch register.  */
  if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
    {
      if (any_memory_operand (x, mode))
        {
          if (in_p)
            {
              if (!aligned_memory_operand (x, mode))
                sri->icode = direct_optab_handler (reload_in_optab, mode);
            }
          else
            sri->icode = direct_optab_handler (reload_out_optab, mode);
          return NO_REGS;
        }
    }

  /* We also cannot do integral arithmetic into FP regs, as might result
     from register elimination into a DImode fp register.  */
  if (rclass == FLOAT_REGS)
    {
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
        return GENERAL_REGS;
      if (in_p && INTEGRAL_MODE_P (mode)
          && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
        return GENERAL_REGS;
    }

  return NO_REGS;
}

/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static int
alpha_set_memflags_1 (rtx *xp, void *data)
{
  rtx x = *xp, orig = (rtx) data;

  if (!MEM_P (x))
    return 0;

  MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  MEM_READONLY_P (x) = MEM_READONLY_P (orig);

  /* Sadly, we cannot use alias sets because the extra aliasing
     produced by the AND interferes.  Given that two-byte quantities
     are the only thing we would be able to differentiate anyway,
     there does not seem to be any point in convoluting the early
     out of the alias check.  */

  return -1;
}

/* Given SEQ, which is an INSN list, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx seq, rtx ref)
{
  rtx insn;

  if (!MEM_P (ref))
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  for (insn = seq; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
    else
      gcc_unreachable ();
}
1536
 
1537
static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1538
                                 int, bool);
1539
 
1540
/* Internal routine for alpha_emit_set_const to check for N or below insns.
1541
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
1542
   and return pc_rtx if successful.  */
1543
 
1544
static rtx
1545
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1546
                        HOST_WIDE_INT c, int n, bool no_output)
1547
{
1548
  HOST_WIDE_INT new_const;
1549
  int i, bits;
1550
  /* Use a pseudo if highly optimizing and still generating RTL.  */
1551
  rtx subtarget
1552
    = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1553
  rtx temp, insn;
1554
 
1555
  /* If this is a sign-extended 32-bit constant, we can do this in at most
1556
     three insns, so do it if we have enough insns left.  We always have
1557
     a sign-extended 32-bit constant when compiling on a narrow machine.  */
1558
 
1559
  if (HOST_BITS_PER_WIDE_INT != 64
1560
      || c >> 31 == -1 || c >> 31 == 0)
1561
    {
1562
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1563
      HOST_WIDE_INT tmp1 = c - low;
1564
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1565
      HOST_WIDE_INT extra = 0;
1566
 
1567
      /* If HIGH will be interpreted as negative but the constant is
1568
         positive, we must adjust it to do two ldha insns.  */
1569
 
1570
      if ((high & 0x8000) != 0 && c >= 0)
1571
        {
1572
          extra = 0x4000;
1573
          tmp1 -= 0x40000000;
1574
          high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1575
        }
1576
 
1577
      if (c == low || (low == 0 && extra == 0))
1578
        {
1579
          /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1580
             but that meant that we can't handle INT_MIN on 32-bit machines
1581
             (like NT/Alpha), because we recurse indefinitely through
1582
             emit_move_insn to gen_movdi.  So instead, since we know exactly
1583
             what we want, create it explicitly.  */
1584
 
1585
          if (no_output)
1586
            return pc_rtx;
1587
          if (target == NULL)
1588
            target = gen_reg_rtx (mode);
1589
          emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1590
          return target;
1591
        }
1592
      else if (n >= 2 + (extra != 0))
1593
        {
1594
          if (no_output)
1595
            return pc_rtx;
1596
          if (!can_create_pseudo_p ())
1597
            {
1598
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1599
              temp = target;
1600
            }
1601
          else
1602
            temp = copy_to_suggested_reg (GEN_INT (high << 16),
1603
                                          subtarget, mode);
1604
 
1605
          /* As of 2002-02-23, addsi3 is only available when not optimizing.
1606
             This means that if we go through expand_binop, we'll try to
1607
             generate extensions, etc, which will require new pseudos, which
1608
             will fail during some split phases.  The SImode add patterns
1609
             still exist, but are not named.  So build the insns by hand.  */
1610
 
1611
          if (extra != 0)
1612
            {
1613
              if (! subtarget)
1614
                subtarget = gen_reg_rtx (mode);
1615
              insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1616
              insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1617
              emit_insn (insn);
1618
              temp = subtarget;
1619
            }
1620
 
1621
          if (target == NULL)
1622
            target = gen_reg_rtx (mode);
1623
          insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1624
          insn = gen_rtx_SET (VOIDmode, target, insn);
1625
          emit_insn (insn);
1626
          return target;
1627
        }
1628
    }
1629
 
1630
  /* If we couldn't do it that way, try some other methods.  But if we have
1631
     no instructions left, don't bother.  Likewise, if this is SImode and
1632
     we can't make pseudos, we can't do anything since the expand_binop
1633
     and expand_unop calls will widen and try to make pseudos.  */
1634
 
1635
  if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1636
    return 0;
1637
 
1638
  /* Next, see if we can load a related constant and then shift and possibly
1639
     negate it to get the constant we want.  Try this once for each
1640
     increasing number of insns.  */
1641
 
1642
  for (i = 1; i < n; i++)
1643
    {
1644
      /* First, see if, minus some low bits, we have an easy load of the
1645
         high bits.  */
1646
 
1647
      new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1648
      if (new_const != 0)
1649
        {
1650
          temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
1651
          if (temp)
1652
            {
1653
              if (no_output)
1654
                return temp;
1655
              return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
1656
                                   target, 0, OPTAB_WIDEN);
1657
            }
1658
        }
1659
 
1660
      /* Next try complementing.  */
1661
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1662
      if (temp)
1663
        {
1664
          if (no_output)
1665
            return temp;
1666
          return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1667
        }
1668
 
1669
      /* Next try to form a constant and do a left shift.  We can do this
1670
         if some low-order bits are zero; the exact_log2 call below tells
1671
         us that information.  The bits we are shifting out could be any
1672
         value, but here we'll just try the 0- and sign-extended forms of
1673
         the constant.  To try to increase the chance of having the same
1674
         constant in more than one insn, start at the highest number of
1675
         bits to shift, but try all possibilities in case a ZAPNOT will
1676
         be useful.  */
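      /* Illustrative example (not in the original comment):
         c = 0x91a2b3c0 is not a sign-extended 32-bit value, but
         c & -c = 0x40 gives bits = 6, and c >> 6 = 0x02468acf can be
         built with ldah+lda, so the whole constant takes just
         ldah, lda and one sll.  */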
1677
 
1678
      bits = exact_log2 (c & -c);
1679
      if (bits > 0)
1680
        for (; bits > 0; bits--)
1681
          {
1682
            new_const = c >> bits;
1683
            temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1684
            if (!temp && c < 0)
1685
              {
1686
                new_const = (unsigned HOST_WIDE_INT)c >> bits;
1687
                temp = alpha_emit_set_const (subtarget, mode, new_const,
1688
                                             i, no_output);
1689
              }
1690
            if (temp)
1691
              {
1692
                if (no_output)
1693
                  return temp;
1694
                return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1695
                                     target, 0, OPTAB_WIDEN);
1696
              }
1697
          }
1698
 
1699
      /* Now try high-order zero bits.  Here we try the shifted-in bits as
1700
         all zero and all ones.  Be careful to avoid shifting outside the
1701
         mode and to avoid shifting outside the host wide int size.  */
1702
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1703
         confuse the recursive call and set all of the high 32 bits.  */
1704
 
1705
      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1706
              - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1707
      if (bits > 0)
1708
        for (; bits > 0; bits--)
1709
          {
1710
            new_const = c << bits;
1711
            temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1712
            if (!temp)
1713
              {
1714
                new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1715
                temp = alpha_emit_set_const (subtarget, mode, new_const,
1716
                                             i, no_output);
1717
              }
1718
            if (temp)
1719
              {
1720
                if (no_output)
1721
                  return temp;
1722
                return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1723
                                     target, 1, OPTAB_WIDEN);
1724
              }
1725
          }
1726
 
1727
      /* Now try high-order 1 bits.  We get that with a sign-extension.
1728
         But one bit isn't enough here.  Be careful to avoid shifting outside
1729
         the mode and to avoid shifting outside the host wide int size.  */
1730
 
1731
      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1732
              - floor_log2 (~ c) - 2);
1733
      if (bits > 0)
1734
        for (; bits > 0; bits--)
1735
          {
1736
            new_const = c << bits;
1737
            temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1738
            if (!temp)
1739
              {
1740
                new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1741
                temp = alpha_emit_set_const (subtarget, mode, new_const,
1742
                                             i, no_output);
1743
              }
1744
            if (temp)
1745
              {
1746
                if (no_output)
1747
                  return temp;
1748
                return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1749
                                     target, 0, OPTAB_WIDEN);
1750
              }
1751
          }
1752
    }
1753
 
1754
#if HOST_BITS_PER_WIDE_INT == 64
1755
  /* Finally, see if we can load a value into the target that is the same as the
1756
     constant except that all bytes that are 0 are changed to be 0xff.  If we
1757
     can, then we can do a ZAPNOT to obtain the desired constant.  */
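  /* For instance (illustrative, not in the original comment): if C has a
     zero byte, NEW_CONST gets 0xff there instead, and the mask
     C | ~NEW_CONST is 0xff in every byte where C is nonzero and 0x00 in
     the bytes that were zero, so the final AND is precisely a ZAPNOT of
     the (hopefully cheaper) NEW_CONST.  */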
1758
 
1759
  new_const = c;
1760
  for (i = 0; i < 64; i += 8)
1761
    if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1762
      new_const |= (HOST_WIDE_INT) 0xff << i;
1763
 
1764
  /* We are only called for SImode and DImode.  If this is SImode, ensure that
1765
     we are sign extended to a full word.  */
1766
 
1767
  if (mode == SImode)
1768
    new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
1769
 
1770
  if (new_const != c)
1771
    {
1772
      temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
1773
      if (temp)
1774
        {
1775
          if (no_output)
1776
            return temp;
1777
          return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
1778
                               target, 0, OPTAB_WIDEN);
1779
        }
1780
    }
1781
#endif
1782
 
1783
  return 0;
1784
}
1785
 
1786
/* Try to output insns to set TARGET equal to the constant C if it can be
1787
   done in less than N insns.  Do all computations in MODE.  Returns the place
1788
   where the output has been placed if it can be done and the insns have been
1789
   emitted.  If it would take more than N insns, zero is returned and no
1790
   insns are emitted.  */
1791
 
1792
static rtx
1793
alpha_emit_set_const (rtx target, enum machine_mode mode,
1794
                      HOST_WIDE_INT c, int n, bool no_output)
1795
{
1796
  enum machine_mode orig_mode = mode;
1797
  rtx orig_target = target;
1798
  rtx result = 0;
1799
  int i;
1800
 
1801
  /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1802
     can't load this constant in one insn, do this in DImode.  */
1803
  if (!can_create_pseudo_p () && mode == SImode
1804
      && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
1805
    {
1806
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1807
      if (result)
1808
        return result;
1809
 
1810
      target = no_output ? NULL : gen_lowpart (DImode, target);
1811
      mode = DImode;
1812
    }
1813
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1814
    {
1815
      target = no_output ? NULL : gen_lowpart (DImode, target);
1816
      mode = DImode;
1817
    }
1818
 
1819
  /* Try 1 insn, then 2, then up to N.  */
1820
  for (i = 1; i <= n; i++)
1821
    {
1822
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1823
      if (result)
1824
        {
1825
          rtx insn, set;
1826
 
1827
          if (no_output)
1828
            return result;
1829
 
1830
          insn = get_last_insn ();
1831
          set = single_set (insn);
1832
          if (! CONSTANT_P (SET_SRC (set)))
1833
            set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1834
          break;
1835
        }
1836
    }
1837
 
1838
  /* Allow for the case where we changed the mode of TARGET.  */
1839
  if (result)
1840
    {
1841
      if (result == target)
1842
        result = orig_target;
1843
      else if (mode != orig_mode)
1844
        result = gen_lowpart (orig_mode, result);
1845
    }
1846
 
1847
  return result;
1848
}
1849
 
1850
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1851
   fall back to a straightforward decomposition.  We do this to avoid
1852
   exponential run times encountered when looking for longer sequences
1853
   with alpha_emit_set_const.  */
1854
 
1855
static rtx
1856
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1857
{
1858
  HOST_WIDE_INT d1, d2, d3, d4;
1859
 
1860
  /* Decompose the entire word */
1861
#if HOST_BITS_PER_WIDE_INT >= 64
1862
  gcc_assert (c2 == -(c1 < 0));
1863
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1864
  c1 -= d1;
1865
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1866
  c1 = (c1 - d2) >> 32;
1867
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1868
  c1 -= d3;
1869
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1870
  gcc_assert (c1 == d4);
1871
#else
1872
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1873
  c1 -= d1;
1874
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1875
  gcc_assert (c1 == d2);
1876
  c2 += (d2 < 0);
1877
  d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1878
  c2 -= d3;
1879
  d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1880
  gcc_assert (c2 == d4);
1881
#endif
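  /* Illustrative note (not in the original source): on a 64-bit host the
     pieces satisfy c == ((d4 + d3) << 32) + d2 + d1, where d1/d3 are
     signed 16-bit values (lda immediates) and d2/d4 are signed 32-bit
     values with a zero low half (ldah immediates), so at most five insns
     are emitted below: ldah, lda, sll 32, ldah, lda.  */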
1882
 
1883
  /* Construct the high word */
1884
  if (d4)
1885
    {
1886
      emit_move_insn (target, GEN_INT (d4));
1887
      if (d3)
1888
        emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1889
    }
1890
  else
1891
    emit_move_insn (target, GEN_INT (d3));
1892
 
1893
  /* Shift it into place */
1894
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
1895
 
1896
  /* Add in the low bits.  */
1897
  if (d2)
1898
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
1899
  if (d1)
1900
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
1901
 
1902
  return target;
1903
}
1904
 
1905
/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
1906
   the low 64 bits.  */
1907
 
1908
static void
1909
alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
1910
{
1911
  HOST_WIDE_INT i0, i1;
1912
 
1913
  if (GET_CODE (x) == CONST_VECTOR)
1914
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);
1915
 
1916
 
1917
  if (CONST_INT_P (x))
1918
    {
1919
      i0 = INTVAL (x);
1920
      i1 = -(i0 < 0);
1921
    }
1922
  else if (HOST_BITS_PER_WIDE_INT >= 64)
1923
    {
1924
      i0 = CONST_DOUBLE_LOW (x);
1925
      i1 = -(i0 < 0);
1926
    }
1927
  else
1928
    {
1929
      i0 = CONST_DOUBLE_LOW (x);
1930
      i1 = CONST_DOUBLE_HIGH (x);
1931
    }
1932
 
1933
  *p0 = i0;
1934
  *p1 = i1;
1935
}
1936
 
1937
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  This is all constants for which
1938
   we are willing to load the value into a register via a move pattern.
1939
   Normally this is all symbolic constants, integral constants that
1940
   take three or fewer instructions, and floating-point zero.  */
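/* For example (illustrative, not in the original comment), an address
   like "&x + 8" arrives here as (const (plus (symbol_ref "x") (const_int 8)))
   and is accepted below unless "x" turns out to be a TLS symbol.  */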
1941
 
1942
bool
1943
alpha_legitimate_constant_p (enum machine_mode mode, rtx x)
1944
{
1945
  HOST_WIDE_INT i0, i1;
1946
 
1947
  switch (GET_CODE (x))
1948
    {
1949
    case LABEL_REF:
1950
    case HIGH:
1951
      return true;
1952
 
1953
    case CONST:
1954
      if (GET_CODE (XEXP (x, 0)) == PLUS
1955
          && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
1956
        x = XEXP (XEXP (x, 0), 0);
1957
      else
1958
        return true;
1959
 
1960
      if (GET_CODE (x) != SYMBOL_REF)
1961
        return true;
1962
 
1963
      /* FALLTHRU */
1964
 
1965
    case SYMBOL_REF:
1966
      /* TLS symbols are never valid.  */
1967
      return SYMBOL_REF_TLS_MODEL (x) == 0;
1968
 
1969
    case CONST_DOUBLE:
1970
      if (x == CONST0_RTX (mode))
1971
        return true;
1972
      if (FLOAT_MODE_P (mode))
1973
        return false;
1974
      goto do_integer;
1975
 
1976
    case CONST_VECTOR:
1977
      if (x == CONST0_RTX (mode))
1978
        return true;
1979
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
1980
        return false;
1981
      if (GET_MODE_SIZE (mode) != 8)
1982
        return false;
1983
      goto do_integer;
1984
 
1985
    case CONST_INT:
1986
    do_integer:
1987
      if (TARGET_BUILD_CONSTANTS)
1988
        return true;
1989
      alpha_extract_integer (x, &i0, &i1);
1990
      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == (-i0 < 0))
1991
        return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
1992
      return false;
1993
 
1994
    default:
1995
      return false;
1996
    }
1997
}
1998
 
1999
/* Operand 1 is known to be a constant, and should require more than one
2000
   instruction to load.  Emit that multi-part load.  */
2001
 
2002
bool
2003
alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2004
{
2005
  HOST_WIDE_INT i0, i1;
2006
  rtx temp = NULL_RTX;
2007
 
2008
  alpha_extract_integer (operands[1], &i0, &i1);
2009
 
2010
  if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2011
    temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2012
 
2013
  if (!temp && TARGET_BUILD_CONSTANTS)
2014
    temp = alpha_emit_set_long_const (operands[0], i0, i1);
2015
 
2016
  if (temp)
2017
    {
2018
      if (!rtx_equal_p (operands[0], temp))
2019
        emit_move_insn (operands[0], temp);
2020
      return true;
2021
    }
2022
 
2023
  return false;
2024
}
2025
 
2026
/* Expand a move instruction; return true if all work is done.
2027
   We don't handle non-bwx subword loads here.  */
2028
 
2029
bool
2030
alpha_expand_mov (enum machine_mode mode, rtx *operands)
2031
{
2032
  rtx tmp;
2033
 
2034
  /* If the output is not a register, the input must be.  */
2035
  if (MEM_P (operands[0])
2036
      && ! reg_or_0_operand (operands[1], mode))
2037
    operands[1] = force_reg (mode, operands[1]);
2038
 
2039
  /* Allow legitimize_address to perform some simplifications.  */
2040
  if (mode == Pmode && symbolic_operand (operands[1], mode))
2041
    {
2042
      tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
2043
      if (tmp)
2044
        {
2045
          if (tmp == operands[0])
2046
            return true;
2047
          operands[1] = tmp;
2048
          return false;
2049
        }
2050
    }
2051
 
2052
  /* Early out for non-constants and valid constants.  */
2053
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2054
    return false;
2055
 
2056
  /* Split large integers.  */
2057
  if (CONST_INT_P (operands[1])
2058
      || GET_CODE (operands[1]) == CONST_DOUBLE
2059
      || GET_CODE (operands[1]) == CONST_VECTOR)
2060
    {
2061
      if (alpha_split_const_mov (mode, operands))
2062
        return true;
2063
    }
2064
 
2065
  /* Otherwise we've nothing left but to drop the thing to memory.  */
2066
  tmp = force_const_mem (mode, operands[1]);
2067
 
2068
  if (tmp == NULL_RTX)
2069
    return false;
2070
 
2071
  if (reload_in_progress)
2072
    {
2073
      emit_move_insn (operands[0], XEXP (tmp, 0));
2074
      operands[1] = replace_equiv_address (tmp, operands[0]);
2075
    }
2076
  else
2077
    operands[1] = validize_mem (tmp);
2078
  return false;
2079
}
2080
 
2081
/* Expand a non-bwx QImode or HImode move instruction;
2082
   return true if all work is done.  */
2083
 
2084
bool
2085
alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2086
{
2087
  rtx seq;
2088
 
2089
  /* If the output is not a register, the input must be.  */
2090
  if (MEM_P (operands[0]))
2091
    operands[1] = force_reg (mode, operands[1]);
2092
 
2093
  /* Handle four memory cases, unaligned and aligned for either the input
2094
     or the output.  The only case where we can be called during reload is
2095
     for aligned loads; all other cases require temporaries.  */
2096
 
2097
  if (any_memory_operand (operands[1], mode))
2098
    {
2099
      if (aligned_memory_operand (operands[1], mode))
2100
        {
2101
          if (reload_in_progress)
2102
            {
2103
              if (mode == QImode)
2104
                seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2105
              else
2106
                seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2107
              emit_insn (seq);
2108
            }
2109
          else
2110
            {
2111
              rtx aligned_mem, bitnum;
2112
              rtx scratch = gen_reg_rtx (SImode);
2113
              rtx subtarget;
2114
              bool copyout;
2115
 
2116
              get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2117
 
2118
              subtarget = operands[0];
2119
              if (REG_P (subtarget))
2120
                subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2121
              else
2122
                subtarget = gen_reg_rtx (DImode), copyout = true;
2123
 
2124
              if (mode == QImode)
2125
                seq = gen_aligned_loadqi (subtarget, aligned_mem,
2126
                                          bitnum, scratch);
2127
              else
2128
                seq = gen_aligned_loadhi (subtarget, aligned_mem,
2129
                                          bitnum, scratch);
2130
              emit_insn (seq);
2131
 
2132
              if (copyout)
2133
                emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2134
            }
2135
        }
2136
      else
2137
        {
2138
          /* Don't pass these as parameters since that makes the generated
2139
             code depend on parameter evaluation order which will cause
2140
             bootstrap failures.  */
2141
 
2142
          rtx temp1, temp2, subtarget, ua;
2143
          bool copyout;
2144
 
2145
          temp1 = gen_reg_rtx (DImode);
2146
          temp2 = gen_reg_rtx (DImode);
2147
 
2148
          subtarget = operands[0];
2149
          if (REG_P (subtarget))
2150
            subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2151
          else
2152
            subtarget = gen_reg_rtx (DImode), copyout = true;
2153
 
2154
          ua = get_unaligned_address (operands[1]);
2155
          if (mode == QImode)
2156
            seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2157
          else
2158
            seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2159
 
2160
          alpha_set_memflags (seq, operands[1]);
2161
          emit_insn (seq);
2162
 
2163
          if (copyout)
2164
            emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2165
        }
2166
      return true;
2167
    }
2168
 
2169
  if (any_memory_operand (operands[0], mode))
2170
    {
2171
      if (aligned_memory_operand (operands[0], mode))
2172
        {
2173
          rtx aligned_mem, bitnum;
2174
          rtx temp1 = gen_reg_rtx (SImode);
2175
          rtx temp2 = gen_reg_rtx (SImode);
2176
 
2177
          get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2178
 
2179
          emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2180
                                        temp1, temp2));
2181
        }
2182
      else
2183
        {
2184
          rtx temp1 = gen_reg_rtx (DImode);
2185
          rtx temp2 = gen_reg_rtx (DImode);
2186
          rtx temp3 = gen_reg_rtx (DImode);
2187
          rtx ua = get_unaligned_address (operands[0]);
2188
 
2189
          if (mode == QImode)
2190
            seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2191
          else
2192
            seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2193
 
2194
          alpha_set_memflags (seq, operands[0]);
2195
          emit_insn (seq);
2196
        }
2197
      return true;
2198
    }
2199
 
2200
  return false;
2201
}
2202
 
2203
/* Implement the movmisalign patterns.  One of the operands is a memory
2204
   that is not naturally aligned.  Emit instructions to load it.  */
2205
 
2206
void
2207
alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2208
{
2209
  /* Honor misaligned loads, for those we promised to do so.  */
2210
  if (MEM_P (operands[1]))
2211
    {
2212
      rtx tmp;
2213
 
2214
      if (register_operand (operands[0], mode))
2215
        tmp = operands[0];
2216
      else
2217
        tmp = gen_reg_rtx (mode);
2218
 
2219
      alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2220
      if (tmp != operands[0])
2221
        emit_move_insn (operands[0], tmp);
2222
    }
2223
  else if (MEM_P (operands[0]))
2224
    {
2225
      if (!reg_or_0_operand (operands[1], mode))
2226
        operands[1] = force_reg (mode, operands[1]);
2227
      alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2228
    }
2229
  else
2230
    gcc_unreachable ();
2231
}
2232
 
2233
/* Generate an unsigned DImode to FP conversion.  This is the same code
2234
   optabs would emit if we didn't have TFmode patterns.
2235
 
2236
   For SFmode, this is the only construction I've found that can pass
2237
   gcc.c-torture/execute/ieee/rbug.c.  No scenario that uses DFmode
2238
   intermediates will work, because you'll get intermediate rounding
2239
   that ruins the end result.  Some of this could be fixed by turning
2240
   on round-to-positive-infinity, but that requires diddling the fpsr,
2241
   which kills performance.  I tried turning this around and converting
2242
   to a negative number, so that I could turn on /m, but either I did
2243
   it wrong or there's something else cause I wound up with the exact
2244
   same single-bit error.  There is a branch-less form of this same code:
2245
 
2246
        srl     $16,1,$1
2247
        and     $16,1,$2
2248
        cmplt   $16,0,$3
2249
        or      $1,$2,$2
2250
        cmovge  $16,$16,$2
2251
        itoft   $3,$f10
2252
        itoft   $2,$f11
2253
        cvtqs   $f11,$f11
2254
        adds    $f11,$f11,$f0
2255
        fcmoveq $f10,$f11,$f0
2256
 
2257
   I'm not using it because it's the same number of instructions as
2258
   this branch-full form, and it has more serialized long latency
2259
   instructions on the critical path.
2260
 
2261
   For DFmode, we can avoid rounding errors by breaking up the word
2262
   into two pieces, converting them separately, and adding them back:
2263
 
2264
   LC0: .long 0,0x5f800000
2265
 
2266
        itoft   $16,$f11
2267
        lda     $2,LC0
2268
        cmplt   $16,0,$1
2269
        cpyse   $f11,$f31,$f10
2270
        cpyse   $f31,$f11,$f11
2271
        s4addq  $1,$2,$1
2272
        lds     $f12,0($1)
2273
        cvtqt   $f10,$f10
2274
        cvtqt   $f11,$f11
2275
        addt    $f12,$f10,$f0
2276
        addt    $f0,$f11,$f0
2277
 
2278
   This doesn't seem to be a clear-cut win over the optabs form.
2279
   It probably all depends on the distribution of numbers being
2280
   converted -- in the optabs form, all but high-bit-set has a
2281
   much lower minimum execution time.  */
2282
 
2283
void
2284
alpha_emit_floatuns (rtx operands[2])
2285
{
2286
  rtx neglab, donelab, i0, i1, f0, in, out;
2287
  enum machine_mode mode;
2288
 
2289
  out = operands[0];
2290
  in = force_reg (DImode, operands[1]);
2291
  mode = GET_MODE (out);
2292
  neglab = gen_label_rtx ();
2293
  donelab = gen_label_rtx ();
2294
  i0 = gen_reg_rtx (DImode);
2295
  i1 = gen_reg_rtx (DImode);
2296
  f0 = gen_reg_rtx (mode);
2297
 
2298
  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2299
 
2300
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2301
  emit_jump_insn (gen_jump (donelab));
2302
  emit_barrier ();
2303
 
2304
  emit_label (neglab);
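  /* Illustrative note (not in the original source): the next three insns
     compute i0 = (in >> 1) | (in & 1), i.e. halve IN while keeping the
     discarded bit sticky, so that converting i0 to floating point and
     doubling it (f0 + f0) rounds exactly as a direct unsigned 64-bit
     conversion would.  */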
2305
 
2306
  emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2307
  emit_insn (gen_anddi3 (i1, in, const1_rtx));
2308
  emit_insn (gen_iordi3 (i0, i0, i1));
2309
  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2310
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2311
 
2312
  emit_label (donelab);
2313
}
2314
 
2315
/* Generate the comparison for a conditional branch.  */
2316
 
2317
void
2318
alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
2319
{
2320
  enum rtx_code cmp_code, branch_code;
2321
  enum machine_mode branch_mode = VOIDmode;
2322
  enum rtx_code code = GET_CODE (operands[0]);
2323
  rtx op0 = operands[1], op1 = operands[2];
2324
  rtx tem;
2325
 
2326
  if (cmp_mode == TFmode)
2327
    {
2328
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2329
      op1 = const0_rtx;
2330
      cmp_mode = DImode;
2331
    }
2332
 
2333
  /* The general case: fold the comparison code to the types of compares
2334
     that we have, choosing the branch as necessary.  */
2335
  switch (code)
2336
    {
2337
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
2338
    case UNORDERED:
2339
      /* We have these compares: */
2340
      cmp_code = code, branch_code = NE;
2341
      break;
2342
 
2343
    case NE:
2344
    case ORDERED:
2345
      /* These must be reversed.  */
2346
      cmp_code = reverse_condition (code), branch_code = EQ;
2347
      break;
2348
 
2349
    case GE:  case GT: case GEU:  case GTU:
2350
      /* For FP, we swap them, for INT, we reverse them.  */
2351
      if (cmp_mode == DFmode)
2352
        {
2353
          cmp_code = swap_condition (code);
2354
          branch_code = NE;
2355
          tem = op0, op0 = op1, op1 = tem;
2356
        }
2357
      else
2358
        {
2359
          cmp_code = reverse_condition (code);
2360
          branch_code = EQ;
2361
        }
2362
      break;
2363
 
2364
    default:
2365
      gcc_unreachable ();
2366
    }
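  /* Illustrative example (not in the original source): a signed integer
     "a >= b" is reversed above into cmp_code = LT, branch_code = EQ,
     i.e. "t = (a < b); branch if t == 0", while a DFmode "a >= b" is
     swapped into "t = (b <= a); branch if t != 0", matching the cmptle
     instruction that actually exists.  */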
2367
 
2368
  if (cmp_mode == DFmode)
2369
    {
2370
      if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2371
        {
2372
          /* When we are not as concerned about non-finite values, and we
2373
             are comparing against zero, we can branch directly.  */
2374
          if (op1 == CONST0_RTX (DFmode))
2375
            cmp_code = UNKNOWN, branch_code = code;
2376
          else if (op0 == CONST0_RTX (DFmode))
2377
            {
2378
              /* Undo the swap we probably did just above.  */
2379
              tem = op0, op0 = op1, op1 = tem;
2380
              branch_code = swap_condition (cmp_code);
2381
              cmp_code = UNKNOWN;
2382
            }
2383
        }
2384
      else
2385
        {
2386
          /* ??? We mark the branch mode to be CCmode to prevent the
2387
             compare and branch from being combined, since the compare
2388
             insn follows IEEE rules that the branch does not.  */
2389
          branch_mode = CCmode;
2390
        }
2391
    }
2392
  else
2393
    {
2394
      /* The following optimizations are only for signed compares.  */
2395
      if (code != LEU && code != LTU && code != GEU && code != GTU)
2396
        {
2397
          /* Whee.  Compare and branch against 0 directly.  */
2398
          if (op1 == const0_rtx)
2399
            cmp_code = UNKNOWN, branch_code = code;
2400
 
2401
          /* If the constant doesn't fit into an immediate, but can
2402
             be generated by lda/ldah, we adjust the argument and
2403
             compare against zero, so we can use beq/bne directly.  */
2404
          /* ??? Don't do this when comparing against symbols, otherwise
2405
             we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2406
             be declared false out of hand (at least for non-weak).  */
2407
          else if (CONST_INT_P (op1)
2408
                   && (code == EQ || code == NE)
2409
                   && !(symbolic_operand (op0, VOIDmode)
2410
                        || (REG_P (op0) && REG_POINTER (op0))))
2411
            {
2412
              rtx n_op1 = GEN_INT (-INTVAL (op1));
2413
 
2414
              if (! satisfies_constraint_I (op1)
2415
                  && (satisfies_constraint_K (n_op1)
2416
                      || satisfies_constraint_L (n_op1)))
2417
                cmp_code = PLUS, branch_code = code, op1 = n_op1;
2418
            }
2419
        }
2420
 
2421
      if (!reg_or_0_operand (op0, DImode))
2422
        op0 = force_reg (DImode, op0);
2423
      if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2424
        op1 = force_reg (DImode, op1);
2425
    }
2426
 
2427
  /* Emit an initial compare instruction, if necessary.  */
2428
  tem = op0;
2429
  if (cmp_code != UNKNOWN)
2430
    {
2431
      tem = gen_reg_rtx (cmp_mode);
2432
      emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2433
    }
2434
 
2435
  /* Emit the branch instruction.  */
2436
  tem = gen_rtx_SET (VOIDmode, pc_rtx,
2437
                     gen_rtx_IF_THEN_ELSE (VOIDmode,
2438
                                           gen_rtx_fmt_ee (branch_code,
2439
                                                           branch_mode, tem,
2440
                                                           CONST0_RTX (cmp_mode)),
2441
                                           gen_rtx_LABEL_REF (VOIDmode,
2442
                                                              operands[3]),
2443
                                           pc_rtx));
2444
  emit_jump_insn (tem);
2445
}
2446
 
2447
/* Certain simplifications can be done to make invalid setcc operations
2448
   valid.  Return true if we succeed, or false if we can't work.  */
2449
 
2450
bool
2451
alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
2452
{
2453
  enum rtx_code cmp_code;
2454
  enum rtx_code code = GET_CODE (operands[1]);
2455
  rtx op0 = operands[2], op1 = operands[3];
2456
  rtx tmp;
2457
 
2458
  if (cmp_mode == TFmode)
2459
    {
2460
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2461
      op1 = const0_rtx;
2462
      cmp_mode = DImode;
2463
    }
2464
 
2465
  if (cmp_mode == DFmode && !TARGET_FIX)
2466
    return 0;
2467
 
2468
  /* The general case: fold the comparison code to the types of compares
2469
     that we have, choosing the branch as necessary.  */
2470
 
2471
  cmp_code = UNKNOWN;
2472
  switch (code)
2473
    {
2474
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
2475
    case UNORDERED:
2476
      /* We have these compares.  */
2477
      if (cmp_mode == DFmode)
2478
        cmp_code = code, code = NE;
2479
      break;
2480
 
2481
    case NE:
2482
      if (cmp_mode == DImode && op1 == const0_rtx)
2483
        break;
2484
      /* FALLTHRU */
2485
 
2486
    case ORDERED:
2487
      cmp_code = reverse_condition (code);
2488
      code = EQ;
2489
      break;
2490
 
2491
    case GE:  case GT: case GEU:  case GTU:
2492
      /* These normally need swapping, but for integer zero we have
2493
         special patterns that recognize swapped operands.  */
2494
      if (cmp_mode == DImode && op1 == const0_rtx)
2495
        break;
2496
      code = swap_condition (code);
2497
      if (cmp_mode == DFmode)
2498
        cmp_code = code, code = NE;
2499
      tmp = op0, op0 = op1, op1 = tmp;
2500
      break;
2501
 
2502
    default:
2503
      gcc_unreachable ();
2504
    }
2505
 
2506
  if (cmp_mode == DImode)
2507
    {
2508
      if (!register_operand (op0, DImode))
2509
        op0 = force_reg (DImode, op0);
2510
      if (!reg_or_8bit_operand (op1, DImode))
2511
        op1 = force_reg (DImode, op1);
2512
    }
2513
 
2514
  /* Emit an initial compare instruction, if necessary.  */
2515
  if (cmp_code != UNKNOWN)
2516
    {
2517
      tmp = gen_reg_rtx (cmp_mode);
2518
      emit_insn (gen_rtx_SET (VOIDmode, tmp,
2519
                              gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2520
 
2521
      op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2522
      op1 = const0_rtx;
2523
    }
2524
 
2525
  /* Emit the setcc instruction.  */
2526
  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2527
                          gen_rtx_fmt_ee (code, DImode, op0, op1)));
2528
  return true;
2529
}
2530
 
2531
 
2532
/* Rewrite a comparison against zero CMP of the form
2533
   (CODE (cc0) (const_int 0)) so it can be written validly in
2534
   a conditional move (if_then_else CMP ...).
2535
   If both of the operands that set cc0 are nonzero we must emit
2536
   an insn to perform the compare (it can't be done within
2537
   the conditional move).  */
2538
 
2539
rtx
2540
alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2541
{
2542
  enum rtx_code code = GET_CODE (cmp);
2543
  enum rtx_code cmov_code = NE;
2544
  rtx op0 = XEXP (cmp, 0);
2545
  rtx op1 = XEXP (cmp, 1);
2546
  enum machine_mode cmp_mode
2547
    = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2548
  enum machine_mode cmov_mode = VOIDmode;
2549
  int local_fast_math = flag_unsafe_math_optimizations;
2550
  rtx tem;
2551
 
2552
  if (cmp_mode == TFmode)
2553
    {
2554
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2555
      op1 = const0_rtx;
2556
      cmp_mode = DImode;
2557
    }
2558
 
2559
  gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2560
 
2561
  if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2562
    {
2563
      enum rtx_code cmp_code;
2564
 
2565
      if (! TARGET_FIX)
2566
        return 0;
2567
 
2568
      /* If we have fp<->int register move instructions, do a cmov by
2569
         performing the comparison in fp registers, and move the
2570
         zero/nonzero value to integer registers, where we can then
2571
         use a normal cmov, or vice-versa.  */
2572
 
2573
      switch (code)
2574
        {
2575
        case EQ: case LE: case LT: case LEU: case LTU:
2576
          /* We have these compares.  */
2577
          cmp_code = code, code = NE;
2578
          break;
2579
 
2580
        case NE:
2581
          /* This must be reversed.  */
2582
          cmp_code = EQ, code = EQ;
2583
          break;
2584
 
2585
        case GE: case GT: case GEU: case GTU:
2586
          /* These normally need swapping, but for integer zero we have
2587
             special patterns that recognize swapped operands.  */
2588
          if (cmp_mode == DImode && op1 == const0_rtx)
2589
            cmp_code = code, code = NE;
2590
          else
2591
            {
2592
              cmp_code = swap_condition (code);
2593
              code = NE;
2594
              tem = op0, op0 = op1, op1 = tem;
2595
            }
2596
          break;
2597
 
2598
        default:
2599
          gcc_unreachable ();
2600
        }
2601
 
2602
      tem = gen_reg_rtx (cmp_mode);
2603
      emit_insn (gen_rtx_SET (VOIDmode, tem,
2604
                              gen_rtx_fmt_ee (cmp_code, cmp_mode,
2605
                                              op0, op1)));
2606
 
2607
      cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2608
      op0 = gen_lowpart (cmp_mode, tem);
2609
      op1 = CONST0_RTX (cmp_mode);
2610
      local_fast_math = 1;
2611
    }
2612
 
2613
  /* We may be able to use a conditional move directly.
2614
     This avoids emitting spurious compares.  */
2615
  if (signed_comparison_operator (cmp, VOIDmode)
2616
      && (cmp_mode == DImode || local_fast_math)
2617
      && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2618
    return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2619
 
2620
  /* We can't put the comparison inside the conditional move;
2621
     emit a compare instruction and put that inside the
2622
     conditional move.  Make sure we emit only comparisons we have;
2623
     swap or reverse as necessary.  */
2624
 
2625
  if (!can_create_pseudo_p ())
2626
    return NULL_RTX;
2627
 
2628
  switch (code)
2629
    {
2630
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
2631
      /* We have these compares: */
2632
      break;
2633
 
2634
    case NE:
2635
      /* This must be reversed.  */
2636
      code = reverse_condition (code);
2637
      cmov_code = EQ;
2638
      break;
2639
 
2640
    case GE:  case GT:  case GEU:  case GTU:
2641
      /* These must be swapped.  */
2642
      if (op1 != CONST0_RTX (cmp_mode))
2643
        {
2644
          code = swap_condition (code);
2645
          tem = op0, op0 = op1, op1 = tem;
2646
        }
2647
      break;
2648
 
2649
    default:
2650
      gcc_unreachable ();
2651
    }
2652
 
2653
  if (cmp_mode == DImode)
2654
    {
2655
      if (!reg_or_0_operand (op0, DImode))
2656
        op0 = force_reg (DImode, op0);
2657
      if (!reg_or_8bit_operand (op1, DImode))
2658
        op1 = force_reg (DImode, op1);
2659
    }
2660
 
2661
  /* ??? We mark the branch mode to be CCmode to prevent the compare
2662
     and cmov from being combined, since the compare insn follows IEEE
2663
     rules that the cmov does not.  */
2664
  if (cmp_mode == DFmode && !local_fast_math)
2665
    cmov_mode = CCmode;
2666
 
2667
  tem = gen_reg_rtx (cmp_mode);
2668
  emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2669
  return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2670
}
2671
 
2672
/* Simplify a conditional move of two constants into a setcc with
2673
   arithmetic.  This is done with a splitter since combine would
2674
   just undo the work if done during code generation.  It also catches
2675
   cases we wouldn't have before cse.  */
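/* For example (illustrative, not in the original comment), for
   "dest = (cond == 0) ? 8 : 0" we have t = 8, f = 0, diff = 8, so the
   splitter below emits "tmp = (cond == 0); dest = tmp << 3"; for t = 5,
   f = 1 (diff == 4) it instead uses the s4addq form
   "dest = tmp * 4 + 1".  */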
2676
 
2677
int
2678
alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2679
                              rtx t_rtx, rtx f_rtx)
2680
{
2681
  HOST_WIDE_INT t, f, diff;
2682
  enum machine_mode mode;
2683
  rtx target, subtarget, tmp;
2684
 
2685
  mode = GET_MODE (dest);
2686
  t = INTVAL (t_rtx);
2687
  f = INTVAL (f_rtx);
2688
  diff = t - f;
2689
 
2690
  if (((code == NE || code == EQ) && diff < 0)
2691
      || (code == GE || code == GT))
2692
    {
2693
      code = reverse_condition (code);
2694
      diff = t, t = f, f = diff;
2695
      diff = t - f;
2696
    }
2697
 
2698
  subtarget = target = dest;
2699
  if (mode != DImode)
2700
    {
2701
      target = gen_lowpart (DImode, dest);
2702
      if (can_create_pseudo_p ())
2703
        subtarget = gen_reg_rtx (DImode);
2704
      else
2705
        subtarget = target;
2706
    }
2707
  /* Below, we must be careful to use copy_rtx on target and subtarget
2708
     in intermediate insns, as they may be a subreg rtx, which may not
2709
     be shared.  */
2710
 
2711
  if (f == 0 && exact_log2 (diff) > 0
2712
      /* On EV6, we've got enough shifters to make non-arithmetic shifts
2713
         viable over a longer latency cmove.  On EV5, the E0 slot is a
2714
         scarce resource, and on EV4 shift has the same latency as a cmove.  */
2715
      && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2716
    {
2717
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2718
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2719
 
2720
      tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2721
                            GEN_INT (exact_log2 (t)));
2722
      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2723
    }
2724
  else if (f == 0 && t == -1)
2725
    {
2726
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2727
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2728
 
2729
      emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2730
    }
2731
  else if (diff == 1 || diff == 4 || diff == 8)
2732
    {
2733
      rtx add_op;
2734
 
2735
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2736
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2737
 
2738
      if (diff == 1)
2739
        emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2740
      else
2741
        {
2742
          add_op = GEN_INT (f);
2743
          if (sext_add_operand (add_op, mode))
2744
            {
2745
              tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2746
                                  GEN_INT (diff));
2747
              tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2748
              emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2749
            }
2750
          else
2751
            return 0;
2752
        }
2753
    }
2754
  else
2755
    return 0;
2756
 
2757
  return 1;
2758
}
2759
 
2760
/* Look up the X_floating library function name for the
2761
   given operation.  */
2762
 
2763
struct GTY(()) xfloating_op
2764
{
2765
  const enum rtx_code code;
2766
  const char *const GTY((skip)) osf_func;
2767
  const char *const GTY((skip)) vms_func;
2768
  rtx libcall;
2769
};
2770
 
2771
static GTY(()) struct xfloating_op xfloating_ops[] =
2772
{
2773
  { PLUS,               "_OtsAddX", "OTS$ADD_X", 0 },
2774
  { MINUS,              "_OtsSubX", "OTS$SUB_X", 0 },
2775
  { MULT,               "_OtsMulX", "OTS$MUL_X", 0 },
2776
  { DIV,                "_OtsDivX", "OTS$DIV_X", 0 },
2777
  { EQ,                 "_OtsEqlX", "OTS$EQL_X", 0 },
2778
  { NE,                 "_OtsNeqX", "OTS$NEQ_X", 0 },
2779
  { LT,                 "_OtsLssX", "OTS$LSS_X", 0 },
2780
  { LE,                 "_OtsLeqX", "OTS$LEQ_X", 0 },
2781
  { GT,                 "_OtsGtrX", "OTS$GTR_X", 0 },
2782
  { GE,                 "_OtsGeqX", "OTS$GEQ_X", 0 },
2783
  { FIX,                "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2784
  { FLOAT,              "_OtsCvtQX", "OTS$CVTQX", 0 },
2785
  { UNSIGNED_FLOAT,     "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2786
  { FLOAT_EXTEND,       "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2787
  { FLOAT_TRUNCATE,     "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2788
};
2789
 
2790
static GTY(()) struct xfloating_op vax_cvt_ops[] =
2791
{
2792
  { FLOAT_EXTEND,       "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2793
  { FLOAT_TRUNCATE,     "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2794
};
2795
 
2796
static rtx
2797
alpha_lookup_xfloating_lib_func (enum rtx_code code)
2798
{
2799
  struct xfloating_op *ops = xfloating_ops;
2800
  long n = ARRAY_SIZE (xfloating_ops);
2801
  long i;
2802
 
2803
  gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2804
 
2805
  /* How irritating.  Nothing to key off for the main table.  */
2806
  if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2807
    {
2808
      ops = vax_cvt_ops;
2809
      n = ARRAY_SIZE (vax_cvt_ops);
2810
    }
2811
 
2812
  for (i = 0; i < n; ++i, ++ops)
2813
    if (ops->code == code)
2814
      {
2815
        rtx func = ops->libcall;
2816
        if (!func)
2817
          {
2818
            func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2819
                                     ? ops->vms_func : ops->osf_func);
2820
            ops->libcall = func;
2821
          }
2822
        return func;
2823
      }
2824
 
2825
  gcc_unreachable ();
2826
}
2827
 
2828
/* Most X_floating operations take the rounding mode as an argument.
2829
   Compute that here.  */
2830
 
2831
static int
2832
alpha_compute_xfloating_mode_arg (enum rtx_code code,
2833
                                  enum alpha_fp_rounding_mode round)
2834
{
2835
  int mode;
2836
 
2837
  switch (round)
2838
    {
2839
    case ALPHA_FPRM_NORM:
2840
      mode = 2;
2841
      break;
2842
    case ALPHA_FPRM_MINF:
2843
      mode = 1;
2844
      break;
2845
    case ALPHA_FPRM_CHOP:
2846
      mode = 0;
2847
      break;
2848
    case ALPHA_FPRM_DYN:
2849
      mode = 4;
2850
      break;
2851
    default:
2852
      gcc_unreachable ();
2853
 
2854
    /* XXX For reference, round to +inf is mode = 3.  */
2855
    }
2856
 
2857
  if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2858
    mode |= 0x10000;
2859
 
2860
  return mode;
2861
}
2862
 
2863
/* Emit an X_floating library function call.
2864
 
2865
   Note that these functions do not follow normal calling conventions:
2866
   TFmode arguments are passed in two integer registers (as opposed to
2867
   indirect); TFmode return values appear in R16+R17.
2868
 
2869
   FUNC is the function to call.
2870
   TARGET is where the output belongs.
2871
   OPERANDS are the inputs.
2872
   NOPERANDS is the count of inputs.
2873
   EQUIV is the expression equivalent for the function.
2874
*/
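/* For instance (illustrative, not from the original comment), a TFmode
   addition routed through _OtsAddX ends up with the first operand in
   $16/$17, the second in $18/$19, and the rounding-mode integer in $20,
   with the TFmode result read back from $16/$17.  */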
2875
 
2876
static void
2877
alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2878
                              int noperands, rtx equiv)
2879
{
2880
  rtx usage = NULL_RTX, tmp, reg;
2881
  int regno = 16, i;
2882
 
2883
  start_sequence ();
2884
 
2885
  for (i = 0; i < noperands; ++i)
2886
    {
2887
      switch (GET_MODE (operands[i]))
2888
        {
2889
        case TFmode:
2890
          reg = gen_rtx_REG (TFmode, regno);
2891
          regno += 2;
2892
          break;
2893
 
2894
        case DFmode:
2895
          reg = gen_rtx_REG (DFmode, regno + 32);
2896
          regno += 1;
2897
          break;
2898
 
2899
        case VOIDmode:
2900
          gcc_assert (CONST_INT_P (operands[i]));
2901
          /* FALLTHRU */
2902
        case DImode:
2903
          reg = gen_rtx_REG (DImode, regno);
2904
          regno += 1;
2905
          break;
2906
 
2907
        default:
2908
          gcc_unreachable ();
2909
        }
2910
 
2911
      emit_move_insn (reg, operands[i]);
2912
      use_reg (&usage, reg);
2913
    }
2914
 
2915
  switch (GET_MODE (target))
2916
    {
2917
    case TFmode:
2918
      reg = gen_rtx_REG (TFmode, 16);
2919
      break;
2920
    case DFmode:
2921
      reg = gen_rtx_REG (DFmode, 32);
2922
      break;
2923
    case DImode:
2924
      reg = gen_rtx_REG (DImode, 0);
2925
      break;
2926
    default:
2927
      gcc_unreachable ();
2928
    }
2929
 
2930
  tmp = gen_rtx_MEM (QImode, func);
2931
  tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
2932
                                        const0_rtx, const0_rtx));
2933
  CALL_INSN_FUNCTION_USAGE (tmp) = usage;
2934
  RTL_CONST_CALL_P (tmp) = 1;
2935
 
2936
  tmp = get_insns ();
2937
  end_sequence ();
2938
 
2939
  emit_libcall_block (tmp, target, reg, equiv);
2940
}
2941
 
2942
/* Emit an X_floating library function call for arithmetic (+,-,*,/).  */
2943
 
2944
void
2945
alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
2946
{
2947
  rtx func;
2948
  int mode;
2949
  rtx out_operands[3];
2950
 
2951
  func = alpha_lookup_xfloating_lib_func (code);
2952
  mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
2953
 
2954
  out_operands[0] = operands[1];
2955
  out_operands[1] = operands[2];
2956
  out_operands[2] = GEN_INT (mode);
2957
  alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
2958
                                gen_rtx_fmt_ee (code, TFmode, operands[1],
2959
                                                operands[2]));
2960
}
2961
 
2962
/* Emit an X_floating library function call for a comparison.  */
2963
 
2964
static rtx
2965
alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
2966
{
2967
  enum rtx_code cmp_code, res_code;
2968
  rtx func, out, operands[2], note;
2969
 
2970
  /* X_floating library comparison functions return
2971
           -1  unordered
2972
            0  false
2973
            1  true
2974
     Convert the compare against the raw return value.  */
2975
 
2976
  cmp_code = *pcode;
2977
  switch (cmp_code)
2978
    {
2979
    case UNORDERED:
2980
      cmp_code = EQ;
2981
      res_code = LT;
2982
      break;
2983
    case ORDERED:
2984
      cmp_code = EQ;
2985
      res_code = GE;
2986
      break;
2987
    case NE:
2988
      res_code = NE;
2989
      break;
2990
    case EQ:
2991
    case LT:
2992
    case GT:
2993
    case LE:
2994
    case GE:
2995
      res_code = GT;
2996
      break;
2997
    default:
2998
      gcc_unreachable ();
2999
    }
3000
  *pcode = res_code;
3001
 
3002
  func = alpha_lookup_xfloating_lib_func (cmp_code);
3003
 
3004
  operands[0] = op0;
3005
  operands[1] = op1;
3006
  out = gen_reg_rtx (DImode);
3007
 
3008
  /* What's actually returned is -1,0,1, not a proper boolean value,
3009
     so use an EXPR_LIST as with a generic libcall instead of a
3010
     comparison type expression.  */
3011
  note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3012
  note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3013
  note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3014
  alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3015
 
3016
  return out;
3017
}
3018
 
3019
/* Emit an X_floating library function call for a conversion.  */
3020
 
3021
void
3022
alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3023
{
3024
  int noperands = 1, mode;
3025
  rtx out_operands[2];
3026
  rtx func;
3027
  enum rtx_code code = orig_code;
3028
 
3029
  if (code == UNSIGNED_FIX)
3030
    code = FIX;
3031
 
3032
  func = alpha_lookup_xfloating_lib_func (code);
3033
 
3034
  out_operands[0] = operands[1];
3035
 
3036
  switch (code)
3037
    {
3038
    case FIX:
3039
      mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3040
      out_operands[1] = GEN_INT (mode);
3041
      noperands = 2;
3042
      break;
3043
    case FLOAT_TRUNCATE:
3044
      mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3045
      out_operands[1] = GEN_INT (mode);
3046
      noperands = 2;
3047
      break;
3048
    default:
3049
      break;
3050
    }
3051
 
3052
  alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3053
                                gen_rtx_fmt_e (orig_code,
3054
                                               GET_MODE (operands[0]),
3055
                                               operands[1]));
3056
}
3057
 
3058
/* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3059
   DImode moves from OP[2,3] to OP[0,1].  If FIXUP_OVERLAP is true,
3060
   guarantee that the sequence
3061
     set (OP[0] OP[2])
3062
     set (OP[1] OP[3])
3063
   is valid.  Naturally, output operand ordering is little-endian.
3064
   This is used by *movtf_internal and *movti_internal.  */
3065
 
3066
void
3067
alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3068
                        bool fixup_overlap)
3069
{
3070
  switch (GET_CODE (operands[1]))
3071
    {
3072
    case REG:
3073
      operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3074
      operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3075
      break;
3076
 
3077
    case MEM:
3078
      operands[3] = adjust_address (operands[1], DImode, 8);
3079
      operands[2] = adjust_address (operands[1], DImode, 0);
3080
      break;
3081
 
3082
    case CONST_INT:
3083
    case CONST_DOUBLE:
3084
      gcc_assert (operands[1] == CONST0_RTX (mode));
3085
      operands[2] = operands[3] = const0_rtx;
3086
      break;
3087
 
3088
    default:
3089
      gcc_unreachable ();
3090
    }
3091
 
3092
  switch (GET_CODE (operands[0]))
3093
    {
3094
    case REG:
3095
      operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3096
      operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3097
      break;
3098
 
3099
    case MEM:
3100
      operands[1] = adjust_address (operands[0], DImode, 8);
3101
      operands[0] = adjust_address (operands[0], DImode, 0);
3102
      break;
3103
 
3104
    default:
3105
      gcc_unreachable ();
3106
    }
3107
 
3108
  if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3109
    {
3110
      rtx tmp;
3111
      tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3112
      tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3113
    }
3114
}
3115
 
3116
/* Implement negtf2 or abstf2.  Op0 is destination, op1 is source,
3117
   op2 is a register containing the sign bit, operation is the
3118
   logical operation to be performed.  */
3119
 
3120
void
3121
alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3122
{
3123
  rtx high_bit = operands[2];
3124
  rtx scratch;
3125
  int move;
3126
 
3127
  alpha_split_tmode_pair (operands, TFmode, false);
3128
 
3129
  /* Detect three flavors of operand overlap.  */
3130
  move = 1;
3131
  if (rtx_equal_p (operands[0], operands[2]))
3132
    move = 0;
3133
  else if (rtx_equal_p (operands[1], operands[2]))
3134
    {
3135
      if (rtx_equal_p (operands[0], high_bit))
3136
        move = 2;
3137
      else
3138
        move = -1;
3139
    }
3140
 
3141
  if (move < 0)
3142
    emit_move_insn (operands[0], operands[2]);
3143
 
3144
  /* ??? If the destination overlaps both source tf and high_bit, then
3145
     assume source tf is dead in its entirety and use the other half
3146
     for a scratch register.  Otherwise "scratch" is just the proper
3147
     destination register.  */
3148
  scratch = operands[move < 2 ? 1 : 3];
3149
 
3150
  emit_insn ((*operation) (scratch, high_bit, operands[3]));
3151
 
3152
  if (move > 0)
3153
    {
3154
      emit_move_insn (operands[0], operands[2]);
3155
      if (move > 1)
3156
        emit_move_insn (operands[1], scratch);
3157
    }
3158
}
3159
 
3160
/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3161
   unaligned data:
3162
 
3163
           unsigned:                       signed:
3164
   word:   ldq_u  r1,X(r11)                ldq_u  r1,X(r11)
3165
           ldq_u  r2,X+1(r11)              ldq_u  r2,X+1(r11)
3166
           lda    r3,X(r11)                lda    r3,X+2(r11)
3167
           extwl  r1,r3,r1                 extql  r1,r3,r1
3168
           extwh  r2,r3,r2                 extqh  r2,r3,r2
3169
           or     r1,r2,r1                 or     r1,r2,r1
3170
                                           sra    r1,48,r1
3171
 
3172
   long:   ldq_u  r1,X(r11)                ldq_u  r1,X(r11)
3173
           ldq_u  r2,X+3(r11)              ldq_u  r2,X+3(r11)
3174
           lda    r3,X(r11)                lda    r3,X(r11)
3175
           extll  r1,r3,r1                 extll  r1,r3,r1
3176
           extlh  r2,r3,r2                 extlh  r2,r3,r2
3177
           or     r1,r2,r1                 addl   r1,r2,r1
3178
 
3179
   quad:   ldq_u  r1,X(r11)
3180
           ldq_u  r2,X+7(r11)
3181
           lda    r3,X(r11)
3182
           extql  r1,r3,r1
3183
           extqh  r2,r3,r2
3184
           or     r1,r2,r1
3185
*/
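/* Illustrative note (not in the original comment): in each sequence the
   two ldq_u insns fetch the aligned quadwords that straddle X, the
   extwl/extll/extql insn shifts the low fragment right according to the
   low three address bits, the extwh/extlh/extqh insn shifts the high
   fragment left by the complementary amount, and the final or splices
   the two fragments back together.  */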
3186
 
3187
void
3188
alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3189
                             HOST_WIDE_INT ofs, int sign)
3190
{
3191
  rtx meml, memh, addr, extl, exth, tmp, mema;
3192
  enum machine_mode mode;
3193
 
3194
  if (TARGET_BWX && size == 2)
3195
    {
3196
      meml = adjust_address (mem, QImode, ofs);
3197
      memh = adjust_address (mem, QImode, ofs+1);
3198
      extl = gen_reg_rtx (DImode);
3199
      exth = gen_reg_rtx (DImode);
3200
      emit_insn (gen_zero_extendqidi2 (extl, meml));
3201
      emit_insn (gen_zero_extendqidi2 (exth, memh));
3202
      exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3203
                                  NULL, 1, OPTAB_LIB_WIDEN);
3204
      addr = expand_simple_binop (DImode, IOR, extl, exth,
3205
                                  NULL, 1, OPTAB_LIB_WIDEN);
3206
 
3207
      if (sign && GET_MODE (tgt) != HImode)
3208
        {
3209
          addr = gen_lowpart (HImode, addr);
3210
          emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3211
        }
3212
      else
3213
        {
3214
          if (GET_MODE (tgt) != DImode)
3215
            addr = gen_lowpart (GET_MODE (tgt), addr);
3216
          emit_move_insn (tgt, addr);
3217
        }
3218
      return;
3219
    }
3220
 
3221
  meml = gen_reg_rtx (DImode);
3222
  memh = gen_reg_rtx (DImode);
3223
  addr = gen_reg_rtx (DImode);
3224
  extl = gen_reg_rtx (DImode);
3225
  exth = gen_reg_rtx (DImode);
3226
 
3227
  mema = XEXP (mem, 0);
3228
  if (GET_CODE (mema) == LO_SUM)
3229
    mema = force_reg (Pmode, mema);
3230
 
3231
  /* AND addresses cannot be in any alias set, since they may implicitly
3232
     alias surrounding code.  Ideally we'd have some alias set that
3233
     covered all types except those with alignment 8 or higher.  */
3234
 
3235
  tmp = change_address (mem, DImode,
3236
                        gen_rtx_AND (DImode,
3237
                                     plus_constant (mema, ofs),
3238
                                     GEN_INT (-8)));
3239
  set_mem_alias_set (tmp, 0);
3240
  emit_move_insn (meml, tmp);
3241
 
3242
  tmp = change_address (mem, DImode,
3243
                        gen_rtx_AND (DImode,
3244
                                     plus_constant (mema, ofs + size - 1),
3245
                                     GEN_INT (-8)));
3246
  set_mem_alias_set (tmp, 0);
3247
  emit_move_insn (memh, tmp);
3248
 
3249
  if (sign && size == 2)
3250
    {
3251
      emit_move_insn (addr, plus_constant (mema, ofs+2));
3252
 
3253
      emit_insn (gen_extql (extl, meml, addr));
3254
      emit_insn (gen_extqh (exth, memh, addr));
3255
 
3256
      /* We must use tgt here for the target.  The Alpha-vms port fails if we use
3257
         addr for the target, because addr is marked as a pointer and combine
3258
         knows that pointers are always sign-extended 32-bit values.  */
3259
      addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3260
      addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3261
                           addr, 1, OPTAB_WIDEN);
3262
    }
3263
  else
3264
    {
3265
      emit_move_insn (addr, plus_constant (mema, ofs));
3266
      emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
3267
      switch ((int) size)
3268
        {
3269
        case 2:
3270
          emit_insn (gen_extwh (exth, memh, addr));
3271
          mode = HImode;
3272
          break;
3273
        case 4:
3274
          emit_insn (gen_extlh (exth, memh, addr));
3275
          mode = SImode;
3276
          break;
3277
        case 8:
3278
          emit_insn (gen_extqh (exth, memh, addr));
3279
          mode = DImode;
3280
          break;
3281
        default:
3282
          gcc_unreachable ();
3283
        }
3284
 
3285
      addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3286
                           gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3287
                           sign, OPTAB_WIDEN);
3288
    }
3289
 
3290
  if (addr != tgt)
3291
    emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3292
}
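
/* Illustrative note (not in the original sources; TGT and MEM are placeholder
   rtxes): a caller that wants a zero-extended unaligned longword would invoke
   the routine above roughly as

       alpha_expand_unaligned_load (tgt, mem, 4, 0, 0);

   which, per the table preceding it, emits the ldq_u/ldq_u/lda/extll/extlh/or
   sequence, while passing sign == 1 together with size == 2 selects the
   extql/extqh/sra form instead.  */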
3293
 
3294
/* Similarly, use ins and msk instructions to perform unaligned stores.  */
3295
 
3296
void
3297
alpha_expand_unaligned_store (rtx dst, rtx src,
3298
                              HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3299
{
3300
  rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3301
 
3302
  if (TARGET_BWX && size == 2)
3303
    {
3304
      if (src != const0_rtx)
3305
        {
3306
          dstl = gen_lowpart (QImode, src);
3307
          dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3308
                                      NULL, 1, OPTAB_LIB_WIDEN);
3309
          dsth = gen_lowpart (QImode, dsth);
3310
        }
3311
      else
3312
        dstl = dsth = const0_rtx;
3313
 
3314
      meml = adjust_address (dst, QImode, ofs);
3315
      memh = adjust_address (dst, QImode, ofs+1);
3316
 
3317
      emit_move_insn (meml, dstl);
3318
      emit_move_insn (memh, dsth);
3319
      return;
3320
    }
3321
 
3322
  dstl = gen_reg_rtx (DImode);
3323
  dsth = gen_reg_rtx (DImode);
3324
  insl = gen_reg_rtx (DImode);
3325
  insh = gen_reg_rtx (DImode);
3326
 
3327
  dsta = XEXP (dst, 0);
3328
  if (GET_CODE (dsta) == LO_SUM)
3329
    dsta = force_reg (Pmode, dsta);
3330
 
3331
  /* AND addresses cannot be in any alias set, since they may implicitly
3332
     alias surrounding code.  Ideally we'd have some alias set that
3333
     covered all types except those with alignment 8 or higher.  */
3334
 
3335
  meml = change_address (dst, DImode,
3336
                         gen_rtx_AND (DImode,
3337
                                      plus_constant (dsta, ofs),
3338
                                      GEN_INT (-8)));
3339
  set_mem_alias_set (meml, 0);
3340
 
3341
  memh = change_address (dst, DImode,
3342
                         gen_rtx_AND (DImode,
3343
                                      plus_constant (dsta, ofs + size - 1),
3344
                                      GEN_INT (-8)));
3345
  set_mem_alias_set (memh, 0);
3346
 
3347
  emit_move_insn (dsth, memh);
3348
  emit_move_insn (dstl, meml);
3349
 
3350
  addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3351
 
3352
  if (src != CONST0_RTX (GET_MODE (src)))
3353
    {
3354
      emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3355
                            GEN_INT (size*8), addr));
3356
 
3357
      switch ((int) size)
3358
        {
3359
        case 2:
3360
          emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
3361
          break;
3362
        case 4:
3363
          emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
3364
          break;
3365
        case 8:
3366
          emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
3367
          break;
3368
        default:
3369
          gcc_unreachable ();
3370
        }
3371
    }
3372
 
3373
  emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3374
 
3375
  switch ((int) size)
3376
    {
3377
    case 2:
3378
      emit_insn (gen_mskwl (dstl, dstl, addr));
3379
      break;
3380
    case 4:
3381
      emit_insn (gen_mskll (dstl, dstl, addr));
3382
      break;
3383
    case 8:
3384
      emit_insn (gen_mskql (dstl, dstl, addr));
3385
      break;
3386
    default:
3387
      gcc_unreachable ();
3388
    }
3389
 
3390
  if (src != CONST0_RTX (GET_MODE (src)))
3391
    {
3392
      dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3393
      dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3394
    }
3395
 
3396
  /* Must store high before low for degenerate case of aligned.  */
3397
  emit_move_insn (memh, dsth);
3398
  emit_move_insn (meml, dstl);
3399
}
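
/* Illustrative note (not in the original sources; DST, SRC and OFS are
   placeholders): storing an unaligned longword would be requested as

       alpha_expand_unaligned_store (dst, src, 4, ofs);

   which loads the two enclosing quadwords, positions SRC with insll/inslh,
   clears the destination bytes with mskll/msklh, ORs the pieces together,
   and stores high before low so the aligned degenerate case stays correct.  */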
3400
 
3401
/* The block move code tries to maximize speed by separating loads and
3402
   stores at the expense of register pressure: we load all of the data
3403
   before we store it back out.  Two secondary effects are worth mentioning:
   this speeds copying to and from both aligned and unaligned buffers, and it
   makes the code significantly easier to write.  */
3406
 
3407
#define MAX_MOVE_WORDS  8
3408
 
3409
/* Load an integral number of consecutive unaligned quadwords.  */
3410
 
3411
static void
3412
alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3413
                                   HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3414
{
3415
  rtx const im8 = GEN_INT (-8);
3416
  rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3417
  rtx sreg, areg, tmp, smema;
3418
  HOST_WIDE_INT i;
3419
 
3420
  smema = XEXP (smem, 0);
3421
  if (GET_CODE (smema) == LO_SUM)
3422
    smema = force_reg (Pmode, smema);
3423
 
3424
  /* Generate all the tmp registers we need.  */
3425
  for (i = 0; i < words; ++i)
3426
    {
3427
      data_regs[i] = out_regs[i];
3428
      ext_tmps[i] = gen_reg_rtx (DImode);
3429
    }
3430
  data_regs[words] = gen_reg_rtx (DImode);
3431
 
3432
  if (ofs != 0)
3433
    smem = adjust_address (smem, GET_MODE (smem), ofs);
3434
 
3435
  /* Load up all of the source data.  */
3436
  for (i = 0; i < words; ++i)
3437
    {
3438
      tmp = change_address (smem, DImode,
3439
                            gen_rtx_AND (DImode,
3440
                                         plus_constant (smema, 8*i),
3441
                                         im8));
3442
      set_mem_alias_set (tmp, 0);
3443
      emit_move_insn (data_regs[i], tmp);
3444
    }
3445
 
3446
  tmp = change_address (smem, DImode,
3447
                        gen_rtx_AND (DImode,
3448
                                     plus_constant (smema, 8*words - 1),
3449
                                     im8));
3450
  set_mem_alias_set (tmp, 0);
3451
  emit_move_insn (data_regs[words], tmp);
3452
 
3453
  /* Extract the half-word fragments.  Unfortunately DEC decided to make
3454
     extxh with offset zero a noop instead of zeroing the register, so
3455
     we must take care of that edge condition ourselves with cmov.  */
3456
 
3457
  sreg = copy_addr_to_reg (smema);
3458
  areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3459
                       1, OPTAB_WIDEN);
3460
  for (i = 0; i < words; ++i)
3461
    {
3462
      emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
3463
      emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
3464
      emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3465
                              gen_rtx_IF_THEN_ELSE (DImode,
3466
                                                    gen_rtx_EQ (DImode, areg,
3467
                                                                const0_rtx),
3468
                                                    const0_rtx, ext_tmps[i])));
3469
    }
3470
 
3471
  /* Merge the half-words into whole words.  */
3472
  for (i = 0; i < words; ++i)
3473
    {
3474
      out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3475
                                  ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3476
    }
3477
}
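
/* Illustrative note (not in the original sources): the conditional move above
   matters precisely when SMEM turns out to be 8-byte aligned at run time.  In
   that case AREG is zero and, as described, extqh passes its input through
   unchanged instead of producing zero, so the cmov forces the high fragment
   to zero and keeps the final IOR from merging in stale data.  */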
3478
 
3479
/* Store an integral number of consecutive unaligned quadwords.  DATA_REGS
3480
   may be NULL to store zeros.  */
3481
 
3482
static void
3483
alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3484
                                    HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3485
{
3486
  rtx const im8 = GEN_INT (-8);
3487
  rtx ins_tmps[MAX_MOVE_WORDS];
3488
  rtx st_tmp_1, st_tmp_2, dreg;
3489
  rtx st_addr_1, st_addr_2, dmema;
3490
  HOST_WIDE_INT i;
3491
 
3492
  dmema = XEXP (dmem, 0);
3493
  if (GET_CODE (dmema) == LO_SUM)
3494
    dmema = force_reg (Pmode, dmema);
3495
 
3496
  /* Generate all the tmp registers we need.  */
3497
  if (data_regs != NULL)
3498
    for (i = 0; i < words; ++i)
3499
      ins_tmps[i] = gen_reg_rtx (DImode);
3500
  st_tmp_1 = gen_reg_rtx (DImode);
3501
  st_tmp_2 = gen_reg_rtx (DImode);
3502
 
3503
  if (ofs != 0)
3504
    dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3505
 
3506
  st_addr_2 = change_address (dmem, DImode,
                              gen_rtx_AND (DImode,
                                           plus_constant (dmema, words*8 - 1),
                                           im8));
3510
  set_mem_alias_set (st_addr_2, 0);
3511
 
3512
  st_addr_1 = change_address (dmem, DImode,
3513
                              gen_rtx_AND (DImode, dmema, im8));
3514
  set_mem_alias_set (st_addr_1, 0);
3515
 
3516
  /* Load up the destination end bits.  */
3517
  emit_move_insn (st_tmp_2, st_addr_2);
3518
  emit_move_insn (st_tmp_1, st_addr_1);
3519
 
3520
  /* Shift the input data into place.  */
3521
  dreg = copy_addr_to_reg (dmema);
3522
  if (data_regs != NULL)
3523
    {
3524
      for (i = words-1; i >= 0; --i)
3525
        {
3526
          emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
3527
          emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
3528
        }
3529
      for (i = words-1; i > 0; --i)
3530
        {
3531
          ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3532
                                        ins_tmps[i-1], ins_tmps[i-1], 1,
3533
                                        OPTAB_WIDEN);
3534
        }
3535
    }
3536
 
3537
  /* Split and merge the ends with the destination data.  */
3538
  emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
3539
  emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));
3540
 
3541
  if (data_regs != NULL)
3542
    {
3543
      st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3544
                               st_tmp_2, 1, OPTAB_WIDEN);
3545
      st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3546
                               st_tmp_1, 1, OPTAB_WIDEN);
3547
    }
3548
 
3549
  /* Store it all.  */
3550
  emit_move_insn (st_addr_2, st_tmp_2);
3551
  for (i = words-1; i > 0; --i)
3552
    {
3553
      rtx tmp = change_address (dmem, DImode,
3554
                                gen_rtx_AND (DImode,
3555
                                             plus_constant (dmema, i*8),
3556
                                             im8));
3557
      set_mem_alias_set (tmp, 0);
3558
      emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3559
    }
3560
  emit_move_insn (st_addr_1, st_tmp_1);
3561
}
3562
 
3563
 
3564
/* Expand string/block move operations.
3565
 
3566
   operands[0] is the pointer to the destination.
3567
   operands[1] is the pointer to the source.
3568
   operands[2] is the number of bytes to move.
3569
   operands[3] is the alignment.  */
3570
 
3571
int
3572
alpha_expand_block_move (rtx operands[])
3573
{
3574
  rtx bytes_rtx = operands[2];
3575
  rtx align_rtx = operands[3];
3576
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3577
  HOST_WIDE_INT bytes = orig_bytes;
3578
  HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3579
  HOST_WIDE_INT dst_align = src_align;
3580
  rtx orig_src = operands[1];
3581
  rtx orig_dst = operands[0];
3582
  rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3583
  rtx tmp;
3584
  unsigned int i, words, ofs, nregs = 0;
3585
 
3586
  if (orig_bytes <= 0)
3587
    return 1;
3588
  else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3589
    return 0;
3590
 
3591
  /* Look for additional alignment information from recorded register info.  */
3592
 
3593
  tmp = XEXP (orig_src, 0);
3594
  if (REG_P (tmp))
3595
    src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3596
  else if (GET_CODE (tmp) == PLUS
3597
           && REG_P (XEXP (tmp, 0))
3598
           && CONST_INT_P (XEXP (tmp, 1)))
3599
    {
3600
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3601
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3602
 
3603
      if (a > src_align)
3604
        {
3605
          if (a >= 64 && c % 8 == 0)
3606
            src_align = 64;
3607
          else if (a >= 32 && c % 4 == 0)
3608
            src_align = 32;
3609
          else if (a >= 16 && c % 2 == 0)
3610
            src_align = 16;
3611
        }
3612
    }
3613
 
3614
  tmp = XEXP (orig_dst, 0);
3615
  if (REG_P (tmp))
3616
    dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3617
  else if (GET_CODE (tmp) == PLUS
3618
           && REG_P (XEXP (tmp, 0))
3619
           && CONST_INT_P (XEXP (tmp, 1)))
3620
    {
3621
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3622
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3623
 
3624
      if (a > dst_align)
3625
        {
3626
          if (a >= 64 && c % 8 == 0)
3627
            dst_align = 64;
3628
          else if (a >= 32 && c % 4 == 0)
3629
            dst_align = 32;
3630
          else if (a >= 16 && c % 2 == 0)
3631
            dst_align = 16;
3632
        }
3633
    }
3634
 
3635
  ofs = 0;
3636
  if (src_align >= 64 && bytes >= 8)
3637
    {
3638
      words = bytes / 8;
3639
 
3640
      for (i = 0; i < words; ++i)
3641
        data_regs[nregs + i] = gen_reg_rtx (DImode);
3642
 
3643
      for (i = 0; i < words; ++i)
3644
        emit_move_insn (data_regs[nregs + i],
3645
                        adjust_address (orig_src, DImode, ofs + i * 8));
3646
 
3647
      nregs += words;
3648
      bytes -= words * 8;
3649
      ofs += words * 8;
3650
    }
3651
 
3652
  if (src_align >= 32 && bytes >= 4)
3653
    {
3654
      words = bytes / 4;
3655
 
3656
      for (i = 0; i < words; ++i)
3657
        data_regs[nregs + i] = gen_reg_rtx (SImode);
3658
 
3659
      for (i = 0; i < words; ++i)
3660
        emit_move_insn (data_regs[nregs + i],
3661
                        adjust_address (orig_src, SImode, ofs + i * 4));
3662
 
3663
      nregs += words;
3664
      bytes -= words * 4;
3665
      ofs += words * 4;
3666
    }
3667
 
3668
  if (bytes >= 8)
3669
    {
3670
      words = bytes / 8;
3671
 
3672
      for (i = 0; i < words+1; ++i)
3673
        data_regs[nregs + i] = gen_reg_rtx (DImode);
3674
 
3675
      alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3676
                                         words, ofs);
3677
 
3678
      nregs += words;
3679
      bytes -= words * 8;
3680
      ofs += words * 8;
3681
    }
3682
 
3683
  if (! TARGET_BWX && bytes >= 4)
3684
    {
3685
      data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3686
      alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3687
      bytes -= 4;
3688
      ofs += 4;
3689
    }
3690
 
3691
  if (bytes >= 2)
3692
    {
3693
      if (src_align >= 16)
3694
        {
3695
          do {
3696
            data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3697
            emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3698
            bytes -= 2;
3699
            ofs += 2;
3700
          } while (bytes >= 2);
3701
        }
3702
      else if (! TARGET_BWX)
3703
        {
3704
          data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3705
          alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3706
          bytes -= 2;
3707
          ofs += 2;
3708
        }
3709
    }
3710
 
3711
  while (bytes > 0)
3712
    {
3713
      data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3714
      emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3715
      bytes -= 1;
3716
      ofs += 1;
3717
    }
3718
 
3719
  gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3720
 
3721
  /* Now save it back out again.  */
3722
 
3723
  i = 0, ofs = 0;
3724
 
3725
  /* Write out the data in whatever chunks reading the source allowed.  */
3726
  if (dst_align >= 64)
3727
    {
3728
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3729
        {
3730
          emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3731
                          data_regs[i]);
3732
          ofs += 8;
3733
          i++;
3734
        }
3735
    }
3736
 
3737
  if (dst_align >= 32)
3738
    {
3739
      /* If the source has remaining DImode regs, write them out in
3740
         two pieces.  */
3741
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3742
        {
3743
          tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3744
                              NULL_RTX, 1, OPTAB_WIDEN);
3745
 
3746
          emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3747
                          gen_lowpart (SImode, data_regs[i]));
3748
          emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3749
                          gen_lowpart (SImode, tmp));
3750
          ofs += 8;
3751
          i++;
3752
        }
3753
 
3754
      while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3755
        {
3756
          emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3757
                          data_regs[i]);
3758
          ofs += 4;
3759
          i++;
3760
        }
3761
    }
3762
 
3763
  if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3764
    {
3765
      /* Write out a remaining block of words using unaligned methods.  */
3766
 
3767
      for (words = 1; i + words < nregs; words++)
3768
        if (GET_MODE (data_regs[i + words]) != DImode)
3769
          break;
3770
 
3771
      if (words == 1)
3772
        alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3773
      else
3774
        alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3775
                                            words, ofs);
3776
 
3777
      i += words;
3778
      ofs += words * 8;
3779
    }
3780
 
3781
  /* Due to the above, this won't be aligned.  */
3782
  /* ??? If we have more than one of these, consider constructing full
3783
     words in registers and using alpha_expand_unaligned_store_words.  */
3784
  while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3785
    {
3786
      alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3787
      ofs += 4;
3788
      i++;
3789
    }
3790
 
3791
  if (dst_align >= 16)
3792
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3793
      {
3794
        emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
3795
        i++;
3796
        ofs += 2;
3797
      }
3798
  else
3799
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3800
      {
3801
        alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
3802
        i++;
3803
        ofs += 2;
3804
      }
3805
 
3806
  /* The remainder must be byte copies.  */
3807
  while (i < nregs)
3808
    {
3809
      gcc_assert (GET_MODE (data_regs[i]) == QImode);
3810
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
3811
      i++;
3812
      ofs += 1;
3813
    }
3814
 
3815
  return 1;
3816
}
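
/* Illustrative note (not in the original sources): for the operand layout
   documented above, a 13-byte copy whose source and destination are both
   known to be 4-byte aligned is split into three SImode register loads plus
   one QImode load, then written back the same way; a request larger than
   MAX_MOVE_WORDS * UNITS_PER_WORD bytes makes the expander return 0 so that
   the caller can fall back to a generic copy.  */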
3817
 
3818
int
3819
alpha_expand_block_clear (rtx operands[])
3820
{
3821
  rtx bytes_rtx = operands[1];
3822
  rtx align_rtx = operands[3];
3823
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3824
  HOST_WIDE_INT bytes = orig_bytes;
3825
  HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
3826
  HOST_WIDE_INT alignofs = 0;
3827
  rtx orig_dst = operands[0];
3828
  rtx tmp;
3829
  int i, words, ofs = 0;
3830
 
3831
  if (orig_bytes <= 0)
3832
    return 1;
3833
  if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3834
    return 0;
3835
 
3836
  /* Look for stricter alignment.  */
3837
  tmp = XEXP (orig_dst, 0);
3838
  if (REG_P (tmp))
3839
    align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3840
  else if (GET_CODE (tmp) == PLUS
3841
           && REG_P (XEXP (tmp, 0))
3842
           && CONST_INT_P (XEXP (tmp, 1)))
3843
    {
3844
      HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3845
      int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3846
 
3847
      if (a > align)
3848
        {
3849
          if (a >= 64)
3850
            align = a, alignofs = 8 - c % 8;
3851
          else if (a >= 32)
3852
            align = a, alignofs = 4 - c % 4;
3853
          else if (a >= 16)
3854
            align = a, alignofs = 2 - c % 2;
3855
        }
3856
    }
3857
 
3858
  /* Handle an unaligned prefix first.  */
3859
 
3860
  if (alignofs > 0)
3861
    {
3862
#if HOST_BITS_PER_WIDE_INT >= 64
3863
      /* Given that alignofs is bounded by align, the only time BWX could
3864
         generate three stores is for a 7 byte fill.  Prefer two individual
3865
         stores over a load/mask/store sequence.  */
3866
      if ((!TARGET_BWX || alignofs == 7)
          && align >= 32
          && !(alignofs == 4 && bytes >= 4))
3869
        {
3870
          enum machine_mode mode = (align >= 64 ? DImode : SImode);
3871
          int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
3872
          rtx mem, tmp;
3873
          HOST_WIDE_INT mask;
3874
 
3875
          mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
3876
          set_mem_alias_set (mem, 0);
3877
 
3878
          mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
3879
          if (bytes < alignofs)
3880
            {
3881
              mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
3882
              ofs += bytes;
3883
              bytes = 0;
3884
            }
3885
          else
3886
            {
3887
              bytes -= alignofs;
3888
              ofs += alignofs;
3889
            }
3890
          alignofs = 0;
3891
 
3892
          tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
3893
                              NULL_RTX, 1, OPTAB_WIDEN);
3894
 
3895
          emit_move_insn (mem, tmp);
3896
        }
3897
#endif
3898
 
3899
      if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
3900
        {
3901
          emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
3902
          bytes -= 1;
3903
          ofs += 1;
3904
          alignofs -= 1;
3905
        }
3906
      if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
3907
        {
3908
          emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
3909
          bytes -= 2;
3910
          ofs += 2;
3911
          alignofs -= 2;
3912
        }
3913
      if (alignofs == 4 && bytes >= 4)
3914
        {
3915
          emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
3916
          bytes -= 4;
3917
          ofs += 4;
3918
          alignofs = 0;
3919
        }
3920
 
3921
      /* If we've not used the extra lead alignment information by now,
3922
         we won't be able to.  Downgrade align to match what's left over.  */
3923
      if (alignofs > 0)
3924
        {
3925
          alignofs = alignofs & -alignofs;
3926
          align = MIN (align, alignofs * BITS_PER_UNIT);
3927
        }
3928
    }
3929
 
3930
  /* Handle a block of contiguous long-words.  */
3931
 
3932
  if (align >= 64 && bytes >= 8)
3933
    {
3934
      words = bytes / 8;
3935
 
3936
      for (i = 0; i < words; ++i)
3937
        emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
3938
                        const0_rtx);
3939
 
3940
      bytes -= words * 8;
3941
      ofs += words * 8;
3942
    }
3943
 
3944
  /* If the block is large and appropriately aligned, emit a single
3945
     store followed by a sequence of stq_u insns.  */
3946
 
3947
  if (align >= 32 && bytes > 16)
3948
    {
3949
      rtx orig_dsta;
3950
 
3951
      emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
3952
      bytes -= 4;
3953
      ofs += 4;
3954
 
3955
      orig_dsta = XEXP (orig_dst, 0);
3956
      if (GET_CODE (orig_dsta) == LO_SUM)
3957
        orig_dsta = force_reg (Pmode, orig_dsta);
3958
 
3959
      words = bytes / 8;
3960
      for (i = 0; i < words; ++i)
3961
        {
3962
          rtx mem
3963
            = change_address (orig_dst, DImode,
3964
                              gen_rtx_AND (DImode,
3965
                                           plus_constant (orig_dsta, ofs + i*8),
3966
                                           GEN_INT (-8)));
3967
          set_mem_alias_set (mem, 0);
3968
          emit_move_insn (mem, const0_rtx);
3969
        }
3970
 
3971
      /* Depending on the alignment, the first stq_u may have overlapped
3972
         with the initial stl, which means that the last stq_u didn't
3973
         write as much as it would appear.  Leave those questionable bytes
3974
         unaccounted for.  */
3975
      bytes -= words * 8 - 4;
3976
      ofs += words * 8 - 4;
3977
    }
3978
 
3979
  /* Handle a smaller block of aligned words.  */
3980
 
3981
  if ((align >= 64 && bytes == 4)
3982
      || (align == 32 && bytes >= 4))
3983
    {
3984
      words = bytes / 4;
3985
 
3986
      for (i = 0; i < words; ++i)
3987
        emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
3988
                        const0_rtx);
3989
 
3990
      bytes -= words * 4;
3991
      ofs += words * 4;
3992
    }
3993
 
3994
  /* An unaligned block uses stq_u stores for as many as possible.  */
3995
 
3996
  if (bytes >= 8)
3997
    {
3998
      words = bytes / 8;
3999
 
4000
      alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4001
 
4002
      bytes -= words * 8;
4003
      ofs += words * 8;
4004
    }
4005
 
4006
  /* Next clean up any trailing pieces.  */
4007
 
4008
#if HOST_BITS_PER_WIDE_INT >= 64
4009
  /* Count the number of bits in BYTES for which aligned stores could
4010
     be emitted.  */
4011
  words = 0;
4012
  for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4013
    if (bytes & i)
4014
      words += 1;
4015
 
4016
  /* If we have appropriate alignment (and it wouldn't take too many
4017
     instructions otherwise), mask out the bytes we need.  */
4018
  if (TARGET_BWX ? words > 2 : bytes > 0)
4019
    {
4020
      if (align >= 64)
4021
        {
4022
          rtx mem, tmp;
4023
          HOST_WIDE_INT mask;
4024
 
4025
          mem = adjust_address (orig_dst, DImode, ofs);
4026
          set_mem_alias_set (mem, 0);
4027
 
4028
          mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4029
 
4030
          tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4031
                              NULL_RTX, 1, OPTAB_WIDEN);
4032
 
4033
          emit_move_insn (mem, tmp);
4034
          return 1;
4035
        }
4036
      else if (align >= 32 && bytes < 4)
4037
        {
4038
          rtx mem, tmp;
4039
          HOST_WIDE_INT mask;
4040
 
4041
          mem = adjust_address (orig_dst, SImode, ofs);
4042
          set_mem_alias_set (mem, 0);
4043
 
4044
          mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4045
 
4046
          tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4047
                              NULL_RTX, 1, OPTAB_WIDEN);
4048
 
4049
          emit_move_insn (mem, tmp);
4050
          return 1;
4051
        }
4052
    }
4053
#endif
4054
 
4055
  if (!TARGET_BWX && bytes >= 4)
4056
    {
4057
      alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4058
      bytes -= 4;
4059
      ofs += 4;
4060
    }
4061
 
4062
  if (bytes >= 2)
4063
    {
4064
      if (align >= 16)
4065
        {
4066
          do {
4067
            emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4068
                            const0_rtx);
4069
            bytes -= 2;
4070
            ofs += 2;
4071
          } while (bytes >= 2);
4072
        }
4073
      else if (! TARGET_BWX)
4074
        {
4075
          alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4076
          bytes -= 2;
4077
          ofs += 2;
4078
        }
4079
    }
4080
 
4081
  while (bytes > 0)
4082
    {
4083
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4084
      bytes -= 1;
4085
      ofs += 1;
4086
    }
4087
 
4088
  return 1;
4089
}
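
/* Illustrative note (not in the original sources): clearing 16 bytes at a
   destination known to be 8-byte aligned reduces to two aligned DImode
   stores of zero and nothing else; only misaligned heads and tails, or
   larger blocks, bring the masking and stq_u machinery above into play.  */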
4090
 
4091
/* Returns a mask so that zap(x, value) == x & mask.  */
4092
 
4093
rtx
4094
alpha_expand_zap_mask (HOST_WIDE_INT value)
4095
{
4096
  rtx result;
4097
  int i;
4098
 
4099
  if (HOST_BITS_PER_WIDE_INT >= 64)
4100
    {
4101
      HOST_WIDE_INT mask = 0;
4102
 
4103
      for (i = 7; i >= 0; --i)
4104
        {
4105
          mask <<= 8;
4106
          if (!((value >> i) & 1))
4107
            mask |= 0xff;
4108
        }
4109
 
4110
      result = gen_int_mode (mask, DImode);
4111
    }
4112
  else
4113
    {
4114
      HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4115
 
4116
      gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4117
 
4118
      for (i = 7; i >= 4; --i)
4119
        {
4120
          mask_hi <<= 8;
4121
          if (!((value >> i) & 1))
4122
            mask_hi |= 0xff;
4123
        }
4124
 
4125
      for (i = 3; i >= 0; --i)
4126
        {
4127
          mask_lo <<= 8;
4128
          if (!((value >> i) & 1))
4129
            mask_lo |= 0xff;
4130
        }
4131
 
4132
      result = immed_double_const (mask_lo, mask_hi, DImode);
4133
    }
4134
 
4135
  return result;
4136
}
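
/* Worked example (not in the original sources): for VALUE == 0x0f the loop
   above builds the mask a byte at a time from the top down and returns
   0xffffffff00000000, i.e. zap(x, 0x0f) clears the low four bytes of x,
   which is exactly x & 0xffffffff00000000.  */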
4137
 
4138
void
4139
alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4140
                                   enum machine_mode mode,
4141
                                   rtx op0, rtx op1, rtx op2)
4142
{
4143
  op0 = gen_lowpart (mode, op0);
4144
 
4145
  if (op1 == const0_rtx)
4146
    op1 = CONST0_RTX (mode);
4147
  else
4148
    op1 = gen_lowpart (mode, op1);
4149
 
4150
  if (op2 == const0_rtx)
4151
    op2 = CONST0_RTX (mode);
4152
  else
4153
    op2 = gen_lowpart (mode, op2);
4154
 
4155
  emit_insn ((*gen) (op0, op1, op2));
4156
}
4157
 
4158
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
4159
   COND is true.  Mark the jump as unlikely to be taken.  */
4160
 
4161
static void
4162
emit_unlikely_jump (rtx cond, rtx label)
4163
{
4164
  rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4165
  rtx x;
4166
 
4167
  x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4168
  x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4169
  add_reg_note (x, REG_BR_PROB, very_unlikely);
4170
}
4171
 
4172
/* A subroutine of the atomic operation splitters.  Emit a load-locked
4173
   instruction in MODE.  */
4174
 
4175
static void
4176
emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4177
{
4178
  rtx (*fn) (rtx, rtx) = NULL;
4179
  if (mode == SImode)
4180
    fn = gen_load_locked_si;
4181
  else if (mode == DImode)
4182
    fn = gen_load_locked_di;
4183
  emit_insn (fn (reg, mem));
4184
}
4185
 
4186
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
4187
   instruction in MODE.  */
4188
 
4189
static void
4190
emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4191
{
4192
  rtx (*fn) (rtx, rtx, rtx) = NULL;
4193
  if (mode == SImode)
4194
    fn = gen_store_conditional_si;
4195
  else if (mode == DImode)
4196
    fn = gen_store_conditional_di;
4197
  emit_insn (fn (res, mem, val));
4198
}
4199
 
4200
/* Subroutines of the atomic operation splitters.  Emit barriers
4201
   as needed for the memory MODEL.  */
4202
 
4203
static void
4204
alpha_pre_atomic_barrier (enum memmodel model)
4205
{
4206
  switch (model)
4207
    {
4208
    case MEMMODEL_RELAXED:
4209
    case MEMMODEL_CONSUME:
4210
    case MEMMODEL_ACQUIRE:
4211
      break;
4212
    case MEMMODEL_RELEASE:
4213
    case MEMMODEL_ACQ_REL:
4214
    case MEMMODEL_SEQ_CST:
4215
      emit_insn (gen_memory_barrier ());
4216
      break;
4217
    default:
4218
      gcc_unreachable ();
4219
    }
4220
}
4221
 
4222
static void
4223
alpha_post_atomic_barrier (enum memmodel model)
4224
{
4225
  switch (model)
4226
    {
4227
    case MEMMODEL_RELAXED:
4228
    case MEMMODEL_CONSUME:
4229
    case MEMMODEL_RELEASE:
4230
      break;
4231
    case MEMMODEL_ACQUIRE:
4232
    case MEMMODEL_ACQ_REL:
4233
    case MEMMODEL_SEQ_CST:
4234
      emit_insn (gen_memory_barrier ());
4235
      break;
4236
    default:
4237
      gcc_unreachable ();
4238
    }
4239
}
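
/* Summary (not in the original sources): taken together, the two routines
   above emit gen_memory_barrier (an Alpha mb) before the locked sequence for
   MEMMODEL_RELEASE, MEMMODEL_ACQ_REL and MEMMODEL_SEQ_CST, after it for
   MEMMODEL_ACQUIRE, MEMMODEL_ACQ_REL and MEMMODEL_SEQ_CST, and no barrier at
   all for MEMMODEL_RELAXED and MEMMODEL_CONSUME.  */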
4240
 
4241
/* A subroutine of the atomic operation splitters.  Emit an insxl
4242
   instruction in MODE.  */
4243
 
4244
static rtx
4245
emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4246
{
4247
  rtx ret = gen_reg_rtx (DImode);
4248
  rtx (*fn) (rtx, rtx, rtx);
4249
 
4250
  switch (mode)
4251
    {
4252
    case QImode:
4253
      fn = gen_insbl;
4254
      break;
4255
    case HImode:
4256
      fn = gen_inswl;
4257
      break;
4258
    case SImode:
4259
      fn = gen_insll;
4260
      break;
4261
    case DImode:
4262
      fn = gen_insql;
4263
      break;
4264
    default:
4265
      gcc_unreachable ();
4266
    }
4267
 
4268
  op1 = force_reg (mode, op1);
4269
  emit_insn (fn (ret, op1, op2));
4270
 
4271
  return ret;
4272
}
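
/* Illustrative note (not in the original sources): for MODE == QImode and an
   address whose low three bits are 5, the insbl emitted above shifts the low
   byte of OP1 into byte lane 5 of the result (i.e. op1 << 40) with every
   other byte zero, ready to be IORed into a masked quadword by the
   compare-and-swap and exchange splitters below.  */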
4273
 
4274
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
4275
   to perform.  MEM is the memory on which to operate.  VAL is the second
4276
   operand of the binary operator.  BEFORE and AFTER are optional locations to
4277
   return the value of MEM either before or after the operation.  SCRATCH is
4278
   a scratch register.  */
4279
 
4280
void
4281
alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before,
4282
                       rtx after, rtx scratch, enum memmodel model)
4283
{
4284
  enum machine_mode mode = GET_MODE (mem);
4285
  rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4286
 
4287
  alpha_pre_atomic_barrier (model);
4288
 
4289
  label = gen_label_rtx ();
4290
  emit_label (label);
4291
  label = gen_rtx_LABEL_REF (DImode, label);
4292
 
4293
  if (before == NULL)
4294
    before = scratch;
4295
  emit_load_locked (mode, before, mem);
4296
 
4297
  if (code == NOT)
4298
    {
4299
      x = gen_rtx_AND (mode, before, val);
4300
      emit_insn (gen_rtx_SET (VOIDmode, val, x));
4301
 
4302
      x = gen_rtx_NOT (mode, val);
4303
    }
4304
  else
4305
    x = gen_rtx_fmt_ee (code, mode, before, val);
4306
  if (after)
4307
    emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4308
  emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4309
 
4310
  emit_store_conditional (mode, cond, mem, scratch);
4311
 
4312
  x = gen_rtx_EQ (DImode, cond, const0_rtx);
4313
  emit_unlikely_jump (x, label);
4314
 
4315
  alpha_post_atomic_barrier (model);
4316
}
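
/* Illustrative note (not in the original sources): for a SEQ_CST atomic add
   on an SImode location, the splitter above emits, roughly,

        mb
     retry:
        ldl_l   $t,0($addr)
        addl    $t,$val,$t
        stl_c   $t,0($addr)
        beq     $t,retry        # store-conditional failed; very unlikely
        mb

   where $t, $val and $addr stand for whatever registers end up allocated;
   the backward branch is the one marked unlikely via REG_BR_PROB.  */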
4317
 
4318
/* Expand a compare and swap operation.  */
4319
 
4320
void
4321
alpha_split_compare_and_swap (rtx operands[])
4322
{
4323
  rtx cond, retval, mem, oldval, newval;
4324
  bool is_weak;
4325
  enum memmodel mod_s, mod_f;
4326
  enum machine_mode mode;
4327
  rtx label1, label2, x;
4328
 
4329
  cond = operands[0];
4330
  retval = operands[1];
4331
  mem = operands[2];
4332
  oldval = operands[3];
4333
  newval = operands[4];
4334
  is_weak = (operands[5] != const0_rtx);
4335
  mod_s = (enum memmodel) INTVAL (operands[6]);
4336
  mod_f = (enum memmodel) INTVAL (operands[7]);
4337
  mode = GET_MODE (mem);
4338
 
4339
  alpha_pre_atomic_barrier (mod_s);
4340
 
4341
  label1 = NULL_RTX;
4342
  if (!is_weak)
4343
    {
4344
      label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4345
      emit_label (XEXP (label1, 0));
4346
    }
4347
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4348
 
4349
  emit_load_locked (mode, retval, mem);
4350
 
4351
  x = gen_lowpart (DImode, retval);
4352
  if (oldval == const0_rtx)
4353
    {
4354
      emit_move_insn (cond, const0_rtx);
4355
      x = gen_rtx_NE (DImode, x, const0_rtx);
4356
    }
4357
  else
4358
    {
4359
      x = gen_rtx_EQ (DImode, x, oldval);
4360
      emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4361
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
4362
    }
4363
  emit_unlikely_jump (x, label2);
4364
 
4365
  emit_move_insn (cond, newval);
4366
  emit_store_conditional (mode, cond, mem, gen_lowpart (mode, cond));
4367
 
4368
  if (!is_weak)
4369
    {
4370
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
4371
      emit_unlikely_jump (x, label1);
4372
    }
4373
 
4374
  if (mod_f != MEMMODEL_RELAXED)
4375
    emit_label (XEXP (label2, 0));
4376
 
4377
  alpha_post_atomic_barrier (mod_s);
4378
 
4379
  if (mod_f == MEMMODEL_RELAXED)
4380
    emit_label (XEXP (label2, 0));
4381
}
4382
 
4383
void
4384
alpha_expand_compare_and_swap_12 (rtx operands[])
4385
{
4386
  rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f;
4387
  enum machine_mode mode;
4388
  rtx addr, align, wdst;
4389
  rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
4390
 
4391
  cond = operands[0];
4392
  dst = operands[1];
4393
  mem = operands[2];
4394
  oldval = operands[3];
4395
  newval = operands[4];
4396
  is_weak = operands[5];
4397
  mod_s = operands[6];
4398
  mod_f = operands[7];
4399
  mode = GET_MODE (mem);
4400
 
4401
  /* We forced the address into a register via mem_noofs_operand.  */
4402
  addr = XEXP (mem, 0);
4403
  gcc_assert (register_operand (addr, DImode));
4404
 
4405
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4406
                               NULL_RTX, 1, OPTAB_DIRECT);
4407
 
4408
  oldval = convert_modes (DImode, mode, oldval, 1);
4409
 
4410
  if (newval != const0_rtx)
4411
    newval = emit_insxl (mode, newval, addr);
4412
 
4413
  wdst = gen_reg_rtx (DImode);
4414
  if (mode == QImode)
4415
    gen = gen_atomic_compare_and_swapqi_1;
4416
  else
4417
    gen = gen_atomic_compare_and_swaphi_1;
4418
  emit_insn (gen (cond, wdst, mem, oldval, newval, align,
4419
                  is_weak, mod_s, mod_f));
4420
 
4421
  emit_move_insn (dst, gen_lowpart (mode, wdst));
4422
}
4423
 
4424
void
4425
alpha_split_compare_and_swap_12 (rtx operands[])
4426
{
4427
  rtx cond, dest, orig_mem, oldval, newval, align, scratch;
4428
  enum machine_mode mode;
4429
  bool is_weak;
4430
  enum memmodel mod_s, mod_f;
4431
  rtx label1, label2, mem, addr, width, mask, x;
4432
 
4433
  cond = operands[0];
4434
  dest = operands[1];
4435
  orig_mem = operands[2];
4436
  oldval = operands[3];
4437
  newval = operands[4];
4438
  align = operands[5];
4439
  is_weak = (operands[6] != const0_rtx);
4440
  mod_s = (enum memmodel) INTVAL (operands[7]);
4441
  mod_f = (enum memmodel) INTVAL (operands[8]);
4442
  scratch = operands[9];
4443
  mode = GET_MODE (orig_mem);
4444
  addr = XEXP (orig_mem, 0);
4445
 
4446
  mem = gen_rtx_MEM (DImode, align);
4447
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4448
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4449
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4450
 
4451
  alpha_pre_atomic_barrier (mod_s);
4452
 
4453
  label1 = NULL_RTX;
4454
  if (!is_weak)
4455
    {
4456
      label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4457
      emit_label (XEXP (label1, 0));
4458
    }
4459
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4460
 
4461
  emit_load_locked (DImode, scratch, mem);
4462
 
4463
  width = GEN_INT (GET_MODE_BITSIZE (mode));
4464
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4465
  emit_insn (gen_extxl (dest, scratch, width, addr));
4466
 
4467
  if (oldval == const0_rtx)
4468
    {
4469
      emit_move_insn (cond, const0_rtx);
4470
      x = gen_rtx_NE (DImode, dest, const0_rtx);
4471
    }
4472
  else
4473
    {
4474
      x = gen_rtx_EQ (DImode, dest, oldval);
4475
      emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4476
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
4477
    }
4478
  emit_unlikely_jump (x, label2);
4479
 
4480
  emit_insn (gen_mskxl (cond, scratch, mask, addr));
4481
 
4482
  if (newval != const0_rtx)
4483
    emit_insn (gen_iordi3 (cond, cond, newval));
4484
 
4485
  emit_store_conditional (DImode, cond, mem, cond);
4486
 
4487
  if (!is_weak)
4488
    {
4489
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
4490
      emit_unlikely_jump (x, label1);
4491
    }
4492
 
4493
  if (mod_f != MEMMODEL_RELAXED)
4494
    emit_label (XEXP (label2, 0));
4495
 
4496
  alpha_post_atomic_barrier (mod_s);
4497
 
4498
  if (mod_f == MEMMODEL_RELAXED)
4499
    emit_label (XEXP (label2, 0));
4500
}
4501
 
4502
/* Expand an atomic exchange operation.  */
4503
 
4504
void
4505
alpha_split_atomic_exchange (rtx operands[])
4506
{
4507
  rtx retval, mem, val, scratch;
4508
  enum memmodel model;
4509
  enum machine_mode mode;
4510
  rtx label, x, cond;
4511
 
4512
  retval = operands[0];
4513
  mem = operands[1];
4514
  val = operands[2];
4515
  model = (enum memmodel) INTVAL (operands[3]);
4516
  scratch = operands[4];
4517
  mode = GET_MODE (mem);
4518
  cond = gen_lowpart (DImode, scratch);
4519
 
4520
  alpha_pre_atomic_barrier (model);
4521
 
4522
  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4523
  emit_label (XEXP (label, 0));
4524
 
4525
  emit_load_locked (mode, retval, mem);
4526
  emit_move_insn (scratch, val);
4527
  emit_store_conditional (mode, cond, mem, scratch);
4528
 
4529
  x = gen_rtx_EQ (DImode, cond, const0_rtx);
4530
  emit_unlikely_jump (x, label);
4531
 
4532
  alpha_post_atomic_barrier (model);
4533
}
4534
 
4535
void
4536
alpha_expand_atomic_exchange_12 (rtx operands[])
4537
{
4538
  rtx dst, mem, val, model;
4539
  enum machine_mode mode;
4540
  rtx addr, align, wdst;
4541
  rtx (*gen) (rtx, rtx, rtx, rtx, rtx);
4542
 
4543
  dst = operands[0];
4544
  mem = operands[1];
4545
  val = operands[2];
4546
  model = operands[3];
4547
  mode = GET_MODE (mem);
4548
 
4549
  /* We forced the address into a register via mem_noofs_operand.  */
4550
  addr = XEXP (mem, 0);
4551
  gcc_assert (register_operand (addr, DImode));
4552
 
4553
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4554
                               NULL_RTX, 1, OPTAB_DIRECT);
4555
 
4556
  /* Insert val into the correct byte location within the word.  */
4557
  if (val != const0_rtx)
4558
    val = emit_insxl (mode, val, addr);
4559
 
4560
  wdst = gen_reg_rtx (DImode);
4561
  if (mode == QImode)
4562
    gen = gen_atomic_exchangeqi_1;
4563
  else
4564
    gen = gen_atomic_exchangehi_1;
4565
  emit_insn (gen (wdst, mem, val, align, model));
4566
 
4567
  emit_move_insn (dst, gen_lowpart (mode, wdst));
4568
}
4569
 
4570
void
4571
alpha_split_atomic_exchange_12 (rtx operands[])
4572
{
4573
  rtx dest, orig_mem, addr, val, align, scratch;
4574
  rtx label, mem, width, mask, x;
4575
  enum machine_mode mode;
4576
  enum memmodel model;
4577
 
4578
  dest = operands[0];
4579
  orig_mem = operands[1];
4580
  val = operands[2];
4581
  align = operands[3];
4582
  model = (enum memmodel) INTVAL (operands[4]);
4583
  scratch = operands[5];
4584
  mode = GET_MODE (orig_mem);
4585
  addr = XEXP (orig_mem, 0);
4586
 
4587
  mem = gen_rtx_MEM (DImode, align);
4588
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4589
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4590
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4591
 
4592
  alpha_pre_atomic_barrier (model);
4593
 
4594
  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4595
  emit_label (XEXP (label, 0));
4596
 
4597
  emit_load_locked (DImode, scratch, mem);
4598
 
4599
  width = GEN_INT (GET_MODE_BITSIZE (mode));
4600
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4601
  emit_insn (gen_extxl (dest, scratch, width, addr));
4602
  emit_insn (gen_mskxl (scratch, scratch, mask, addr));
4603
  if (val != const0_rtx)
4604
    emit_insn (gen_iordi3 (scratch, scratch, val));
4605
 
4606
  emit_store_conditional (DImode, scratch, mem, scratch);
4607
 
4608
  x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4609
  emit_unlikely_jump (x, label);
4610
 
4611
  alpha_post_atomic_barrier (model);
4612
}
4613
 
4614
/* Adjust the cost of a scheduling dependency.  Return the new cost of
4615
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */
4616
 
4617
static int
4618
alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4619
{
4620
  enum attr_type dep_insn_type;
4621
 
4622
  /* If the dependence is an anti-dependence, there is no cost.  For an
4623
     output dependence, there is sometimes a cost, but it doesn't seem
4624
     worth handling those few cases.  */
4625
  if (REG_NOTE_KIND (link) != 0)
4626
    return cost;
4627
 
4628
  /* If we can't recognize the insns, we can't really do anything.  */
4629
  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4630
    return cost;
4631
 
4632
  dep_insn_type = get_attr_type (dep_insn);
4633
 
4634
  /* Bring in the user-defined memory latency.  */
4635
  if (dep_insn_type == TYPE_ILD
4636
      || dep_insn_type == TYPE_FLD
4637
      || dep_insn_type == TYPE_LDSYM)
4638
    cost += alpha_memory_latency-1;
4639
 
4640
  /* Everything else handled in DFA bypasses now.  */
4641
 
4642
  return cost;
4643
}
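
/* Illustrative note (not in the original sources): a load (TYPE_ILD,
   TYPE_FLD or TYPE_LDSYM) feeding a dependent instruction has its dependence
   cost increased by alpha_memory_latency - 1 here; anti- and output
   dependencies are returned unchanged, and everything else is left to the
   DFA bypasses.  */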
4644
 
4645
/* The number of instructions that can be issued per cycle.  */
4646
 
4647
static int
4648
alpha_issue_rate (void)
4649
{
4650
  return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4651
}
4652
 
4653
/* How many alternative schedules to try.  This should be as wide as the
4654
   scheduling freedom in the DFA, but no wider.  Making this value too
4655
   large results in extra work for the scheduler.
4656
 
4657
   For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4658
   alternative schedules.  For EV5, we can choose between E0/E1 and
4659
   FA/FM.  For EV6, an arithmetic insn can be issued to U0/U1/L0/L1.  */
4660
 
4661
static int
4662
alpha_multipass_dfa_lookahead (void)
4663
{
4664
  return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4665
}
4666
 
4667
/* Machine-specific function data.  */
4668
 
4669
struct GTY(()) alpha_links;
4670
 
4671
struct GTY(()) machine_function
4672
{
4673
  /* For OSF.  */
4674
  const char *some_ld_name;
4675
 
4676
  /* For TARGET_LD_BUGGY_LDGP.  */
4677
  rtx gp_save_rtx;
4678
 
4679
  /* For VMS condition handlers.  */
4680
  bool uses_condition_handler;
4681
 
4682
  /* Linkage entries.  */
4683
  splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
4684
    links;
4685
};
4686
 
4687
/* How to allocate a 'struct machine_function'.  */
4688
 
4689
static struct machine_function *
4690
alpha_init_machine_status (void)
4691
{
4692
  return ggc_alloc_cleared_machine_function ();
4693
}
4694
 
4695
/* Support for frame-based VMS condition handlers.  */
4696
 
4697
/* A VMS condition handler may be established for a function with a call to
4698
   __builtin_establish_vms_condition_handler, and cancelled with a call to
4699
   __builtin_revert_vms_condition_handler.
4700
 
4701
   The VMS Condition Handling Facility knows about the existence of a handler
4702
   from the procedure descriptor .handler field.  Like the VMS native compilers,
4703
   we store the user-specified handler's address at a fixed location in the
4704
   stack frame and point the procedure descriptor at a common wrapper which
4705
   fetches the real handler's address and issues an indirect call.
4706
 
4707
   The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4708
 
4709
   We force the procedure kind to PT_STACK, and the fixed frame location is
4710
   fp+8, just before the register save area. We use the handler_data field in
4711
   the procedure descriptor to state the fp offset at which the installed
4712
   handler address can be found.  */
4713
 
4714
#define VMS_COND_HANDLER_FP_OFFSET 8
4715
 
4716
/* Expand code to store the currently installed user VMS condition handler
4717
   into TARGET and install HANDLER as the new condition handler.  */
4718
 
4719
void
4720
alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4721
{
4722
  rtx handler_slot_address
4723
    = plus_constant (hard_frame_pointer_rtx, VMS_COND_HANDLER_FP_OFFSET);
4724
 
4725
  rtx handler_slot
4726
    = gen_rtx_MEM (DImode, handler_slot_address);
4727
 
4728
  emit_move_insn (target, handler_slot);
4729
  emit_move_insn (handler_slot, handler);
4730
 
4731
  /* Notify the start/prologue/epilogue emitters that the condition handler
4732
     slot is needed.  In addition to reserving the slot space, this will force
4733
     the procedure kind to PT_STACK, ensuring that the hard_frame_pointer_rtx
4734
     use above is correct.  */
4735
  cfun->machine->uses_condition_handler = true;
4736
}
4737
 
4738
/* Expand code to store the current VMS condition handler into TARGET and
4739
   nullify it.  */
4740
 
4741
void
4742
alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4743
{
4744
  /* We implement this by establishing a null condition handler, with the tiny
4745
     side effect of setting uses_condition_handler.  This is a little bit
4746
     pessimistic if no actual builtin_establish call is ever issued, which is
4747
     not a real problem and is expected never to happen anyway.  */
4748
 
4749
  alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4750
}
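
/* Illustrative note (not in the original sources; prototypes hypothetical):
   the two builtins handled above are meant to be used in pairs from user
   code, along the lines of

       old = __builtin_establish_vms_condition_handler (my_handler);
       ...
       __builtin_revert_vms_condition_handler ();

   with my_handler a user-supplied VMS condition handler; the real
   declarations come from the VMS-specific builtin machinery, not from this
   file.  */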
4751
 
4752
/* Functions to save and restore alpha_return_addr_rtx.  */
4753
 
4754
/* Start the ball rolling with RETURN_ADDR_RTX.  */
4755
 
4756
rtx
4757
alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4758
{
4759
  if (count != 0)
4760
    return const0_rtx;
4761
 
4762
  return get_hard_reg_initial_val (Pmode, REG_RA);
4763
}
4764
 
4765
/* Return or create a memory slot containing the gp value for the current
4766
   function.  Needed only if TARGET_LD_BUGGY_LDGP.  */
4767
 
4768
rtx
4769
alpha_gp_save_rtx (void)
4770
{
4771
  rtx seq, m = cfun->machine->gp_save_rtx;
4772
 
4773
  if (m == NULL)
4774
    {
4775
      start_sequence ();
4776
 
4777
      m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4778
      m = validize_mem (m);
4779
      emit_move_insn (m, pic_offset_table_rtx);
4780
 
4781
      seq = get_insns ();
4782
      end_sequence ();
4783
 
4784
      /* We used to simply emit the sequence after entry_of_function.
4785
         However this breaks the CFG if the first instruction in the
4786
         first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4787
         label.  Emit the sequence properly on the edge.  We are only
4788
         invoked from dw2_build_landing_pads and finish_eh_generation
4789
         will call commit_edge_insertions thanks to a kludge.  */
4790
      insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4791
 
4792
      cfun->machine->gp_save_rtx = m;
4793
    }
4794
 
4795
  return m;
4796
}
4797
 
4798
static void
4799
alpha_instantiate_decls (void)
4800
{
4801
  if (cfun->machine->gp_save_rtx != NULL_RTX)
4802
    instantiate_decl_rtl (cfun->machine->gp_save_rtx);
4803
}
4804
 
4805
static int
4806
alpha_ra_ever_killed (void)
4807
{
4808
  rtx top;
4809
 
4810
  if (!has_hard_reg_initial_val (Pmode, REG_RA))
4811
    return (int)df_regs_ever_live_p (REG_RA);
4812
 
4813
  push_topmost_sequence ();
4814
  top = get_insns ();
4815
  pop_topmost_sequence ();
4816
 
4817
  return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4818
}
4819
 
4820
 
4821
/* Return the trap mode suffix applicable to the current
4822
   instruction, or NULL.  */
4823
 
4824
static const char *
4825
get_trap_mode_suffix (void)
4826
{
4827
  enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4828
 
4829
  switch (s)
4830
    {
4831
    case TRAP_SUFFIX_NONE:
4832
      return NULL;
4833
 
4834
    case TRAP_SUFFIX_SU:
4835
      if (alpha_fptm >= ALPHA_FPTM_SU)
4836
        return "su";
4837
      return NULL;
4838
 
4839
    case TRAP_SUFFIX_SUI:
4840
      if (alpha_fptm >= ALPHA_FPTM_SUI)
4841
        return "sui";
4842
      return NULL;
4843
 
4844
    case TRAP_SUFFIX_V_SV:
4845
      switch (alpha_fptm)
4846
        {
4847
        case ALPHA_FPTM_N:
4848
          return NULL;
4849
        case ALPHA_FPTM_U:
4850
          return "v";
4851
        case ALPHA_FPTM_SU:
4852
        case ALPHA_FPTM_SUI:
4853
          return "sv";
4854
        default:
4855
          gcc_unreachable ();
4856
        }
4857
 
4858
    case TRAP_SUFFIX_V_SV_SVI:
4859
      switch (alpha_fptm)
4860
        {
4861
        case ALPHA_FPTM_N:
4862
          return NULL;
4863
        case ALPHA_FPTM_U:
4864
          return "v";
4865
        case ALPHA_FPTM_SU:
4866
          return "sv";
4867
        case ALPHA_FPTM_SUI:
4868
          return "svi";
4869
        default:
4870
          gcc_unreachable ();
4871
        }
4872
      break;
4873
 
4874
    case TRAP_SUFFIX_U_SU_SUI:
4875
      switch (alpha_fptm)
4876
        {
4877
        case ALPHA_FPTM_N:
4878
          return NULL;
4879
        case ALPHA_FPTM_U:
4880
          return "u";
4881
        case ALPHA_FPTM_SU:
4882
          return "su";
4883
        case ALPHA_FPTM_SUI:
4884
          return "sui";
4885
        default:
4886
          gcc_unreachable ();
4887
        }
4888
      break;
4889
 
4890
    default:
4891
      gcc_unreachable ();
4892
    }
4893
  gcc_unreachable ();
4894
}
4895
 
4896
/* Return the rounding mode suffix applicable to the current
4897
   instruction, or NULL.  */
4898
 
4899
static const char *
4900
get_round_mode_suffix (void)
4901
{
4902
  enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4903
 
4904
  switch (s)
4905
    {
4906
    case ROUND_SUFFIX_NONE:
4907
      return NULL;
4908
    case ROUND_SUFFIX_NORMAL:
4909
      switch (alpha_fprm)
4910
        {
4911
        case ALPHA_FPRM_NORM:
4912
          return NULL;
4913
        case ALPHA_FPRM_MINF:
4914
          return "m";
4915
        case ALPHA_FPRM_CHOP:
4916
          return "c";
4917
        case ALPHA_FPRM_DYN:
4918
          return "d";
4919
        default:
4920
          gcc_unreachable ();
4921
        }
4922
      break;
4923
 
4924
    case ROUND_SUFFIX_C:
4925
      return "c";
4926
 
4927
    default:
4928
      gcc_unreachable ();
4929
    }
4930
  gcc_unreachable ();
4931
}
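
/* Illustrative note (not in the original sources): the two suffix routines
   above feed the '/' case of print_operand below.  For example, a trap
   suffix of "su" combined with dynamic rounding ("d") appends "sud" to the
   mnemonic, giving output such as addt/sud on assemblers that take the
   slash-separated qualifier form (TARGET_AS_SLASH_BEFORE_SUFFIX).  */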
4932
 
4933
/* Locate some local-dynamic symbol still in use by this function
4934
   so that we can print its name in some movdi_er_tlsldm pattern.  */
4935
 
4936
static int
4937
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4938
{
4939
  rtx x = *px;
4940
 
4941
  if (GET_CODE (x) == SYMBOL_REF
4942
      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4943
    {
4944
      cfun->machine->some_ld_name = XSTR (x, 0);
4945
      return 1;
4946
    }
4947
 
4948
  return 0;
4949
}
4950
 
4951
static const char *
4952
get_some_local_dynamic_name (void)
4953
{
4954
  rtx insn;
4955
 
4956
  if (cfun->machine->some_ld_name)
4957
    return cfun->machine->some_ld_name;
4958
 
4959
  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4960
    if (INSN_P (insn)
4961
        && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4962
      return cfun->machine->some_ld_name;
4963
 
4964
  gcc_unreachable ();
4965
}
4966
 
4967
/* Print an operand.  Recognize special options, documented below.  */
4968
 
4969
void
4970
print_operand (FILE *file, rtx x, int code)
4971
{
4972
  int i;
4973
 
4974
  switch (code)
4975
    {
4976
    case '~':
4977
      /* Print the assembler name of the current function.  */
4978
      assemble_name (file, alpha_fnname);
4979
      break;
4980
 
4981
    case '&':
4982
      assemble_name (file, get_some_local_dynamic_name ());
4983
      break;
4984
 
4985
    case '/':
4986
      {
4987
        const char *trap = get_trap_mode_suffix ();
4988
        const char *round = get_round_mode_suffix ();
4989
 
4990
        if (trap || round)
4991
          fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
4992
                   (trap ? trap : ""), (round ? round : ""));
4993
        break;
4994
      }
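      /* For example, with software-completion traps and dynamic rounding
         selected, an "addt" would come out as "addt/sud" (or "addtsud"
         when the assembler does not use the slash convention).  This is
         an illustration only, not an exhaustive list of combinations.  */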
4995
 
4996
    case ',':
4997
      /* Generates single precision instruction suffix.  */
4998
      fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
4999
      break;
5000
 
5001
    case '-':
5002
      /* Generates double precision instruction suffix.  */
5003
      fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5004
      break;
5005
 
5006
    case '#':
5007
      if (alpha_this_literal_sequence_number == 0)
5008
        alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5009
      fprintf (file, "%d", alpha_this_literal_sequence_number);
5010
      break;
5011
 
5012
    case '*':
5013
      if (alpha_this_gpdisp_sequence_number == 0)
5014
        alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5015
      fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5016
      break;
5017
 
5018
    case 'H':
5019
      if (GET_CODE (x) == HIGH)
5020
        output_addr_const (file, XEXP (x, 0));
5021
      else
5022
        output_operand_lossage ("invalid %%H value");
5023
      break;
5024
 
5025
    case 'J':
5026
      {
5027
        const char *lituse;
5028
 
5029
        if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5030
          {
5031
            x = XVECEXP (x, 0, 0);
5032
            lituse = "lituse_tlsgd";
5033
          }
5034
        else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5035
          {
5036
            x = XVECEXP (x, 0, 0);
5037
            lituse = "lituse_tlsldm";
5038
          }
5039
        else if (CONST_INT_P (x))
5040
          lituse = "lituse_jsr";
5041
        else
5042
          {
5043
            output_operand_lossage ("invalid %%J value");
5044
            break;
5045
          }
5046
 
5047
        if (x != const0_rtx)
5048
          fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5049
      }
5050
      break;
5051
 
5052
    case 'j':
5053
      {
5054
        const char *lituse;
5055
 
5056
#ifdef HAVE_AS_JSRDIRECT_RELOCS
5057
        lituse = "lituse_jsrdirect";
5058
#else
5059
        lituse = "lituse_jsr";
5060
#endif
5061
 
5062
        gcc_assert (INTVAL (x) != 0);
5063
        fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5064
      }
5065
      break;
5066
    case 'r':
      /* If this operand is the constant zero, write it as "$31".  */
      if (REG_P (x))
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$31");
      else
        output_operand_lossage ("invalid %%r value");
      break;

    case 'R':
      /* Similar, but for floating-point.  */
      if (REG_P (x))
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$f31");
      else
        output_operand_lossage ("invalid %%R value");
      break;
5085
 
5086
    case 'N':
5087
      /* Write the 1's complement of a constant.  */
5088
      if (!CONST_INT_P (x))
5089
        output_operand_lossage ("invalid %%N value");
5090
 
5091
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5092
      break;
5093
 
5094
    case 'P':
5095
      /* Write 1 << C, for a constant C.  */
5096
      if (!CONST_INT_P (x))
5097
        output_operand_lossage ("invalid %%P value");
5098
 
5099
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5100
      break;
5101
 
5102
    case 'h':
5103
      /* Write the high-order 16 bits of a constant, sign-extended.  */
5104
      if (!CONST_INT_P (x))
5105
        output_operand_lossage ("invalid %%h value");
5106
 
5107
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5108
      break;
5109
 
5110
    case 'L':
5111
      /* Write the low-order 16 bits of a constant, sign-extended.  */
5112
      if (!CONST_INT_P (x))
5113
        output_operand_lossage ("invalid %%L value");
5114
 
5115
      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5116
               (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5117
      break;
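      /* As a worked example, for the constant 0x12345678 the %h code
         prints 4660 (0x1234) and %L prints 22136 (0x5678).  */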
5118
 
5119
    case 'm':
5120
      /* Write mask for ZAP insn.  */
5121
      if (GET_CODE (x) == CONST_DOUBLE)
5122
        {
5123
          HOST_WIDE_INT mask = 0;
5124
          HOST_WIDE_INT value;
5125
 
5126
          value = CONST_DOUBLE_LOW (x);
5127
          for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5128
               i++, value >>= 8)
5129
            if (value & 0xff)
5130
              mask |= (1 << i);
5131
 
5132
          value = CONST_DOUBLE_HIGH (x);
5133
          for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5134
               i++, value >>= 8)
5135
            if (value & 0xff)
5136
              mask |= (1 << (i + sizeof (int)));
5137
 
5138
          fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5139
        }
5140
 
5141
      else if (CONST_INT_P (x))
5142
        {
5143
          HOST_WIDE_INT mask = 0, value = INTVAL (x);
5144
 
5145
          for (i = 0; i < 8; i++, value >>= 8)
5146
            if (value & 0xff)
5147
              mask |= (1 << i);
5148
 
5149
          fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5150
        }
5151
      else
5152
        output_operand_lossage ("invalid %%m value");
5153
      break;
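      /* For instance, the constant 0x00ff00ff has nonzero bytes 0 and 2,
         so the zap mask printed here is 0b0101, i.e. 5.  */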
5154
 
5155
    case 'M':
5156
      /* 'b', 'w', 'l', or 'q' as the value of the constant.  */
5157
      if (!CONST_INT_P (x)
5158
          || (INTVAL (x) != 8 && INTVAL (x) != 16
5159
              && INTVAL (x) != 32 && INTVAL (x) != 64))
5160
        output_operand_lossage ("invalid %%M value");
5161
 
5162
      fprintf (file, "%s",
5163
               (INTVAL (x) == 8 ? "b"
5164
                : INTVAL (x) == 16 ? "w"
5165
                : INTVAL (x) == 32 ? "l"
5166
                : "q"));
5167
      break;
5168
 
5169
    case 'U':
5170
      /* Similar, except do it from the mask.  */
5171
      if (CONST_INT_P (x))
5172
        {
5173
          HOST_WIDE_INT value = INTVAL (x);
5174
 
5175
          if (value == 0xff)
5176
            {
5177
              fputc ('b', file);
5178
              break;
5179
            }
5180
          if (value == 0xffff)
5181
            {
5182
              fputc ('w', file);
5183
              break;
5184
            }
5185
          if (value == 0xffffffff)
5186
            {
5187
              fputc ('l', file);
5188
              break;
5189
            }
5190
          if (value == -1)
5191
            {
5192
              fputc ('q', file);
5193
              break;
5194
            }
5195
        }
5196
      else if (HOST_BITS_PER_WIDE_INT == 32
5197
               && GET_CODE (x) == CONST_DOUBLE
5198
               && CONST_DOUBLE_LOW (x) == 0xffffffff
5199
               && CONST_DOUBLE_HIGH (x) == 0)
5200
        {
5201
          fputc ('l', file);
5202
          break;
5203
        }
5204
      output_operand_lossage ("invalid %%U value");
5205
      break;
5206
 
5207
    case 's':
5208
      /* Write the constant value divided by 8.  */
5209
      if (!CONST_INT_P (x)
5210
          || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5211
          || (INTVAL (x) & 7) != 0)
5212
        output_operand_lossage ("invalid %%s value");
5213
 
5214
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
5215
      break;
5216
 
5217
    case 'S':
      /* Same, except compute (64 - c) / 8.  */

      if (!CONST_INT_P (x)
          || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
          || (INTVAL (x) & 7) != 0)
        output_operand_lossage ("invalid %%S value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
      break;
5227
 
5228
    case 'C': case 'D': case 'c': case 'd':
5229
      /* Write out comparison name.  */
5230
      {
5231
        enum rtx_code c = GET_CODE (x);
5232
 
5233
        if (!COMPARISON_P (x))
5234
          output_operand_lossage ("invalid %%C value");
5235
 
5236
        else if (code == 'D')
5237
          c = reverse_condition (c);
5238
        else if (code == 'c')
5239
          c = swap_condition (c);
5240
        else if (code == 'd')
5241
          c = swap_condition (reverse_condition (c));
5242
 
5243
        if (c == LEU)
5244
          fprintf (file, "ule");
5245
        else if (c == LTU)
5246
          fprintf (file, "ult");
5247
        else if (c == UNORDERED)
5248
          fprintf (file, "un");
5249
        else
5250
          fprintf (file, "%s", GET_RTX_NAME (c));
5251
      }
5252
      break;
5253
 
5254
    case 'E':
5255
      /* Write the divide or modulus operator.  */
5256
      switch (GET_CODE (x))
5257
        {
5258
        case DIV:
5259
          fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5260
          break;
5261
        case UDIV:
5262
          fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5263
          break;
5264
        case MOD:
5265
          fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5266
          break;
5267
        case UMOD:
5268
          fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5269
          break;
5270
        default:
5271
          output_operand_lossage ("invalid %%E value");
5272
          break;
5273
        }
5274
      break;
5275
 
5276
    case 'A':
5277
      /* Write "_u" for unaligned access.  */
5278
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5279
        fprintf (file, "_u");
5280
      break;
5281
 
5282
    case 0:
5283
      if (REG_P (x))
5284
        fprintf (file, "%s", reg_names[REGNO (x)]);
5285
      else if (MEM_P (x))
5286
        output_address (XEXP (x, 0));
5287
      else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5288
        {
5289
          switch (XINT (XEXP (x, 0), 1))
5290
            {
5291
            case UNSPEC_DTPREL:
5292
            case UNSPEC_TPREL:
5293
              output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5294
              break;
5295
            default:
5296
              output_operand_lossage ("unknown relocation unspec");
5297
              break;
5298
            }
5299
        }
5300
      else
5301
        output_addr_const (file, x);
5302
      break;
5303
 
5304
    default:
5305
      output_operand_lossage ("invalid %%xn code");
5306
    }
5307
}
5308
 
5309
void
5310
print_operand_address (FILE *file, rtx addr)
5311
{
5312
  int basereg = 31;
5313
  HOST_WIDE_INT offset = 0;
5314
 
5315
  if (GET_CODE (addr) == AND)
5316
    addr = XEXP (addr, 0);
5317
 
5318
  if (GET_CODE (addr) == PLUS
5319
      && CONST_INT_P (XEXP (addr, 1)))
5320
    {
5321
      offset = INTVAL (XEXP (addr, 1));
5322
      addr = XEXP (addr, 0);
5323
    }
5324
 
5325
  if (GET_CODE (addr) == LO_SUM)
5326
    {
5327
      const char *reloc16, *reloclo;
5328
      rtx op1 = XEXP (addr, 1);
5329
 
5330
      if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5331
        {
5332
          op1 = XEXP (op1, 0);
5333
          switch (XINT (op1, 1))
5334
            {
5335
            case UNSPEC_DTPREL:
5336
              reloc16 = NULL;
5337
              reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5338
              break;
5339
            case UNSPEC_TPREL:
5340
              reloc16 = NULL;
5341
              reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5342
              break;
5343
            default:
5344
              output_operand_lossage ("unknown relocation unspec");
5345
              return;
5346
            }
5347
 
5348
          output_addr_const (file, XVECEXP (op1, 0, 0));
5349
        }
5350
      else
5351
        {
5352
          reloc16 = "gprel";
5353
          reloclo = "gprellow";
5354
          output_addr_const (file, op1);
5355
        }
5356
 
5357
      if (offset)
5358
        fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5359
 
5360
      addr = XEXP (addr, 0);
5361
      switch (GET_CODE (addr))
5362
        {
5363
        case REG:
5364
          basereg = REGNO (addr);
5365
          break;
5366
 
5367
        case SUBREG:
5368
          basereg = subreg_regno (addr);
5369
          break;
5370
 
5371
        default:
5372
          gcc_unreachable ();
5373
        }
5374
 
5375
      fprintf (file, "($%d)\t\t!%s", basereg,
5376
               (basereg == 29 ? reloc16 : reloclo));
5377
      return;
5378
    }
5379
 
5380
  switch (GET_CODE (addr))
5381
    {
5382
    case REG:
5383
      basereg = REGNO (addr);
5384
      break;
5385
 
5386
    case SUBREG:
5387
      basereg = subreg_regno (addr);
5388
      break;
5389
 
5390
    case CONST_INT:
5391
      offset = INTVAL (addr);
5392
      break;
5393
 
5394
#if TARGET_ABI_OPEN_VMS
5395
    case SYMBOL_REF:
5396
      fprintf (file, "%s", XSTR (addr, 0));
5397
      return;
5398
 
5399
    case CONST:
5400
      gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5401
                  && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5402
      fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5403
               XSTR (XEXP (XEXP (addr, 0), 0), 0),
5404
               INTVAL (XEXP (XEXP (addr, 0), 1)));
5405
      return;
5406
 
5407
#endif
5408
    default:
5409
      gcc_unreachable ();
5410
    }
5411
 
5412
  fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5413
}
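/* The common exit above prints a plain base-plus-offset address such as
   "16($30)"; the LO_SUM path instead prints the symbolic address, the
   base register and a relocation annotation such as "!gprel" or
   "!gprellow" (illustrative values; see the switch above).  */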
5414
 
5415
/* Emit RTL insns to initialize the variable parts of a trampoline at
5416
   M_TRAMP.  FNDECL is target function's decl.  CHAIN_VALUE is an rtx
5417
   for the static chain value for the function.  */
5418
 
5419
static void
5420
alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5421
{
5422
  rtx fnaddr, mem, word1, word2;
5423
 
5424
  fnaddr = XEXP (DECL_RTL (fndecl), 0);
5425
 
5426
#ifdef POINTERS_EXTEND_UNSIGNED
5427
  fnaddr = convert_memory_address (Pmode, fnaddr);
5428
  chain_value = convert_memory_address (Pmode, chain_value);
5429
#endif
5430
 
5431
  if (TARGET_ABI_OPEN_VMS)
5432
    {
5433
      const char *fnname;
5434
      char *trname;
5435
 
5436
      /* Construct the name of the trampoline entry point.  */
5437
      fnname = XSTR (fnaddr, 0);
5438
      trname = (char *) alloca (strlen (fnname) + 5);
5439
      strcpy (trname, fnname);
5440
      strcat (trname, "..tr");
5441
      fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5442
      word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
5443
 
5444
      /* Trampoline (or "bounded") procedure descriptor is constructed from
5445
         the function's procedure descriptor with certain fields zeroed IAW
5446
         the VMS calling standard. This is stored in the first quadword.  */
5447
      word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
5448
      word1 = expand_and (DImode, word1, GEN_INT (0xffff0fff0000fff0), NULL);
5449
    }
5450
  else
5451
    {
5452
      /* These 4 instructions are:
5453
            ldq $1,24($27)
5454
            ldq $27,16($27)
5455
            jmp $31,($27),0
5456
            nop
5457
         We don't bother setting the HINT field of the jump; the nop
5458
         is merely there for padding.  */
5459
      word1 = GEN_INT (0xa77b0010a43b0018);
5460
      word2 = GEN_INT (0x47ff041f6bfb0000);
5461
    }
5462
 
5463
  /* Store the first two words, as computed above.  */
5464
  mem = adjust_address (m_tramp, DImode, 0);
5465
  emit_move_insn (mem, word1);
5466
  mem = adjust_address (m_tramp, DImode, 8);
5467
  emit_move_insn (mem, word2);
5468
 
5469
  /* Store function address and static chain value.  */
5470
  mem = adjust_address (m_tramp, Pmode, 16);
5471
  emit_move_insn (mem, fnaddr);
5472
  mem = adjust_address (m_tramp, Pmode, 24);
5473
  emit_move_insn (mem, chain_value);
5474
 
5475
  if (TARGET_ABI_OSF)
5476
    {
5477
      emit_insn (gen_imb ());
5478
#ifdef HAVE_ENABLE_EXECUTE_STACK
5479
      emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5480
                         LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
5481
#endif
5482
    }
5483
}
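/* The trampoline written above therefore occupies 32 bytes: two
   quadwords at offsets 0 and 8 (the instruction pair on OSF, or the
   masked procedure descriptor and "..tr" entry point on VMS), the
   target function address at offset 16, and the static chain value at
   offset 24.  */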
5484
 
5485
/* Determine where to put an argument to a function.
5486
   Value is zero to push the argument on the stack,
5487
   or a hard register in which to store the argument.
5488
 
5489
   MODE is the argument's machine mode.
5490
   TYPE is the data type of the argument (as a tree).
5491
    This is null for libcalls where that information may
5492
    not be available.
5493
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
5494
    the preceding args and about the function being called.
5495
   NAMED is nonzero if this argument is a named parameter
5496
    (otherwise it is an extra parameter matching an ellipsis).
5497
 
5498
   On Alpha the first 6 words of args are normally in registers
5499
   and the rest are pushed.  */
5500
 
5501
static rtx
5502
alpha_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
5503
                    const_tree type, bool named ATTRIBUTE_UNUSED)
5504
{
5505
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5506
  int basereg;
5507
  int num_args;
5508
 
5509
  /* Don't get confused and pass small structures in FP registers.  */
5510
  if (type && AGGREGATE_TYPE_P (type))
5511
    basereg = 16;
5512
  else
5513
    {
5514
#ifdef ENABLE_CHECKING
5515
      /* With alpha_split_complex_arg, we shouldn't see any raw complex
5516
         values here.  */
5517
      gcc_assert (!COMPLEX_MODE_P (mode));
5518
#endif
5519
 
5520
      /* Set up defaults for FP operands passed in FP registers, and
5521
         integral operands passed in integer registers.  */
5522
      if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5523
        basereg = 32 + 16;
5524
      else
5525
        basereg = 16;
5526
    }
5527
 
5528
  /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5529
     the two platforms, so we can't avoid conditional compilation.  */
5530
#if TARGET_ABI_OPEN_VMS
5531
    {
5532
      if (mode == VOIDmode)
5533
        return alpha_arg_info_reg_val (*cum);
5534
 
5535
      num_args = cum->num_args;
5536
      if (num_args >= 6
5537
          || targetm.calls.must_pass_in_stack (mode, type))
5538
        return NULL_RTX;
5539
    }
5540
#elif TARGET_ABI_OSF
5541
    {
5542
      if (*cum >= 6)
5543
        return NULL_RTX;
5544
      num_args = *cum;
5545
 
5546
      /* VOID is passed as a special flag for "last argument".  */
5547
      if (type == void_type_node)
5548
        basereg = 16;
5549
      else if (targetm.calls.must_pass_in_stack (mode, type))
5550
        return NULL_RTX;
5551
    }
5552
#else
5553
#error Unhandled ABI
5554
#endif
5555
 
5556
  return gen_rtx_REG (mode, num_args + basereg);
5557
}
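/* For example, under the OSF ABI the argument occupying slot 2 (the
   third argument word) is passed in $18 if it is integral, or in $f18
   (hard register 48 + 2) if it is a floating-point value.  */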
5558
 
5559
/* Update the data in CUM to advance over an argument
5560
   of mode MODE and data type TYPE.
5561
   (TYPE is null for libcalls where that information may not be available.)  */
5562
 
5563
static void
5564
alpha_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
5565
                            const_tree type, bool named ATTRIBUTE_UNUSED)
5566
{
5567
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5568
  bool onstack = targetm.calls.must_pass_in_stack (mode, type);
5569
  int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);
5570
 
5571
#if TARGET_ABI_OSF
5572
  *cum += increment;
5573
#else
5574
  if (!onstack && cum->num_args < 6)
5575
    cum->atypes[cum->num_args] = alpha_arg_type (mode);
5576
  cum->num_args += increment;
5577
#endif
5578
}
5579
 
5580
static int
5581
alpha_arg_partial_bytes (cumulative_args_t cum_v,
5582
                         enum machine_mode mode ATTRIBUTE_UNUSED,
5583
                         tree type ATTRIBUTE_UNUSED,
5584
                         bool named ATTRIBUTE_UNUSED)
5585
{
5586
  int words = 0;
5587
  CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v);
5588
 
5589
#if TARGET_ABI_OPEN_VMS
5590
  if (cum->num_args < 6
5591
      && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5592
    words = 6 - cum->num_args;
5593
#elif TARGET_ABI_OSF
5594
  if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5595
    words = 6 - *cum;
5596
#else
5597
#error Unhandled ABI
5598
#endif
5599
 
5600
  return words * UNITS_PER_WORD;
5601
}
5602
 
5603
 
5604
/* Return true if TYPE must be returned in memory, instead of in registers.  */
5605
 
5606
static bool
5607
alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5608
{
5609
  enum machine_mode mode = VOIDmode;
5610
  int size;
5611
 
5612
  if (type)
5613
    {
5614
      mode = TYPE_MODE (type);
5615
 
5616
      /* All aggregates are returned in memory, except on OpenVMS where
5617
         records that fit 64 bits should be returned by immediate value
5618
         as required by section 3.8.7.1 of the OpenVMS Calling Standard.  */
5619
      if (TARGET_ABI_OPEN_VMS
5620
          && TREE_CODE (type) != ARRAY_TYPE
5621
          && (unsigned HOST_WIDE_INT) int_size_in_bytes(type) <= 8)
5622
        return false;
5623
 
5624
      if (AGGREGATE_TYPE_P (type))
5625
        return true;
5626
    }
5627
 
5628
  size = GET_MODE_SIZE (mode);
5629
  switch (GET_MODE_CLASS (mode))
5630
    {
5631
    case MODE_VECTOR_FLOAT:
5632
      /* Pass all float vectors in memory, like an aggregate.  */
5633
      return true;
5634
 
5635
    case MODE_COMPLEX_FLOAT:
5636
      /* We judge complex floats on the size of their element,
5637
         not the size of the whole type.  */
5638
      size = GET_MODE_UNIT_SIZE (mode);
5639
      break;
5640
 
5641
    case MODE_INT:
5642
    case MODE_FLOAT:
5643
    case MODE_COMPLEX_INT:
5644
    case MODE_VECTOR_INT:
5645
      break;
5646
 
5647
    default:
5648
      /* ??? We get called on all sorts of random stuff from
5649
         aggregate_value_p.  We must return something, but it's not
5650
         clear what's safe to return.  Pretend it's a struct I
5651
         guess.  */
5652
      return true;
5653
    }
5654
 
5655
  /* Otherwise types must fit in one register.  */
5656
  return size > UNITS_PER_WORD;
5657
}
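/* E.g. a 16-byte structure is returned in memory, whereas a complex
   double (also 16 bytes) is returned in registers, since only its
   8-byte element size is tested above.  */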
5658
 
5659
/* Return true if TYPE should be passed by invisible reference.  */

static bool
alpha_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
                         enum machine_mode mode,
                         const_tree type ATTRIBUTE_UNUSED,
                         bool named ATTRIBUTE_UNUSED)
{
  return mode == TFmode || mode == TCmode;
}
5669
 
5670
/* Define how to find the value returned by a function.  VALTYPE is the
5671
   data type of the value (as a tree).  If the precise function being
5672
   called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5673
   MODE is set instead of VALTYPE for libcalls.
5674
 
5675
   On Alpha the value is found in $0 for integer functions and
5676
   $f0 for floating-point functions.  */
5677
 
5678
rtx
5679
function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5680
                enum machine_mode mode)
5681
{
5682
  unsigned int regnum, dummy ATTRIBUTE_UNUSED;
5683
  enum mode_class mclass;
5684
 
5685
  gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5686
 
5687
  if (valtype)
5688
    mode = TYPE_MODE (valtype);
5689
 
5690
  mclass = GET_MODE_CLASS (mode);
5691
  switch (mclass)
5692
    {
5693
    case MODE_INT:
5694
      /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5695
         where we have them returning both SImode and DImode.  */
5696
      if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5697
        PROMOTE_MODE (mode, dummy, valtype);
5698
      /* FALLTHRU */
5699
 
5700
    case MODE_COMPLEX_INT:
5701
    case MODE_VECTOR_INT:
5702
      regnum = 0;
5703
      break;
5704
 
5705
    case MODE_FLOAT:
5706
      regnum = 32;
5707
      break;
5708
 
5709
    case MODE_COMPLEX_FLOAT:
5710
      {
5711
        enum machine_mode cmode = GET_MODE_INNER (mode);
5712
 
5713
        return gen_rtx_PARALLEL
5714
          (VOIDmode,
5715
           gen_rtvec (2,
5716
                      gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5717
                                         const0_rtx),
5718
                      gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5719
                                         GEN_INT (GET_MODE_SIZE (cmode)))));
5720
      }
5721
 
5722
    case MODE_RANDOM:
5723
      /* We should only reach here for BLKmode on VMS.  */
5724
      gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5725
      regnum = 0;
5726
      break;
5727
 
5728
    default:
5729
      gcc_unreachable ();
5730
    }
5731
 
5732
  return gen_rtx_REG (mode, regnum);
5733
}
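/* Consequently integer results come back in $0, floating-point results
   in $f0 (hard register 32), and a complex float is described as a
   PARALLEL of $f0 and $f1.  */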
5734
 
5735
/* TCmode complex values are passed by invisible reference.  We
   should not split these values.  */

static bool
alpha_split_complex_arg (const_tree type)
{
  return TYPE_MODE (type) != TCmode;
}
5743
 
5744
static tree
5745
alpha_build_builtin_va_list (void)
5746
{
5747
  tree base, ofs, space, record, type_decl;
5748
 
5749
  if (TARGET_ABI_OPEN_VMS)
5750
    return ptr_type_node;
5751
 
5752
  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5753
  type_decl = build_decl (BUILTINS_LOCATION,
5754
                          TYPE_DECL, get_identifier ("__va_list_tag"), record);
5755
  TYPE_STUB_DECL (record) = type_decl;
5756
  TYPE_NAME (record) = type_decl;
5757
 
5758
  /* C++? SET_IS_AGGR_TYPE (record, 1); */
5759
 
5760
  /* Dummy field to prevent alignment warnings.  */
5761
  space = build_decl (BUILTINS_LOCATION,
5762
                      FIELD_DECL, NULL_TREE, integer_type_node);
5763
  DECL_FIELD_CONTEXT (space) = record;
5764
  DECL_ARTIFICIAL (space) = 1;
5765
  DECL_IGNORED_P (space) = 1;
5766
 
5767
  ofs = build_decl (BUILTINS_LOCATION,
5768
                    FIELD_DECL, get_identifier ("__offset"),
5769
                    integer_type_node);
5770
  DECL_FIELD_CONTEXT (ofs) = record;
5771
  DECL_CHAIN (ofs) = space;
5772
  /* ??? This is a hack, __offset is marked volatile to prevent
5773
     DCE that confuses stdarg optimization and results in
5774
     gcc.c-torture/execute/stdarg-1.c failure.  See PR 41089.  */
5775
  TREE_THIS_VOLATILE (ofs) = 1;
5776
 
5777
  base = build_decl (BUILTINS_LOCATION,
5778
                     FIELD_DECL, get_identifier ("__base"),
5779
                     ptr_type_node);
5780
  DECL_FIELD_CONTEXT (base) = record;
5781
  DECL_CHAIN (base) = ofs;
5782
 
5783
  TYPE_FIELDS (record) = base;
5784
  layout_type (record);
5785
 
5786
  va_list_gpr_counter_field = ofs;
5787
  return record;
5788
}
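/* Roughly speaking, the record built above corresponds to

     struct __va_list_tag {
       char *__base;      // start of the saved-register/overflow area
       int __offset;      // byte offset of the next argument
       int unused;        // unnamed dummy field, alignment only
     };

   This is only an illustrative sketch; the real layout is produced by
   layout_type and the dummy field has no name.  */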
5789
 
5790
#if TARGET_ABI_OSF
5791
/* Helper function for alpha_stdarg_optimize_hook.  Skip over casts
5792
   and constant additions.  */
5793
 
5794
static gimple
5795
va_list_skip_additions (tree lhs)
5796
{
5797
  gimple stmt;
5798
 
5799
  for (;;)
5800
    {
5801
      enum tree_code code;
5802
 
5803
      stmt = SSA_NAME_DEF_STMT (lhs);
5804
 
5805
      if (gimple_code (stmt) == GIMPLE_PHI)
5806
        return stmt;
5807
 
5808
      if (!is_gimple_assign (stmt)
5809
          || gimple_assign_lhs (stmt) != lhs)
5810
        return NULL;
5811
 
5812
      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5813
        return stmt;
5814
      code = gimple_assign_rhs_code (stmt);
5815
      if (!CONVERT_EXPR_CODE_P (code)
5816
          && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5817
              || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5818
              || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5819
        return stmt;
5820
 
5821
      lhs = gimple_assign_rhs1 (stmt);
5822
    }
5823
}
5824
 
5825
/* Check if LHS = RHS statement is
5826
   LHS = *(ap.__base + ap.__offset + cst)
5827
   or
5828
   LHS = *(ap.__base
5829
           + ((ap.__offset + cst <= 47)
5830
              ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5831
   If the former, indicate that GPR registers are needed,
5832
   if the latter, indicate that FPR registers are needed.
5833
 
5834
   Also look for LHS = (*ptr).field, where ptr is one of the forms
5835
   listed above.
5836
 
5837
   On alpha, cfun->va_list_gpr_size is used as size of the needed
5838
   regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5839
   registers are needed and bit 1 set if FPR registers are needed.
5840
   Return true if va_list references should not be scanned for the
5841
   current statement.  */
5842
 
5843
static bool
5844
alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
5845
{
5846
  tree base, offset, rhs;
5847
  int offset_arg = 1;
5848
  gimple base_stmt;
5849
 
5850
  if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5851
      != GIMPLE_SINGLE_RHS)
5852
    return false;
5853
 
5854
  rhs = gimple_assign_rhs1 (stmt);
5855
  while (handled_component_p (rhs))
5856
    rhs = TREE_OPERAND (rhs, 0);
5857
  if (TREE_CODE (rhs) != MEM_REF
5858
      || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5859
    return false;
5860
 
5861
  stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5862
  if (stmt == NULL
5863
      || !is_gimple_assign (stmt)
5864
      || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
5865
    return false;
5866
 
5867
  base = gimple_assign_rhs1 (stmt);
5868
  if (TREE_CODE (base) == SSA_NAME)
5869
    {
5870
      base_stmt = va_list_skip_additions (base);
5871
      if (base_stmt
5872
          && is_gimple_assign (base_stmt)
5873
          && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5874
        base = gimple_assign_rhs1 (base_stmt);
5875
    }
5876
 
5877
  if (TREE_CODE (base) != COMPONENT_REF
5878
      || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5879
    {
5880
      base = gimple_assign_rhs2 (stmt);
5881
      if (TREE_CODE (base) == SSA_NAME)
5882
        {
5883
          base_stmt = va_list_skip_additions (base);
5884
          if (base_stmt
5885
              && is_gimple_assign (base_stmt)
5886
              && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5887
            base = gimple_assign_rhs1 (base_stmt);
5888
        }
5889
 
5890
      if (TREE_CODE (base) != COMPONENT_REF
5891
          || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5892
        return false;
5893
 
5894
      offset_arg = 0;
5895
    }
5896
 
5897
  base = get_base_address (base);
5898
  if (TREE_CODE (base) != VAR_DECL
5899
      || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5900
    return false;
5901
 
5902
  offset = gimple_op (stmt, 1 + offset_arg);
5903
  if (TREE_CODE (offset) == SSA_NAME)
5904
    {
5905
      gimple offset_stmt = va_list_skip_additions (offset);
5906
 
5907
      if (offset_stmt
5908
          && gimple_code (offset_stmt) == GIMPLE_PHI)
5909
        {
5910
          HOST_WIDE_INT sub;
5911
          gimple arg1_stmt, arg2_stmt;
5912
          tree arg1, arg2;
5913
          enum tree_code code1, code2;
5914
 
5915
          if (gimple_phi_num_args (offset_stmt) != 2)
5916
            goto escapes;
5917
 
5918
          arg1_stmt
5919
            = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
5920
          arg2_stmt
5921
            = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
5922
          if (arg1_stmt == NULL
5923
              || !is_gimple_assign (arg1_stmt)
5924
              || arg2_stmt == NULL
5925
              || !is_gimple_assign (arg2_stmt))
5926
            goto escapes;
5927
 
5928
          code1 = gimple_assign_rhs_code (arg1_stmt);
5929
          code2 = gimple_assign_rhs_code (arg2_stmt);
5930
          if (code1 == COMPONENT_REF
5931
              && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
5932
            /* Do nothing.  */;
5933
          else if (code2 == COMPONENT_REF
5934
                   && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
5935
            {
5936
              gimple tem = arg1_stmt;
5937
              code2 = code1;
5938
              arg1_stmt = arg2_stmt;
5939
              arg2_stmt = tem;
5940
            }
5941
          else
5942
            goto escapes;
5943
 
5944
          if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
5945
            goto escapes;
5946
 
5947
          sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
5948
          if (code2 == MINUS_EXPR)
5949
            sub = -sub;
5950
          if (sub < -48 || sub > -32)
5951
            goto escapes;
5952
 
5953
          arg1 = gimple_assign_rhs1 (arg1_stmt);
5954
          arg2 = gimple_assign_rhs1 (arg2_stmt);
5955
          if (TREE_CODE (arg2) == SSA_NAME)
5956
            {
5957
              arg2_stmt = va_list_skip_additions (arg2);
5958
              if (arg2_stmt == NULL
5959
                  || !is_gimple_assign (arg2_stmt)
5960
                  || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
5961
                goto escapes;
5962
              arg2 = gimple_assign_rhs1 (arg2_stmt);
5963
            }
5964
          if (arg1 != arg2)
5965
            goto escapes;
5966
 
5967
          if (TREE_CODE (arg1) != COMPONENT_REF
5968
              || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5969
              || get_base_address (arg1) != base)
5970
            goto escapes;
5971
 
5972
          /* Need floating point regs.  */
5973
          cfun->va_list_fpr_size |= 2;
5974
          return false;
5975
        }
5976
      if (offset_stmt
5977
          && is_gimple_assign (offset_stmt)
5978
          && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
5979
        offset = gimple_assign_rhs1 (offset_stmt);
5980
    }
5981
  if (TREE_CODE (offset) != COMPONENT_REF
5982
      || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
5983
      || get_base_address (offset) != base)
5984
    goto escapes;
5985
  else
5986
    /* Need general regs.  */
5987
    cfun->va_list_fpr_size |= 1;
5988
  return false;
5989
 
5990
escapes:
5991
  si->va_list_escapes = true;
5992
  return false;
5993
}
5994
#endif
5995
 
5996
/* Perform any actions needed for a function that is receiving a
   variable number of arguments.  */
5998
 
5999
static void
6000
alpha_setup_incoming_varargs (cumulative_args_t pcum, enum machine_mode mode,
6001
                              tree type, int *pretend_size, int no_rtl)
6002
{
6003
  CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);
6004
 
6005
  /* Skip the current argument.  */
6006
  targetm.calls.function_arg_advance (pack_cumulative_args (&cum), mode, type,
6007
                                      true);
6008
 
6009
#if TARGET_ABI_OPEN_VMS
6010
  /* For VMS, we allocate space for all 6 arg registers plus a count.
6011
 
6012
     However, if NO registers need to be saved, don't allocate any space.
6013
     This is not only because we won't need the space, but because AP
6014
     includes the current_pretend_args_size and we don't want to mess up
6015
     any ap-relative addresses already made.  */
6016
  if (cum.num_args < 6)
6017
    {
6018
      if (!no_rtl)
6019
        {
6020
          emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6021
          emit_insn (gen_arg_home ());
6022
        }
6023
      *pretend_size = 7 * UNITS_PER_WORD;
6024
    }
6025
#else
6026
  /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6027
     only push those that are remaining.  However, if NO registers need to
6028
     be saved, don't allocate any space.  This is not only because we won't
6029
     need the space, but because AP includes the current_pretend_args_size
6030
     and we don't want to mess up any ap-relative addresses already made.
6031
 
6032
     If we are not to use the floating-point registers, save the integer
6033
     registers where we would put the floating-point registers.  This is
6034
     not the most efficient way to implement varargs with just one register
6035
     class, but it isn't worth doing anything more efficient in this rare
6036
     case.  */
6037
  if (cum >= 6)
6038
    return;
6039
 
6040
  if (!no_rtl)
6041
    {
6042
      int count;
6043
      alias_set_type set = get_varargs_alias_set ();
6044
      rtx tmp;
6045
 
6046
      count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6047
      if (count > 6 - cum)
6048
        count = 6 - cum;
6049
 
6050
      /* Detect whether integer registers or floating-point registers
6051
         are needed by the detected va_arg statements.  See above for
6052
         how these values are computed.  Note that the "escape" value
6053
         is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6054
         these bits set.  */
6055
      gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6056
 
6057
      if (cfun->va_list_fpr_size & 1)
6058
        {
6059
          tmp = gen_rtx_MEM (BLKmode,
6060
                             plus_constant (virtual_incoming_args_rtx,
6061
                                            (cum + 6) * UNITS_PER_WORD));
6062
          MEM_NOTRAP_P (tmp) = 1;
6063
          set_mem_alias_set (tmp, set);
6064
          move_block_from_reg (16 + cum, tmp, count);
6065
        }
6066
 
6067
      if (cfun->va_list_fpr_size & 2)
6068
        {
6069
          tmp = gen_rtx_MEM (BLKmode,
6070
                             plus_constant (virtual_incoming_args_rtx,
6071
                                            cum * UNITS_PER_WORD));
6072
          MEM_NOTRAP_P (tmp) = 1;
6073
          set_mem_alias_set (tmp, set);
6074
          move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6075
        }
6076
     }
6077
  *pretend_size = 12 * UNITS_PER_WORD;
6078
#endif
6079
}
6080
 
6081
static void
6082
alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6083
{
6084
  HOST_WIDE_INT offset;
6085
  tree t, offset_field, base_field;
6086
 
6087
  if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6088
    return;
6089
 
6090
  /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6091
     up by 48, storing fp arg registers in the first 48 bytes, and the
6092
     integer arg registers in the next 48 bytes.  This is only done,
6093
     however, if any integer registers need to be stored.
6094
 
6095
     If no integer registers need be stored, then we must subtract 48
6096
     in order to account for the integer arg registers which are counted
6097
     in argsize above, but which are not actually stored on the stack.
6098
     Must further be careful here about structures straddling the last
6099
     integer argument register; that futzes with pretend_args_size,
6100
     which changes the meaning of AP.  */
6101
 
6102
  if (NUM_ARGS < 6)
6103
    offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6104
  else
6105
    offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6106
 
6107
  if (TARGET_ABI_OPEN_VMS)
6108
    {
6109
      t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6110
      t = fold_build_pointer_plus_hwi (t, offset + NUM_ARGS * UNITS_PER_WORD);
6111
      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
6112
      TREE_SIDE_EFFECTS (t) = 1;
6113
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6114
    }
6115
  else
6116
    {
6117
      base_field = TYPE_FIELDS (TREE_TYPE (valist));
6118
      offset_field = DECL_CHAIN (base_field);
6119
 
6120
      base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6121
                           valist, base_field, NULL_TREE);
6122
      offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6123
                             valist, offset_field, NULL_TREE);
6124
 
6125
      t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6126
      t = fold_build_pointer_plus_hwi (t, offset);
6127
      t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6128
      TREE_SIDE_EFFECTS (t) = 1;
6129
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6130
 
6131
      t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6132
      t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6133
      TREE_SIDE_EFFECTS (t) = 1;
6134
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6135
    }
6136
}
6137
 
6138
static tree
6139
alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6140
                         gimple_seq *pre_p)
6141
{
6142
  tree type_size, ptr_type, addend, t, addr;
6143
  gimple_seq internal_post;
6144
 
6145
  /* If the type could not be passed in registers, skip the block
6146
     reserved for the registers.  */
6147
  if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6148
    {
6149
      t = build_int_cst (TREE_TYPE (offset), 6*8);
6150
      gimplify_assign (offset,
6151
                       build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6152
                       pre_p);
6153
    }
6154
 
6155
  addend = offset;
6156
  ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
6157
 
6158
  if (TREE_CODE (type) == COMPLEX_TYPE)
6159
    {
6160
      tree real_part, imag_part, real_temp;
6161
 
6162
      real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6163
                                           offset, pre_p);
6164
 
6165
      /* Copy the value into a new temporary, lest the formal temporary
6166
         be reused out from under us.  */
6167
      real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6168
 
6169
      imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6170
                                           offset, pre_p);
6171
 
6172
      return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6173
    }
6174
  else if (TREE_CODE (type) == REAL_TYPE)
6175
    {
6176
      tree fpaddend, cond, fourtyeight;
6177
 
6178
      fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6179
      fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6180
                              addend, fourtyeight);
6181
      cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6182
      addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6183
                            fpaddend, addend);
6184
    }
6185
 
6186
  /* Build the final address and force that value into a temporary.  */
6187
  addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend);
6188
  internal_post = NULL;
6189
  gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6190
  gimple_seq_add_seq (pre_p, internal_post);
6191
 
6192
  /* Update the offset field.  */
6193
  type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6194
  if (type_size == NULL || TREE_OVERFLOW (type_size))
6195
    t = size_zero_node;
6196
  else
6197
    {
6198
      t = size_binop (PLUS_EXPR, type_size, size_int (7));
6199
      t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6200
      t = size_binop (MULT_EXPR, t, size_int (8));
6201
    }
6202
  t = fold_convert (TREE_TYPE (offset), t);
6203
  gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6204
                   pre_p);
6205
 
6206
  return build_va_arg_indirect_ref (addr);
6207
}
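/* Note that the offset update above rounds the argument size up to a
   multiple of 8, so e.g. a 12-byte argument advances __offset by 16
   bytes.  */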
6208
 
6209
static tree
6210
alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6211
                       gimple_seq *post_p)
6212
{
6213
  tree offset_field, base_field, offset, base, t, r;
6214
  bool indirect;
6215
 
6216
  if (TARGET_ABI_OPEN_VMS)
6217
    return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6218
 
6219
  base_field = TYPE_FIELDS (va_list_type_node);
6220
  offset_field = DECL_CHAIN (base_field);
6221
  base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6222
                       valist, base_field, NULL_TREE);
6223
  offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6224
                         valist, offset_field, NULL_TREE);
6225
 
6226
  /* Pull the fields of the structure out into temporaries.  Since we never
6227
     modify the base field, we can use a formal temporary.  Sign-extend the
6228
     offset field so that it's the proper width for pointer arithmetic.  */
6229
  base = get_formal_tmp_var (base_field, pre_p);
6230
 
6231
  t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6232
  offset = get_initialized_tmp_var (t, pre_p, NULL);
6233
 
6234
  indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6235
  if (indirect)
6236
    type = build_pointer_type_for_mode (type, ptr_mode, true);
6237
 
6238
  /* Find the value.  Note that this will be a stable indirection, or
6239
     a composite of stable indirections in the case of complex.  */
6240
  r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6241
 
6242
  /* Stuff the offset temporary back into its field.  */
6243
  gimplify_assign (unshare_expr (offset_field),
6244
                   fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6245
 
6246
  if (indirect)
6247
    r = build_va_arg_indirect_ref (r);
6248
 
6249
  return r;
6250
}
6251
 
6252
/* Builtins.  */
6253
 
6254
enum alpha_builtin
6255
{
6256
  ALPHA_BUILTIN_CMPBGE,
6257
  ALPHA_BUILTIN_EXTBL,
6258
  ALPHA_BUILTIN_EXTWL,
6259
  ALPHA_BUILTIN_EXTLL,
6260
  ALPHA_BUILTIN_EXTQL,
6261
  ALPHA_BUILTIN_EXTWH,
6262
  ALPHA_BUILTIN_EXTLH,
6263
  ALPHA_BUILTIN_EXTQH,
6264
  ALPHA_BUILTIN_INSBL,
6265
  ALPHA_BUILTIN_INSWL,
6266
  ALPHA_BUILTIN_INSLL,
6267
  ALPHA_BUILTIN_INSQL,
6268
  ALPHA_BUILTIN_INSWH,
6269
  ALPHA_BUILTIN_INSLH,
6270
  ALPHA_BUILTIN_INSQH,
6271
  ALPHA_BUILTIN_MSKBL,
6272
  ALPHA_BUILTIN_MSKWL,
6273
  ALPHA_BUILTIN_MSKLL,
6274
  ALPHA_BUILTIN_MSKQL,
6275
  ALPHA_BUILTIN_MSKWH,
6276
  ALPHA_BUILTIN_MSKLH,
6277
  ALPHA_BUILTIN_MSKQH,
6278
  ALPHA_BUILTIN_UMULH,
6279
  ALPHA_BUILTIN_ZAP,
6280
  ALPHA_BUILTIN_ZAPNOT,
6281
  ALPHA_BUILTIN_AMASK,
6282
  ALPHA_BUILTIN_IMPLVER,
6283
  ALPHA_BUILTIN_RPCC,
6284
  ALPHA_BUILTIN_THREAD_POINTER,
6285
  ALPHA_BUILTIN_SET_THREAD_POINTER,
6286
  ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6287
  ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6288
 
6289
  /* TARGET_MAX */
6290
  ALPHA_BUILTIN_MINUB8,
6291
  ALPHA_BUILTIN_MINSB8,
6292
  ALPHA_BUILTIN_MINUW4,
6293
  ALPHA_BUILTIN_MINSW4,
6294
  ALPHA_BUILTIN_MAXUB8,
6295
  ALPHA_BUILTIN_MAXSB8,
6296
  ALPHA_BUILTIN_MAXUW4,
6297
  ALPHA_BUILTIN_MAXSW4,
6298
  ALPHA_BUILTIN_PERR,
6299
  ALPHA_BUILTIN_PKLB,
6300
  ALPHA_BUILTIN_PKWB,
6301
  ALPHA_BUILTIN_UNPKBL,
6302
  ALPHA_BUILTIN_UNPKBW,
6303
 
6304
  /* TARGET_CIX */
6305
  ALPHA_BUILTIN_CTTZ,
6306
  ALPHA_BUILTIN_CTLZ,
6307
  ALPHA_BUILTIN_CTPOP,
6308
 
6309
  ALPHA_BUILTIN_max
6310
};
6311
 
6312
static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6313
  CODE_FOR_builtin_cmpbge,
6314
  CODE_FOR_extbl,
6315
  CODE_FOR_extwl,
6316
  CODE_FOR_extll,
6317
  CODE_FOR_extql,
6318
  CODE_FOR_extwh,
6319
  CODE_FOR_extlh,
6320
  CODE_FOR_extqh,
6321
  CODE_FOR_builtin_insbl,
6322
  CODE_FOR_builtin_inswl,
6323
  CODE_FOR_builtin_insll,
6324
  CODE_FOR_insql,
6325
  CODE_FOR_inswh,
6326
  CODE_FOR_inslh,
6327
  CODE_FOR_insqh,
6328
  CODE_FOR_mskbl,
6329
  CODE_FOR_mskwl,
6330
  CODE_FOR_mskll,
6331
  CODE_FOR_mskql,
6332
  CODE_FOR_mskwh,
6333
  CODE_FOR_msklh,
6334
  CODE_FOR_mskqh,
6335
  CODE_FOR_umuldi3_highpart,
6336
  CODE_FOR_builtin_zap,
6337
  CODE_FOR_builtin_zapnot,
6338
  CODE_FOR_builtin_amask,
6339
  CODE_FOR_builtin_implver,
6340
  CODE_FOR_builtin_rpcc,
6341
  CODE_FOR_load_tp,
6342
  CODE_FOR_set_tp,
6343
  CODE_FOR_builtin_establish_vms_condition_handler,
6344
  CODE_FOR_builtin_revert_vms_condition_handler,
6345
 
6346
  /* TARGET_MAX */
6347
  CODE_FOR_builtin_minub8,
6348
  CODE_FOR_builtin_minsb8,
6349
  CODE_FOR_builtin_minuw4,
6350
  CODE_FOR_builtin_minsw4,
6351
  CODE_FOR_builtin_maxub8,
6352
  CODE_FOR_builtin_maxsb8,
6353
  CODE_FOR_builtin_maxuw4,
6354
  CODE_FOR_builtin_maxsw4,
6355
  CODE_FOR_builtin_perr,
6356
  CODE_FOR_builtin_pklb,
6357
  CODE_FOR_builtin_pkwb,
6358
  CODE_FOR_builtin_unpkbl,
6359
  CODE_FOR_builtin_unpkbw,
6360
 
6361
  /* TARGET_CIX */
6362
  CODE_FOR_ctzdi2,
6363
  CODE_FOR_clzdi2,
6364
  CODE_FOR_popcountdi2
6365
};
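/* The table above is indexed by enum alpha_builtin, so its entries must
   stay in the same order as the enumerators; alpha_expand_builtin looks
   up the insn code with code_for_builtin[fcode].  */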
6366
 
6367
struct alpha_builtin_def
{
  const char *name;
  enum alpha_builtin code;
  unsigned int target_mask;
  bool is_const;
};
6374
 
6375
static struct alpha_builtin_def const zero_arg_builtins[] = {
6376
  { "__builtin_alpha_implver",  ALPHA_BUILTIN_IMPLVER,  0, true },
6377
  { "__builtin_alpha_rpcc",     ALPHA_BUILTIN_RPCC,     0, false }
6378
};
6379
 
6380
static struct alpha_builtin_def const one_arg_builtins[] = {
6381
  { "__builtin_alpha_amask",    ALPHA_BUILTIN_AMASK,    0, true },
6382
  { "__builtin_alpha_pklb",     ALPHA_BUILTIN_PKLB,     MASK_MAX, true },
6383
  { "__builtin_alpha_pkwb",     ALPHA_BUILTIN_PKWB,     MASK_MAX, true },
6384
  { "__builtin_alpha_unpkbl",   ALPHA_BUILTIN_UNPKBL,   MASK_MAX, true },
6385
  { "__builtin_alpha_unpkbw",   ALPHA_BUILTIN_UNPKBW,   MASK_MAX, true },
6386
  { "__builtin_alpha_cttz",     ALPHA_BUILTIN_CTTZ,     MASK_CIX, true },
6387
  { "__builtin_alpha_ctlz",     ALPHA_BUILTIN_CTLZ,     MASK_CIX, true },
6388
  { "__builtin_alpha_ctpop",    ALPHA_BUILTIN_CTPOP,    MASK_CIX, true }
6389
};
6390
 
6391
static struct alpha_builtin_def const two_arg_builtins[] = {
6392
  { "__builtin_alpha_cmpbge",   ALPHA_BUILTIN_CMPBGE,   0, true },
6393
  { "__builtin_alpha_extbl",    ALPHA_BUILTIN_EXTBL,    0, true },
6394
  { "__builtin_alpha_extwl",    ALPHA_BUILTIN_EXTWL,    0, true },
6395
  { "__builtin_alpha_extll",    ALPHA_BUILTIN_EXTLL,    0, true },
6396
  { "__builtin_alpha_extql",    ALPHA_BUILTIN_EXTQL,    0, true },
6397
  { "__builtin_alpha_extwh",    ALPHA_BUILTIN_EXTWH,    0, true },
6398
  { "__builtin_alpha_extlh",    ALPHA_BUILTIN_EXTLH,    0, true },
6399
  { "__builtin_alpha_extqh",    ALPHA_BUILTIN_EXTQH,    0, true },
6400
  { "__builtin_alpha_insbl",    ALPHA_BUILTIN_INSBL,    0, true },
6401
  { "__builtin_alpha_inswl",    ALPHA_BUILTIN_INSWL,    0, true },
6402
  { "__builtin_alpha_insll",    ALPHA_BUILTIN_INSLL,    0, true },
6403
  { "__builtin_alpha_insql",    ALPHA_BUILTIN_INSQL,    0, true },
6404
  { "__builtin_alpha_inswh",    ALPHA_BUILTIN_INSWH,    0, true },
6405
  { "__builtin_alpha_inslh",    ALPHA_BUILTIN_INSLH,    0, true },
6406
  { "__builtin_alpha_insqh",    ALPHA_BUILTIN_INSQH,    0, true },
6407
  { "__builtin_alpha_mskbl",    ALPHA_BUILTIN_MSKBL,    0, true },
6408
  { "__builtin_alpha_mskwl",    ALPHA_BUILTIN_MSKWL,    0, true },
6409
  { "__builtin_alpha_mskll",    ALPHA_BUILTIN_MSKLL,    0, true },
6410
  { "__builtin_alpha_mskql",    ALPHA_BUILTIN_MSKQL,    0, true },
6411
  { "__builtin_alpha_mskwh",    ALPHA_BUILTIN_MSKWH,    0, true },
6412
  { "__builtin_alpha_msklh",    ALPHA_BUILTIN_MSKLH,    0, true },
6413
  { "__builtin_alpha_mskqh",    ALPHA_BUILTIN_MSKQH,    0, true },
6414
  { "__builtin_alpha_umulh",    ALPHA_BUILTIN_UMULH,    0, true },
6415
  { "__builtin_alpha_zap",      ALPHA_BUILTIN_ZAP,      0, true },
6416
  { "__builtin_alpha_zapnot",   ALPHA_BUILTIN_ZAPNOT,   0, true },
6417
  { "__builtin_alpha_minub8",   ALPHA_BUILTIN_MINUB8,   MASK_MAX, true },
6418
  { "__builtin_alpha_minsb8",   ALPHA_BUILTIN_MINSB8,   MASK_MAX, true },
6419
  { "__builtin_alpha_minuw4",   ALPHA_BUILTIN_MINUW4,   MASK_MAX, true },
6420
  { "__builtin_alpha_minsw4",   ALPHA_BUILTIN_MINSW4,   MASK_MAX, true },
6421
  { "__builtin_alpha_maxub8",   ALPHA_BUILTIN_MAXUB8,   MASK_MAX, true },
6422
  { "__builtin_alpha_maxsb8",   ALPHA_BUILTIN_MAXSB8,   MASK_MAX, true },
6423
  { "__builtin_alpha_maxuw4",   ALPHA_BUILTIN_MAXUW4,   MASK_MAX, true },
6424
  { "__builtin_alpha_maxsw4",   ALPHA_BUILTIN_MAXSW4,   MASK_MAX, true },
6425
  { "__builtin_alpha_perr",     ALPHA_BUILTIN_PERR,     MASK_MAX, true }
6426
};
6427
 
6428
static GTY(()) tree alpha_v8qi_u;
static GTY(()) tree alpha_v8qi_s;
static GTY(()) tree alpha_v4hi_u;
static GTY(()) tree alpha_v4hi_s;

static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6434
 
6435
/* Return the alpha builtin for CODE.  */

static tree
alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= ALPHA_BUILTIN_max)
    return error_mark_node;
  return alpha_builtins[code];
}
6444
 
6445
/* Helper function of alpha_init_builtins.  Add the built-in specified
6446
   by NAME, TYPE, CODE, and ECF.  */
6447
 
6448
static void
6449
alpha_builtin_function (const char *name, tree ftype,
6450
                        enum alpha_builtin code, unsigned ecf)
6451
{
6452
  tree decl = add_builtin_function (name, ftype, (int) code,
6453
                                    BUILT_IN_MD, NULL, NULL_TREE);
6454
 
6455
  if (ecf & ECF_CONST)
6456
    TREE_READONLY (decl) = 1;
6457
  if (ecf & ECF_NOTHROW)
6458
    TREE_NOTHROW (decl) = 1;
6459
 
6460
  alpha_builtins [(int) code] = decl;
6461
}
6462
 
6463
/* Helper function of alpha_init_builtins.  Add the COUNT built-in
6464
   functions pointed to by P, with function type FTYPE.  */
6465
 
6466
static void
6467
alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6468
                    tree ftype)
6469
{
6470
  size_t i;
6471
 
6472
  for (i = 0; i < count; ++i, ++p)
6473
    if ((target_flags & p->target_mask) == p->target_mask)
6474
      alpha_builtin_function (p->name, ftype, p->code,
6475
                              (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
6476
}
6477
 
6478
static void
6479
alpha_init_builtins (void)
6480
{
6481
  tree dimode_integer_type_node;
6482
  tree ftype;
6483
 
6484
  dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6485
 
6486
  ftype = build_function_type_list (dimode_integer_type_node, NULL_TREE);
6487
  alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6488
                      ftype);
6489
 
6490
  ftype = build_function_type_list (dimode_integer_type_node,
6491
                                    dimode_integer_type_node, NULL_TREE);
6492
  alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6493
                      ftype);
6494
 
6495
  ftype = build_function_type_list (dimode_integer_type_node,
6496
                                    dimode_integer_type_node,
6497
                                    dimode_integer_type_node, NULL_TREE);
6498
  alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6499
                      ftype);
6500
 
6501
  ftype = build_function_type_list (ptr_type_node, NULL_TREE);
6502
  alpha_builtin_function ("__builtin_thread_pointer", ftype,
6503
                          ALPHA_BUILTIN_THREAD_POINTER, ECF_NOTHROW);
6504
 
6505
  ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6506
  alpha_builtin_function ("__builtin_set_thread_pointer", ftype,
6507
                          ALPHA_BUILTIN_SET_THREAD_POINTER, ECF_NOTHROW);
6508
 
6509
  if (TARGET_ABI_OPEN_VMS)
6510
    {
6511
      ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6512
                                        NULL_TREE);
6513
      alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6514
                              ftype,
6515
                              ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6516
                              0);
6517
 
6518
      ftype = build_function_type_list (ptr_type_node, void_type_node,
6519
                                        NULL_TREE);
6520
      alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6521
                              ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
6522
 
6523
      vms_patch_builtins ();
6524
    }
6525
 
6526
  alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6527
  alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6528
  alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6529
  alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6530
}
6531
 
6532
/* Expand an expression EXP that calls a built-in function,
6533
   with result going to TARGET if that's convenient
6534
   (and in mode MODE if that's convenient).
6535
   SUBTARGET may be used as the target for computing one of EXP's operands.
6536
   IGNORE is nonzero if the value is to be ignored.  */
6537
 
6538
static rtx
6539
alpha_expand_builtin (tree exp, rtx target,
6540
                      rtx subtarget ATTRIBUTE_UNUSED,
6541
                      enum machine_mode mode ATTRIBUTE_UNUSED,
6542
                      int ignore ATTRIBUTE_UNUSED)
6543
{
6544
#define MAX_ARGS 2
6545
 
6546
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6547
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6548
  tree arg;
6549
  call_expr_arg_iterator iter;
6550
  enum insn_code icode;
6551
  rtx op[MAX_ARGS], pat;
6552
  int arity;
6553
  bool nonvoid;
6554
 
6555
  if (fcode >= ALPHA_BUILTIN_max)
6556
    internal_error ("bad builtin fcode");
6557
  icode = code_for_builtin[fcode];
6558
  if (icode == 0)
6559
    internal_error ("bad builtin fcode");
6560
 
6561
  nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6562
 
6563
  arity = 0;
6564
  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6565
    {
6566
      const struct insn_operand_data *insn_op;
6567
 
6568
      if (arg == error_mark_node)
6569
        return NULL_RTX;
6570
      if (arity >= MAX_ARGS)
6571
        return NULL_RTX;
6572
 
6573
      insn_op = &insn_data[icode].operand[arity + nonvoid];
6574
 
6575
      op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6576
 
6577
      if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6578
        op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6579
      arity++;
6580
    }
6581
 
6582
  if (nonvoid)
6583
    {
6584
      enum machine_mode tmode = insn_data[icode].operand[0].mode;
6585
      if (!target
6586
          || GET_MODE (target) != tmode
6587
          || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6588
        target = gen_reg_rtx (tmode);
6589
    }
6590
 
6591
  switch (arity)
6592
    {
6593
    case 0:
6594
      pat = GEN_FCN (icode) (target);
6595
      break;
6596
    case 1:
6597
      if (nonvoid)
6598
        pat = GEN_FCN (icode) (target, op[0]);
6599
      else
6600
        pat = GEN_FCN (icode) (op[0]);
6601
      break;
6602
    case 2:
6603
      pat = GEN_FCN (icode) (target, op[0], op[1]);
6604
      break;
6605
    default:
6606
      gcc_unreachable ();
6607
    }
6608
  if (!pat)
6609
    return NULL_RTX;
6610
  emit_insn (pat);
6611
 
6612
  if (nonvoid)
6613
    return target;
6614
  else
6615
    return const0_rtx;
6616
}
6617
 
6618
 
6619
/* Several bits below assume HWI >= 64 bits.  This should be enforced
6620
   by config.gcc.  */
6621
#if HOST_BITS_PER_WIDE_INT < 64
6622
# error "HOST_WIDE_INT too small"
6623
#endif
6624
 
6625
/* Fold the builtin for the CMPBGE instruction.  This is a vector comparison
6626
   with an 8-bit output vector.  OPINT contains the integer operands; bit N
6627
   of OP_CONST is set if OPINT[N] is valid.  */
6628
 
6629
static tree
6630
alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6631
{
6632
  if (op_const == 3)
6633
    {
6634
      int i, val;
6635
      for (i = 0, val = 0; i < 8; ++i)
6636
        {
6637
          unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6638
          unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6639
          if (c0 >= c1)
6640
            val |= 1 << i;
6641
        }
6642
      return build_int_cst (long_integer_type_node, val);
6643
    }
6644
  else if (op_const == 2 && opint[1] == 0)
6645
    return build_int_cst (long_integer_type_node, 0xff);
6646
  return NULL;
6647
}
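/* Added illustration, not part of the original source: a standalone sketch
   of the arithmetic the CMPBGE fold above performs when both operands are
   constant (op_const == 3), assuming a 64-bit unsigned long.  For instance,
   cmpbge_fold_example (0x0102030405060708UL, 0x0808080808080808UL) yields
   0x01, since only byte 0 (0x08) reaches its counterpart.  */
#if 0
static long
cmpbge_fold_example (unsigned long a, unsigned long b)
{
  long val = 0;
  int i;

  /* Bit I of the result is set when byte I of A is unsigned >= byte I
     of B, mirroring the loop in the fold above.  */
  for (i = 0; i < 8; ++i)
    if (((a >> (i * 8)) & 0xff) >= ((b >> (i * 8)) & 0xff))
      val |= 1 << i;
  return val;
}
#endif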
6648
 
6649
/* Fold the builtin for the ZAPNOT instruction.  This is essentially a
6650
   specialized form of an AND operation.  Other byte manipulation instructions
6651
   are defined in terms of this instruction, so this is also used as a
6652
   subroutine for other builtins.
6653
 
6654
   OP contains the tree operands; OPINT contains the extracted integer values.
6655
   Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
6656
   OPINT may be considered.  */
6657
 
6658
static tree
6659
alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6660
                           long op_const)
6661
{
6662
  if (op_const & 2)
6663
    {
6664
      unsigned HOST_WIDE_INT mask = 0;
6665
      int i;
6666
 
6667
      for (i = 0; i < 8; ++i)
6668
        if ((opint[1] >> i) & 1)
6669
          mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6670
 
6671
      if (op_const & 1)
6672
        return build_int_cst (long_integer_type_node, opint[0] & mask);
6673
 
6674
      if (op)
6675
        return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6676
                            build_int_cst (long_integer_type_node, mask));
6677
    }
6678
  else if ((op_const & 1) && opint[0] == 0)
6679
    return build_int_cst (long_integer_type_node, 0);
6680
  return NULL;
6681
}
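/* Added illustration, not from the original source: the byte mask the
   ZAPNOT fold above constructs from a constant selector (op_const & 2),
   assuming a 64-bit unsigned long.  E.g. zapnot_mask_example (0x0f) is
   0x00000000ffffffff, so a ZAPNOT of X by 0x0f folds to X & 0xffffffff;
   ZAP simply complements the selector first (see the dispatch below).  */
#if 0
static unsigned long
zapnot_mask_example (unsigned long byte_selector)
{
  unsigned long mask = 0;
  int i;

  /* Each set bit I of the selector keeps byte I of the other operand.  */
  for (i = 0; i < 8; ++i)
    if ((byte_selector >> i) & 1)
      mask |= (unsigned long) 0xff << (i * 8);
  return mask;
}
#endif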
6682
 
6683
/* Fold the builtins for the EXT family of instructions.  */
6684
 
6685
static tree
6686
alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6687
                          long op_const, unsigned HOST_WIDE_INT bytemask,
6688
                          bool is_high)
6689
{
6690
  long zap_const = 2;
6691
  tree *zap_op = NULL;
6692
 
6693
  if (op_const & 2)
6694
    {
6695
      unsigned HOST_WIDE_INT loc;
6696
 
6697
      loc = opint[1] & 7;
6698
      loc *= BITS_PER_UNIT;
6699
 
6700
      if (loc != 0)
6701
        {
6702
          if (op_const & 1)
6703
            {
6704
              unsigned HOST_WIDE_INT temp = opint[0];
6705
              if (is_high)
6706
                temp <<= loc;
6707
              else
6708
                temp >>= loc;
6709
              opint[0] = temp;
6710
              zap_const = 3;
6711
            }
6712
        }
6713
      else
6714
        zap_op = op;
6715
    }
6716
 
6717
  opint[1] = bytemask;
6718
  return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6719
}
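/* Added worked example, not part of the original source: for the EXTBL
   fold (bytemask 0x01, is_high false) with both operands constant, say
   opint[0] == 0x1122334455667788 and opint[1] == 1, LOC becomes 8 bits,
   opint[0] is shifted right to 0x0011223344556677, and the ZAPNOT fold
   with mask 0x01 then yields 0x77 -- byte 1 of the original value.  */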
6720
 
6721
/* Fold the builtins for the INS family of instructions.  */
6722
 
6723
static tree
6724
alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6725
                          long op_const, unsigned HOST_WIDE_INT bytemask,
6726
                          bool is_high)
6727
{
6728
  if ((op_const & 1) && opint[0] == 0)
6729
    return build_int_cst (long_integer_type_node, 0);
6730
 
6731
  if (op_const & 2)
6732
    {
6733
      unsigned HOST_WIDE_INT temp, loc, byteloc;
6734
      tree *zap_op = NULL;
6735
 
6736
      loc = opint[1] & 7;
6737
      bytemask <<= loc;
6738
 
6739
      temp = opint[0];
6740
      if (is_high)
6741
        {
6742
          byteloc = (64 - (loc * 8)) & 0x3f;
6743
          if (byteloc == 0)
6744
            zap_op = op;
6745
          else
6746
            temp >>= byteloc;
6747
          bytemask >>= 8;
6748
        }
6749
      else
6750
        {
6751
          byteloc = loc * 8;
6752
          if (byteloc == 0)
6753
            zap_op = op;
6754
          else
6755
            temp <<= byteloc;
6756
        }
6757
 
6758
      opint[0] = temp;
6759
      opint[1] = bytemask;
6760
      return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6761
    }
6762
 
6763
  return NULL;
6764
}
6765
 
6766
static tree
6767
alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6768
                          long op_const, unsigned HOST_WIDE_INT bytemask,
6769
                          bool is_high)
6770
{
6771
  if (op_const & 2)
6772
    {
6773
      unsigned HOST_WIDE_INT loc;
6774
 
6775
      loc = opint[1] & 7;
6776
      bytemask <<= loc;
6777
 
6778
      if (is_high)
6779
        bytemask >>= 8;
6780
 
6781
      opint[1] = bytemask ^ 0xff;
6782
    }
6783
 
6784
  return alpha_fold_builtin_zapnot (op, opint, op_const);
6785
}
6786
 
6787
static tree
6788
alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6789
{
6790
  switch (op_const)
6791
    {
6792
    case 3:
6793
      {
6794
        unsigned HOST_WIDE_INT l;
6795
        HOST_WIDE_INT h;
6796
 
6797
        mul_double (opint[0], 0, opint[1], 0, &l, &h);
6798
 
6799
#if HOST_BITS_PER_WIDE_INT > 64
6800
# error fixme
6801
#endif
6802
 
6803
        return build_int_cst (long_integer_type_node, h);
6804
      }
6805
 
6806
    case 1:
6807
      opint[1] = opint[0];
6808
      /* FALLTHRU */
6809
    case 2:
6810
      /* Note that (X*1) >> 64 == 0.  */
6811
      if (opint[1] == 0 || opint[1] == 1)
6812
        return build_int_cst (long_integer_type_node, 0);
6813
      break;
6814
    }
6815
  return NULL;
6816
}
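/* Added note, not part of the original source: UMULH produces the high
   64 bits of the full 128-bit unsigned product, so for example with
   opint[0] == opint[1] == (1UL << 32) the fold above returns 1, while a
   product with a 0 or 1 operand always has a zero high half, which is
   what the op_const == 1 / op_const == 2 shortcut relies on.  */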
6817
 
6818
static tree
6819
alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6820
{
6821
  tree op0 = fold_convert (vtype, op[0]);
6822
  tree op1 = fold_convert (vtype, op[1]);
6823
  tree val = fold_build2 (code, vtype, op0, op1);
6824
  return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
6825
}
6826
 
6827
static tree
6828
alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6829
{
6830
  unsigned HOST_WIDE_INT temp = 0;
6831
  int i;
6832
 
6833
  if (op_const != 3)
6834
    return NULL;
6835
 
6836
  for (i = 0; i < 8; ++i)
6837
    {
6838
      unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6839
      unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6840
      if (a >= b)
6841
        temp += a - b;
6842
      else
6843
        temp += b - a;
6844
    }
6845
 
6846
  return build_int_cst (long_integer_type_node, temp);
6847
}
6848
 
6849
static tree
6850
alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6851
{
6852
  unsigned HOST_WIDE_INT temp;
6853
 
6854
  if (op_const == 0)
6855
    return NULL;
6856
 
6857
  temp = opint[0] & 0xff;
6858
  temp |= (opint[0] >> 24) & 0xff00;
6859
 
6860
  return build_int_cst (long_integer_type_node, temp);
6861
}
6862
 
6863
static tree
6864
alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6865
{
6866
  unsigned HOST_WIDE_INT temp;
6867
 
6868
  if (op_const == 0)
6869
    return NULL;
6870
 
6871
  temp = opint[0] & 0xff;
6872
  temp |= (opint[0] >>  8) & 0xff00;
6873
  temp |= (opint[0] >> 16) & 0xff0000;
6874
  temp |= (opint[0] >> 24) & 0xff000000;
6875
 
6876
  return build_int_cst (long_integer_type_node, temp);
6877
}
6878
 
6879
static tree
6880
alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6881
{
6882
  unsigned HOST_WIDE_INT temp;
6883
 
6884
  if (op_const == 0)
6885
    return NULL;
6886
 
6887
  temp = opint[0] & 0xff;
6888
  temp |= (opint[0] & 0xff00) << 24;
6889
 
6890
  return build_int_cst (long_integer_type_node, temp);
6891
}
6892
 
6893
static tree
6894
alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6895
{
6896
  unsigned HOST_WIDE_INT temp;
6897
 
6898
  if (op_const == 0)
6899
    return NULL;
6900
 
6901
  temp = opint[0] & 0xff;
6902
  temp |= (opint[0] & 0x0000ff00) << 8;
6903
  temp |= (opint[0] & 0x00ff0000) << 16;
6904
  temp |= (opint[0] & 0xff000000) << 24;
6905
 
6906
  return build_int_cst (long_integer_type_node, temp);
6907
}
6908
 
6909
static tree
6910
alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6911
{
6912
  unsigned HOST_WIDE_INT temp;
6913
 
6914
  if (op_const == 0)
6915
    return NULL;
6916
 
6917
  if (opint[0] == 0)
6918
    temp = 64;
6919
  else
6920
    temp = exact_log2 (opint[0] & -opint[0]);
6921
 
6922
  return build_int_cst (long_integer_type_node, temp);
6923
}
6924
 
6925
static tree
6926
alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6927
{
6928
  unsigned HOST_WIDE_INT temp;
6929
 
6930
  if (op_const == 0)
6931
    return NULL;
6932
 
6933
  if (opint[0] == 0)
6934
    temp = 64;
6935
  else
6936
    temp = 64 - floor_log2 (opint[0]) - 1;
6937
 
6938
  return build_int_cst (long_integer_type_node, temp);
6939
}
6940
 
6941
static tree
6942
alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6943
{
6944
  unsigned HOST_WIDE_INT temp, op;
6945
 
6946
  if (op_const == 0)
6947
    return NULL;
6948
 
6949
  op = opint[0];
6950
  temp = 0;
6951
  while (op)
6952
    temp++, op &= op - 1;
6953
 
6954
  return build_int_cst (long_integer_type_node, temp);
6955
}
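/* Added illustration, not part of the original source: worked values for
   the three folds above on the constant 0x50 (binary 0101 0000).  CTTZ
   isolates the lowest set bit with X & -X (0x10) and takes its log2,
   giving 4; CTLZ computes 64 - floor_log2 (0x50) - 1 == 57; and CTPOP
   clears one set bit per iteration of OP &= OP - 1, giving 2.  For a
   zero operand CTTZ and CTLZ return 64, while CTPOP returns 0.  */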
6956
 
6957
/* Fold one of our builtin functions.  */
6958
 
6959
static tree
6960
alpha_fold_builtin (tree fndecl, int n_args, tree *op,
6961
                    bool ignore ATTRIBUTE_UNUSED)
6962
{
6963
  unsigned HOST_WIDE_INT opint[MAX_ARGS];
6964
  long op_const = 0;
6965
  int i;
6966
 
6967
  if (n_args > MAX_ARGS)
6968
    return NULL;
6969
 
6970
  for (i = 0; i < n_args; i++)
6971
    {
6972
      tree arg = op[i];
6973
      if (arg == error_mark_node)
6974
        return NULL;
6975
 
6976
      opint[i] = 0;
6977
      if (TREE_CODE (arg) == INTEGER_CST)
6978
        {
6979
          op_const |= 1L << i;
6980
          opint[i] = int_cst_value (arg);
6981
        }
6982
    }
6983
 
6984
  switch (DECL_FUNCTION_CODE (fndecl))
6985
    {
6986
    case ALPHA_BUILTIN_CMPBGE:
6987
      return alpha_fold_builtin_cmpbge (opint, op_const);
6988
 
6989
    case ALPHA_BUILTIN_EXTBL:
6990
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6991
    case ALPHA_BUILTIN_EXTWL:
6992
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6993
    case ALPHA_BUILTIN_EXTLL:
6994
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6995
    case ALPHA_BUILTIN_EXTQL:
6996
      return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6997
    case ALPHA_BUILTIN_EXTWH:
6998
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6999
    case ALPHA_BUILTIN_EXTLH:
7000
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7001
    case ALPHA_BUILTIN_EXTQH:
7002
      return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7003
 
7004
    case ALPHA_BUILTIN_INSBL:
7005
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7006
    case ALPHA_BUILTIN_INSWL:
7007
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7008
    case ALPHA_BUILTIN_INSLL:
7009
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7010
    case ALPHA_BUILTIN_INSQL:
7011
      return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7012
    case ALPHA_BUILTIN_INSWH:
7013
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7014
    case ALPHA_BUILTIN_INSLH:
7015
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7016
    case ALPHA_BUILTIN_INSQH:
7017
      return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7018
 
7019
    case ALPHA_BUILTIN_MSKBL:
7020
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7021
    case ALPHA_BUILTIN_MSKWL:
7022
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7023
    case ALPHA_BUILTIN_MSKLL:
7024
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7025
    case ALPHA_BUILTIN_MSKQL:
7026
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7027
    case ALPHA_BUILTIN_MSKWH:
7028
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7029
    case ALPHA_BUILTIN_MSKLH:
7030
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7031
    case ALPHA_BUILTIN_MSKQH:
7032
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7033
 
7034
    case ALPHA_BUILTIN_UMULH:
7035
      return alpha_fold_builtin_umulh (opint, op_const);
7036
 
7037
    case ALPHA_BUILTIN_ZAP:
7038
      opint[1] ^= 0xff;
7039
      /* FALLTHRU */
7040
    case ALPHA_BUILTIN_ZAPNOT:
7041
      return alpha_fold_builtin_zapnot (op, opint, op_const);
7042
 
7043
    case ALPHA_BUILTIN_MINUB8:
7044
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7045
    case ALPHA_BUILTIN_MINSB8:
7046
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7047
    case ALPHA_BUILTIN_MINUW4:
7048
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7049
    case ALPHA_BUILTIN_MINSW4:
7050
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7051
    case ALPHA_BUILTIN_MAXUB8:
7052
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7053
    case ALPHA_BUILTIN_MAXSB8:
7054
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7055
    case ALPHA_BUILTIN_MAXUW4:
7056
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7057
    case ALPHA_BUILTIN_MAXSW4:
7058
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7059
 
7060
    case ALPHA_BUILTIN_PERR:
7061
      return alpha_fold_builtin_perr (opint, op_const);
7062
    case ALPHA_BUILTIN_PKLB:
7063
      return alpha_fold_builtin_pklb (opint, op_const);
7064
    case ALPHA_BUILTIN_PKWB:
7065
      return alpha_fold_builtin_pkwb (opint, op_const);
7066
    case ALPHA_BUILTIN_UNPKBL:
7067
      return alpha_fold_builtin_unpkbl (opint, op_const);
7068
    case ALPHA_BUILTIN_UNPKBW:
7069
      return alpha_fold_builtin_unpkbw (opint, op_const);
7070
 
7071
    case ALPHA_BUILTIN_CTTZ:
7072
      return alpha_fold_builtin_cttz (opint, op_const);
7073
    case ALPHA_BUILTIN_CTLZ:
7074
      return alpha_fold_builtin_ctlz (opint, op_const);
7075
    case ALPHA_BUILTIN_CTPOP:
7076
      return alpha_fold_builtin_ctpop (opint, op_const);
7077
 
7078
    case ALPHA_BUILTIN_AMASK:
7079
    case ALPHA_BUILTIN_IMPLVER:
7080
    case ALPHA_BUILTIN_RPCC:
7081
    case ALPHA_BUILTIN_THREAD_POINTER:
7082
    case ALPHA_BUILTIN_SET_THREAD_POINTER:
7083
      /* None of these are foldable at compile-time.  */
7084
    default:
7085
      return NULL;
7086
    }
7087
}
7088
 
7089
/* This page contains routines that are used to determine what the function
7090
   prologue and epilogue code will do and write them out.  */
7091
 
7092
/* Compute the size of the save area in the stack.  */
7093
 
7094
/* These variables are used for communication between the following functions.
7095
   They indicate various things about the current function being compiled
7096
   that are used to tell what kind of prologue, epilogue and procedure
7097
   descriptor to generate.  */
7098
 
7099
/* Nonzero if we need a stack procedure.  */
7100
enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7101
static enum alpha_procedure_types alpha_procedure_type;
7102
 
7103
/* Register number (either FP or SP) that is used to unwind the frame.  */
7104
static int vms_unwind_regno;
7105
 
7106
/* Register number used to save FP.  We need not have one for RA since
7107
   we don't modify it for register procedures.  This is only defined
7108
   for register frame procedures.  */
7109
static int vms_save_fp_regno;
7110
 
7111
/* Register number used to reference objects off our PV.  */
7112
static int vms_base_regno;
7113
 
7114
/* Compute register masks for saved registers.  */
7115
 
7116
static void
7117
alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7118
{
7119
  unsigned long imask = 0;
7120
  unsigned long fmask = 0;
7121
  unsigned int i;
7122
 
7123
  /* When outputting a thunk, we don't have valid register life info,
7124
     but assemble_start_function wants to output .frame and .mask
7125
     directives.  */
7126
  if (cfun->is_thunk)
7127
    {
7128
      *imaskP = 0;
7129
      *fmaskP = 0;
7130
      return;
7131
    }
7132
 
7133
  if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7134
    imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7135
 
7136
  /* One for every register we have to save.  */
7137
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7138
    if (! fixed_regs[i] && ! call_used_regs[i]
7139
        && df_regs_ever_live_p (i) && i != REG_RA)
7140
      {
7141
        if (i < 32)
7142
          imask |= (1UL << i);
7143
        else
7144
          fmask |= (1UL << (i - 32));
7145
      }
7146
 
7147
  /* We need to restore these for the handler.  */
7148
  if (crtl->calls_eh_return)
7149
    {
7150
      for (i = 0; ; ++i)
7151
        {
7152
          unsigned regno = EH_RETURN_DATA_REGNO (i);
7153
          if (regno == INVALID_REGNUM)
7154
            break;
7155
          imask |= 1UL << regno;
7156
        }
7157
    }
7158
 
7159
  /* If any register spilled, then spill the return address also.  */
7160
  /* ??? This is required by the Digital stack unwind specification
7161
     and isn't needed if we're doing Dwarf2 unwinding.  */
7162
  if (imask || fmask || alpha_ra_ever_killed ())
7163
    imask |= (1UL << REG_RA);
7164
 
7165
  *imaskP = imask;
7166
  *fmaskP = fmask;
7167
}
7168
 
7169
int
7170
alpha_sa_size (void)
7171
{
7172
  unsigned long mask[2];
7173
  int sa_size = 0;
7174
  int i, j;
7175
 
7176
  alpha_sa_mask (&mask[0], &mask[1]);
7177
 
7178
  for (j = 0; j < 2; ++j)
7179
    for (i = 0; i < 32; ++i)
7180
      if ((mask[j] >> i) & 1)
7181
        sa_size++;
7182
 
7183
  if (TARGET_ABI_OPEN_VMS)
7184
    {
7185
      /* Start with a stack procedure if we make any calls (REG_RA used), or
7186
         need a frame pointer, with a register procedure if we otherwise need
7187
         at least a slot, and with a null procedure in other cases.  */
7188
      if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7189
        alpha_procedure_type = PT_STACK;
7190
      else if (get_frame_size() != 0)
7191
        alpha_procedure_type = PT_REGISTER;
7192
      else
7193
        alpha_procedure_type = PT_NULL;
7194
 
7195
      /* Don't reserve space for saving FP & RA yet.  Do that later after we've
7196
         made the final decision on stack procedure vs register procedure.  */
7197
      if (alpha_procedure_type == PT_STACK)
7198
        sa_size -= 2;
7199
 
7200
      /* Decide whether to refer to objects off our PV via FP or PV.
7201
         If we need FP for something else or if we receive a nonlocal
7202
         goto (which expects PV to contain the value), we must use PV.
7203
         Otherwise, start by assuming we can use FP.  */
7204
 
7205
      vms_base_regno
7206
        = (frame_pointer_needed
7207
           || cfun->has_nonlocal_label
7208
           || alpha_procedure_type == PT_STACK
7209
           || crtl->outgoing_args_size)
7210
          ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7211
 
7212
      /* If we want to copy PV into FP, we need to find some register
7213
         in which to save FP.  */
7214
 
7215
      vms_save_fp_regno = -1;
7216
      if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7217
        for (i = 0; i < 32; i++)
7218
          if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7219
            vms_save_fp_regno = i;
7220
 
7221
      /* A VMS condition handler requires a stack procedure in our
7222
         implementation (not required by the calling standard).  */
7223
      if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7224
          || cfun->machine->uses_condition_handler)
7225
        vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7226
      else if (alpha_procedure_type == PT_NULL)
7227
        vms_base_regno = REG_PV;
7228
 
7229
      /* Stack unwinding should be done via FP unless we use it for PV.  */
7230
      vms_unwind_regno = (vms_base_regno == REG_PV
7231
                          ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7232
 
7233
      /* If this is a stack procedure, allow space for saving FP, RA and
7234
         a condition handler slot if needed.  */
7235
      if (alpha_procedure_type == PT_STACK)
7236
        sa_size += 2 + cfun->machine->uses_condition_handler;
7237
    }
7238
  else
7239
    {
7240
      /* Our size must be even (multiple of 16 bytes).  */
7241
      if (sa_size & 1)
7242
        sa_size++;
7243
    }
7244
 
7245
  return sa_size * 8;
7246
}
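/* Added worked example, not part of the original source: on a non-VMS
   target whose only saved registers are $9, $10 and the return address
   $26, the mask loop above counts 3 slots, the final adjustment rounds
   that up to 4 so the save area stays a multiple of 16 bytes, and the
   function returns 32.  */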
7247
 
7248
/* Define the offset between two registers, one to be eliminated,
7249
   and the other its replacement, at the start of a routine.  */
7250
 
7251
HOST_WIDE_INT
7252
alpha_initial_elimination_offset (unsigned int from,
7253
                                  unsigned int to ATTRIBUTE_UNUSED)
7254
{
7255
  HOST_WIDE_INT ret;
7256
 
7257
  ret = alpha_sa_size ();
7258
  ret += ALPHA_ROUND (crtl->outgoing_args_size);
7259
 
7260
  switch (from)
7261
    {
7262
    case FRAME_POINTER_REGNUM:
7263
      break;
7264
 
7265
    case ARG_POINTER_REGNUM:
7266
      ret += (ALPHA_ROUND (get_frame_size ()
7267
                           + crtl->args.pretend_args_size)
7268
              - crtl->args.pretend_args_size);
7269
      break;
7270
 
7271
    default:
7272
      gcc_unreachable ();
7273
    }
7274
 
7275
  return ret;
7276
}
7277
 
7278
#if TARGET_ABI_OPEN_VMS
7279
 
7280
/* Worker function for TARGET_CAN_ELIMINATE.  */
7281
 
7282
static bool
7283
alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
7284
{
7285
  /* We need the alpha_procedure_type to decide. Evaluate it now.  */
7286
  alpha_sa_size ();
7287
 
7288
  switch (alpha_procedure_type)
7289
    {
7290
    case PT_NULL:
7291
      /* NULL procedures have no frame of their own and we only
7292
         know how to resolve from the current stack pointer.  */
7293
      return to == STACK_POINTER_REGNUM;
7294
 
7295
    case PT_REGISTER:
7296
    case PT_STACK:
7297
      /* We always eliminate except to the stack pointer if there is no
7298
         usable frame pointer at hand.  */
7299
      return (to != STACK_POINTER_REGNUM
7300
              || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7301
    }
7302
 
7303
  gcc_unreachable ();
7304
}
7305
 
7306
/* FROM is to be eliminated for TO. Return the offset so that TO+offset
7307
   designates the same location as FROM.  */
7308
 
7309
HOST_WIDE_INT
7310
alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7311
{
7312
  /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7313
     HARD_FRAME or STACK_PTR.  We need the alpha_procedure_type to decide
7314
     on the proper computations and will need the register save area size
7315
     in most cases.  */
7316
 
7317
  HOST_WIDE_INT sa_size = alpha_sa_size ();
7318
 
7319
  /* PT_NULL procedures have no frame of their own and we only allow
7320
     elimination to the stack pointer. This is the argument pointer and we
7321
     resolve the soft frame pointer to that as well.  */
7322
 
7323
  if (alpha_procedure_type == PT_NULL)
7324
    return 0;
7325
 
7326
  /* For a PT_STACK procedure the frame layout looks as follows
7327
 
7328
                      -----> decreasing addresses
7329
 
7330
                   <             size rounded up to 16       |   likewise   >
7331
     --------------#------------------------------+++--------------+++-------#
7332
     incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7333
     --------------#---------------------------------------------------------#
7334
                                   ^         ^              ^               ^
7335
                              ARG_PTR FRAME_PTR HARD_FRAME_PTR       STACK_PTR
7336
 
7337
 
7338
     PT_REGISTER procedures are similar in that they may have a frame of their
7339
     own. They have no regs-sa/pv/outgoing-args area.
7340
 
7341
     We first compute offset to HARD_FRAME_PTR, then add what we need to get
7342
     to STACK_PTR if need be.  */
7343
 
7344
  {
7345
    HOST_WIDE_INT offset;
7346
    HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7347
 
7348
    switch (from)
7349
      {
7350
      case FRAME_POINTER_REGNUM:
7351
        offset = ALPHA_ROUND (sa_size + pv_save_size);
7352
        break;
7353
      case ARG_POINTER_REGNUM:
7354
        offset = (ALPHA_ROUND (sa_size + pv_save_size
7355
                               + get_frame_size ()
7356
                               + crtl->args.pretend_args_size)
7357
                  - crtl->args.pretend_args_size);
7358
        break;
7359
      default:
7360
        gcc_unreachable ();
7361
      }
7362
 
7363
    if (to == STACK_POINTER_REGNUM)
7364
      offset += ALPHA_ROUND (crtl->outgoing_args_size);
7365
 
7366
    return offset;
7367
  }
7368
}
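/* Added worked example, not part of the original source, assuming
   ALPHA_ROUND rounds up to the 16-byte stack alignment: for a PT_STACK
   procedure with sa_size == 16 (so pv_save_size == 8), a 32-byte frame
   and no pretend args, FRAME_POINTER eliminates to HARD_FRAME_POINTER at
   offset ALPHA_ROUND (24) == 32 and ARG_POINTER at ALPHA_ROUND (56) == 64;
   eliminating to STACK_POINTER adds ALPHA_ROUND of the outgoing-args size
   on top of either figure.  */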
7369
 
7370
#define COMMON_OBJECT "common_object"
7371
 
7372
static tree
7373
common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7374
                       tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7375
                       bool *no_add_attrs ATTRIBUTE_UNUSED)
7376
{
7377
  tree decl = *node;
7378
  gcc_assert (DECL_P (decl));
7379
 
7380
  DECL_COMMON (decl) = 1;
7381
  return NULL_TREE;
7382
}
7383
 
7384
static const struct attribute_spec vms_attribute_table[] =
7385
{
7386
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
7387
       affects_type_identity } */
7388
  { COMMON_OBJECT,   0, 1, true,  false, false, common_object_handler, false },
7389
  { NULL,            0, 0, false, false, false, NULL, false }
7390
};
7391
 
7392
void
7393
vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7394
                               unsigned HOST_WIDE_INT size,
7395
                               unsigned int align)
7396
{
7397
  tree attr = DECL_ATTRIBUTES (decl);
7398
  fprintf (file, "%s", COMMON_ASM_OP);
7399
  assemble_name (file, name);
7400
  fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7401
  /* ??? Unlike on OSF/1, the alignment factor is not in log units.  */
7402
  fprintf (file, ",%u", align / BITS_PER_UNIT);
7403
  if (attr)
7404
    {
7405
      attr = lookup_attribute (COMMON_OBJECT, attr);
7406
      if (attr)
7407
        fprintf (file, ",%s",
7408
                 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7409
    }
7410
  fputc ('\n', file);
7411
}
7412
 
7413
#undef COMMON_OBJECT
7414
 
7415
#endif
7416
 
7417
static int
7418
find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7419
{
7420
  return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7421
}
7422
 
7423
int
7424
alpha_find_lo_sum_using_gp (rtx insn)
7425
{
7426
  return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7427
}
7428
 
7429
static int
7430
alpha_does_function_need_gp (void)
7431
{
7432
  rtx insn;
7433
 
7434
  /* The GP being variable is an OSF abi thing.  */
7435
  if (! TARGET_ABI_OSF)
7436
    return 0;
7437
 
7438
  /* We need the gp to load the address of __mcount.  */
7439
  if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7440
    return 1;
7441
 
7442
  /* The code emitted by alpha_output_mi_thunk_osf uses the gp.  */
7443
  if (cfun->is_thunk)
7444
    return 1;
7445
 
7446
  /* The nonlocal receiver pattern assumes that the gp is valid for
7447
     the nested function.  Reasonable because it's almost always set
7448
     correctly already.  For the cases where that's wrong, make sure
7449
     the nested function loads its gp on entry.  */
7450
  if (crtl->has_nonlocal_goto)
7451
    return 1;
7452
 
7453
  /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7454
     Even if we are a static function, we still need to do this in case
7455
     our address is taken and passed to something like qsort.  */
7456
 
7457
  push_topmost_sequence ();
7458
  insn = get_insns ();
7459
  pop_topmost_sequence ();
7460
 
7461
  for (; insn; insn = NEXT_INSN (insn))
7462
    if (NONDEBUG_INSN_P (insn)
7463
        && ! JUMP_TABLE_DATA_P (insn)
7464
        && GET_CODE (PATTERN (insn)) != USE
7465
        && GET_CODE (PATTERN (insn)) != CLOBBER
7466
        && get_attr_usegp (insn))
7467
      return 1;
7468
 
7469
  return 0;
7470
}
7471
 
7472
 
7473
/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7474
   sequences.  */
7475
 
7476
static rtx
7477
set_frame_related_p (void)
7478
{
7479
  rtx seq = get_insns ();
7480
  rtx insn;
7481
 
7482
  end_sequence ();
7483
 
7484
  if (!seq)
7485
    return NULL_RTX;
7486
 
7487
  if (INSN_P (seq))
7488
    {
7489
      insn = seq;
7490
      while (insn != NULL_RTX)
7491
        {
7492
          RTX_FRAME_RELATED_P (insn) = 1;
7493
          insn = NEXT_INSN (insn);
7494
        }
7495
      seq = emit_insn (seq);
7496
    }
7497
  else
7498
    {
7499
      seq = emit_insn (seq);
7500
      RTX_FRAME_RELATED_P (seq) = 1;
7501
    }
7502
  return seq;
7503
}
7504
 
7505
#define FRP(exp)  (start_sequence (), exp, set_frame_related_p ())
7506
 
7507
/* Generates a store with the proper unwind info attached.  VALUE is
7508
   stored at BASE_REG+BASE_OFS.  If FRAME_BIAS is nonzero, then BASE_REG
7509
   contains SP+FRAME_BIAS, and that is the unwind info that should be
7510
   generated.  If FRAME_REG != VALUE, then VALUE is being stored on
7511
   behalf of FRAME_REG, and FRAME_REG should be present in the unwind.  */
7512
 
7513
static void
7514
emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7515
                    HOST_WIDE_INT base_ofs, rtx frame_reg)
7516
{
7517
  rtx addr, mem, insn;
7518
 
7519
  addr = plus_constant (base_reg, base_ofs);
7520
  mem = gen_frame_mem (DImode, addr);
7521
 
7522
  insn = emit_move_insn (mem, value);
7523
  RTX_FRAME_RELATED_P (insn) = 1;
7524
 
7525
  if (frame_bias || value != frame_reg)
7526
    {
7527
      if (frame_bias)
7528
        {
7529
          addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7530
          mem = gen_rtx_MEM (DImode, addr);
7531
        }
7532
 
7533
      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7534
                    gen_rtx_SET (VOIDmode, mem, frame_reg));
7535
    }
7536
}
7537
 
7538
static void
7539
emit_frame_store (unsigned int regno, rtx base_reg,
7540
                  HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7541
{
7542
  rtx reg = gen_rtx_REG (DImode, regno);
7543
  emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7544
}
7545
 
7546
/* Compute the frame size.  SIZE is the size of the "naked" frame
7547
   and SA_SIZE is the size of the register save area.  */
7548
 
7549
static HOST_WIDE_INT
7550
compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7551
{
7552
  if (TARGET_ABI_OPEN_VMS)
7553
    return ALPHA_ROUND (sa_size
7554
                        + (alpha_procedure_type == PT_STACK ? 8 : 0)
7555
                        + size
7556
                        + crtl->args.pretend_args_size);
7557
  else
7558
    return ALPHA_ROUND (crtl->outgoing_args_size)
7559
           + sa_size
7560
           + ALPHA_ROUND (size
7561
                          + crtl->args.pretend_args_size);
7562
}
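/* Added worked example, not part of the original source, assuming
   ALPHA_ROUND rounds up to 16 bytes: on OSF with 48 bytes of outgoing
   args, a 32-byte save area, a 40-byte frame and no pretend args, this
   returns 48 + 32 + ALPHA_ROUND (40) == 128.  */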
7563
 
7564
/* Write function prologue.  */
7565
 
7566
/* On vms we have two kinds of functions:
7567
 
7568
   - stack frame (PROC_STACK)
7569
        these are 'normal' functions with local vars which
7570
        call other functions
7571
   - register frame (PROC_REGISTER)
7572
        keeps all data in registers, needs no stack
7573
 
7574
   We must pass this to the assembler so it can generate the
7575
   proper pdsc (procedure descriptor)
7576
   This is done with the '.pdesc' command.
7577
 
7578
   On non-VMS targets, we don't really differentiate between the two, as we can
7579
   simply allocate stack without saving registers.  */
7580
 
7581
void
7582
alpha_expand_prologue (void)
7583
{
7584
  /* Registers to save.  */
7585
  unsigned long imask = 0;
7586
  unsigned long fmask = 0;
7587
  /* Stack space needed for pushing registers clobbered by us.  */
7588
  HOST_WIDE_INT sa_size, sa_bias;
7589
  /* Complete stack size needed.  */
7590
  HOST_WIDE_INT frame_size;
7591
  /* Probed stack size; it additionally includes the size of
7592
     the "reserve region" if any.  */
7593
  HOST_WIDE_INT probed_size;
7594
  /* Offset from base reg to register save area.  */
7595
  HOST_WIDE_INT reg_offset;
7596
  rtx sa_reg;
7597
  int i;
7598
 
7599
  sa_size = alpha_sa_size ();
7600
  frame_size = compute_frame_size (get_frame_size (), sa_size);
7601
 
7602
  if (flag_stack_usage_info)
7603
    current_function_static_stack_size = frame_size;
7604
 
7605
  if (TARGET_ABI_OPEN_VMS)
7606
    reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7607
  else
7608
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7609
 
7610
  alpha_sa_mask (&imask, &fmask);
7611
 
7612
  /* Emit an insn to reload GP, if needed.  */
7613
  if (TARGET_ABI_OSF)
7614
    {
7615
      alpha_function_needs_gp = alpha_does_function_need_gp ();
7616
      if (alpha_function_needs_gp)
7617
        emit_insn (gen_prologue_ldgp ());
7618
    }
7619
 
7620
  /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7621
     the call to mcount ourselves, rather than having the linker do it
7622
     magically in response to -pg.  Since _mcount has special linkage,
7623
     don't represent the call as a call.  */
7624
  if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7625
    emit_insn (gen_prologue_mcount ());
7626
 
7627
  /* Adjust the stack by the frame size.  If the frame size is > 4096
7628
     bytes, we need to be sure we probe somewhere in the first and last
7629
     4096 bytes (we can probably get away without the latter test) and
7630
     every 8192 bytes in between.  If the frame size is > 32768, we
7631
     do this in a loop.  Otherwise, we generate the explicit probe
7632
     instructions.
7633
 
7634
     Note that we are only allowed to adjust sp once in the prologue.  */
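  /* Added illustration, not part of the original source: with a frame of
     about 20000 bytes (below the 32768 cutoff) the explicit path below
     emits probes at sp-4096 and sp-12288, plus one more at sp-20000 when
     no registers are being saved (with -fstack-check the protect area is
     first added to the probed size); anything larger uses the $22/$23
     probe loop further down.  */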
7635
 
7636
  probed_size = frame_size;
7637
  if (flag_stack_check)
7638
    probed_size += STACK_CHECK_PROTECT;
7639
 
7640
  if (probed_size <= 32768)
7641
    {
7642
      if (probed_size > 4096)
7643
        {
7644
          int probed;
7645
 
7646
          for (probed = 4096; probed < probed_size; probed += 8192)
7647
            emit_insn (gen_probe_stack (GEN_INT (-probed)));
7648
 
7649
          /* We only have to do this probe if we aren't saving registers or
7650
             if we are probing beyond the frame because of -fstack-check.  */
7651
          if ((sa_size == 0 && probed_size > probed - 4096)
7652
              || flag_stack_check)
7653
            emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
7654
        }
7655
 
7656
      if (frame_size != 0)
7657
        FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7658
                                    GEN_INT (-frame_size))));
7659
    }
7660
  else
7661
    {
7662
      /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7663
         number of 8192 byte blocks to probe.  We then probe each block
7664
         in the loop and then set SP to the proper location.  If the
7665
         amount remaining is > 4096, we have to do one more probe if we
7666
         are not saving any registers or if we are probing beyond the
7667
         frame because of -fstack-check.  */
7668
 
7669
      HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7670
      HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
7671
      rtx ptr = gen_rtx_REG (DImode, 22);
7672
      rtx count = gen_rtx_REG (DImode, 23);
7673
      rtx seq;
7674
 
7675
      emit_move_insn (count, GEN_INT (blocks));
7676
      emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
7677
 
7678
      /* Because of the difficulty in emitting a new basic block this
7679
         late in the compilation, generate the loop as a single insn.  */
7680
      emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7681
 
7682
      if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
7683
        {
7684
          rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7685
          MEM_VOLATILE_P (last) = 1;
7686
          emit_move_insn (last, const0_rtx);
7687
        }
7688
 
7689
      if (flag_stack_check)
7690
        {
7691
          /* If -fstack-check is specified we have to load the entire
7692
             constant into a register and subtract from the sp in one go,
7693
             because the probed stack size is not equal to the frame size.  */
7694
          HOST_WIDE_INT lo, hi;
7695
          lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7696
          hi = frame_size - lo;
7697
 
7698
          emit_move_insn (ptr, GEN_INT (hi));
7699
          emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7700
          seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7701
                                       ptr));
7702
        }
7703
      else
7704
        {
7705
          seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7706
                                       GEN_INT (-leftover)));
7707
        }
7708
 
7709
      /* This alternative is special, because the DWARF code cannot
7710
         possibly intuit through the loop above.  So we invent this
7711
         note for it to look at instead.  */
7712
      RTX_FRAME_RELATED_P (seq) = 1;
7713
      add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7714
                    gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7715
                                 plus_constant (stack_pointer_rtx,
7716
                                                -frame_size)));
7717
    }
7718
 
7719
  /* Cope with very large offsets to the register save area.  */
7720
  sa_bias = 0;
7721
  sa_reg = stack_pointer_rtx;
7722
  if (reg_offset + sa_size > 0x8000)
7723
    {
7724
      int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7725
      rtx sa_bias_rtx;
7726
 
7727
      if (low + sa_size <= 0x8000)
7728
        sa_bias = reg_offset - low, reg_offset = low;
7729
      else
7730
        sa_bias = reg_offset, reg_offset = 0;
7731
 
7732
      sa_reg = gen_rtx_REG (DImode, 24);
7733
      sa_bias_rtx = GEN_INT (sa_bias);
7734
 
7735
      if (add_operand (sa_bias_rtx, DImode))
7736
        emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7737
      else
7738
        {
7739
          emit_move_insn (sa_reg, sa_bias_rtx);
7740
          emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7741
        }
7742
    }
7743
 
7744
  /* Save regs in stack order.  Beginning with VMS PV.  */
7745
  if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7746
    emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7747
 
7748
  /* Save register RA next.  */
7749
  if (imask & (1UL << REG_RA))
7750
    {
7751
      emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7752
      imask &= ~(1UL << REG_RA);
7753
      reg_offset += 8;
7754
    }
7755
 
7756
  /* Now save any other registers required to be saved.  */
7757
  for (i = 0; i < 31; i++)
7758
    if (imask & (1UL << i))
7759
      {
7760
        emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7761
        reg_offset += 8;
7762
      }
7763
 
7764
  for (i = 0; i < 31; i++)
7765
    if (fmask & (1UL << i))
7766
      {
7767
        emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7768
        reg_offset += 8;
7769
      }
7770
 
7771
  if (TARGET_ABI_OPEN_VMS)
7772
    {
7773
      /* Register frame procedures save the fp.  */
7774
      if (alpha_procedure_type == PT_REGISTER)
7775
        {
7776
          rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7777
                                     hard_frame_pointer_rtx);
7778
          add_reg_note (insn, REG_CFA_REGISTER, NULL);
7779
          RTX_FRAME_RELATED_P (insn) = 1;
7780
        }
7781
 
7782
      if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7783
        emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7784
                                    gen_rtx_REG (DImode, REG_PV)));
7785
 
7786
      if (alpha_procedure_type != PT_NULL
7787
          && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7788
        FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7789
 
7790
      /* If we have to allocate space for outgoing args, do it now.  */
7791
      if (crtl->outgoing_args_size != 0)
7792
        {
7793
          rtx seq
7794
            = emit_move_insn (stack_pointer_rtx,
7795
                              plus_constant
7796
                              (hard_frame_pointer_rtx,
7797
                               - (ALPHA_ROUND
7798
                                  (crtl->outgoing_args_size))));
7799
 
7800
          /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7801
             if ! frame_pointer_needed. Setting the bit will change the CFA
7802
             computation rule to use sp again, which would be wrong if we had
7803
             frame_pointer_needed, as this means sp might move unpredictably
7804
             later on.
7805
 
7806
             Also, note that
7807
               frame_pointer_needed
7808
               => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7809
             and
7810
               crtl->outgoing_args_size != 0
7811
               => alpha_procedure_type != PT_NULL,
7812
 
7813
             so when we are not setting the bit here, we are guaranteed to
7814
             have emitted an FRP frame pointer update just before.  */
7815
          RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7816
        }
7817
    }
7818
  else
7819
    {
7820
      /* If we need a frame pointer, set it from the stack pointer.  */
7821
      if (frame_pointer_needed)
7822
        {
7823
          if (TARGET_CAN_FAULT_IN_PROLOGUE)
7824
            FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7825
          else
7826
            /* This must always be the last instruction in the
7827
               prologue, thus we emit a special move + clobber.  */
7828
              FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7829
                                           stack_pointer_rtx, sa_reg)));
7830
        }
7831
    }
7832
 
7833
  /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7834
     the prologue, for exception handling reasons, we cannot do this for
7835
     any insn that might fault.  We could prevent this for mems with a
7836
     (clobber:BLK (scratch)), but this doesn't work for fp insns.  So we
7837
     have to prevent all such scheduling with a blockage.
7838
 
7839
     Linux, on the other hand, never bothered to implement OSF/1's
7840
     exception handling, and so doesn't care about such things.  Anyone
7841
     planning to use dwarf2 frame-unwind info can also omit the blockage.  */
7842
 
7843
  if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7844
    emit_insn (gen_blockage ());
7845
}
7846
 
7847
/* Count the number of .file directives, so that .loc is up to date.  */
7848
int num_source_filenames = 0;
7849
 
7850
/* Output the textual info surrounding the prologue.  */
7851
 
7852
void
7853
alpha_start_function (FILE *file, const char *fnname,
7854
                      tree decl ATTRIBUTE_UNUSED)
7855
{
7856
  unsigned long imask = 0;
7857
  unsigned long fmask = 0;
7858
  /* Stack space needed for pushing registers clobbered by us.  */
7859
  HOST_WIDE_INT sa_size;
7860
  /* Complete stack size needed.  */
7861
  unsigned HOST_WIDE_INT frame_size;
7862
  /* The maximum debuggable frame size (512 Kbytes using Tru64 as).  */
7863
  unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7864
                                          ? 524288
7865
                                          : 1UL << 31;
7866
  /* Offset from base reg to register save area.  */
7867
  HOST_WIDE_INT reg_offset;
7868
  char *entry_label = (char *) alloca (strlen (fnname) + 6);
7869
  char *tramp_label = (char *) alloca (strlen (fnname) + 6);
7870
  int i;
7871
 
7872
#if TARGET_ABI_OPEN_VMS
7873
  if (vms_debug_main
7874
      && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
7875
    {
7876
      targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
7877
      ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
7878
      switch_to_section (text_section);
7879
      vms_debug_main = NULL;
7880
    }
7881
#endif
7882
 
7883
  alpha_fnname = fnname;
7884
  sa_size = alpha_sa_size ();
7885
  frame_size = compute_frame_size (get_frame_size (), sa_size);
7886
 
7887
  if (TARGET_ABI_OPEN_VMS)
7888
    reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7889
  else
7890
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7891
 
7892
  alpha_sa_mask (&imask, &fmask);
7893
 
7894
  /* Ecoff can handle multiple .file directives, so put out file and lineno.
7895
     We have to do that before the .ent directive as we cannot switch
7896
     files within procedures with native ecoff because line numbers are
7897
     linked to procedure descriptors.
7898
     Outputting the lineno helps debugging of one line functions as they
7899
     would otherwise get no line number at all. Please note that we would
7900
     like to put out last_linenum from final.c, but it is not accessible.  */
7901
 
7902
  if (write_symbols == SDB_DEBUG)
7903
    {
7904
#ifdef ASM_OUTPUT_SOURCE_FILENAME
7905
      ASM_OUTPUT_SOURCE_FILENAME (file,
7906
                                  DECL_SOURCE_FILE (current_function_decl));
7907
#endif
7908
#ifdef SDB_OUTPUT_SOURCE_LINE
7909
      if (debug_info_level != DINFO_LEVEL_TERSE)
7910
        SDB_OUTPUT_SOURCE_LINE (file,
7911
                                DECL_SOURCE_LINE (current_function_decl));
7912
#endif
7913
    }
7914
 
7915
  /* Issue function start and label.  */
7916
  if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
7917
    {
7918
      fputs ("\t.ent ", file);
7919
      assemble_name (file, fnname);
7920
      putc ('\n', file);
7921
 
7922
      /* If the function needs GP, we'll write the "..ng" label there.
7923
         Otherwise, do it here.  */
7924
      if (TARGET_ABI_OSF
7925
          && ! alpha_function_needs_gp
7926
          && ! cfun->is_thunk)
7927
        {
7928
          putc ('$', file);
7929
          assemble_name (file, fnname);
7930
          fputs ("..ng:\n", file);
7931
        }
7932
    }
7933
  /* Nested functions on VMS that are potentially called via trampoline
7934
     get a special transfer entry point that loads the called function's
7935
     procedure descriptor and static chain.  */
7936
   if (TARGET_ABI_OPEN_VMS
7937
       && !TREE_PUBLIC (decl)
7938
       && DECL_CONTEXT (decl)
7939
       && !TYPE_P (DECL_CONTEXT (decl))
7940
       && TREE_CODE (DECL_CONTEXT (decl)) != TRANSLATION_UNIT_DECL)
7941
     {
7942
        strcpy (tramp_label, fnname);
7943
        strcat (tramp_label, "..tr");
7944
        ASM_OUTPUT_LABEL (file, tramp_label);
7945
        fprintf (file, "\tldq $1,24($27)\n");
7946
        fprintf (file, "\tldq $27,16($27)\n");
7947
     }
7948
 
7949
  strcpy (entry_label, fnname);
7950
  if (TARGET_ABI_OPEN_VMS)
7951
    strcat (entry_label, "..en");
7952
 
7953
  ASM_OUTPUT_LABEL (file, entry_label);
7954
  inside_function = TRUE;
7955
 
7956
  if (TARGET_ABI_OPEN_VMS)
7957
    fprintf (file, "\t.base $%d\n", vms_base_regno);
7958
 
7959
  if (TARGET_ABI_OSF
7960
      && TARGET_IEEE_CONFORMANT
7961
      && !flag_inhibit_size_directive)
7962
    {
7963
      /* Set flags in procedure descriptor to request IEEE-conformant
7964
         math-library routines.  The value we set it to is PDSC_EXC_IEEE
7965
         (/usr/include/pdsc.h).  */
7966
      fputs ("\t.eflag 48\n", file);
7967
    }
7968
 
7969
  /* Set up offsets to alpha virtual arg/local debugging pointer.  */
7970
  alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
7971
  alpha_arg_offset = -frame_size + 48;
7972
 
7973
  /* Describe our frame.  If the frame size exceeds what the assembler or
7974
     debugger can represent, print it as zero to avoid an error.  We won't be
7975
     properly describing such a frame, but that's the best we can do.  */
7976
  if (TARGET_ABI_OPEN_VMS)
7977
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7978
             HOST_WIDE_INT_PRINT_DEC "\n",
7979
             vms_unwind_regno,
7980
             frame_size >= (1UL << 31) ? 0 : frame_size,
7981
             reg_offset);
7982
  else if (!flag_inhibit_size_directive)
7983
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7984
             (frame_pointer_needed
7985
              ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7986
             frame_size >= max_frame_size ? 0 : frame_size,
7987
             crtl->args.pretend_args_size);
7988
 
7989
  /* Describe which registers were spilled.  */
7990
  if (TARGET_ABI_OPEN_VMS)
7991
    {
7992
      if (imask)
7993
        /* ??? Does VMS care if mask contains ra?  The old code didn't
7994
           set it, so I don't here.  */
7995
        fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7996
      if (fmask)
7997
        fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7998
      if (alpha_procedure_type == PT_REGISTER)
7999
        fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
8000
    }
8001
  else if (!flag_inhibit_size_directive)
8002
    {
8003
      if (imask)
8004
        {
8005
          fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
8006
                   frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8007
 
8008
          for (i = 0; i < 32; ++i)
8009
            if (imask & (1UL << i))
8010
              reg_offset += 8;
8011
        }
8012
 
8013
      if (fmask)
8014
        fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
8015
                 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8016
    }
8017
 
8018
#if TARGET_ABI_OPEN_VMS
8019
  /* If a user condition handler has been installed at some point, emit
8020
     the procedure descriptor bits to point the Condition Handling Facility
8021
     at the indirection wrapper, and state the fp offset at which the user
8022
     handler may be found.  */
8023
  if (cfun->machine->uses_condition_handler)
8024
    {
8025
      fprintf (file, "\t.handler __gcc_shell_handler\n");
8026
      fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
8027
    }
8028
 
8029
#ifdef TARGET_VMS_CRASH_DEBUG
8030
  /* Support of minimal traceback info.  */
8031
  switch_to_section (readonly_data_section);
8032
  fprintf (file, "\t.align 3\n");
8033
  assemble_name (file, fnname); fputs ("..na:\n", file);
8034
  fputs ("\t.ascii \"", file);
8035
  assemble_name (file, fnname);
8036
  fputs ("\\0\"\n", file);
8037
  switch_to_section (text_section);
8038
#endif
8039
#endif /* TARGET_ABI_OPEN_VMS */
8040
}
8041
 
8042
/* Emit the .prologue note at the scheduled end of the prologue.  */
8043
 
8044
static void
8045
alpha_output_function_end_prologue (FILE *file)
8046
{
8047
  if (TARGET_ABI_OPEN_VMS)
8048
    fputs ("\t.prologue\n", file);
8049
  else if (!flag_inhibit_size_directive)
8050
    fprintf (file, "\t.prologue %d\n",
8051
             alpha_function_needs_gp || cfun->is_thunk);
8052
}
8053
 
8054
/* Write function epilogue.  */
8055
 
8056
void
8057
alpha_expand_epilogue (void)
8058
{
8059
  /* Registers to save.  */
8060
  unsigned long imask = 0;
8061
  unsigned long fmask = 0;
8062
  /* Stack space needed for pushing registers clobbered by us.  */
8063
  HOST_WIDE_INT sa_size;
8064
  /* Complete stack size needed.  */
8065
  HOST_WIDE_INT frame_size;
8066
  /* Offset from base reg to register save area.  */
8067
  HOST_WIDE_INT reg_offset;
8068
  int fp_is_frame_pointer, fp_offset;
8069
  rtx sa_reg, sa_reg_exp = NULL;
8070
  rtx sp_adj1, sp_adj2, mem, reg, insn;
8071
  rtx eh_ofs;
8072
  rtx cfa_restores = NULL_RTX;
8073
  int i;
8074
 
8075
  sa_size = alpha_sa_size ();
8076
  frame_size = compute_frame_size (get_frame_size (), sa_size);
8077
 
8078
  if (TARGET_ABI_OPEN_VMS)
8079
    {
8080
       if (alpha_procedure_type == PT_STACK)
8081
          reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8082
       else
8083
          reg_offset = 0;
8084
    }
8085
  else
8086
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8087
 
8088
  alpha_sa_mask (&imask, &fmask);
8089
 
8090
  fp_is_frame_pointer
8091
    = (TARGET_ABI_OPEN_VMS
8092
       ? alpha_procedure_type == PT_STACK
8093
       : frame_pointer_needed);
8094
  fp_offset = 0;
8095
  sa_reg = stack_pointer_rtx;
8096
 
8097
  if (crtl->calls_eh_return)
8098
    eh_ofs = EH_RETURN_STACKADJ_RTX;
8099
  else
8100
    eh_ofs = NULL_RTX;
8101
 
8102
  if (sa_size)
8103
    {
8104
      /* If we have a frame pointer, restore SP from it.  */
8105
      if (TARGET_ABI_OPEN_VMS
8106
          ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8107
          : frame_pointer_needed)
8108
        emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8109
 
8110
      /* Cope with very large offsets to the register save area.  */
8111
      if (reg_offset + sa_size > 0x8000)
8112
        {
8113
          int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8114
          HOST_WIDE_INT bias;
8115
 
8116
          if (low + sa_size <= 0x8000)
8117
            bias = reg_offset - low, reg_offset = low;
8118
          else
8119
            bias = reg_offset, reg_offset = 0;
8120
 
8121
          sa_reg = gen_rtx_REG (DImode, 22);
8122
          sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8123
 
8124
          emit_move_insn (sa_reg, sa_reg_exp);
8125
        }
8126
 
8127
      /* Restore registers in order, excepting a true frame pointer.  */
8128
 
8129
      mem = gen_frame_mem (DImode, plus_constant (sa_reg, reg_offset));
8130
      reg = gen_rtx_REG (DImode, REG_RA);
8131
      emit_move_insn (reg, mem);
8132
      cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8133
 
8134
      reg_offset += 8;
8135
      imask &= ~(1UL << REG_RA);
8136
 
8137
      for (i = 0; i < 31; ++i)
8138
        if (imask & (1UL << i))
8139
          {
8140
            if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8141
              fp_offset = reg_offset;
8142
            else
8143
              {
8144
                mem = gen_frame_mem (DImode,
8145
                                     plus_constant (sa_reg, reg_offset));
8146
                reg = gen_rtx_REG (DImode, i);
8147
                emit_move_insn (reg, mem);
8148
                cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8149
                                               cfa_restores);
8150
              }
8151
            reg_offset += 8;
8152
          }
8153
 
8154
      for (i = 0; i < 31; ++i)
8155
        if (fmask & (1UL << i))
8156
          {
8157
            mem = gen_frame_mem (DFmode, plus_constant (sa_reg, reg_offset));
8158
            reg = gen_rtx_REG (DFmode, i+32);
8159
            emit_move_insn (reg, mem);
8160
            cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8161
            reg_offset += 8;
8162
          }
8163
    }
8164
 
8165
  if (frame_size || eh_ofs)
8166
    {
8167
      sp_adj1 = stack_pointer_rtx;
8168
 
8169
      if (eh_ofs)
8170
        {
8171
          sp_adj1 = gen_rtx_REG (DImode, 23);
8172
          emit_move_insn (sp_adj1,
8173
                          gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8174
        }
8175
 
8176
      /* If the stack size is large, begin computation into a temporary
8177
         register so as not to interfere with a potential fp restore,
8178
         which must be consecutive with an SP restore.  */
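      /* Three cases follow: a frame small enough for a single 16-bit
         immediate adjustment (and no alloca), a frame whose high part can
         be folded into an intermediate add through register $23 (or the
         save-area register already computed above), and a frame so large
         that the full constant must be built up in $23.  */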
8179
      if (frame_size < 32768 && !cfun->calls_alloca)
8180
        sp_adj2 = GEN_INT (frame_size);
8181
      else if (frame_size < 0x40007fffL)
8182
        {
8183
          int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8184
 
8185
          sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8186
          if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8187
            sp_adj1 = sa_reg;
8188
          else
8189
            {
8190
              sp_adj1 = gen_rtx_REG (DImode, 23);
8191
              emit_move_insn (sp_adj1, sp_adj2);
8192
            }
8193
          sp_adj2 = GEN_INT (low);
8194
        }
8195
      else
8196
        {
8197
          rtx tmp = gen_rtx_REG (DImode, 23);
8198
          sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8199
          if (!sp_adj2)
8200
            {
8201
              /* We can't drop new things to memory this late, afaik,
8202
                 so build it up by pieces.  */
8203
              sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8204
                                                   -(frame_size < 0));
8205
              gcc_assert (sp_adj2);
8206
            }
8207
        }
8208
 
8209
      /* From now on, things must be in order.  So emit blockages.  */
8210
 
8211
      /* Restore the frame pointer.  */
8212
      if (fp_is_frame_pointer)
8213
        {
8214
          emit_insn (gen_blockage ());
8215
          mem = gen_frame_mem (DImode, plus_constant (sa_reg, fp_offset));
8216
          emit_move_insn (hard_frame_pointer_rtx, mem);
8217
          cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8218
                                         hard_frame_pointer_rtx, cfa_restores);
8219
        }
8220
      else if (TARGET_ABI_OPEN_VMS)
8221
        {
8222
          emit_insn (gen_blockage ());
8223
          emit_move_insn (hard_frame_pointer_rtx,
8224
                          gen_rtx_REG (DImode, vms_save_fp_regno));
8225
          cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8226
                                         hard_frame_pointer_rtx, cfa_restores);
8227
        }
8228
 
8229
      /* Restore the stack pointer.  */
8230
      emit_insn (gen_blockage ());
8231
      if (sp_adj2 == const0_rtx)
8232
        insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8233
      else
8234
        insn = emit_move_insn (stack_pointer_rtx,
8235
                               gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8236
      REG_NOTES (insn) = cfa_restores;
8237
      add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8238
      RTX_FRAME_RELATED_P (insn) = 1;
8239
    }
8240
  else
8241
    {
8242
      gcc_assert (cfa_restores == NULL);
8243
 
8244
      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8245
        {
8246
          emit_insn (gen_blockage ());
8247
          insn = emit_move_insn (hard_frame_pointer_rtx,
8248
                                 gen_rtx_REG (DImode, vms_save_fp_regno));
8249
          add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8250
          RTX_FRAME_RELATED_P (insn) = 1;
8251
        }
8252
    }
8253
}
8254
 
8255
/* Output the rest of the textual info surrounding the epilogue.  */
8256
 
8257
void
8258
alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8259
{
8260
  rtx insn;
8261
 
8262
  /* We output a nop after noreturn calls at the very end of the function to
8263
     ensure that the return address always remains in the caller's code range,
8264
     as not doing so might confuse unwinding engines.  */
8265
  insn = get_last_insn ();
8266
  if (!INSN_P (insn))
8267
    insn = prev_active_insn (insn);
8268
  if (insn && CALL_P (insn))
8269
    output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8270
 
8271
#if TARGET_ABI_OPEN_VMS
8272
  /* Write the linkage entries.  */
8273
  alpha_write_linkage (file, fnname);
8274
#endif
8275
 
8276
  /* End the function.  */
8277
  if (TARGET_ABI_OPEN_VMS
8278
      || !flag_inhibit_size_directive)
8279
    {
8280
      fputs ("\t.end ", file);
8281
      assemble_name (file, fnname);
8282
      putc ('\n', file);
8283
    }
8284
  inside_function = FALSE;
8285
}
8286
 
8287
#if TARGET_ABI_OSF
8288
/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8289
 
8290
   In order to avoid the hordes of differences between generated code
8291
   with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8292
   lots of code loading up large constants, generate rtl and emit it
8293
   instead of going straight to text.
8294
 
8295
   Not sure why this idea hasn't been explored before...  */
8296
 
8297
static void
8298
alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8299
                           HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8300
                           tree function)
8301
{
8302
  HOST_WIDE_INT hi, lo;
8303
  rtx this_rtx, insn, funexp;
8304
 
8305
  /* We always require a valid GP.  */
8306
  emit_insn (gen_prologue_ldgp ());
8307
  emit_note (NOTE_INSN_PROLOGUE_END);
8308
 
8309
  /* Find the "this" pointer.  If the function returns a structure,
8310
     the structure return pointer is in $16.  */
8311
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8312
    this_rtx = gen_rtx_REG (Pmode, 17);
8313
  else
8314
    this_rtx = gen_rtx_REG (Pmode, 16);
8315
 
8316
  /* Add DELTA.  When possible we use ldah+lda.  Otherwise load the
8317
     entire constant for the add.  */
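  /* For instance, DELTA == 0x12348000 splits into LO == -0x8000 and
     HI == 0x12350000; HI + LO == DELTA, so the adjustment becomes one
     ldah of HI >> 16 followed by one lda of LO.  */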
8318
  lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8319
  hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8320
  if (hi + lo == delta)
8321
    {
8322
      if (hi)
8323
        emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8324
      if (lo)
8325
        emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8326
    }
8327
  else
8328
    {
8329
      rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8330
                                           delta, -(delta < 0));
8331
      emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8332
    }
8333
 
8334
  /* Add a delta stored in the vtable at VCALL_OFFSET.  */
8335
  if (vcall_offset)
8336
    {
8337
      rtx tmp, tmp2;
8338
 
8339
      tmp = gen_rtx_REG (Pmode, 0);
8340
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8341
 
8342
      lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8343
      hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8344
      if (hi + lo == vcall_offset)
8345
        {
8346
          if (hi)
8347
            emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8348
        }
8349
      else
8350
        {
8351
          tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8352
                                            vcall_offset, -(vcall_offset < 0));
8353
          emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8354
          lo = 0;
8355
        }
8356
      if (lo)
8357
        tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8358
      else
8359
        tmp2 = tmp;
8360
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8361
 
8362
      emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8363
    }
8364
 
8365
  /* Generate a tail call to the target function.  */
8366
  if (! TREE_USED (function))
8367
    {
8368
      assemble_external (function);
8369
      TREE_USED (function) = 1;
8370
    }
8371
  funexp = XEXP (DECL_RTL (function), 0);
8372
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8373
  insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8374
  SIBLING_CALL_P (insn) = 1;
8375
 
8376
  /* Run just enough of rest_of_compilation to get the insns emitted.
8377
     There's not really enough bulk here to make other passes such as
8378
     instruction scheduling worthwhile.  Note that use_thunk calls
8379
     assemble_start_function and assemble_end_function.  */
8380
  insn = get_insns ();
8381
  insn_locators_alloc ();
8382
  shorten_branches (insn);
8383
  final_start_function (insn, file, 1);
8384
  final (insn, file, 1);
8385
  final_end_function ();
8386
}
8387
#endif /* TARGET_ABI_OSF */
8388
 
8389
/* Debugging support.  */
8390
 
8391
#include "gstab.h"
8392
 
8393
/* Count the number of sdb-related labels that are generated (to find block
8394
   start and end boundaries).  */
8395
 
8396
int sdb_label_count = 0;
8397
 
8398
/* Name of the file containing the current function.  */
8399
 
8400
static const char *current_function_file = "";
8401
 
8402
/* Offsets to alpha virtual arg/local debugging pointers.  */
8403
 
8404
long alpha_arg_offset;
8405
long alpha_auto_offset;
8406
 
8407
/* Emit a new filename to a stream.  */
8408
 
8409
void
8410
alpha_output_filename (FILE *stream, const char *name)
8411
{
8412
  static int first_time = TRUE;
8413
 
8414
  if (first_time)
8415
    {
8416
      first_time = FALSE;
8417
      ++num_source_filenames;
8418
      current_function_file = name;
8419
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
8420
      output_quoted_string (stream, name);
8421
      fprintf (stream, "\n");
8422
      if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8423
        fprintf (stream, "\t#@stabs\n");
8424
    }
8425
 
8426
  else if (write_symbols == DBX_DEBUG)
8427
    /* dbxout.c will emit an appropriate .stabs directive.  */
8428
    return;
8429
 
8430
  else if (name != current_function_file
8431
           && strcmp (name, current_function_file) != 0)
8432
    {
8433
      if (inside_function && ! TARGET_GAS)
8434
        fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8435
      else
8436
        {
8437
          ++num_source_filenames;
8438
          current_function_file = name;
8439
          fprintf (stream, "\t.file\t%d ", num_source_filenames);
8440
        }
8441
 
8442
      output_quoted_string (stream, name);
8443
      fprintf (stream, "\n");
8444
    }
8445
}
8446
 
8447
/* Structure to show the current status of registers and memory.  */
8448
 
8449
struct shadow_summary
8450
{
8451
  struct {
8452
    unsigned int i     : 31;    /* Mask of int regs */
8453
    unsigned int fp    : 31;    /* Mask of fp regs */
8454
    unsigned int mem   :  1;    /* mem == imem | fpmem */
8455
  } used, defd;
8456
};
8457
 
8458
/* Summarize the effects of expression X on the machine.  Update SUM, a pointer
8459
   to the summary structure.  SET is nonzero if the insn is setting the
8460
   object, otherwise zero.  */
8461
 
8462
static void
8463
summarize_insn (rtx x, struct shadow_summary *sum, int set)
8464
{
8465
  const char *format_ptr;
8466
  int i, j;
8467
 
8468
  if (x == 0)
8469
    return;
8470
 
8471
  switch (GET_CODE (x))
8472
    {
8473
      /* ??? Note that this case would be incorrect if the Alpha had a
8474
         ZERO_EXTRACT in SET_DEST.  */
8475
    case SET:
8476
      summarize_insn (SET_SRC (x), sum, 0);
8477
      summarize_insn (SET_DEST (x), sum, 1);
8478
      break;
8479
 
8480
    case CLOBBER:
8481
      summarize_insn (XEXP (x, 0), sum, 1);
8482
      break;
8483
 
8484
    case USE:
8485
      summarize_insn (XEXP (x, 0), sum, 0);
8486
      break;
8487
 
8488
    case ASM_OPERANDS:
8489
      for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8490
        summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8491
      break;
8492
 
8493
    case PARALLEL:
8494
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8495
        summarize_insn (XVECEXP (x, 0, i), sum, 0);
8496
      break;
8497
 
8498
    case SUBREG:
8499
      summarize_insn (SUBREG_REG (x), sum, 0);
8500
      break;
8501
 
8502
    case REG:
8503
      {
8504
        int regno = REGNO (x);
8505
        unsigned long mask = ((unsigned long) 1) << (regno % 32);
8506
 
8507
        if (regno == 31 || regno == 63)
8508
          break;
8509
 
8510
        if (set)
8511
          {
8512
            if (regno < 32)
8513
              sum->defd.i |= mask;
8514
            else
8515
              sum->defd.fp |= mask;
8516
          }
8517
        else
8518
          {
8519
            if (regno < 32)
8520
              sum->used.i  |= mask;
8521
            else
8522
              sum->used.fp |= mask;
8523
          }
8524
        }
8525
      break;
8526
 
8527
    case MEM:
8528
      if (set)
8529
        sum->defd.mem = 1;
8530
      else
8531
        sum->used.mem = 1;
8532
 
8533
      /* Find the regs used in memory address computation: */
8534
      summarize_insn (XEXP (x, 0), sum, 0);
8535
      break;
8536
 
8537
    case CONST_INT:   case CONST_DOUBLE:
8538
    case SYMBOL_REF:  case LABEL_REF:     case CONST:
8539
    case SCRATCH:     case ASM_INPUT:
8540
      break;
8541
 
8542
      /* Handle common unary and binary ops for efficiency.  */
8543
    case COMPARE:  case PLUS:    case MINUS:   case MULT:      case DIV:
8544
    case MOD:      case UDIV:    case UMOD:    case AND:       case IOR:
8545
    case XOR:      case ASHIFT:  case ROTATE:  case ASHIFTRT:  case LSHIFTRT:
8546
    case ROTATERT: case SMIN:    case SMAX:    case UMIN:      case UMAX:
8547
    case NE:       case EQ:      case GE:      case GT:        case LE:
8548
    case LT:       case GEU:     case GTU:     case LEU:       case LTU:
8549
      summarize_insn (XEXP (x, 0), sum, 0);
8550
      summarize_insn (XEXP (x, 1), sum, 0);
8551
      break;
8552
 
8553
    case NEG:  case NOT:  case SIGN_EXTEND:  case ZERO_EXTEND:
8554
    case TRUNCATE:  case FLOAT_EXTEND:  case FLOAT_TRUNCATE:  case FLOAT:
8555
    case FIX:  case UNSIGNED_FLOAT:  case UNSIGNED_FIX:  case ABS:
8556
    case SQRT:  case FFS:
8557
      summarize_insn (XEXP (x, 0), sum, 0);
8558
      break;
8559
 
8560
    default:
8561
      format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8562
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8563
        switch (format_ptr[i])
8564
          {
8565
          case 'e':
8566
            summarize_insn (XEXP (x, i), sum, 0);
8567
            break;
8568
 
8569
          case 'E':
8570
            for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8571
              summarize_insn (XVECEXP (x, i, j), sum, 0);
8572
            break;
8573
 
8574
          case 'i':
8575
            break;
8576
 
8577
          default:
8578
            gcc_unreachable ();
8579
          }
8580
    }
8581
}
8582
 
8583
/* Ensure a sufficient number of `trapb' insns are in the code when
8584
   the user requests code with a trap precision of functions or
8585
   instructions.
8586
 
8587
   In naive mode, when the user requests a trap-precision of
8588
   "instruction", a trapb is needed after every instruction that may
8589
   generate a trap.  This ensures that the code is resumption safe but
8590
   it is also slow.
8591
 
8592
   When optimizations are turned on, we delay issuing a trapb as long
8593
   as possible.  In this context, a trap shadow is the sequence of
8594
   instructions that starts with a (potentially) trap generating
8595
   instruction and extends to the next trapb or call_pal instruction
8596
   (but GCC never generates call_pal by itself).  We can delay (and
8597
   therefore sometimes omit) a trapb subject to the following
8598
   conditions:
8599
 
8600
   (a) On entry to the trap shadow, if any Alpha register or memory
8601
   location contains a value that is used as an operand value by some
8602
   instruction in the trap shadow (live on entry), then no instruction
8603
   in the trap shadow may modify the register or memory location.
8604
 
8605
   (b) Within the trap shadow, the computation of the base register
8606
   for a memory load or store instruction may not involve using the
8607
   result of an instruction that might generate an UNPREDICTABLE
8608
   result.
8609
 
8610
   (c) Within the trap shadow, no register may be used more than once
8611
   as a destination register.  (This is to make life easier for the
8612
   trap-handler.)
8613
 
8614
   (d) The trap shadow may not include any branch instructions.  */
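/* As an illustration of rule (a), consider a sequence such as

        addt $f1,$f2,$f3        # may trap; opens the shadow
        stq  $1,0($2)           # no conflict; absorbed into the shadow
        addt $f3,$f4,$f1        # writes $f1, which was live on entry

   where the third instruction would clobber an operand of the first, so a
   trapb is emitted before it and a new shadow begins.  */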
8615
 
8616
static void
8617
alpha_handle_trap_shadows (void)
8618
{
8619
  struct shadow_summary shadow;
8620
  int trap_pending, exception_nesting;
8621
  rtx i, n;
8622
 
8623
  trap_pending = 0;
8624
  exception_nesting = 0;
8625
  shadow.used.i = 0;
8626
  shadow.used.fp = 0;
8627
  shadow.used.mem = 0;
8628
  shadow.defd = shadow.used;
8629
 
8630
  for (i = get_insns (); i ; i = NEXT_INSN (i))
8631
    {
8632
      if (NOTE_P (i))
8633
        {
8634
          switch (NOTE_KIND (i))
8635
            {
8636
            case NOTE_INSN_EH_REGION_BEG:
8637
              exception_nesting++;
8638
              if (trap_pending)
8639
                goto close_shadow;
8640
              break;
8641
 
8642
            case NOTE_INSN_EH_REGION_END:
8643
              exception_nesting--;
8644
              if (trap_pending)
8645
                goto close_shadow;
8646
              break;
8647
 
8648
            case NOTE_INSN_EPILOGUE_BEG:
8649
              if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8650
                goto close_shadow;
8651
              break;
8652
            }
8653
        }
8654
      else if (trap_pending)
8655
        {
8656
          if (alpha_tp == ALPHA_TP_FUNC)
8657
            {
8658
              if (JUMP_P (i)
8659
                  && GET_CODE (PATTERN (i)) == RETURN)
8660
                goto close_shadow;
8661
            }
8662
          else if (alpha_tp == ALPHA_TP_INSN)
8663
            {
8664
              if (optimize > 0)
8665
                {
8666
                  struct shadow_summary sum;
8667
 
8668
                  sum.used.i = 0;
8669
                  sum.used.fp = 0;
8670
                  sum.used.mem = 0;
8671
                  sum.defd = sum.used;
8672
 
8673
                  switch (GET_CODE (i))
8674
                    {
8675
                    case INSN:
8676
                      /* Annoyingly, get_attr_trap will die on these.  */
8677
                      if (GET_CODE (PATTERN (i)) == USE
8678
                          || GET_CODE (PATTERN (i)) == CLOBBER)
8679
                        break;
8680
 
8681
                      summarize_insn (PATTERN (i), &sum, 0);
8682
 
8683
                      if ((sum.defd.i & shadow.defd.i)
8684
                          || (sum.defd.fp & shadow.defd.fp))
8685
                        {
8686
                          /* (c) would be violated */
8687
                          goto close_shadow;
8688
                        }
8689
 
8690
                      /* Combine shadow with summary of current insn: */
8691
                      shadow.used.i   |= sum.used.i;
8692
                      shadow.used.fp  |= sum.used.fp;
8693
                      shadow.used.mem |= sum.used.mem;
8694
                      shadow.defd.i   |= sum.defd.i;
8695
                      shadow.defd.fp  |= sum.defd.fp;
8696
                      shadow.defd.mem |= sum.defd.mem;
8697
 
8698
                      if ((sum.defd.i & shadow.used.i)
8699
                          || (sum.defd.fp & shadow.used.fp)
8700
                          || (sum.defd.mem & shadow.used.mem))
8701
                        {
8702
                          /* (a) would be violated (also takes care of (b))  */
8703
                          gcc_assert (get_attr_trap (i) != TRAP_YES
8704
                                      || (!(sum.defd.i & sum.used.i)
8705
                                          && !(sum.defd.fp & sum.used.fp)));
8706
 
8707
                          goto close_shadow;
8708
                        }
8709
                      break;
8710
 
8711
                    case JUMP_INSN:
8712
                    case CALL_INSN:
8713
                    case CODE_LABEL:
8714
                      goto close_shadow;
8715
 
8716
                    default:
8717
                      gcc_unreachable ();
8718
                    }
8719
                }
8720
              else
8721
                {
8722
                close_shadow:
8723
                  n = emit_insn_before (gen_trapb (), i);
8724
                  PUT_MODE (n, TImode);
8725
                  PUT_MODE (i, TImode);
8726
                  trap_pending = 0;
8727
                  shadow.used.i = 0;
8728
                  shadow.used.fp = 0;
8729
                  shadow.used.mem = 0;
8730
                  shadow.defd = shadow.used;
8731
                }
8732
            }
8733
        }
8734
 
8735
      if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8736
          && NONJUMP_INSN_P (i)
8737
          && GET_CODE (PATTERN (i)) != USE
8738
          && GET_CODE (PATTERN (i)) != CLOBBER
8739
          && get_attr_trap (i) == TRAP_YES)
8740
        {
8741
          if (optimize && !trap_pending)
8742
            summarize_insn (PATTERN (i), &shadow, 0);
8743
          trap_pending = 1;
8744
        }
8745
    }
8746
}
8747
 
8748
/* Alpha can only issue instruction groups simultaneously if they are
8749
   suitably aligned.  This is very processor-specific.  */
8750
/* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8751
   that are marked "fake".  These instructions do not exist on that target,
8752
   but it is possible to see these insns with deranged combinations of
8753
   command-line options, such as "-mtune=ev4 -mmax".  Instead of aborting,
8754
   choose a result at random.  */
8755
 
8756
enum alphaev4_pipe {
8757
  EV4_STOP = 0,
8758
  EV4_IB0 = 1,
8759
  EV4_IB1 = 2,
8760
  EV4_IBX = 4
8761
};
8762
 
8763
enum alphaev5_pipe {
8764
  EV5_STOP = 0,
8765
  EV5_NONE = 1,
8766
  EV5_E01 = 2,
8767
  EV5_E0 = 4,
8768
  EV5_E1 = 8,
8769
  EV5_FAM = 16,
8770
  EV5_FA = 32,
8771
  EV5_FM = 64
8772
};
8773
 
8774
static enum alphaev4_pipe
8775
alphaev4_insn_pipe (rtx insn)
8776
{
8777
  if (recog_memoized (insn) < 0)
8778
    return EV4_STOP;
8779
  if (get_attr_length (insn) != 4)
8780
    return EV4_STOP;
8781
 
8782
  switch (get_attr_type (insn))
8783
    {
8784
    case TYPE_ILD:
8785
    case TYPE_LDSYM:
8786
    case TYPE_FLD:
8787
    case TYPE_LD_L:
8788
      return EV4_IBX;
8789
 
8790
    case TYPE_IADD:
8791
    case TYPE_ILOG:
8792
    case TYPE_ICMOV:
8793
    case TYPE_ICMP:
8794
    case TYPE_FST:
8795
    case TYPE_SHIFT:
8796
    case TYPE_IMUL:
8797
    case TYPE_FBR:
8798
    case TYPE_MVI:              /* fake */
8799
      return EV4_IB0;
8800
 
8801
    case TYPE_IST:
8802
    case TYPE_MISC:
8803
    case TYPE_IBR:
8804
    case TYPE_JSR:
8805
    case TYPE_CALLPAL:
8806
    case TYPE_FCPYS:
8807
    case TYPE_FCMOV:
8808
    case TYPE_FADD:
8809
    case TYPE_FDIV:
8810
    case TYPE_FMUL:
8811
    case TYPE_ST_C:
8812
    case TYPE_MB:
8813
    case TYPE_FSQRT:            /* fake */
8814
    case TYPE_FTOI:             /* fake */
8815
    case TYPE_ITOF:             /* fake */
8816
      return EV4_IB1;
8817
 
8818
    default:
8819
      gcc_unreachable ();
8820
    }
8821
}
8822
 
8823
static enum alphaev5_pipe
8824
alphaev5_insn_pipe (rtx insn)
8825
{
8826
  if (recog_memoized (insn) < 0)
8827
    return EV5_STOP;
8828
  if (get_attr_length (insn) != 4)
8829
    return EV5_STOP;
8830
 
8831
  switch (get_attr_type (insn))
8832
    {
8833
    case TYPE_ILD:
8834
    case TYPE_FLD:
8835
    case TYPE_LDSYM:
8836
    case TYPE_IADD:
8837
    case TYPE_ILOG:
8838
    case TYPE_ICMOV:
8839
    case TYPE_ICMP:
8840
      return EV5_E01;
8841
 
8842
    case TYPE_IST:
8843
    case TYPE_FST:
8844
    case TYPE_SHIFT:
8845
    case TYPE_IMUL:
8846
    case TYPE_MISC:
8847
    case TYPE_MVI:
8848
    case TYPE_LD_L:
8849
    case TYPE_ST_C:
8850
    case TYPE_MB:
8851
    case TYPE_FTOI:             /* fake */
8852
    case TYPE_ITOF:             /* fake */
8853
      return EV5_E0;
8854
 
8855
    case TYPE_IBR:
8856
    case TYPE_JSR:
8857
    case TYPE_CALLPAL:
8858
      return EV5_E1;
8859
 
8860
    case TYPE_FCPYS:
8861
      return EV5_FAM;
8862
 
8863
    case TYPE_FBR:
8864
    case TYPE_FCMOV:
8865
    case TYPE_FADD:
8866
    case TYPE_FDIV:
8867
    case TYPE_FSQRT:            /* fake */
8868
      return EV5_FA;
8869
 
8870
    case TYPE_FMUL:
8871
      return EV5_FM;
8872
 
8873
    default:
8874
      gcc_unreachable ();
8875
    }
8876
}
8877
 
8878
/* IN_USE is a mask of the slots currently filled within the insn group.
8879
   The mask bits come from alphaev4_pipe above.  If EV4_IBX is set, then
8880
   the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8881
 
8882
   LEN is, of course, the length of the group in bytes.  */
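/* The return value is the point at which the caller should resume
   scanning: roughly, the first insn that was not absorbed into the group
   just examined, or NULL at the end of the function.  */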
8883
 
8884
static rtx
8885
alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8886
{
8887
  int len, in_use;
8888
 
8889
  len = in_use = 0;
8890
 
8891
  if (! INSN_P (insn)
8892
      || GET_CODE (PATTERN (insn)) == CLOBBER
8893
      || GET_CODE (PATTERN (insn)) == USE)
8894
    goto next_and_done;
8895
 
8896
  while (1)
8897
    {
8898
      enum alphaev4_pipe pipe;
8899
 
8900
      pipe = alphaev4_insn_pipe (insn);
8901
      switch (pipe)
8902
        {
8903
        case EV4_STOP:
8904
          /* Force complex instructions to start new groups.  */
8905
          if (in_use)
8906
            goto done;
8907
 
8908
          /* If this is a completely unrecognized insn, it's an asm.
8909
             We don't know how long it is, so record length as -1 to
8910
             signal a needed realignment.  */
8911
          if (recog_memoized (insn) < 0)
8912
            len = -1;
8913
          else
8914
            len = get_attr_length (insn);
8915
          goto next_and_done;
8916
 
8917
        case EV4_IBX:
8918
          if (in_use & EV4_IB0)
8919
            {
8920
              if (in_use & EV4_IB1)
8921
                goto done;
8922
              in_use |= EV4_IB1;
8923
            }
8924
          else
8925
            in_use |= EV4_IB0 | EV4_IBX;
8926
          break;
8927
 
8928
        case EV4_IB0:
8929
          if (in_use & EV4_IB0)
8930
            {
8931
              if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8932
                goto done;
8933
              in_use |= EV4_IB1;
8934
            }
8935
          in_use |= EV4_IB0;
8936
          break;
8937
 
8938
        case EV4_IB1:
8939
          if (in_use & EV4_IB1)
8940
            goto done;
8941
          in_use |= EV4_IB1;
8942
          break;
8943
 
8944
        default:
8945
          gcc_unreachable ();
8946
        }
8947
      len += 4;
8948
 
8949
      /* Haifa doesn't do well scheduling branches.  */
8950
      if (JUMP_P (insn))
8951
        goto next_and_done;
8952
 
8953
    next:
8954
      insn = next_nonnote_insn (insn);
8955
 
8956
      if (!insn || ! INSN_P (insn))
8957
        goto done;
8958
 
8959
      /* Let Haifa tell us where it thinks insn group boundaries are.  */
8960
      if (GET_MODE (insn) == TImode)
8961
        goto done;
8962
 
8963
      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8964
        goto next;
8965
    }
8966
 
8967
 next_and_done:
8968
  insn = next_nonnote_insn (insn);
8969
 
8970
 done:
8971
  *plen = len;
8972
  *pin_use = in_use;
8973
  return insn;
8974
}
8975
 
8976
/* IN_USE is a mask of the slots currently filled within the insn group.
8977
   The mask bits come from alphaev5_pipe above.  If EV5_E01 is set, then
8978
   the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8979
 
8980
   LEN is, of course, the length of the group in bytes.  */
8981
 
8982
static rtx
8983
alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8984
{
8985
  int len, in_use;
8986
 
8987
  len = in_use = 0;
8988
 
8989
  if (! INSN_P (insn)
8990
      || GET_CODE (PATTERN (insn)) == CLOBBER
8991
      || GET_CODE (PATTERN (insn)) == USE)
8992
    goto next_and_done;
8993
 
8994
  while (1)
8995
    {
8996
      enum alphaev5_pipe pipe;
8997
 
8998
      pipe = alphaev5_insn_pipe (insn);
8999
      switch (pipe)
9000
        {
9001
        case EV5_STOP:
9002
          /* Force complex instructions to start new groups.  */
9003
          if (in_use)
9004
            goto done;
9005
 
9006
          /* If this is a completely unrecognized insn, it's an asm.
9007
             We don't know how long it is, so record length as -1 to
9008
             signal a needed realignment.  */
9009
          if (recog_memoized (insn) < 0)
9010
            len = -1;
9011
          else
9012
            len = get_attr_length (insn);
9013
          goto next_and_done;
9014
 
9015
        /* ??? Most of the places below, we would like to assert never
9016
           happen, as it would indicate an error either in Haifa, or
9017
           in the scheduling description.  Unfortunately, Haifa never
9018
           schedules the last instruction of the BB, so we don't have
9019
           an accurate TI bit to go off.  */
9020
        case EV5_E01:
9021
          if (in_use & EV5_E0)
9022
            {
9023
              if (in_use & EV5_E1)
9024
                goto done;
9025
              in_use |= EV5_E1;
9026
            }
9027
          else
9028
            in_use |= EV5_E0 | EV5_E01;
9029
          break;
9030
 
9031
        case EV5_E0:
9032
          if (in_use & EV5_E0)
9033
            {
9034
              if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9035
                goto done;
9036
              in_use |= EV5_E1;
9037
            }
9038
          in_use |= EV5_E0;
9039
          break;
9040
 
9041
        case EV5_E1:
9042
          if (in_use & EV5_E1)
9043
            goto done;
9044
          in_use |= EV5_E1;
9045
          break;
9046
 
9047
        case EV5_FAM:
9048
          if (in_use & EV5_FA)
9049
            {
9050
              if (in_use & EV5_FM)
9051
                goto done;
9052
              in_use |= EV5_FM;
9053
            }
9054
          else
9055
            in_use |= EV5_FA | EV5_FAM;
9056
          break;
9057
 
9058
        case EV5_FA:
9059
          if (in_use & EV5_FA)
9060
            goto done;
9061
          in_use |= EV5_FA;
9062
          break;
9063
 
9064
        case EV5_FM:
9065
          if (in_use & EV5_FM)
9066
            goto done;
9067
          in_use |= EV5_FM;
9068
          break;
9069
 
9070
        case EV5_NONE:
9071
          break;
9072
 
9073
        default:
9074
          gcc_unreachable ();
9075
        }
9076
      len += 4;
9077
 
9078
      /* Haifa doesn't do well scheduling branches.  */
9079
      /* ??? If this is predicted not-taken, slotting continues, except
9080
         that no more IBR, FBR, or JSR insns may be slotted.  */
9081
      if (JUMP_P (insn))
9082
        goto next_and_done;
9083
 
9084
    next:
9085
      insn = next_nonnote_insn (insn);
9086
 
9087
      if (!insn || ! INSN_P (insn))
9088
        goto done;
9089
 
9090
      /* Let Haifa tell us where it thinks insn group boundaries are.  */
9091
      if (GET_MODE (insn) == TImode)
9092
        goto done;
9093
 
9094
      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9095
        goto next;
9096
    }
9097
 
9098
 next_and_done:
9099
  insn = next_nonnote_insn (insn);
9100
 
9101
 done:
9102
  *plen = len;
9103
  *pin_use = in_use;
9104
  return insn;
9105
}
9106
 
9107
static rtx
9108
alphaev4_next_nop (int *pin_use)
9109
{
9110
  int in_use = *pin_use;
9111
  rtx nop;
9112
 
9113
  if (!(in_use & EV4_IB0))
9114
    {
9115
      in_use |= EV4_IB0;
9116
      nop = gen_nop ();
9117
    }
9118
  else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9119
    {
9120
      in_use |= EV4_IB1;
9121
      nop = gen_nop ();
9122
    }
9123
  else if (TARGET_FP && !(in_use & EV4_IB1))
9124
    {
9125
      in_use |= EV4_IB1;
9126
      nop = gen_fnop ();
9127
    }
9128
  else
9129
    nop = gen_unop ();
9130
 
9131
  *pin_use = in_use;
9132
  return nop;
9133
}
9134
 
9135
static rtx
9136
alphaev5_next_nop (int *pin_use)
9137
{
9138
  int in_use = *pin_use;
9139
  rtx nop;
9140
 
9141
  if (!(in_use & EV5_E1))
9142
    {
9143
      in_use |= EV5_E1;
9144
      nop = gen_nop ();
9145
    }
9146
  else if (TARGET_FP && !(in_use & EV5_FA))
9147
    {
9148
      in_use |= EV5_FA;
9149
      nop = gen_fnop ();
9150
    }
9151
  else if (TARGET_FP && !(in_use & EV5_FM))
9152
    {
9153
      in_use |= EV5_FM;
9154
      nop = gen_fnop ();
9155
    }
9156
  else
9157
    nop = gen_unop ();
9158
 
9159
  *pin_use = in_use;
9160
  return nop;
9161
}
9162
 
9163
/* The instruction group alignment main loop.  */
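/* MAX_ALIGN is the largest useful group alignment for the tuning target
   (8 bytes for EV4, 16 for EV5, as passed from alpha_reorg).  NEXT_GROUP
   scans one issue group and reports its length and slot usage; NEXT_NOP
   returns a filler insn for a slot the previous group left unused.  */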
9164
 
9165
static void
9166
alpha_align_insns (unsigned int max_align,
9167
                   rtx (*next_group) (rtx, int *, int *),
9168
                   rtx (*next_nop) (int *))
9169
{
9170
  /* ALIGN is the known alignment for the insn group.  */
9171
  unsigned int align;
9172
  /* OFS is the offset of the current insn in the insn group.  */
9173
  int ofs;
9174
  int prev_in_use, in_use, len, ldgp;
9175
  rtx i, next;
9176
 
9177
  /* Let shorten_branches take care of assigning alignments to code labels.  */
9178
  shorten_branches (get_insns ());
9179
 
9180
  if (align_functions < 4)
9181
    align = 4;
9182
  else if ((unsigned int) align_functions < max_align)
9183
    align = align_functions;
9184
  else
9185
    align = max_align;
9186
 
9187
  ofs = prev_in_use = 0;
9188
  i = get_insns ();
9189
  if (NOTE_P (i))
9190
    i = next_nonnote_insn (i);
9191
 
9192
  ldgp = alpha_function_needs_gp ? 8 : 0;
9193
 
9194
  while (i)
9195
    {
9196
      next = (*next_group) (i, &in_use, &len);
9197
 
9198
      /* When we see a label, resync alignment etc.  */
9199
      if (LABEL_P (i))
9200
        {
9201
          unsigned int new_align = 1 << label_to_alignment (i);
9202
 
9203
          if (new_align >= align)
9204
            {
9205
              align = new_align < max_align ? new_align : max_align;
9206
              ofs = 0;
9207
            }
9208
 
9209
          else if (ofs & (new_align-1))
9210
            ofs = (ofs | (new_align-1)) + 1;
9211
          gcc_assert (!len);
9212
        }
9213
 
9214
      /* Handle complex instructions specially.  */
9215
      else if (in_use == 0)
9216
        {
9217
          /* Asms will have length < 0.  This is a signal that we have
9218
             lost alignment knowledge.  Assume, however, that the asm
9219
             will not mis-align instructions.  */
9220
          if (len < 0)
9221
            {
9222
              ofs = 0;
9223
              align = 4;
9224
              len = 0;
9225
            }
9226
        }
9227
 
9228
      /* If the known alignment is smaller than the recognized insn group,
9229
         realign the output.  */
9230
      else if ((int) align < len)
9231
        {
9232
          unsigned int new_log_align = len > 8 ? 4 : 3;
9233
          rtx prev, where;
9234
 
9235
          where = prev = prev_nonnote_insn (i);
9236
          if (!where || !LABEL_P (where))
9237
            where = i;
9238
 
9239
          /* Can't realign between a call and its gp reload.  */
9240
          if (! (TARGET_EXPLICIT_RELOCS
9241
                 && prev && CALL_P (prev)))
9242
            {
9243
              emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9244
              align = 1 << new_log_align;
9245
              ofs = 0;
9246
            }
9247
        }
9248
 
9249
      /* We may not insert padding inside the initial ldgp sequence.  */
9250
      else if (ldgp > 0)
9251
        ldgp -= len;
9252
 
9253
      /* If the group won't fit in the same aligned block as the previous one,
9254
         we need to add padding to keep the group together.  Rather
9255
         than simply leaving the insn filling to the assembler, we
9256
         can make use of the knowledge of what sorts of instructions
9257
         were issued in the previous group to make sure that all of
9258
         the added nops are really free.  */
9259
      else if (ofs + len > (int) align)
9260
        {
9261
          int nop_count = (align - ofs) / 4;
9262
          rtx where;
9263
 
9264
          /* Insert nops before labels, branches, and calls to truly merge
9265
             the execution of the nops with the previous instruction group.  */
9266
          where = prev_nonnote_insn (i);
9267
          if (where)
9268
            {
9269
              if (LABEL_P (where))
9270
                {
9271
                  rtx where2 = prev_nonnote_insn (where);
9272
                  if (where2 && JUMP_P (where2))
9273
                    where = where2;
9274
                }
9275
              else if (NONJUMP_INSN_P (where))
9276
                where = i;
9277
            }
9278
          else
9279
            where = i;
9280
 
9281
          do
9282
            emit_insn_before ((*next_nop)(&prev_in_use), where);
9283
          while (--nop_count);
9284
          ofs = 0;
9285
        }
9286
 
9287
      ofs = (ofs + len) & (align - 1);
9288
      prev_in_use = in_use;
9289
      i = next;
9290
    }
9291
}
9292
 
9293
/* Insert an unop between a noreturn function call and the following GP load.  */
9294
 
9295
static void
9296
alpha_pad_noreturn (void)
9297
{
9298
  rtx insn, next;
9299
 
9300
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9301
    {
9302
      if (! (CALL_P (insn)
9303
             && find_reg_note (insn, REG_NORETURN, NULL_RTX)))
9304
        continue;
9305
 
9306
      /* Make sure we do not split a call and its corresponding
9307
         CALL_ARG_LOCATION note.  */
9308
      if (CALL_P (insn))
9309
        {
9310
          next = NEXT_INSN (insn);
9311
          if (next && NOTE_P (next)
9312
              && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
9313
            insn = next;
9314
        }
9315
 
9316
      next = next_active_insn (insn);
9317
 
9318
      if (next)
9319
        {
9320
          rtx pat = PATTERN (next);
9321
 
9322
          if (GET_CODE (pat) == SET
9323
              && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9324
              && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9325
            emit_insn_after (gen_unop (), insn);
9326
        }
9327
    }
9328
}
9329
 
9330
/* Machine dependent reorg pass.  */
9331
 
9332
static void
9333
alpha_reorg (void)
9334
{
9335
  /* Workaround for a linker error that triggers when an
9336
     exception handler immediately follows a noreturn function.
9337
 
9338
     The instruction stream from an object file:
9339
 
9340
  54:   00 40 5b 6b     jsr     ra,(t12),58 <__func+0x58>
9341
  58:   00 00 ba 27     ldah    gp,0(ra)
9342
  5c:   00 00 bd 23     lda     gp,0(gp)
9343
  60:   00 00 7d a7     ldq     t12,0(gp)
9344
  64:   00 40 5b 6b     jsr     ra,(t12),68 <__func+0x68>
9345
 
9346
     was converted in the final link pass to:
9347
 
9348
   fdb24:       a0 03 40 d3     bsr     ra,fe9a8 <_called_func+0x8>
9349
   fdb28:       00 00 fe 2f     unop
9350
   fdb2c:       00 00 fe 2f     unop
9351
   fdb30:       30 82 7d a7     ldq     t12,-32208(gp)
9352
   fdb34:       00 40 5b 6b     jsr     ra,(t12),fdb38 <__func+0x68>
9353
 
9354
     GP load instructions were wrongly cleared by the linker relaxation
9355
     pass.  This workaround prevents removal of GP loads by inserting
9356
     an unop instruction between a noreturn function call and
9357
     the exception handler prologue.  */
9358
 
9359
  if (current_function_has_exception_handlers ())
9360
    alpha_pad_noreturn ();
9361
 
9362
  if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9363
    alpha_handle_trap_shadows ();
9364
 
9365
  /* Due to the number of extra trapb insns, don't bother fixing up
9366
     alignment when trap precision is instruction.  Moreover, we can
9367
     only do our job when sched2 is run.  */
9368
  if (optimize && !optimize_size
9369
      && alpha_tp != ALPHA_TP_INSN
9370
      && flag_schedule_insns_after_reload)
9371
    {
9372
      if (alpha_tune == PROCESSOR_EV4)
9373
        alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9374
      else if (alpha_tune == PROCESSOR_EV5)
9375
        alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9376
    }
9377
}
9378
 
9379
#ifdef HAVE_STAMP_H
9380
#include <stamp.h>
9381
#endif
9382
 
9383
static void
9384
alpha_file_start (void)
9385
{
9386
#ifdef OBJECT_FORMAT_ELF
9387
  /* If emitting dwarf2 debug information, we cannot generate a .file
9388
     directive to start the file, as it will conflict with dwarf2out
9389
     file numbers.  So it's only useful when emitting mdebug output.  */
9390
  targetm.asm_file_start_file_directive = (write_symbols == DBX_DEBUG);
9391
#endif
9392
 
9393
  default_file_start ();
9394
#ifdef MS_STAMP
9395
  fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9396
#endif
9397
 
9398
  fputs ("\t.set noreorder\n", asm_out_file);
9399
  fputs ("\t.set volatile\n", asm_out_file);
9400
  if (TARGET_ABI_OSF)
9401
    fputs ("\t.set noat\n", asm_out_file);
9402
  if (TARGET_EXPLICIT_RELOCS)
9403
    fputs ("\t.set nomacro\n", asm_out_file);
9404
  if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9405
    {
9406
      const char *arch;
9407
 
9408
      if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9409
        arch = "ev6";
9410
      else if (TARGET_MAX)
9411
        arch = "pca56";
9412
      else if (TARGET_BWX)
9413
        arch = "ev56";
9414
      else if (alpha_cpu == PROCESSOR_EV5)
9415
        arch = "ev5";
9416
      else
9417
        arch = "ev4";
9418
 
9419
      fprintf (asm_out_file, "\t.arch %s\n", arch);
9420
    }
9421
}
9422
 
9423
#ifdef OBJECT_FORMAT_ELF
9424
/* Since we don't have a .dynbss section, we should not allow global
9425
   relocations in the .rodata section.  */
9426
 
9427
static int
9428
alpha_elf_reloc_rw_mask (void)
9429
{
9430
  return flag_pic ? 3 : 2;
9431
}
9432
 
9433
/* Return a section for X.  The only special thing we do here is to
9434
   honor small data.  */
9435
 
9436
static section *
9437
alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9438
                              unsigned HOST_WIDE_INT align)
9439
{
9440
  if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9441
    /* ??? Consider using mergeable sdata sections.  */
9442
    return sdata_section;
9443
  else
9444
    return default_elf_select_rtx_section (mode, x, align);
9445
}
9446
 
9447
static unsigned int
9448
alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9449
{
9450
  unsigned int flags = 0;
9451
 
9452
  if (strcmp (name, ".sdata") == 0
9453
      || strncmp (name, ".sdata.", 7) == 0
9454
      || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9455
      || strcmp (name, ".sbss") == 0
9456
      || strncmp (name, ".sbss.", 6) == 0
9457
      || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9458
    flags = SECTION_SMALL;
9459
 
9460
  flags |= default_section_type_flags (decl, name, reloc);
9461
  return flags;
9462
}
9463
#endif /* OBJECT_FORMAT_ELF */
9464
 
9465
/* Structure to collect function names for final output in link section.  */
9466
/* Note that items marked with GTY can't be ifdef'ed out.  */
9467
 
9468
enum reloc_kind
9469
{
9470
  KIND_LINKAGE,
9471
  KIND_CODEADDR
9472
};
9473
 
9474
struct GTY(()) alpha_links
9475
{
9476
  rtx func;
9477
  rtx linkage;
9478
  enum reloc_kind rkind;
9479
};
9480
 
9481
#if TARGET_ABI_OPEN_VMS
9482
 
9483
/* Return the VMS argument type corresponding to MODE.  */
9484
 
9485
enum avms_arg_type
9486
alpha_arg_type (enum machine_mode mode)
9487
{
9488
  switch (mode)
9489
    {
9490
    case SFmode:
9491
      return TARGET_FLOAT_VAX ? FF : FS;
9492
    case DFmode:
9493
      return TARGET_FLOAT_VAX ? FD : FT;
9494
    default:
9495
      return I64;
9496
    }
9497
}
9498
 
9499
/* Return an rtx for an integer representing the VMS Argument Information
9500
   register value.  */
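/* Concretely, the low bits of that value hold the argument count, and
   each of the first six arguments I contributes its 3-bit type code at
   bit position 3 * I + 8.  */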
9501
 
9502
rtx
9503
alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9504
{
9505
  unsigned HOST_WIDE_INT regval = cum.num_args;
9506
  int i;
9507
 
9508
  for (i = 0; i < 6; i++)
9509
    regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9510
 
9511
  return GEN_INT (regval);
9512
}
9513
 
9514
 
9515
/* Return a SYMBOL_REF representing the reference to the .linkage entry
9516
   of function FUNC built for calls made from CFUNDECL.  LFLAG is 1 if
9517
   this is the reference to the linkage pointer value, 0 if this is the
9518
   reference to the function entry value.  RFLAG is 1 if this is a reduced
9519
   reference (code address only), 0 if this is a full reference.  */
9520
 
9521
rtx
9522
alpha_use_linkage (rtx func, bool lflag, bool rflag)
9523
{
9524
  struct alpha_links *al = NULL;
9525
  const char *name = XSTR (func, 0);
9526
 
9527
  if (cfun->machine->links)
9528
    {
9529
      splay_tree_node lnode;
9530
 
9531
      /* Is this name already defined?  */
9532
      lnode = splay_tree_lookup (cfun->machine->links, (splay_tree_key) name);
9533
      if (lnode)
9534
        al = (struct alpha_links *) lnode->value;
9535
    }
9536
  else
9537
    cfun->machine->links = splay_tree_new_ggc
9538
      ((splay_tree_compare_fn) strcmp,
9539
       ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9540
       ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9541
 
9542
  if (al == NULL)
9543
    {
9544
      size_t buf_len;
9545
      char *linksym;
9546
      tree id;
9547
 
9548
      if (name[0] == '*')
9549
        name++;
9550
 
9551
      /* Follow transparent alias, as this is used for CRTL translations.  */
9552
      id = maybe_get_identifier (name);
9553
      if (id)
9554
        {
9555
          while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9556
            id = TREE_CHAIN (id);
9557
          name = IDENTIFIER_POINTER (id);
9558
        }
9559
 
9560
      buf_len = strlen (name) + 8 + 9;
9561
      linksym = (char *) alloca (buf_len);
9562
      snprintf (linksym, buf_len, "$%d..%s..lk", cfun->funcdef_no, name);
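      /* E.g. for funcdef_no 4 and a symbol FOO this yields "$4..FOO..lk".  */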
9563
 
9564
      al = ggc_alloc_alpha_links ();
9565
      al->func = func;
9566
      al->linkage = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (linksym));
9567
 
9568
      splay_tree_insert (cfun->machine->links,
9569
                         (splay_tree_key) ggc_strdup (name),
9570
                         (splay_tree_value) al);
9571
    }
9572
 
9573
  al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE;
9574
 
9575
  if (lflag)
9576
    return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9577
  else
9578
    return al->linkage;
9579
}
9580
 
9581
static int
9582
alpha_write_one_linkage (splay_tree_node node, void *data)
9583
{
9584
  const char *const name = (const char *) node->key;
9585
  struct alpha_links *link = (struct alpha_links *) node->value;
9586
  FILE *stream = (FILE *) data;
9587
 
9588
  ASM_OUTPUT_INTERNAL_LABEL (stream, XSTR (link->linkage, 0));
9589
  if (link->rkind == KIND_CODEADDR)
9590
    {
9591
      /* External and used, request code address.  */
9592
      fprintf (stream, "\t.code_address ");
9593
    }
9594
  else
9595
    {
9596
      if (!SYMBOL_REF_EXTERNAL_P (link->func)
9597
          && SYMBOL_REF_LOCAL_P (link->func))
9598
        {
9599
          /* Locally defined, build linkage pair.  */
9600
          fprintf (stream, "\t.quad %s..en\n", name);
9601
          fprintf (stream, "\t.quad ");
9602
        }
9603
      else
9604
        {
9605
          /* External, request linkage pair.  */
9606
          fprintf (stream, "\t.linkage ");
9607
        }
9608
    }
9609
  assemble_name (stream, name);
9610
  fputs ("\n", stream);
9611
 
9612
  return 0;
9613
}
9614
 
9615
static void
9616
alpha_write_linkage (FILE *stream, const char *funname)
9617
{
9618
  fprintf (stream, "\t.link\n");
9619
  fprintf (stream, "\t.align 3\n");
9620
  in_section = NULL;
9621
 
9622
#ifdef TARGET_VMS_CRASH_DEBUG
9623
  fputs ("\t.name ", stream);
9624
  assemble_name (stream, funname);
9625
  fputs ("..na\n", stream);
9626
#endif
9627
 
9628
  ASM_OUTPUT_LABEL (stream, funname);
9629
  fprintf (stream, "\t.pdesc ");
9630
  assemble_name (stream, funname);
9631
  fprintf (stream, "..en,%s\n",
9632
           alpha_procedure_type == PT_STACK ? "stack"
9633
           : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9634
 
9635
  if (cfun->machine->links)
9636
    {
9637
      splay_tree_foreach (cfun->machine->links, alpha_write_one_linkage, stream);
9638
      /* splay_tree_delete (func->links); */
9639
    }
9640
}
9641
 
9642
/* Switch to an arbitrary section NAME with attributes as specified
9643
   by FLAGS.  ALIGN specifies any known alignment requirements for
9644
   the section; 0 if the default should be used.  */
9645
 
9646
static void
9647
vms_asm_named_section (const char *name, unsigned int flags,
9648
                       tree decl ATTRIBUTE_UNUSED)
9649
{
9650
  fputc ('\n', asm_out_file);
9651
  fprintf (asm_out_file, ".section\t%s", name);
9652
 
9653
  if (flags & SECTION_DEBUG)
9654
    fprintf (asm_out_file, ",NOWRT");
9655
 
9656
  fputc ('\n', asm_out_file);
9657
}
9658
 
9659
/* Record an element in the table of global constructors.  SYMBOL is
9660
   a SYMBOL_REF of the function to be called; PRIORITY is a number
9661
   between 0 and MAX_INIT_PRIORITY.
9662
 
9663
   Differs from default_ctors_section_asm_out_constructor in that the
9664
   width of the .ctors entry is always 64 bits, rather than the 32 bits
9665
   used by a normal pointer.  */
9666
 
9667
static void
9668
vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9669
{
9670
  switch_to_section (ctors_section);
9671
  assemble_align (BITS_PER_WORD);
9672
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9673
}
9674
 
9675
static void
9676
vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9677
{
9678
  switch_to_section (dtors_section);
9679
  assemble_align (BITS_PER_WORD);
9680
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9681
}
9682
#else
9683
rtx
9684
alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
9685
                   bool lflag ATTRIBUTE_UNUSED,
9686
                   bool rflag ATTRIBUTE_UNUSED)
9687
{
9688
  return NULL_RTX;
9689
}
9690
 
9691
#endif /* TARGET_ABI_OPEN_VMS */
9692
 
9693
static void
9694
alpha_init_libfuncs (void)
9695
{
9696
  if (TARGET_ABI_OPEN_VMS)
9697
    {
9698
      /* Use the VMS runtime library functions for division and
9699
         remainder.  */
9700
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9701
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9702
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9703
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9704
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9705
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9706
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9707
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
9708
      abort_libfunc = init_one_libfunc ("decc$abort");
9709
      memcmp_libfunc = init_one_libfunc ("decc$memcmp");
9710
#ifdef MEM_LIBFUNCS_INIT
9711
      MEM_LIBFUNCS_INIT;
9712
#endif
9713
    }
9714
}
9715
 
9716
/* On the Alpha, we use this to disable the floating-point registers
9717
   when they don't exist.  */
9718
 
9719
static void
9720
alpha_conditional_register_usage (void)
9721
{
9722
  int i;
9723
  if (! TARGET_FPREGS)
9724
    for (i = 32; i < 63; i++)
9725
      fixed_regs[i] = call_used_regs[i] = 1;
9726
}
9727
 
9728
/* Initialize the GCC target structure.  */
9729
#if TARGET_ABI_OPEN_VMS
9730
# undef TARGET_ATTRIBUTE_TABLE
9731
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
9732
# undef TARGET_CAN_ELIMINATE
9733
# define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
9734
#endif
9735
 
9736
#undef TARGET_IN_SMALL_DATA_P
9737
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
9738
 
9739
#undef TARGET_ASM_ALIGNED_HI_OP
9740
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
9741
#undef TARGET_ASM_ALIGNED_DI_OP
9742
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
9743
 
9744
/* Default unaligned ops are provided for ELF systems.  To get unaligned
9745
   data for non-ELF systems, we have to turn off auto alignment.  */
9746
#if !defined (OBJECT_FORMAT_ELF) || TARGET_ABI_OPEN_VMS
9747
#undef TARGET_ASM_UNALIGNED_HI_OP
9748
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
9749
#undef TARGET_ASM_UNALIGNED_SI_OP
9750
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
9751
#undef TARGET_ASM_UNALIGNED_DI_OP
9752
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
9753
#endif

#ifdef OBJECT_FORMAT_ELF
#undef  TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK  alpha_elf_reloc_rw_mask
#undef  TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION  alpha_elf_select_rtx_section
#undef  TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS  alpha_elf_section_type_flags
#endif

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef  TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL  alpha_builtin_decl
#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef  TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem

#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif

/* Use 16-bit anchors.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fff
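/* Editorial note (an assumption, not from the original source): the
   [-0x8000, 0x7fff] range matches the signed 16-bit displacement field
   of Alpha load/store instructions, so any object placed within that
   distance of a section anchor can be reached directly, e.g.

	ldq $1, 4096($2)	# $2 holds the anchor address

   without materializing a separate high part of the address.  */
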
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG alpha_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT alpha_trampoline_init

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS alpha_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD alpha_secondary_reload

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start

/* The Alpha architecture does not require sequential consistency.  See
   http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
   for an example of how it can be violated in practice.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true
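/* Editorial sketch (an assumption, not from the original source) of the
   kind of reordering the link above describes: even data-dependent
   loads may complete out of order on Alpha, so with

     CPU 0:  p->data = 42;          CPU 1:  q = shared_ptr;
             mb ();                         val = q->data;
             shared_ptr = p;

   CPU 1 can observe the new shared_ptr and still read a stale q->data
   unless a memory barrier (the Alpha "mb" instruction) is also issued
   between its two loads.  Defining the hook above as true tells the
   rest of the compiler not to assume that loads and stores become
   visible in program order.  */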

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE alpha_option_override

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE alpha_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage

struct gcc_target targetm = TARGET_INITIALIZER;


#include "gt-alpha.h"
