/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011, 2012  Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "function.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "libfuncs.h"
#include "recog.h"
#include "reload.h"
#include "ggc.h"
#include "basic-block.h"
#include "target.h"

struct target_optabs default_target_optabs;
struct target_libfuncs default_target_libfuncs;
#if SWITCHABLE_TARGET
struct target_optabs *this_target_optabs = &default_target_optabs;
struct target_libfuncs *this_target_libfuncs = &default_target_libfuncs;
#endif

#define libfunc_hash \
  (this_target_libfuncs->x_libfunc_hash)

/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
                                   enum machine_mode *);
static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);

/* Prefixes for the current version of decimal floating point (BID vs. DPD) */
#if ENABLE_DECIMAL_BID_FORMAT
#define DECIMAL_PREFIX "bid_"
#else
#define DECIMAL_PREFIX "dpd_"
#endif

/* Used for libfunc_hash.  */

static hashval_t
hash_libfunc (const void *p)
{
  const struct libfunc_entry *const e = (const struct libfunc_entry *) p;

  return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
          ^ e->optab);
}

/* Used for libfunc_hash.  */

static int
eq_libfunc (const void *p, const void *q)
{
  const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
  const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;

  return (e1->optab == e2->optab
          && e1->mode1 == e2->mode1
          && e1->mode2 == e2->mode2);
}

/* Return the libfunc corresponding to the operation defined by OPTAB,
   converting from MODE2 to MODE1.  Trigger lazy initialization if needed;
   return NULL if no libfunc is available.  */
rtx
convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
                       enum machine_mode mode2)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (optab - &convert_optab_table[0]);
  e.mode1 = mode1;
  e.mode2 = mode2;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
  if (!slot)
    {
      if (optab->libcall_gen)
        {
          optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
          slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
          if (slot)
            return (*slot)->libfunc;
          else
            return NULL;
        }
      return NULL;
    }
  return (*slot)->libfunc;
}

/* Return the libfunc corresponding to the operation defined by OPTAB in
   MODE.  Trigger lazy initialization if needed; return NULL if no libfunc
   is available.  */
rtx
optab_libfunc (optab optab, enum machine_mode mode)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (optab - &optab_table[0]);
  e.mode1 = mode;
  e.mode2 = VOIDmode;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
  if (!slot)
    {
      if (optab->libcall_gen)
        {
          optab->libcall_gen (optab, optab->libcall_basename,
                              optab->libcall_suffix, mode);
          slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
                                                           &e, NO_INSERT);
          if (slot)
            return (*slot)->libfunc;
          else
            return NULL;
        }
      return NULL;
    }
  return (*slot)->libfunc;
}


/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
        {
          if (reg_set_p (target, insn))
            return 0;

          insn = PREV_INSN (insn);
        }
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
        if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
          {
            note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
            if (GET_MODE_SIZE (GET_MODE (op0))
                > GET_MODE_SIZE (GET_MODE (target)))
              note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
                                         note, GET_MODE (op0));
            else
              note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
                                         note, GET_MODE (op0));
            break;
          }
        /* FALLTHRU */
      default:
        note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
        break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}

/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static enum machine_mode
widened_mode (enum machine_mode to_mode, rtx op0, rtx op1)
{
  enum machine_mode m0 = GET_MODE (op0);
  enum machine_mode m1 = GET_MODE (op1);
  enum machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
    return to_mode;

  return result;
}

/* Find a widening optab even if it doesn't widen as much as we want.
   E.g. if from_mode is HImode, and to_mode is DImode, and there is no
   direct HI->SI insn, then return SI->DI, if that exists.
   If PERMIT_NON_WIDENING is non-zero then this can be used with
   non-widening optabs also.  */

enum insn_code
find_widening_optab_handler_and_mode (optab op, enum machine_mode to_mode,
                                      enum machine_mode from_mode,
                                      int permit_non_widening,
                                      enum machine_mode *found_mode)
{
  for (; (permit_non_widening || from_mode != to_mode)
         && GET_MODE_SIZE (from_mode) <= GET_MODE_SIZE (to_mode)
         && from_mode != VOIDmode;
       from_mode = GET_MODE_WIDER_MODE (from_mode))
    {
      enum insn_code handler = widening_optab_handler (op, to_mode,
                                                       from_mode);

      if (handler != CODE_FOR_nothing)
        {
          if (found_mode)
            *found_mode = from_mode;
          return handler;
        }
    }

  return CODE_FOR_nothing;
}

/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}

/* Return the optab used for computing the operation given by the tree code,
   CODE and the tree EXP.  This function is not always usable (for example, it
   cannot give complete results for multiplication or division) but probably
   ought to be relied on more widely throughout the expander.  */
optab
optab_for_tree_code (enum tree_code code, const_tree type,
                     enum optab_subtype subtype)
{
  bool trapv;
  switch (code)
    {
    case BIT_AND_EXPR:
      return and_optab;

    case BIT_IOR_EXPR:
      return ior_optab;

    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case BIT_XOR_EXPR:
      return xor_optab;

    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case RDIV_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? usdiv_optab : ssdiv_optab;
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case LSHIFT_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if (subtype == optab_vector)
            return TYPE_SATURATING (type) ? NULL : vashl_optab;

          gcc_assert (subtype == optab_scalar);
        }
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
      return ashl_optab;

    case RSHIFT_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if (subtype == optab_vector)
            return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case LROTATE_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if (subtype == optab_vector)
            return vrotl_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return rotl_optab;

    case RROTATE_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if (subtype == optab_vector)
            return vrotr_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return rotr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case WIDEN_SUM_EXPR:
      return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;

    case DOT_PROD_EXPR:
      return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;

    case WIDEN_MULT_PLUS_EXPR:
      return (TYPE_UNSIGNED (type)
              ? (TYPE_SATURATING (type)
                 ? usmadd_widen_optab : umadd_widen_optab)
              : (TYPE_SATURATING (type)
                 ? ssmadd_widen_optab : smadd_widen_optab));

    case WIDEN_MULT_MINUS_EXPR:
      return (TYPE_UNSIGNED (type)
              ? (TYPE_SATURATING (type)
                 ? usmsub_widen_optab : umsub_widen_optab)
              : (TYPE_SATURATING (type)
                 ? ssmsub_widen_optab : smsub_widen_optab));

    case FMA_EXPR:
      return fma_optab;

    case REDUC_MAX_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;

    case REDUC_MIN_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;

    case REDUC_PLUS_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;

    case VEC_LSHIFT_EXPR:
      return vec_shl_optab;

    case VEC_RSHIFT_EXPR:
      return vec_shr_optab;

    case VEC_WIDEN_MULT_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;

    case VEC_WIDEN_MULT_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;

    case VEC_WIDEN_LSHIFT_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_widen_ushiftl_hi_optab : vec_widen_sshiftl_hi_optab;

    case VEC_WIDEN_LSHIFT_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_widen_ushiftl_lo_optab : vec_widen_sshiftl_lo_optab;

    case VEC_UNPACK_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_hi_optab : vec_unpacks_hi_optab;

    case VEC_UNPACK_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_lo_optab : vec_unpacks_lo_optab;

    case VEC_UNPACK_FLOAT_HI_EXPR:
      /* The signedness is determined from input operand.  */
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;

    case VEC_UNPACK_FLOAT_LO_EXPR:
      /* The signedness is determined from input operand.  */
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;

    case VEC_PACK_TRUNC_EXPR:
      return vec_pack_trunc_optab;

    case VEC_PACK_SAT_EXPR:
      return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;

    case VEC_PACK_FIX_TRUNC_EXPR:
      /* The signedness is determined from output operand.  */
      return TYPE_UNSIGNED (type) ?
        vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;

    default:
      break;
    }

  trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? usadd_optab : ssadd_optab;
      return trapv ? addv_optab : add_optab;

    case MINUS_EXPR:
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? ussub_optab : sssub_optab;
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? usmul_optab : ssmul_optab;
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? usneg_optab : ssneg_optab;
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    default:
      return NULL;
    }
}


/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -  */

rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
                           rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  enum machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
                                         TYPE_MODE (TREE_TYPE (ops->op2)),
                                         tmode0, 0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}

/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  struct expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}


/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);

      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}

/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}

/* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR.  */

rtx
expand_vec_shift_expr (sepops ops, rtx target)
{
  struct expand_operand eops[3];
  enum insn_code icode;
  rtx rtx_op1, rtx_op2;
  enum machine_mode mode = TYPE_MODE (ops->type);
  tree vec_oprnd = ops->op0;
  tree shift_oprnd = ops->op1;
  optab shift_optab;

  switch (ops->code)
    {
      case VEC_RSHIFT_EXPR:
        shift_optab = vec_shr_optab;
        break;
      case VEC_LSHIFT_EXPR:
        shift_optab = vec_shl_optab;
        break;
      default:
        gcc_unreachable ();
    }

  icode = optab_handler (shift_optab, mode);
  gcc_assert (icode != CODE_FOR_nothing);

  rtx_op1 = expand_normal (vec_oprnd);
  rtx_op2 = expand_normal (shift_oprnd);

  create_output_operand (&eops[0], target, mode);
  create_input_operand (&eops[1], rtx_op1, GET_MODE (rtx_op1));
  create_convert_operand_from_type (&eops[2], rtx_op2, TREE_TYPE (shift_oprnd));
  expand_insn (icode, 3, eops);

  return eops[0].value;
}

/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

static rtx
expand_vector_broadcast (enum machine_mode vmode, rtx op)
{
  enum insn_code icode;
  rtvec vec;
  rtx ret;
  int i, n;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  n = GET_MODE_NUNITS (vmode);
  vec = rtvec_alloc (n);
  for (i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;

  if (CONSTANT_P (op))
    return gen_rtx_CONST_VECTOR (vmode, vec);

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = optab_handler (vec_init_optab, vmode);
  if (icode == CODE_FOR_nothing)
    return NULL;

  ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_double_const (-1, -1, op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}


#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
#endif

/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).   Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                          cmp_code, cmp1, cmp2,
                                          outof_input, into_input,
                                          op1, superword_op1,
                                          outof_target, into_target,
                                          unsignedp, methods, shift_mask))
      return true;
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label, -1);
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
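
/* Editor's note: the sketch below is illustrative only and is not part of
   GCC.  It restates, in plain C and assuming a 32-bit word
   (BITS_PER_WORD == 32, 32-bit unsigned int), the superword/subword split
   that expand_doubleword_shift and its helpers synthesize at the RTL level,
   specialized to a logical left shift.  The function name
   shl64_via_word_shifts is hypothetical.  */
#if 0
static void
shl64_via_word_shifts (unsigned int outof_input, unsigned int into_input,
                       unsigned int op1, /* shift count, 0 < op1 < 64 */
                       unsigned int *outof_target, unsigned int *into_target)
{
  if (op1 >= 32)
    {
      /* Superword case (expand_superword_shift): the out-of half of the
         result is zero-filled and the into half is the out-of input
         shifted by op1 - BITS_PER_WORD.  */
      *outof_target = 0;
      *into_target = outof_input << (op1 - 32);
    }
  else
    {
      /* Subword case (expand_subword_shift): the bits that cross the word
         boundary ("carries") come from OUTOF_INPUT, shifted in the
         opposite direction by BITS_PER_WORD - op1, then ORed into the
         shifted INTO half.  */
      unsigned int carries = outof_input >> (32 - op1);
      *outof_target = outof_input << op1;
      *into_target = (into_input << op1) | carries;
    }
}
#endif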

/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
                                 _______________________
                                [__op0_high_|__op0_low__]
                                 _______________________
        *                       [__op1_high_|__op1_low__]
        _______________________________________________
                                 _______________________
    (1)                         [__op0_low__*__op1_low__]
                     _______________________
    (2a)            [__op0_low__*__op1_high_]
                     _______________________
    (2b)            [__op0_high_*__op1_low__]
         _______________________
    (3) [__op0_high_*__op1_high_]


  This gives a 4-word result.  Since we are only interested in the
  lower 2 words, partial result (3) and the upper words of (2a) and
  (2b) don't need to be calculated.  Hence (2a) and (2b) can be
  calculated using non-widening multiplication.

  (1), however, needs to be calculated with an unsigned widening
  multiplication.  If this operation is not directly supported we
  try using a signed widening multiplication and adjust the result.
  This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */

static rtx
expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                       bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
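
/* Editor's note: the sketch below is illustrative only and is not part of
   GCC.  It carries out the three-multiplication scheme described above in
   plain C, assuming a 32-bit word (32-bit unsigned int, 64-bit unsigned
   long long), and computes only the low two result words, just as
   expand_doubleword_mult does.  The name mul64_low_via_word_mults is
   hypothetical.  */
#if 0
static unsigned long long
mul64_low_via_word_mults (unsigned long long a, unsigned long long b)
{
  unsigned int a_low = (unsigned int) a, a_high = (unsigned int) (a >> 32);
  unsigned int b_low = (unsigned int) b, b_high = (unsigned int) (b >> 32);

  /* (1): unsigned widening multiply of the two low words.  */
  unsigned long long product = (unsigned long long) a_low * b_low;

  /* (2a) and (2b): non-widening multiplies; only their low words matter,
     because everything above the second result word is discarded.  If only
     a signed widening multiply were available for (1), the adjustment
     described above would add (a_low >> 31) to a_high and (b_low >> 31)
     to b_high before these two multiplies.  */
  unsigned int adjust = a_high * b_low + b_high * a_low;

  /* Fold the adjustment into the high word of the partial product.  */
  return product + ((unsigned long long) adjust << 32);
}
#endif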

/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}

/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}

/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (binoptab->code)
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}

/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
          || binoptab == smul_widen_optab
          || binoptab == umul_widen_optab
          || binoptab == smul_highpart_optab
          || binoptab == umul_highpart_optab);
}

/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (enum machine_mode mode, optab binoptab,
                          int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && rtx_cost (x, binoptab->code, opn, speed) > set_src_cost (x, speed))
    {
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
          if (intval != INTVAL (x))
            x = GEN_INT (intval);
        }
      else
        x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}

/* Helper function for expand_binop: handle the case where there
   is an insn that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1,
                       rtx target, int unsignedp, enum optab_methods methods,
                       rtx last)
{
  enum machine_mode from_mode = widened_mode (mode, op0, op1);
  enum insn_code icode = find_widening_optab_handler (binoptab, mode,
                                                      from_mode, 1);
  enum machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  enum machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  enum machine_mode mode0, mode1, tmp_mode;
  struct expand_operand ops[3];
  bool commutative_p;
  rtx pat;
  rtx xop0 = op0, xop1 = op1;
  rtx swap;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode1)
    {
      swap = xop0;
      xop0 = xop1;
      xop1 = swap;
    }

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = GET_MODE (xop1) != VOIDmode ? GET_MODE (xop1) : mode;
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    {
      swap = xop1;
      xop1 = xop0;
      xop0 = swap;
    }

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different from the mode of the
         arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
        {
          delete_insns_since (last);
          return NULL_RTX;
        }
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, ops[0].value, binoptab->code,
                               ops[1].value, ops[2].value))
        {
          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                               unsignedp, methods);
        }

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
1463
 
1464
/* Generate code to perform an operation specified by BINOPTAB
1465
   on operands OP0 and OP1, with result having machine-mode MODE.
1466
 
1467
   UNSIGNEDP is for the case where we have to widen the operands
1468
   to perform the operation.  It says to use zero-extension.
1469
 
1470
   If TARGET is nonzero, the value
1471
   is generated there, if it is convenient to do so.
1472
   In all cases an rtx is returned for the locus of the value;
1473
   this may or may not be TARGET.  */
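/* A minimal usage sketch (illustrative only; X and Y stand for any SImode
   rtx values already at hand):

     rtx sum = expand_binop (SImode, add_optab, x, y,
                             NULL_RTX, 0, OPTAB_LIB_WIDEN);

   requests a signed SImode addition and lets the expander fall back to a
   wider mode or a library call if no direct add pattern is available.  */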
1474
 
1475
rtx
1476
expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1477
              rtx target, int unsignedp, enum optab_methods methods)
1478
{
1479
  enum optab_methods next_methods
1480
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1481
       ? OPTAB_WIDEN : methods);
1482
  enum mode_class mclass;
1483
  enum machine_mode wider_mode;
1484
  rtx libfunc;
1485
  rtx temp;
1486
  rtx entry_last = get_last_insn ();
1487
  rtx last;
1488
 
1489
  mclass = GET_MODE_CLASS (mode);
1490
 
1491
  /* If subtracting an integer constant, convert this into an addition of
1492
     the negated constant.  */
1493
 
1494
  if (binoptab == sub_optab && CONST_INT_P (op1))
1495
    {
1496
      op1 = negate_rtx (mode, op1);
1497
      binoptab = add_optab;
1498
    }
1499
 
1500
  /* Record where to delete back to if we backtrack.  */
1501
  last = get_last_insn ();
1502
 
1503
  /* If we can do it with a three-operand insn, do so.  */
1504
 
1505
  if (methods != OPTAB_MUST_WIDEN
1506
      && find_widening_optab_handler (binoptab, mode,
1507
                                      widened_mode (mode, op0, op1), 1)
1508
            != CODE_FOR_nothing)
1509
    {
1510
      temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1511
                                    unsignedp, methods, last);
1512
      if (temp)
1513
        return temp;
1514
    }
1515
 
1516
  /* If we were trying to rotate, and that didn't work, try rotating
1517
     the other direction before falling back to shifts and bitwise-or.  */
1518
  if (((binoptab == rotl_optab
1519
        && optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
1520
       || (binoptab == rotr_optab
1521
           && optab_handler (rotl_optab, mode) != CODE_FOR_nothing))
1522
      && mclass == MODE_INT)
1523
    {
1524
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1525
      rtx newop1;
1526
      unsigned int bits = GET_MODE_PRECISION (mode);
1527
 
1528
      if (CONST_INT_P (op1))
1529
        newop1 = GEN_INT (bits - INTVAL (op1));
1530
      else if (targetm.shift_truncation_mask (mode) == bits - 1)
1531
        newop1 = negate_rtx (GET_MODE (op1), op1);
1532
      else
1533
        newop1 = expand_binop (GET_MODE (op1), sub_optab,
1534
                               GEN_INT (bits), op1,
1535
                               NULL_RTX, unsignedp, OPTAB_DIRECT);
1536
 
1537
      temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1538
                                    target, unsignedp, methods, last);
1539
      if (temp)
1540
        return temp;
1541
    }
1542
 
1543
  /* If this is a multiply, see if we can do a widening operation that
1544
     takes operands of this mode and makes a wider mode.  */
1545
 
1546
  if (binoptab == smul_optab
1547
      && GET_MODE_2XWIDER_MODE (mode) != VOIDmode
1548
      && (widening_optab_handler ((unsignedp ? umul_widen_optab
1549
                                             : smul_widen_optab),
1550
                                  GET_MODE_2XWIDER_MODE (mode), mode)
1551
          != CODE_FOR_nothing))
1552
    {
1553
      temp = expand_binop (GET_MODE_2XWIDER_MODE (mode),
1554
                           unsignedp ? umul_widen_optab : smul_widen_optab,
1555
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1556
 
1557
      if (temp != 0)
1558
        {
1559
          if (GET_MODE_CLASS (mode) == MODE_INT
1560
              && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
1561
            return gen_lowpart (mode, temp);
1562
          else
1563
            return convert_to_mode (mode, temp, unsignedp);
1564
        }
1565
    }
1566
 
1567
  /* If this is a vector shift by a scalar, see if we can do a vector
1568
     shift by a vector.  If so, broadcast the scalar into a vector.  */
1569
  if (mclass == MODE_VECTOR_INT)
1570
    {
1571
      optab otheroptab = NULL;
1572
 
1573
      if (binoptab == ashl_optab)
1574
        otheroptab = vashl_optab;
1575
      else if (binoptab == ashr_optab)
1576
        otheroptab = vashr_optab;
1577
      else if (binoptab == lshr_optab)
1578
        otheroptab = vlshr_optab;
1579
      else if (binoptab == rotl_optab)
1580
        otheroptab = vrotl_optab;
1581
      else if (binoptab == rotr_optab)
1582
        otheroptab = vrotr_optab;
1583
 
1584
      if (otheroptab && optab_handler (otheroptab, mode) != CODE_FOR_nothing)
1585
        {
1586
          rtx vop1 = expand_vector_broadcast (mode, op1);
1587
          if (vop1)
1588
            {
1589
              temp = expand_binop_directly (mode, otheroptab, op0, vop1,
1590
                                            target, unsignedp, methods, last);
1591
              if (temp)
1592
                return temp;
1593
            }
1594
        }
1595
    }
1596
 
1597
  /* Look for a wider mode of the same class for which we think we
1598
     can open-code the operation.  Check for a widening multiply at the
1599
     wider mode as well.  */
1600
 
1601
  if (CLASS_HAS_WIDER_MODES_P (mclass)
1602
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1603
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
1604
         wider_mode != VOIDmode;
1605
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1606
      {
1607
        if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
1608
            || (binoptab == smul_optab
1609
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1610
                && (find_widening_optab_handler ((unsignedp
1611
                                                  ? umul_widen_optab
1612
                                                  : smul_widen_optab),
1613
                                                 GET_MODE_WIDER_MODE (wider_mode),
1614
                                                 mode, 0)
1615
                    != CODE_FOR_nothing)))
1616
          {
1617
            rtx xop0 = op0, xop1 = op1;
1618
            int no_extend = 0;
1619
 
1620
            /* For certain integer operations, we need not actually extend
1621
               the narrow operands, as long as we will truncate
1622
               the results to the same narrowness.  */
1623
 
1624
            if ((binoptab == ior_optab || binoptab == and_optab
1625
                 || binoptab == xor_optab
1626
                 || binoptab == add_optab || binoptab == sub_optab
1627
                 || binoptab == smul_optab || binoptab == ashl_optab)
1628
                && mclass == MODE_INT)
1629
              {
1630
                no_extend = 1;
1631
                xop0 = avoid_expensive_constant (mode, binoptab, 0,
1632
                                                 xop0, unsignedp);
1633
                if (binoptab != ashl_optab)
1634
                  xop1 = avoid_expensive_constant (mode, binoptab, 1,
1635
                                                   xop1, unsignedp);
1636
              }
1637
 
1638
            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1639
 
1640
            /* The second operand of a shift must always be extended.  */
1641
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1642
                                  no_extend && binoptab != ashl_optab);
1643
 
1644
            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1645
                                 unsignedp, OPTAB_DIRECT);
1646
            if (temp)
1647
              {
1648
                if (mclass != MODE_INT
1649
                    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1650
                  {
1651
                    if (target == 0)
1652
                      target = gen_reg_rtx (mode);
1653
                    convert_move (target, temp, 0);
1654
                    return target;
1655
                  }
1656
                else
1657
                  return gen_lowpart (mode, temp);
1658
              }
1659
            else
1660
              delete_insns_since (last);
1661
          }
1662
      }
1663
 
1664
  /* If operation is commutative,
1665
     try to make the first operand a register.
1666
     Even better, try to make it the same as the target.
1667
     Also try to make the last operand a constant.  */
1668
  if (commutative_optab_p (binoptab)
1669
      && swap_commutative_operands_with_target (target, op0, op1))
1670
    {
1671
      temp = op1;
1672
      op1 = op0;
1673
      op0 = temp;
1674
    }
1675
 
1676
  /* These can be done a word at a time.  */
1677
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1678
      && mclass == MODE_INT
1679
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1680
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1681
    {
1682
      int i;
1683
      rtx insns;
1684
 
1685
      /* If TARGET is the same as one of the operands, the REG_EQUAL note
1686
         won't be accurate, so use a new target.  */
1687
      if (target == 0
1688
          || target == op0
1689
          || target == op1
1690
          || !valid_multiword_target_p (target))
1691
        target = gen_reg_rtx (mode);
1692
 
1693
      start_sequence ();
1694
 
1695
      /* Do the actual arithmetic.  */
1696
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1697
        {
1698
          rtx target_piece = operand_subword (target, i, 1, mode);
1699
          rtx x = expand_binop (word_mode, binoptab,
1700
                                operand_subword_force (op0, i, mode),
1701
                                operand_subword_force (op1, i, mode),
1702
                                target_piece, unsignedp, next_methods);
1703
 
1704
          if (x == 0)
1705
            break;
1706
 
1707
          if (target_piece != x)
1708
            emit_move_insn (target_piece, x);
1709
        }
1710
 
1711
      insns = get_insns ();
1712
      end_sequence ();
1713
 
1714
      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1715
        {
1716
          emit_insn (insns);
1717
          return target;
1718
        }
1719
    }
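  /* For illustration, the word-at-a-time idea above as a plain C sketch,
     assuming a 64-bit AND built from 32-bit words:

       result_lo = a_lo & b_lo;
       result_hi = a_hi & b_hi;

     Bitwise AND/IOR/XOR have no cross-word interaction, so each result
     word depends only on the corresponding input words.  */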
1720
 
1721
  /* Synthesize double word shifts from single word shifts.  */
1722
  if ((binoptab == lshr_optab || binoptab == ashl_optab
1723
       || binoptab == ashr_optab)
1724
      && mclass == MODE_INT
1725
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
1726
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1727
      && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode)
1728
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
1729
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1730
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1731
    {
1732
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1733
      enum machine_mode op1_mode;
1734
 
1735
      double_shift_mask = targetm.shift_truncation_mask (mode);
1736
      shift_mask = targetm.shift_truncation_mask (word_mode);
1737
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1738
 
1739
      /* Apply the truncation to constant shifts.  */
1740
      if (double_shift_mask > 0 && CONST_INT_P (op1))
1741
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1742
 
1743
      if (op1 == CONST0_RTX (op1_mode))
1744
        return op0;
1745
 
1746
      /* Make sure that this is a combination that expand_doubleword_shift
1747
         can handle.  See the comments there for details.  */
1748
      if (double_shift_mask == 0
1749
          || (shift_mask == BITS_PER_WORD - 1
1750
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
1751
        {
1752
          rtx insns;
1753
          rtx into_target, outof_target;
1754
          rtx into_input, outof_input;
1755
          int left_shift, outof_word;
1756
 
1757
          /* If TARGET is the same as one of the operands, the REG_EQUAL note
1758
             won't be accurate, so use a new target.  */
1759
          if (target == 0
1760
              || target == op0
1761
              || target == op1
1762
              || !valid_multiword_target_p (target))
1763
            target = gen_reg_rtx (mode);
1764
 
1765
          start_sequence ();
1766
 
1767
          /* OUTOF_* is the word we are shifting bits away from, and
1768
             INTO_* is the word that we are shifting bits towards, thus
1769
             they differ depending on the direction of the shift and
1770
             WORDS_BIG_ENDIAN.  */
1771
 
1772
          left_shift = binoptab == ashl_optab;
1773
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1774
 
1775
          outof_target = operand_subword (target, outof_word, 1, mode);
1776
          into_target = operand_subword (target, 1 - outof_word, 1, mode);
1777
 
1778
          outof_input = operand_subword_force (op0, outof_word, mode);
1779
          into_input = operand_subword_force (op0, 1 - outof_word, mode);
1780
 
1781
          if (expand_doubleword_shift (op1_mode, binoptab,
1782
                                       outof_input, into_input, op1,
1783
                                       outof_target, into_target,
1784
                                       unsignedp, next_methods, shift_mask))
1785
            {
1786
              insns = get_insns ();
1787
              end_sequence ();
1788
 
1789
              emit_insn (insns);
1790
              return target;
1791
            }
1792
          end_sequence ();
1793
        }
1794
    }
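  /* For illustration, a plain C sketch of the double-word left shift by
     CNT, with 0 < CNT < BITS_PER_WORD, as expand_doubleword_shift arranges
     it from single-word shifts:

       out_hi = (in_hi << cnt) | (in_lo >> (BITS_PER_WORD - cnt));
       out_lo = in_lo << cnt;

     Bits shifted out of the low word are IORed into the high word; right
     shifts mirror this with the roles of the words exchanged.  */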
1795
 
1796
  /* Synthesize double word rotates from single word shifts.  */
1797
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
1798
      && mclass == MODE_INT
1799
      && CONST_INT_P (op1)
1800
      && GET_MODE_PRECISION (mode) == 2 * BITS_PER_WORD
1801
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1802
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1803
    {
1804
      rtx insns;
1805
      rtx into_target, outof_target;
1806
      rtx into_input, outof_input;
1807
      rtx inter;
1808
      int shift_count, left_shift, outof_word;
1809
 
1810
      /* If TARGET is the same as one of the operands, the REG_EQUAL note
1811
         won't be accurate, so use a new target. Do this also if target is not
1812
         a REG, first because having a register instead may open optimization
1813
         opportunities, and second because if target and op0 happen to be MEMs
1814
         designating the same location, we would risk clobbering it too early
1815
         in the code sequence we generate below.  */
1816
      if (target == 0
1817
          || target == op0
1818
          || target == op1
1819
          || !REG_P (target)
1820
          || !valid_multiword_target_p (target))
1821
        target = gen_reg_rtx (mode);
1822
 
1823
      start_sequence ();
1824
 
1825
      shift_count = INTVAL (op1);
1826
 
1827
      /* OUTOF_* is the word we are shifting bits away from, and
1828
         INTO_* is the word that we are shifting bits towards, thus
1829
         they differ depending on the direction of the shift and
1830
         WORDS_BIG_ENDIAN.  */
1831
 
1832
      left_shift = (binoptab == rotl_optab);
1833
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1834
 
1835
      outof_target = operand_subword (target, outof_word, 1, mode);
1836
      into_target = operand_subword (target, 1 - outof_word, 1, mode);
1837
 
1838
      outof_input = operand_subword_force (op0, outof_word, mode);
1839
      into_input = operand_subword_force (op0, 1 - outof_word, mode);
1840
 
1841
      if (shift_count == BITS_PER_WORD)
1842
        {
1843
          /* This is just a word swap.  */
1844
          emit_move_insn (outof_target, into_input);
1845
          emit_move_insn (into_target, outof_input);
1846
          inter = const0_rtx;
1847
        }
1848
      else
1849
        {
1850
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1851
          rtx first_shift_count, second_shift_count;
1852
          optab reverse_unsigned_shift, unsigned_shift;
1853
 
1854
          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1855
                                    ? lshr_optab : ashl_optab);
1856
 
1857
          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1858
                            ? ashl_optab : lshr_optab);
1859
 
1860
          if (shift_count > BITS_PER_WORD)
1861
            {
1862
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1863
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1864
            }
1865
          else
1866
            {
1867
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1868
              second_shift_count = GEN_INT (shift_count);
1869
            }
1870
 
1871
          into_temp1 = expand_binop (word_mode, unsigned_shift,
1872
                                     outof_input, first_shift_count,
1873
                                     NULL_RTX, unsignedp, next_methods);
1874
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1875
                                     into_input, second_shift_count,
1876
                                     NULL_RTX, unsignedp, next_methods);
1877
 
1878
          if (into_temp1 != 0 && into_temp2 != 0)
1879
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1880
                                  into_target, unsignedp, next_methods);
1881
          else
1882
            inter = 0;
1883
 
1884
          if (inter != 0 && inter != into_target)
1885
            emit_move_insn (into_target, inter);
1886
 
1887
          outof_temp1 = expand_binop (word_mode, unsigned_shift,
1888
                                      into_input, first_shift_count,
1889
                                      NULL_RTX, unsignedp, next_methods);
1890
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1891
                                      outof_input, second_shift_count,
1892
                                      NULL_RTX, unsignedp, next_methods);
1893
 
1894
          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1895
            inter = expand_binop (word_mode, ior_optab,
1896
                                  outof_temp1, outof_temp2,
1897
                                  outof_target, unsignedp, next_methods);
1898
 
1899
          if (inter != 0 && inter != outof_target)
1900
            emit_move_insn (outof_target, inter);
1901
        }
1902
 
1903
      insns = get_insns ();
1904
      end_sequence ();
1905
 
1906
      if (inter != 0)
1907
        {
1908
          emit_insn (insns);
1909
          return target;
1910
        }
1911
    }
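  /* For illustration, a plain C sketch of the rotate-left case handled
     above when BITS_PER_WORD < CNT < 2 * BITS_PER_WORD, with
     first = CNT - BITS_PER_WORD and second = 2 * BITS_PER_WORD - CNT:

       out_hi = (in_lo << first) | (in_hi >> second);
       out_lo = (in_hi << first) | (in_lo >> second);

     CNT == BITS_PER_WORD degenerates to the plain word swap handled
     separately above.  */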
1912
 
1913
  /* These can be done a word at a time by propagating carries.  */
1914
  if ((binoptab == add_optab || binoptab == sub_optab)
1915
      && mclass == MODE_INT
1916
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1917
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1918
    {
1919
      unsigned int i;
1920
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1921
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1922
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1923
      rtx xop0, xop1, xtarget;
1924
 
1925
      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
1926
         value is one of those, use it.  Otherwise, use 1 since it is the
1927
         one easiest to get.  */
1928
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1929
      int normalizep = STORE_FLAG_VALUE;
1930
#else
1931
      int normalizep = 1;
1932
#endif
1933
 
1934
      /* Prepare the operands.  */
1935
      xop0 = force_reg (mode, op0);
1936
      xop1 = force_reg (mode, op1);
1937
 
1938
      xtarget = gen_reg_rtx (mode);
1939
 
1940
      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1941
        target = xtarget;
1942
 
1943
      /* Indicate for flow that the entire target reg is being set.  */
1944
      if (REG_P (target))
1945
        emit_clobber (xtarget);
1946
 
1947
      /* Do the actual arithmetic.  */
1948
      for (i = 0; i < nwords; i++)
1949
        {
1950
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1951
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
1952
          rtx op0_piece = operand_subword_force (xop0, index, mode);
1953
          rtx op1_piece = operand_subword_force (xop1, index, mode);
1954
          rtx x;
1955
 
1956
          /* Main add/subtract of the input operands.  */
1957
          x = expand_binop (word_mode, binoptab,
1958
                            op0_piece, op1_piece,
1959
                            target_piece, unsignedp, next_methods);
1960
          if (x == 0)
1961
            break;
1962
 
1963
          if (i + 1 < nwords)
1964
            {
1965
              /* Store carry from main add/subtract.  */
1966
              carry_out = gen_reg_rtx (word_mode);
1967
              carry_out = emit_store_flag_force (carry_out,
1968
                                                 (binoptab == add_optab
1969
                                                  ? LT : GT),
1970
                                                 x, op0_piece,
1971
                                                 word_mode, 1, normalizep);
1972
            }
1973
 
1974
          if (i > 0)
1975
            {
1976
              rtx newx;
1977
 
1978
              /* Add/subtract previous carry to main result.  */
1979
              newx = expand_binop (word_mode,
1980
                                   normalizep == 1 ? binoptab : otheroptab,
1981
                                   x, carry_in,
1982
                                   NULL_RTX, 1, next_methods);
1983
 
1984
              if (i + 1 < nwords)
1985
                {
1986
                  /* Get out carry from adding/subtracting carry in.  */
1987
                  rtx carry_tmp = gen_reg_rtx (word_mode);
1988
                  carry_tmp = emit_store_flag_force (carry_tmp,
1989
                                                     (binoptab == add_optab
1990
                                                      ? LT : GT),
1991
                                                     newx, x,
1992
                                                     word_mode, 1, normalizep);
1993
 
1994
                  /* Logical-ior the two possible carries together.  */
1995
                  carry_out = expand_binop (word_mode, ior_optab,
1996
                                            carry_out, carry_tmp,
1997
                                            carry_out, 0, next_methods);
1998
                  if (carry_out == 0)
1999
                    break;
2000
                }
2001
              emit_move_insn (target_piece, newx);
2002
            }
2003
          else
2004
            {
2005
              if (x != target_piece)
2006
                emit_move_insn (target_piece, x);
2007
            }
2008
 
2009
          carry_in = carry_out;
2010
        }
2011
 
2012
      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
2013
        {
2014
          if (optab_handler (mov_optab, mode) != CODE_FOR_nothing
2015
              || ! rtx_equal_p (target, xtarget))
2016
            {
2017
              rtx temp = emit_move_insn (target, xtarget);
2018
 
2019
              set_dst_reg_note (temp, REG_EQUAL,
2020
                                gen_rtx_fmt_ee (binoptab->code, mode,
2021
                                                copy_rtx (xop0),
2022
                                                copy_rtx (xop1)),
2023
                                target);
2024
            }
2025
          else
2026
            target = xtarget;
2027
 
2028
          return target;
2029
        }
2030
 
2031
      else
2032
        delete_insns_since (last);
2033
    }
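  /* For illustration, the carry test used above as a plain C sketch for a
     two-word addition with unsigned BITS_PER_WORD-bit words:

       sum_lo = a_lo + b_lo;
       carry  = sum_lo < a_lo;       (wrap-around means a carry out)
       sum_hi = a_hi + b_hi + carry;

     emit_store_flag_force with LT plays the part of the "sum_lo < a_lo"
     comparison; for subtraction, GT detects the borrow instead.  */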
2034
 
2035
  /* Attempt to synthesize double word multiplies using a sequence of word
2036
     mode multiplications.  We first attempt to generate a sequence using a
2037
     more efficient unsigned widening multiply, and if that fails we then
2038
     try using a signed widening multiply.  */
2039
 
2040
  if (binoptab == smul_optab
2041
      && mclass == MODE_INT
2042
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2043
      && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
2044
      && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
2045
    {
2046
      rtx product = NULL_RTX;
2047
      if (widening_optab_handler (umul_widen_optab, mode, word_mode)
2048
            != CODE_FOR_nothing)
2049
        {
2050
          product = expand_doubleword_mult (mode, op0, op1, target,
2051
                                            true, methods);
2052
          if (!product)
2053
            delete_insns_since (last);
2054
        }
2055
 
2056
      if (product == NULL_RTX
2057
          && widening_optab_handler (smul_widen_optab, mode, word_mode)
2058
                != CODE_FOR_nothing)
2059
        {
2060
          product = expand_doubleword_mult (mode, op0, op1, target,
2061
                                            false, methods);
2062
          if (!product)
2063
            delete_insns_since (last);
2064
        }
2065
 
2066
      if (product != NULL_RTX)
2067
        {
2068
          if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
2069
            {
2070
              temp = emit_move_insn (target ? target : product, product);
2071
              set_dst_reg_note (temp,
2072
                                REG_EQUAL,
2073
                                gen_rtx_fmt_ee (MULT, mode,
2074
                                                copy_rtx (op0),
2075
                                                copy_rtx (op1)),
2076
                                target ? target : product);
2077
            }
2078
          return product;
2079
        }
2080
    }
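  /* For illustration, the identity behind expand_doubleword_mult, with
     W-bit words:

       (a_hi*2^W + a_lo) * (b_hi*2^W + b_lo)   mod 2^(2W)
         = widen (a_lo) * widen (b_lo)  +  ((a_hi*b_lo + a_lo*b_hi) << W)

     so one widening W x W -> 2W multiply plus two ordinary W-bit multiplies
     and an addition give the low 2W bits of the product.  */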
2081
 
2082
  /* It can't be open-coded in this mode.
2083
     Use a library call if one is available and caller says that's ok.  */
2084
 
2085
  libfunc = optab_libfunc (binoptab, mode);
2086
  if (libfunc
2087
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2088
    {
2089
      rtx insns;
2090
      rtx op1x = op1;
2091
      enum machine_mode op1_mode = mode;
2092
      rtx value;
2093
 
2094
      start_sequence ();
2095
 
2096
      if (shift_optab_p (binoptab))
2097
        {
2098
          op1_mode = targetm.libgcc_shift_count_mode ();
2099
          /* Specify unsigned here,
2100
             since negative shift counts are meaningless.  */
2101
          op1x = convert_to_mode (op1_mode, op1, 1);
2102
        }
2103
 
2104
      if (GET_MODE (op0) != VOIDmode
2105
          && GET_MODE (op0) != mode)
2106
        op0 = convert_to_mode (mode, op0, unsignedp);
2107
 
2108
      /* Pass 1 for NO_QUEUE so we don't lose any increments
2109
         if the libcall is cse'd or moved.  */
2110
      value = emit_library_call_value (libfunc,
2111
                                       NULL_RTX, LCT_CONST, mode, 2,
2112
                                       op0, mode, op1x, op1_mode);
2113
 
2114
      insns = get_insns ();
2115
      end_sequence ();
2116
 
2117
      target = gen_reg_rtx (mode);
2118
      emit_libcall_block (insns, target, value,
2119
                          gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2120
 
2121
      return target;
2122
    }
2123
 
2124
  delete_insns_since (last);
2125
 
2126
  /* It can't be done in this mode.  Can we do it in a wider mode?  */
2127
 
2128
  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2129
         || methods == OPTAB_MUST_WIDEN))
2130
    {
2131
      /* Caller says, don't even try.  */
2132
      delete_insns_since (entry_last);
2133
      return 0;
2134
    }
2135
 
2136
  /* Compute the value of METHODS to pass to recursive calls.
2137
     Don't allow widening to be tried recursively.  */
2138
 
2139
  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2140
 
2141
  /* Look for a wider mode of the same class for which it appears we can do
2142
     the operation.  */
2143
 
2144
  if (CLASS_HAS_WIDER_MODES_P (mclass))
2145
    {
2146
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
2147
           wider_mode != VOIDmode;
2148
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2149
        {
2150
          if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
2151
                  != CODE_FOR_nothing
2152
              || (methods == OPTAB_LIB
2153
                  && optab_libfunc (binoptab, wider_mode)))
2154
            {
2155
              rtx xop0 = op0, xop1 = op1;
2156
              int no_extend = 0;
2157
 
2158
              /* For certain integer operations, we need not actually extend
2159
                 the narrow operands, as long as we will truncate
2160
                 the results to the same narrowness.  */
2161
 
2162
              if ((binoptab == ior_optab || binoptab == and_optab
2163
                   || binoptab == xor_optab
2164
                   || binoptab == add_optab || binoptab == sub_optab
2165
                   || binoptab == smul_optab || binoptab == ashl_optab)
2166
                  && mclass == MODE_INT)
2167
                no_extend = 1;
2168
 
2169
              xop0 = widen_operand (xop0, wider_mode, mode,
2170
                                    unsignedp, no_extend);
2171
 
2172
              /* The second operand of a shift must always be extended.  */
2173
              xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2174
                                    no_extend && binoptab != ashl_optab);
2175
 
2176
              temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2177
                                   unsignedp, methods);
2178
              if (temp)
2179
                {
2180
                  if (mclass != MODE_INT
2181
                      || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2182
                    {
2183
                      if (target == 0)
2184
                        target = gen_reg_rtx (mode);
2185
                      convert_move (target, temp, 0);
2186
                      return target;
2187
                    }
2188
                  else
2189
                    return gen_lowpart (mode, temp);
2190
                }
2191
              else
2192
                delete_insns_since (last);
2193
            }
2194
        }
2195
    }
2196
 
2197
  delete_insns_since (entry_last);
2198
  return 0;
2199
}
2200
 
2201
/* Expand a binary operator which has both signed and unsigned forms.
2202
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
2203
   signed operations.
2204
 
2205
   If we widen unsigned operands, we may use a signed wider operation instead
2206
   of an unsigned wider operation, since the result would be the same.  */
2207
 
2208
rtx
2209
sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2210
                   rtx op0, rtx op1, rtx target, int unsignedp,
2211
                   enum optab_methods methods)
2212
{
2213
  rtx temp;
2214
  optab direct_optab = unsignedp ? uoptab : soptab;
2215
  struct optab_d wide_soptab;
2216
 
2217
  /* Do it without widening, if possible.  */
2218
  temp = expand_binop (mode, direct_optab, op0, op1, target,
2219
                       unsignedp, OPTAB_DIRECT);
2220
  if (temp || methods == OPTAB_DIRECT)
2221
    return temp;
2222
 
2223
  /* Try widening to a signed int.  Make a fake signed optab that
2224
     hides any signed insn for direct use.  */
2225
  wide_soptab = *soptab;
2226
  set_optab_handler (&wide_soptab, mode, CODE_FOR_nothing);
2227
  /* We don't want to generate new hash table entries from this fake
2228
     optab.  */
2229
  wide_soptab.libcall_gen = NULL;
2230
 
2231
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2232
                       unsignedp, OPTAB_WIDEN);
2233
 
2234
  /* For unsigned operands, try widening to an unsigned int.  */
2235
  if (temp == 0 && unsignedp)
2236
    temp = expand_binop (mode, uoptab, op0, op1, target,
2237
                         unsignedp, OPTAB_WIDEN);
2238
  if (temp || methods == OPTAB_WIDEN)
2239
    return temp;
2240
 
2241
  /* Use the right width libcall if that exists.  */
2242
  temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2243
  if (temp || methods == OPTAB_LIB)
2244
    return temp;
2245
 
2246
  /* Must widen and use a libcall, use either signed or unsigned.  */
2247
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2248
                       unsignedp, methods);
2249
  if (temp != 0)
2250
    return temp;
2251
  if (unsignedp)
2252
    return expand_binop (mode, uoptab, op0, op1, target,
2253
                         unsignedp, methods);
2254
  return 0;
2255
}
2256
 
2257
/* Generate code to perform an operation specified by UNOPPTAB
2258
   on operand OP0, with two results to TARG0 and TARG1.
2259
   We assume that the order of the operands for the instruction
2260
   is TARG0, TARG1, OP0.
2261
 
2262
   Either TARG0 or TARG1 may be zero, but what that means is that
2263
   the result is not actually wanted.  We will generate it into
2264
   a dummy pseudo-reg and discard it.  They may not both be zero.
2265
 
2266
   Returns 1 if this operation can be performed; 0 if not.  */
2267
 
2268
int
2269
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2270
                    int unsignedp)
2271
{
2272
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2273
  enum mode_class mclass;
2274
  enum machine_mode wider_mode;
2275
  rtx entry_last = get_last_insn ();
2276
  rtx last;
2277
 
2278
  mclass = GET_MODE_CLASS (mode);
2279
 
2280
  if (!targ0)
2281
    targ0 = gen_reg_rtx (mode);
2282
  if (!targ1)
2283
    targ1 = gen_reg_rtx (mode);
2284
 
2285
  /* Record where to go back to if we fail.  */
2286
  last = get_last_insn ();
2287
 
2288
  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2289
    {
2290
      struct expand_operand ops[3];
2291
      enum insn_code icode = optab_handler (unoptab, mode);
2292
 
2293
      create_fixed_operand (&ops[0], targ0);
2294
      create_fixed_operand (&ops[1], targ1);
2295
      create_convert_operand_from (&ops[2], op0, mode, unsignedp);
2296
      if (maybe_expand_insn (icode, 3, ops))
2297
        return 1;
2298
    }
2299
 
2300
  /* It can't be done in this mode.  Can we do it in a wider mode?  */
2301
 
2302
  if (CLASS_HAS_WIDER_MODES_P (mclass))
2303
    {
2304
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
2305
           wider_mode != VOIDmode;
2306
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2307
        {
2308
          if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2309
            {
2310
              rtx t0 = gen_reg_rtx (wider_mode);
2311
              rtx t1 = gen_reg_rtx (wider_mode);
2312
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2313
 
2314
              if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2315
                {
2316
                  convert_move (targ0, t0, unsignedp);
2317
                  convert_move (targ1, t1, unsignedp);
2318
                  return 1;
2319
                }
2320
              else
2321
                delete_insns_since (last);
2322
            }
2323
        }
2324
    }
2325
 
2326
  delete_insns_since (entry_last);
2327
  return 0;
2328
}
2329
 
2330
/* Generate code to perform an operation specified by BINOPTAB
2331
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
2332
   We assume that the order of the operands for the instruction
2333
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2334
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2335
 
2336
   Either TARG0 or TARG1 may be zero, but what that means is that
2337
   the result is not actually wanted.  We will generate it into
2338
   a dummy pseudo-reg and discard it.  They may not both be zero.
2339
 
2340
   Returns 1 if this operation can be performed; 0 if not.  */
2341
 
2342
int
2343
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2344
                     int unsignedp)
2345
{
2346
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2347
  enum mode_class mclass;
2348
  enum machine_mode wider_mode;
2349
  rtx entry_last = get_last_insn ();
2350
  rtx last;
2351
 
2352
  mclass = GET_MODE_CLASS (mode);
2353
 
2354
  if (!targ0)
2355
    targ0 = gen_reg_rtx (mode);
2356
  if (!targ1)
2357
    targ1 = gen_reg_rtx (mode);
2358
 
2359
  /* Record where to go back to if we fail.  */
2360
  last = get_last_insn ();
2361
 
2362
  if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2363
    {
2364
      struct expand_operand ops[4];
2365
      enum insn_code icode = optab_handler (binoptab, mode);
2366
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2367
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2368
      rtx xop0 = op0, xop1 = op1;
2369
 
2370
      /* If we are optimizing, force expensive constants into a register.  */
2371
      xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2372
      xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2373
 
2374
      create_fixed_operand (&ops[0], targ0);
2375
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2376
      create_convert_operand_from (&ops[2], op1, mode, unsignedp);
2377
      create_fixed_operand (&ops[3], targ1);
2378
      if (maybe_expand_insn (icode, 4, ops))
2379
        return 1;
2380
      delete_insns_since (last);
2381
    }
2382
 
2383
  /* It can't be done in this mode.  Can we do it in a wider mode?  */
2384
 
2385
  if (CLASS_HAS_WIDER_MODES_P (mclass))
2386
    {
2387
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
2388
           wider_mode != VOIDmode;
2389
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2390
        {
2391
          if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2392
            {
2393
              rtx t0 = gen_reg_rtx (wider_mode);
2394
              rtx t1 = gen_reg_rtx (wider_mode);
2395
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2396
              rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2397
 
2398
              if (expand_twoval_binop (binoptab, cop0, cop1,
2399
                                       t0, t1, unsignedp))
2400
                {
2401
                  convert_move (targ0, t0, unsignedp);
2402
                  convert_move (targ1, t1, unsignedp);
2403
                  return 1;
2404
                }
2405
              else
2406
                delete_insns_since (last);
2407
            }
2408
        }
2409
    }
2410
 
2411
  delete_insns_since (entry_last);
2412
  return 0;
2413
}
2414
 
2415
/* Expand the two-valued library call indicated by BINOPTAB, but
2416
   preserve only one of the values.  If TARG0 is non-NULL, the first
2417
   value is placed into TARG0; otherwise the second value is placed
2418
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
2419
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2420
   This routine assumes that the value returned by the library call is
2421
   as if the return value was of an integral mode twice as wide as the
2422
   mode of OP0.  Returns 1 if the call was successful.  */
2423
 
2424
bool
2425
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2426
                             rtx targ0, rtx targ1, enum rtx_code code)
2427
{
2428
  enum machine_mode mode;
2429
  enum machine_mode libval_mode;
2430
  rtx libval;
2431
  rtx insns;
2432
  rtx libfunc;
2433
 
2434
  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
2435
  gcc_assert (!targ0 != !targ1);
2436
 
2437
  mode = GET_MODE (op0);
2438
  libfunc = optab_libfunc (binoptab, mode);
2439
  if (!libfunc)
2440
    return false;
2441
 
2442
  /* The value returned by the library function will have twice as
2443
     many bits as the nominal MODE.  */
2444
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2445
                                        MODE_INT);
2446
  start_sequence ();
2447
  libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2448
                                    libval_mode, 2,
2449
                                    op0, mode,
2450
                                    op1, mode);
2451
  /* Get the part of VAL containing the value that we want.  */
2452
  libval = simplify_gen_subreg (mode, libval, libval_mode,
2453
                                targ0 ? 0 : GET_MODE_SIZE (mode));
2454
  insns = get_insns ();
2455
  end_sequence ();
2456
  /* Move the result into the desired location.  */
2457
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2458
                      gen_rtx_fmt_ee (code, mode, op0, op1));
2459
 
2460
  return true;
2461
}
2462
 
2463
 
2464
/* Wrapper around expand_unop which takes an rtx code to specify
2465
   the operation to perform, not an optab pointer.  All other
2466
   arguments are the same.  */
2467
rtx
2468
expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2469
                    rtx target, int unsignedp)
2470
{
2471
  optab unop = code_to_optab[(int) code];
2472
  gcc_assert (unop);
2473
 
2474
  return expand_unop (mode, unop, op0, target, unsignedp);
2475
}
2476
 
2477
/* Try calculating
2478
        (clz:narrow x)
2479
   as
2480
        (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2481
 
2482
   A similar operation can be used for clrsb.  UNOPTAB says which operation
2483
   we are trying to expand.  */
2484
static rtx
2485
widen_leading (enum machine_mode mode, rtx op0, rtx target, optab unoptab)
2486
{
2487
  enum mode_class mclass = GET_MODE_CLASS (mode);
2488
  if (CLASS_HAS_WIDER_MODES_P (mclass))
2489
    {
2490
      enum machine_mode wider_mode;
2491
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
2492
           wider_mode != VOIDmode;
2493
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2494
        {
2495
          if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2496
            {
2497
              rtx xop0, temp, last;
2498
 
2499
              last = get_last_insn ();
2500
 
2501
              if (target == 0)
2502
                target = gen_reg_rtx (mode);
2503
              xop0 = widen_operand (op0, wider_mode, mode,
2504
                                    unoptab != clrsb_optab, false);
2505
              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2506
                                  unoptab != clrsb_optab);
2507
              if (temp != 0)
2508
                temp = expand_binop (wider_mode, sub_optab, temp,
2509
                                     GEN_INT (GET_MODE_PRECISION (wider_mode)
2510
                                              - GET_MODE_PRECISION (mode)),
2511
                                     target, true, OPTAB_DIRECT);
2512
              if (temp == 0)
2513
                delete_insns_since (last);
2514
 
2515
              return temp;
2516
            }
2517
        }
2518
    }
2519
  return 0;
2520
}
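/* A self-contained plain C sketch of the same widening trick (hypothetical
   helper, for illustration only): compute an 8-bit clz with a 32-bit
   builtin.  Like the clz patterns, it is only meaningful for nonzero X.  */

static inline int
clz8_via_clz32 (unsigned char x)
{
  /* clz of the zero-extended value overcounts by the width difference.  */
  return __builtin_clz ((unsigned int) x) - (32 - 8);
}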
2521
 
2522
/* Try calculating clz of a double-word quantity as two clz's of word-sized
2523
   quantities, choosing which based on whether the high word is nonzero.  */
2524
static rtx
2525
expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
2526
{
2527
  rtx xop0 = force_reg (mode, op0);
2528
  rtx subhi = gen_highpart (word_mode, xop0);
2529
  rtx sublo = gen_lowpart (word_mode, xop0);
2530
  rtx hi0_label = gen_label_rtx ();
2531
  rtx after_label = gen_label_rtx ();
2532
  rtx seq, temp, result;
2533
 
2534
  /* If we were not given a target, use a word_mode register, not a
2535
     'mode' register.  The result will fit, and nobody is expecting
2536
     anything bigger (the return type of __builtin_clz* is int).  */
2537
  if (!target)
2538
    target = gen_reg_rtx (word_mode);
2539
 
2540
  /* In any case, write to a word_mode scratch in both branches of the
2541
     conditional, so we can ensure there is a single move insn setting
2542
     'target' to tag a REG_EQUAL note on.  */
2543
  result = gen_reg_rtx (word_mode);
2544
 
2545
  start_sequence ();
2546
 
2547
  /* If the high word is not equal to zero,
2548
     then clz of the full value is clz of the high word.  */
2549
  emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2550
                           word_mode, true, hi0_label);
2551
 
2552
  temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2553
  if (!temp)
2554
    goto fail;
2555
 
2556
  if (temp != result)
2557
    convert_move (result, temp, true);
2558
 
2559
  emit_jump_insn (gen_jump (after_label));
2560
  emit_barrier ();
2561
 
2562
  /* Else clz of the full value is clz of the low word plus the number
2563
     of bits in the high word.  */
2564
  emit_label (hi0_label);
2565
 
2566
  temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2567
  if (!temp)
2568
    goto fail;
2569
  temp = expand_binop (word_mode, add_optab, temp,
2570
                       GEN_INT (GET_MODE_BITSIZE (word_mode)),
2571
                       result, true, OPTAB_DIRECT);
2572
  if (!temp)
2573
    goto fail;
2574
  if (temp != result)
2575
    convert_move (result, temp, true);
2576
 
2577
  emit_label (after_label);
2578
  convert_move (target, result, true);
2579
 
2580
  seq = get_insns ();
2581
  end_sequence ();
2582
 
2583
  add_equal_note (seq, target, CLZ, xop0, 0);
2584
  emit_insn (seq);
2585
  return target;
2586
 
2587
 fail:
2588
  end_sequence ();
2589
  return 0;
2590
}
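/* A self-contained plain C sketch of the double-word strategy above
   (hypothetical helper, for illustration only), assuming 32-bit words and
   a clz that is defined for nonzero arguments only.  */

static inline int
clz64_from_words (unsigned int hi, unsigned int lo)
{
  /* A nonzero high word decides the result; otherwise add the width of
     the high word to the clz of the low word.  */
  return hi != 0 ? __builtin_clz (hi) : 32 + __builtin_clz (lo);
}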
2591
 
2592
/* Try calculating
2593
        (bswap:narrow x)
2594
   as
2595
        (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))).  */
2596
static rtx
2597
widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2598
{
2599
  enum mode_class mclass = GET_MODE_CLASS (mode);
2600
  enum machine_mode wider_mode;
2601
  rtx x, last;
2602
 
2603
  if (!CLASS_HAS_WIDER_MODES_P (mclass))
2604
    return NULL_RTX;
2605
 
2606
  for (wider_mode = GET_MODE_WIDER_MODE (mode);
2607
       wider_mode != VOIDmode;
2608
       wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2609
    if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
2610
      goto found;
2611
  return NULL_RTX;
2612
 
2613
 found:
2614
  last = get_last_insn ();
2615
 
2616
  x = widen_operand (op0, wider_mode, mode, true, true);
2617
  x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2618
 
2619
  gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2620
              && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2621
  if (x != 0)
2622
    x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2623
                      GET_MODE_BITSIZE (wider_mode)
2624
                      - GET_MODE_BITSIZE (mode),
2625
                      NULL_RTX, true);
2626
 
2627
  if (x != 0)
2628
    {
2629
      if (target == 0)
2630
        target = gen_reg_rtx (mode);
2631
      emit_move_insn (target, gen_lowpart (mode, x));
2632
    }
2633
  else
2634
    delete_insns_since (last);
2635
 
2636
  return target;
2637
}
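/* A self-contained plain C sketch of the widening bswap above
   (hypothetical helper, for illustration only): byte-swap a 16-bit value
   with a 32-bit bswap and shift the result back down.  */

static inline unsigned short
bswap16_via_bswap32 (unsigned short x)
{
  /* The two interesting bytes land in the top half of the 32-bit swap.  */
  return (unsigned short) (__builtin_bswap32 ((unsigned int) x) >> 16);
}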
2638
 
2639
/* Try calculating bswap as two bswaps of two word-sized operands.  */
2640
 
2641
static rtx
2642
expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2643
{
2644
  rtx t0, t1;
2645
 
2646
  t1 = expand_unop (word_mode, bswap_optab,
2647
                    operand_subword_force (op, 0, mode), NULL_RTX, true);
2648
  t0 = expand_unop (word_mode, bswap_optab,
2649
                    operand_subword_force (op, 1, mode), NULL_RTX, true);
2650
 
2651
  if (target == 0 || !valid_multiword_target_p (target))
2652
    target = gen_reg_rtx (mode);
2653
  if (REG_P (target))
2654
    emit_clobber (target);
2655
  emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2656
  emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2657
 
2658
  return target;
2659
}
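/* A self-contained plain C sketch of the double-word bswap above
   (hypothetical helper, for illustration only): byte-swap each 32-bit half
   and exchange the halves.  */

static inline unsigned long long
bswap64_from_words (unsigned long long x)
{
  unsigned int lo = (unsigned int) x;
  unsigned int hi = (unsigned int) (x >> 32);

  /* The swapped low half becomes the new high half and vice versa.  */
  return ((unsigned long long) __builtin_bswap32 (lo) << 32)
         | __builtin_bswap32 (hi);
}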
2660
 
2661
/* Try calculating (parity x) as (and (popcount x) 1), where
2662
   popcount can also be done in a wider mode.  */
2663
static rtx
2664
expand_parity (enum machine_mode mode, rtx op0, rtx target)
2665
{
2666
  enum mode_class mclass = GET_MODE_CLASS (mode);
2667
  if (CLASS_HAS_WIDER_MODES_P (mclass))
2668
    {
2669
      enum machine_mode wider_mode;
2670
      for (wider_mode = mode; wider_mode != VOIDmode;
2671
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2672
        {
2673
          if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2674
            {
2675
              rtx xop0, temp, last;
2676
 
2677
              last = get_last_insn ();
2678
 
2679
              if (target == 0)
2680
                target = gen_reg_rtx (mode);
2681
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
2682
              temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2683
                                  true);
2684
              if (temp != 0)
2685
                temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2686
                                     target, true, OPTAB_DIRECT);
2687
              if (temp == 0)
2688
                delete_insns_since (last);
2689
 
2690
              return temp;
2691
            }
2692
        }
2693
    }
2694
  return 0;
2695
}
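/* A self-contained plain C sketch of the identity above (hypothetical
   helper, for illustration only): parity is the low bit of the
   population count.  */

static inline int
parity_via_popcount (unsigned int x)
{
  return __builtin_popcount (x) & 1;
}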
2696
 
2697
/* Try calculating ctz(x) as K - clz(x & -x),
2698
   where K is GET_MODE_PRECISION(mode) - 1.
2699
 
2700
   Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2701
   don't have to worry about what the hardware does in that case.  (If
2702
   the clz instruction produces the usual value at 0, which is K, the
2703
   result of this code sequence will be -1; expand_ffs, below, relies
2704
   on this.  It might be nice to have it be K instead, for consistency
2705
   with the (very few) processors that provide a ctz with a defined
2706
   value, but that would take one more instruction, and it would be
2707
   less convenient for expand_ffs anyway.)  */
2708
 
2709
static rtx
2710
expand_ctz (enum machine_mode mode, rtx op0, rtx target)
2711
{
2712
  rtx seq, temp;
2713
 
2714
  if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2715
    return 0;
2716
 
2717
  start_sequence ();
2718
 
2719
  temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2720
  if (temp)
2721
    temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2722
                         true, OPTAB_DIRECT);
2723
  if (temp)
2724
    temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2725
  if (temp)
2726
    temp = expand_binop (mode, sub_optab, GEN_INT (GET_MODE_PRECISION (mode) - 1),
2727
                         temp, target,
2728
                         true, OPTAB_DIRECT);
2729
  if (temp == 0)
2730
    {
2731
      end_sequence ();
2732
      return 0;
2733
    }
2734
 
2735
  seq = get_insns ();
2736
  end_sequence ();
2737
 
2738
  add_equal_note (seq, temp, CTZ, op0, 0);
2739
  emit_insn (seq);
2740
  return temp;
2741
}
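/* A self-contained plain C sketch of the identity above (hypothetical
   helper, for illustration only).  For x = 0x28 (binary 101000), x & -x
   isolates the lowest set bit, 0x8; clz of 0x8 is 28, and 31 - 28 gives
   ctz = 3.  Like ctz itself, the result is undefined for x == 0.  */

static inline int
ctz_via_clz (unsigned int x)
{
  return 31 - __builtin_clz (x & -x);
}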
2742
 
2743
 
2744
/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2745
   else with the sequence used by expand_clz.
2746
 
2747
   The ffs builtin promises to return zero for a zero value and ctz/clz
2748
   may have an undefined value in that case.  If they do not give us a
2749
   convenient value, we have to generate a test and branch.  */
2750
static rtx
2751
expand_ffs (enum machine_mode mode, rtx op0, rtx target)
2752
{
2753
  HOST_WIDE_INT val = 0;
2754
  bool defined_at_zero = false;
2755
  rtx temp, seq;
2756
 
2757
  if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2758
    {
2759
      start_sequence ();
2760
 
2761
      temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2762
      if (!temp)
2763
        goto fail;
2764
 
2765
      defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2766
    }
2767
  else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2768
    {
2769
      start_sequence ();
2770
      temp = expand_ctz (mode, op0, 0);
2771
      if (!temp)
2772
        goto fail;
2773
 
2774
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2775
        {
2776
          defined_at_zero = true;
2777
          val = (GET_MODE_PRECISION (mode) - 1) - val;
2778
        }
2779
    }
2780
  else
2781
    return 0;
2782
 
2783
  if (defined_at_zero && val == -1)
2784
    /* No correction needed at zero.  */;
2785
  else
2786
    {
2787
      /* We don't try to do anything clever with the situation found
2788
         on some processors (e.g. Alpha) where ctz(0:mode) ==
2789
         bitsize(mode).  If someone can think of a way to send N to -1
2790
         and leave alone all values in the range 0..N-1 (where N is a
2791
         power of two), cheaper than this test-and-branch, please add it.
2792
 
2793
         The test-and-branch is done after the operation itself, in case
2794
         the operation sets condition codes that can be recycled for this.
2795
         (This is true on i386, for instance.)  */
2796
 
2797
      rtx nonzero_label = gen_label_rtx ();
2798
      emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2799
                               mode, true, nonzero_label);
2800
 
2801
      convert_move (temp, GEN_INT (-1), false);
2802
      emit_label (nonzero_label);
2803
    }
2804
 
2805
  /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
2806
     to produce a value in the range 0..bitsize.  */
2807
  temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
2808
                       target, false, OPTAB_DIRECT);
2809
  if (!temp)
2810
    goto fail;
2811
 
2812
  seq = get_insns ();
2813
  end_sequence ();
2814
 
2815
  add_equal_note (seq, temp, FFS, op0, 0);
2816
  emit_insn (seq);
2817
  return temp;
2818
 
2819
 fail:
2820
  end_sequence ();
2821
  return 0;
2822
}
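/* A self-contained plain C sketch of the ffs correction above
   (hypothetical helper, for illustration only): ffs must return 0 for 0
   and ctz + 1 otherwise, which is what the test-and-branch plus the final
   addition arrange.  */

static inline int
ffs_via_ctz (unsigned int x)
{
  return x == 0 ? 0 : __builtin_ctz (x) + 1;
}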
2823
 
2824
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
2825
   conditions, VAL may already be a SUBREG against which we cannot generate
2826
   a further SUBREG.  In this case, we expect forcing the value into a
2827
   register will work around the situation.  */
2828
 
2829
static rtx
2830
lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2831
                           enum machine_mode imode)
2832
{
2833
  rtx ret;
2834
  ret = lowpart_subreg (omode, val, imode);
2835
  if (ret == NULL)
2836
    {
2837
      val = force_reg (imode, val);
2838
      ret = lowpart_subreg (omode, val, imode);
2839
      gcc_assert (ret != NULL);
2840
    }
2841
  return ret;
2842
}
2843
 
2844
/* Expand a floating point absolute value or negation operation via a
2845
   logical operation on the sign bit.  */
2846
 
2847
static rtx
2848
expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2849
                   rtx op0, rtx target)
2850
{
2851
  const struct real_format *fmt;
2852
  int bitpos, word, nwords, i;
2853
  enum machine_mode imode;
2854
  double_int mask;
2855
  rtx temp, insns;
2856
 
2857
  /* The format has to have a simple sign bit.  */
2858
  fmt = REAL_MODE_FORMAT (mode);
2859
  if (fmt == NULL)
2860
    return NULL_RTX;
2861
 
2862
  bitpos = fmt->signbit_rw;
2863
  if (bitpos < 0)
2864
    return NULL_RTX;
2865
 
2866
  /* Don't create negative zeros if the format doesn't support them.  */
2867
  if (code == NEG && !fmt->has_signed_zero)
2868
    return NULL_RTX;
2869
 
2870
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2871
    {
2872
      imode = int_mode_for_mode (mode);
2873
      if (imode == BLKmode)
2874
        return NULL_RTX;
2875
      word = 0;
2876
      nwords = 1;
2877
    }
2878
  else
2879
    {
2880
      imode = word_mode;
2881
 
2882
      if (FLOAT_WORDS_BIG_ENDIAN)
2883
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2884
      else
2885
        word = bitpos / BITS_PER_WORD;
2886
      bitpos = bitpos % BITS_PER_WORD;
2887
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2888
    }
2889
 
2890
  mask = double_int_setbit (double_int_zero, bitpos);
2891
  if (code == ABS)
2892
    mask = double_int_not (mask);
2893
 
2894
  if (target == 0
2895
      || target == op0
2896
      || (nwords > 1 && !valid_multiword_target_p (target)))
2897
    target = gen_reg_rtx (mode);
2898
 
2899
  if (nwords > 1)
2900
    {
2901
      start_sequence ();
2902
 
2903
      for (i = 0; i < nwords; ++i)
2904
        {
2905
          rtx targ_piece = operand_subword (target, i, 1, mode);
2906
          rtx op0_piece = operand_subword_force (op0, i, mode);
2907
 
2908
          if (i == word)
2909
            {
2910
              temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2911
                                   op0_piece,
2912
                                   immed_double_int_const (mask, imode),
2913
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
2914
              if (temp != targ_piece)
2915
                emit_move_insn (targ_piece, temp);
2916
            }
2917
          else
2918
            emit_move_insn (targ_piece, op0_piece);
2919
        }
2920
 
2921
      insns = get_insns ();
2922
      end_sequence ();
2923
 
2924
      emit_insn (insns);
2925
    }
2926
  else
2927
    {
2928
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2929
                           gen_lowpart (imode, op0),
2930
                           immed_double_int_const (mask, imode),
2931
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2932
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
2933
 
2934
      set_dst_reg_note (get_last_insn (), REG_EQUAL,
2935
                        gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
2936
                        target);
2937
    }
2938
 
2939
  return target;
2940
}
2941
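/* Illustrative sketch (not part of GCC): the sign-bit trick performed
   by expand_absneg_bit, applied to an IEEE single-precision value
   viewed as a 32-bit integer.  NEG xors the sign bit and ABS ands it
   away, exactly as the xor_optab/and_optab expansion above does.
   Guarded with #if 0; assumes float and uint32_t have the same size,
   and uses memcpy to avoid aliasing problems.  */
#if 0
#include <stdint.h>
#include <string.h>

static float
sketch_absneg_bit (float x, int want_abs)
{
  uint32_t bits;
  uint32_t mask = (uint32_t) 1 << 31;   /* The sign bit.  */

  memcpy (&bits, &x, sizeof bits);
  if (want_abs)
    bits &= ~mask;              /* ABS: clear the sign bit.  */
  else
    bits ^= mask;               /* NEG: flip the sign bit.  */
  memcpy (&x, &bits, sizeof bits);
  return x;
}
#endif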
 
2942
/* As expand_unop, but will fail rather than attempt the operation in a
2943
   different mode or with a libcall.  */
2944
static rtx
2945
expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2946
             int unsignedp)
2947
{
2948
  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2949
    {
2950
      struct expand_operand ops[2];
2951
      enum insn_code icode = optab_handler (unoptab, mode);
2952
      rtx last = get_last_insn ();
2953
      rtx pat;
2954
 
2955
      create_output_operand (&ops[0], target, mode);
2956
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2957
      pat = maybe_gen_insn (icode, 2, ops);
2958
      if (pat)
2959
        {
2960
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2961
              && ! add_equal_note (pat, ops[0].value, unoptab->code,
2962
                                   ops[1].value, NULL_RTX))
2963
            {
2964
              delete_insns_since (last);
2965
              return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2966
            }
2967
 
2968
          emit_insn (pat);
2969
 
2970
          return ops[0].value;
2971
        }
2972
    }
2973
  return 0;
2974
}
2975
 
2976
/* Generate code to perform an operation specified by UNOPTAB
2977
   on operand OP0, with result having machine-mode MODE.
2978
 
2979
   UNSIGNEDP is for the case where we have to widen the operands
2980
   to perform the operation.  It says to use zero-extension.
2981
 
2982
   If TARGET is nonzero, the value
2983
   is generated there, if it is convenient to do so.
2984
   In all cases an rtx is returned for the locus of the value;
2985
   this may or may not be TARGET.  */
2986
 
2987
rtx
2988
expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2989
             int unsignedp)
2990
{
2991
  enum mode_class mclass = GET_MODE_CLASS (mode);
2992
  enum machine_mode wider_mode;
2993
  rtx temp;
2994
  rtx libfunc;
2995
 
2996
  temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
2997
  if (temp)
2998
    return temp;
2999
 
3000
  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */
3001
 
3002
  /* Widening (or narrowing) clz needs special treatment.  */
3003
  if (unoptab == clz_optab)
3004
    {
3005
      temp = widen_leading (mode, op0, target, unoptab);
3006
      if (temp)
3007
        return temp;
3008
 
3009
      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3010
          && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3011
        {
3012
          temp = expand_doubleword_clz (mode, op0, target);
3013
          if (temp)
3014
            return temp;
3015
        }
3016
 
3017
      goto try_libcall;
3018
    }
3019
 
3020
  if (unoptab == clrsb_optab)
3021
    {
3022
      temp = widen_leading (mode, op0, target, unoptab);
3023
      if (temp)
3024
        return temp;
3025
      goto try_libcall;
3026
    }
3027
 
3028
  /* Widening (or narrowing) bswap needs special treatment.  */
3029
  if (unoptab == bswap_optab)
3030
    {
3031
      temp = widen_bswap (mode, op0, target);
3032
      if (temp)
3033
        return temp;
3034
 
3035
      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3036
          && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3037
        {
3038
          temp = expand_doubleword_bswap (mode, op0, target);
3039
          if (temp)
3040
            return temp;
3041
        }
3042
 
3043
      goto try_libcall;
3044
    }
3045
 
3046
  if (CLASS_HAS_WIDER_MODES_P (mclass))
3047
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
3048
         wider_mode != VOIDmode;
3049
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3050
      {
3051
        if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
3052
          {
3053
            rtx xop0 = op0;
3054
            rtx last = get_last_insn ();
3055
 
3056
            /* For certain operations, we need not actually extend
3057
               the narrow operand, as long as we will truncate the
3058
               results to the same narrowness.  */
3059
 
3060
            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3061
                                  (unoptab == neg_optab
3062
                                   || unoptab == one_cmpl_optab)
3063
                                  && mclass == MODE_INT);
3064
 
3065
            temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3066
                                unsignedp);
3067
 
3068
            if (temp)
3069
              {
3070
                if (mclass != MODE_INT
3071
                    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
3072
                  {
3073
                    if (target == 0)
3074
                      target = gen_reg_rtx (mode);
3075
                    convert_move (target, temp, 0);
3076
                    return target;
3077
                  }
3078
                else
3079
                  return gen_lowpart (mode, temp);
3080
              }
3081
            else
3082
              delete_insns_since (last);
3083
          }
3084
      }
3085
 
3086
  /* These can be done a word at a time.  */
3087
  if (unoptab == one_cmpl_optab
3088
      && mclass == MODE_INT
3089
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
3090
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3091
    {
3092
      int i;
3093
      rtx insns;
3094
 
3095
      if (target == 0 || target == op0 || !valid_multiword_target_p (target))
3096
        target = gen_reg_rtx (mode);
3097
 
3098
      start_sequence ();
3099
 
3100
      /* Do the actual arithmetic.  */
3101
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
3102
        {
3103
          rtx target_piece = operand_subword (target, i, 1, mode);
3104
          rtx x = expand_unop (word_mode, unoptab,
3105
                               operand_subword_force (op0, i, mode),
3106
                               target_piece, unsignedp);
3107
 
3108
          if (target_piece != x)
3109
            emit_move_insn (target_piece, x);
3110
        }
3111
 
3112
      insns = get_insns ();
3113
      end_sequence ();
3114
 
3115
      emit_insn (insns);
3116
      return target;
3117
    }
3118
 
3119
  if (unoptab->code == NEG)
3120
    {
3121
      /* Try negating floating point values by flipping the sign bit.  */
3122
      if (SCALAR_FLOAT_MODE_P (mode))
3123
        {
3124
          temp = expand_absneg_bit (NEG, mode, op0, target);
3125
          if (temp)
3126
            return temp;
3127
        }
3128
 
3129
      /* If there is no negation pattern, and we have no negative zero,
3130
         try subtracting from zero.  */
3131
      if (!HONOR_SIGNED_ZEROS (mode))
3132
        {
3133
          temp = expand_binop (mode, (unoptab == negv_optab
3134
                                      ? subv_optab : sub_optab),
3135
                               CONST0_RTX (mode), op0, target,
3136
                               unsignedp, OPTAB_DIRECT);
3137
          if (temp)
3138
            return temp;
3139
        }
3140
    }
3141
 
3142
  /* Try calculating parity (x) as popcount (x) % 2.  */
3143
  if (unoptab == parity_optab)
3144
    {
3145
      temp = expand_parity (mode, op0, target);
3146
      if (temp)
3147
        return temp;
3148
    }
3149
 
3150
  /* Try implementing ffs (x) in terms of clz (x).  */
3151
  if (unoptab == ffs_optab)
3152
    {
3153
      temp = expand_ffs (mode, op0, target);
3154
      if (temp)
3155
        return temp;
3156
    }
3157
 
3158
  /* Try implementing ctz (x) in terms of clz (x).  */
3159
  if (unoptab == ctz_optab)
3160
    {
3161
      temp = expand_ctz (mode, op0, target);
3162
      if (temp)
3163
        return temp;
3164
    }
3165
 
3166
 try_libcall:
3167
  /* Now try a library call in this mode.  */
3168
  libfunc = optab_libfunc (unoptab, mode);
3169
  if (libfunc)
3170
    {
3171
      rtx insns;
3172
      rtx value;
3173
      rtx eq_value;
3174
      enum machine_mode outmode = mode;
3175
 
3176
      /* All of these functions return small values.  Thus we choose to
3177
         have them return something that isn't a double-word.  */
3178
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3179
          || unoptab == clrsb_optab || unoptab == popcount_optab
3180
          || unoptab == parity_optab)
3181
        outmode
3182
          = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
3183
                                          optab_libfunc (unoptab, mode)));
3184
 
3185
      start_sequence ();
3186
 
3187
      /* Pass 1 for NO_QUEUE so we don't lose any increments
3188
         if the libcall is cse'd or moved.  */
3189
      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3190
                                       1, op0, mode);
3191
      insns = get_insns ();
3192
      end_sequence ();
3193
 
3194
      target = gen_reg_rtx (outmode);
3195
      eq_value = gen_rtx_fmt_e (unoptab->code, mode, op0);
3196
      if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
3197
        eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3198
      else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
3199
        eq_value = simplify_gen_unary (ZERO_EXTEND, outmode, eq_value, mode);
3200
      emit_libcall_block (insns, target, value, eq_value);
3201
 
3202
      return target;
3203
    }
3204
 
3205
  /* It can't be done in this mode.  Can we do it in a wider mode?  */
3206
 
3207
  if (CLASS_HAS_WIDER_MODES_P (mclass))
3208
    {
3209
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
3210
           wider_mode != VOIDmode;
3211
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3212
        {
3213
          if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
3214
              || optab_libfunc (unoptab, wider_mode))
3215
            {
3216
              rtx xop0 = op0;
3217
              rtx last = get_last_insn ();
3218
 
3219
              /* For certain operations, we need not actually extend
3220
                 the narrow operand, as long as we will truncate the
3221
                 results to the same narrowness.  */
3222
 
3223
              xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3224
                                    (unoptab == neg_optab
3225
                                     || unoptab == one_cmpl_optab)
3226
                                    && mclass == MODE_INT);
3227
 
3228
              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3229
                                  unsignedp);
3230
 
3231
              /* If we are generating clz using wider mode, adjust the
3232
                 result.  Similarly for clrsb.  */
3233
              if ((unoptab == clz_optab || unoptab == clrsb_optab)
3234
                  && temp != 0)
3235
                temp = expand_binop (wider_mode, sub_optab, temp,
3236
                                     GEN_INT (GET_MODE_PRECISION (wider_mode)
3237
                                              - GET_MODE_PRECISION (mode)),
3238
                                     target, true, OPTAB_DIRECT);
3239
 
3240
              if (temp)
3241
                {
3242
                  if (mclass != MODE_INT)
3243
                    {
3244
                      if (target == 0)
3245
                        target = gen_reg_rtx (mode);
3246
                      convert_move (target, temp, 0);
3247
                      return target;
3248
                    }
3249
                  else
3250
                    return gen_lowpart (mode, temp);
3251
                }
3252
              else
3253
                delete_insns_since (last);
3254
            }
3255
        }
3256
    }
3257
 
3258
  /* One final attempt at implementing negation via subtraction,
3259
     this time allowing widening of the operand.  */
3260
  if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
3261
    {
3262
      rtx temp;
3263
      temp = expand_binop (mode,
3264
                           unoptab == negv_optab ? subv_optab : sub_optab,
3265
                           CONST0_RTX (mode), op0,
3266
                           target, unsignedp, OPTAB_LIB_WIDEN);
3267
      if (temp)
3268
        return temp;
3269
    }
3270
 
3271
  return 0;
3272
}
3273
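/* Illustrative sketch (not part of GCC): the adjustment applied above
   when clz (or clrsb) is computed in a wider mode.  Zero-extending an
   N-bit value to a W-bit mode contributes W - N extra leading zeros,
   so the narrow-mode count is the wide-mode count minus (W - N).
   Guarded with #if 0; assumes a nonzero operand and a 32-bit clz
   builtin used to model an 8-bit clz.  */
#if 0
static int
sketch_narrow_clz_via_wide (unsigned char x)
{
  const int wide = 32, narrow = 8;

  /* clz of the zero-extended value, corrected for the widening.  */
  return __builtin_clz ((unsigned int) x) - (wide - narrow);
}
#endif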
 
3274
/* Emit code to compute the absolute value of OP0, with result to
3275
   TARGET if convenient.  (TARGET may be 0.)  The return value says
3276
   where the result actually is to be found.
3277
 
3278
   MODE is the mode of the operand; the mode of the result is
3279
   different but can be deduced from MODE.  */
3282
 
3283
rtx
3284
expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
3285
                   int result_unsignedp)
3286
{
3287
  rtx temp;
3288
 
3289
  if (! flag_trapv)
3290
    result_unsignedp = 1;
3291
 
3292
  /* First try to do it with a special abs instruction.  */
3293
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3294
                      op0, target, 0);
3295
  if (temp != 0)
3296
    return temp;
3297
 
3298
  /* For floating point modes, try clearing the sign bit.  */
3299
  if (SCALAR_FLOAT_MODE_P (mode))
3300
    {
3301
      temp = expand_absneg_bit (ABS, mode, op0, target);
3302
      if (temp)
3303
        return temp;
3304
    }
3305
 
3306
  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
3307
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3308
      && !HONOR_SIGNED_ZEROS (mode))
3309
    {
3310
      rtx last = get_last_insn ();
3311
 
3312
      temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3313
      if (temp != 0)
3314
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3315
                             OPTAB_WIDEN);
3316
 
3317
      if (temp != 0)
3318
        return temp;
3319
 
3320
      delete_insns_since (last);
3321
    }
3322
 
3323
  /* If this machine has expensive jumps, we can do integer absolute
3324
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3325
     where W is the width of MODE.  */
3326
 
3327
  if (GET_MODE_CLASS (mode) == MODE_INT
3328
      && BRANCH_COST (optimize_insn_for_speed_p (),
3329
                      false) >= 2)
3330
    {
3331
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3332
                                   GET_MODE_PRECISION (mode) - 1,
3333
                                   NULL_RTX, 0);
3334
 
3335
      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3336
                           OPTAB_LIB_WIDEN);
3337
      if (temp != 0)
3338
        temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3339
                             temp, extended, target, 0, OPTAB_LIB_WIDEN);
3340
 
3341
      if (temp != 0)
3342
        return temp;
3343
    }
3344
 
3345
  return NULL_RTX;
3346
}
3347
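/* Illustrative sketch (not part of GCC): the branch-free absolute
   value used above for MODE_INT when jumps are expensive.  With
   E = X >> (W-1) (an arithmetic shift, so E is 0 for non-negative X
   and -1 otherwise), (X ^ E) - E is X when E == 0 and ~X + 1 == -X
   when E == -1.  Guarded with #if 0; assumes 32-bit int with an
   arithmetic right shift, and like the non-trapping variant above it
   overflows for INT_MIN.  */
#if 0
static int
sketch_branch_free_abs (int x)
{
  int e = x >> 31;              /* 0 if x >= 0, -1 if x < 0.  */
  return (x ^ e) - e;           /* x, or ~x + 1 == -x.  */
}
#endif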
 
3348
rtx
3349
expand_abs (enum machine_mode mode, rtx op0, rtx target,
3350
            int result_unsignedp, int safe)
3351
{
3352
  rtx temp, op1;
3353
 
3354
  if (! flag_trapv)
3355
    result_unsignedp = 1;
3356
 
3357
  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3358
  if (temp != 0)
3359
    return temp;
3360
 
3361
  /* If that does not win, use conditional jump and negate.  */
3362
 
3363
  /* It is safe to use the target if it is the same
3364
     as the source, provided it is also a pseudo register.  */
3365
  if (op0 == target && REG_P (op0)
3366
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3367
    safe = 1;
3368
 
3369
  op1 = gen_label_rtx ();
3370
  if (target == 0 || ! safe
3371
      || GET_MODE (target) != mode
3372
      || (MEM_P (target) && MEM_VOLATILE_P (target))
3373
      || (REG_P (target)
3374
          && REGNO (target) < FIRST_PSEUDO_REGISTER))
3375
    target = gen_reg_rtx (mode);
3376
 
3377
  emit_move_insn (target, op0);
3378
  NO_DEFER_POP;
3379
 
3380
  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3381
                           NULL_RTX, NULL_RTX, op1, -1);
3382
 
3383
  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3384
                     target, target, 0);
3385
  if (op0 != target)
3386
    emit_move_insn (target, op0);
3387
  emit_label (op1);
3388
  OK_DEFER_POP;
3389
  return target;
3390
}
3391
 
3392
/* Emit code to compute the one's complement absolute value of OP0
3393
   (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3394
   (TARGET may be NULL_RTX.)  The return value says where the result
3395
   actually is to be found.
3396
 
3397
   MODE is the mode of the operand; the mode of the result is
3398
   different but can be deduced from MODE.  */
3399
 
3400
rtx
3401
expand_one_cmpl_abs_nojump (enum machine_mode mode, rtx op0, rtx target)
3402
{
3403
  rtx temp;
3404
 
3405
  /* Not applicable for floating point modes.  */
3406
  if (FLOAT_MODE_P (mode))
3407
    return NULL_RTX;
3408
 
3409
  /* If we have a MAX insn, we can do this as MAX (x, ~x).  */
3410
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3411
    {
3412
      rtx last = get_last_insn ();
3413
 
3414
      temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3415
      if (temp != 0)
3416
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3417
                             OPTAB_WIDEN);
3418
 
3419
      if (temp != 0)
3420
        return temp;
3421
 
3422
      delete_insns_since (last);
3423
    }
3424
 
3425
  /* If this machine has expensive jumps, we can do one's complement
3426
     absolute value of X as (((signed) x >> (W-1)) ^ x).  */
3427
 
3428
  if (GET_MODE_CLASS (mode) == MODE_INT
3429
      && BRANCH_COST (optimize_insn_for_speed_p (),
3430
                     false) >= 2)
3431
    {
3432
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3433
                                   GET_MODE_PRECISION (mode) - 1,
3434
                                   NULL_RTX, 0);
3435
 
3436
      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3437
                           OPTAB_LIB_WIDEN);
3438
 
3439
      if (temp != 0)
3440
        return temp;
3441
    }
3442
 
3443
  return NULL_RTX;
3444
}
3445
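/* Illustrative sketch (not part of GCC): the branch-free form of the
   one's complement absolute value used above, (X >> (W-1)) ^ X, which
   yields X for non-negative X and ~X otherwise.  Guarded with #if 0;
   assumes 32-bit int with an arithmetic right shift.  */
#if 0
static int
sketch_one_cmpl_abs (int x)
{
  return (x >> 31) ^ x;         /* x if x >= 0, ~x if x < 0.  */
}
#endif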
 
3446
/* A subroutine of expand_copysign, perform the copysign operation using the
3447
   abs and neg primitives advertised to exist on the target.  The assumption
3448
   is that we have a split register file, and leaving op0 in fp registers,
3449
   and not playing with subregs so much, will help the register allocator.  */
3450
 
3451
static rtx
3452
expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3453
                        int bitpos, bool op0_is_abs)
3454
{
3455
  enum machine_mode imode;
3456
  enum insn_code icode;
3457
  rtx sign, label;
3458
 
3459
  if (target == op1)
3460
    target = NULL_RTX;
3461
 
3462
  /* Check if the back end provides an insn that handles signbit for the
3463
     argument's mode. */
3464
  icode = optab_handler (signbit_optab, mode);
3465
  if (icode != CODE_FOR_nothing)
3466
    {
3467
      imode = insn_data[(int) icode].operand[0].mode;
3468
      sign = gen_reg_rtx (imode);
3469
      emit_unop_insn (icode, sign, op1, UNKNOWN);
3470
    }
3471
  else
3472
    {
3473
      double_int mask;
3474
 
3475
      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3476
        {
3477
          imode = int_mode_for_mode (mode);
3478
          if (imode == BLKmode)
3479
            return NULL_RTX;
3480
          op1 = gen_lowpart (imode, op1);
3481
        }
3482
      else
3483
        {
3484
          int word;
3485
 
3486
          imode = word_mode;
3487
          if (FLOAT_WORDS_BIG_ENDIAN)
3488
            word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3489
          else
3490
            word = bitpos / BITS_PER_WORD;
3491
          bitpos = bitpos % BITS_PER_WORD;
3492
          op1 = operand_subword_force (op1, word, mode);
3493
        }
3494
 
3495
      mask = double_int_setbit (double_int_zero, bitpos);
3496
 
3497
      sign = expand_binop (imode, and_optab, op1,
3498
                           immed_double_int_const (mask, imode),
3499
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
3500
    }
3501
 
3502
  if (!op0_is_abs)
3503
    {
3504
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
3505
      if (op0 == NULL)
3506
        return NULL_RTX;
3507
      target = op0;
3508
    }
3509
  else
3510
    {
3511
      if (target == NULL_RTX)
3512
        target = copy_to_reg (op0);
3513
      else
3514
        emit_move_insn (target, op0);
3515
    }
3516
 
3517
  label = gen_label_rtx ();
3518
  emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3519
 
3520
  if (GET_CODE (op0) == CONST_DOUBLE)
3521
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
3522
  else
3523
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
3524
  if (op0 != target)
3525
    emit_move_insn (target, op0);
3526
 
3527
  emit_label (label);
3528
 
3529
  return target;
3530
}
3531
 
3532
 
3533
/* A subroutine of expand_copysign, perform the entire copysign operation
3534
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
3535
   is true if op0 is known to have its sign bit clear.  */
3536
 
3537
static rtx
3538
expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3539
                     int bitpos, bool op0_is_abs)
3540
{
3541
  enum machine_mode imode;
3542
  double_int mask;
3543
  int word, nwords, i;
3544
  rtx temp, insns;
3545
 
3546
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3547
    {
3548
      imode = int_mode_for_mode (mode);
3549
      if (imode == BLKmode)
3550
        return NULL_RTX;
3551
      word = 0;
3552
      nwords = 1;
3553
    }
3554
  else
3555
    {
3556
      imode = word_mode;
3557
 
3558
      if (FLOAT_WORDS_BIG_ENDIAN)
3559
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3560
      else
3561
        word = bitpos / BITS_PER_WORD;
3562
      bitpos = bitpos % BITS_PER_WORD;
3563
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3564
    }
3565
 
3566
  mask = double_int_setbit (double_int_zero, bitpos);
3567
 
3568
  if (target == 0
3569
      || target == op0
3570
      || target == op1
3571
      || (nwords > 1 && !valid_multiword_target_p (target)))
3572
    target = gen_reg_rtx (mode);
3573
 
3574
  if (nwords > 1)
3575
    {
3576
      start_sequence ();
3577
 
3578
      for (i = 0; i < nwords; ++i)
3579
        {
3580
          rtx targ_piece = operand_subword (target, i, 1, mode);
3581
          rtx op0_piece = operand_subword_force (op0, i, mode);
3582
 
3583
          if (i == word)
3584
            {
3585
              if (!op0_is_abs)
3586
                op0_piece
3587
                  = expand_binop (imode, and_optab, op0_piece,
3588
                                  immed_double_int_const (double_int_not (mask),
3589
                                                          imode),
3590
                                  NULL_RTX, 1, OPTAB_LIB_WIDEN);
3591
 
3592
              op1 = expand_binop (imode, and_optab,
3593
                                  operand_subword_force (op1, i, mode),
3594
                                  immed_double_int_const (mask, imode),
3595
                                  NULL_RTX, 1, OPTAB_LIB_WIDEN);
3596
 
3597
              temp = expand_binop (imode, ior_optab, op0_piece, op1,
3598
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
3599
              if (temp != targ_piece)
3600
                emit_move_insn (targ_piece, temp);
3601
            }
3602
          else
3603
            emit_move_insn (targ_piece, op0_piece);
3604
        }
3605
 
3606
      insns = get_insns ();
3607
      end_sequence ();
3608
 
3609
      emit_insn (insns);
3610
    }
3611
  else
3612
    {
3613
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3614
                          immed_double_int_const (mask, imode),
3615
                          NULL_RTX, 1, OPTAB_LIB_WIDEN);
3616
 
3617
      op0 = gen_lowpart (imode, op0);
3618
      if (!op0_is_abs)
3619
        op0 = expand_binop (imode, and_optab, op0,
3620
                            immed_double_int_const (double_int_not (mask),
3621
                                                    imode),
3622
                            NULL_RTX, 1, OPTAB_LIB_WIDEN);
3623
 
3624
      temp = expand_binop (imode, ior_optab, op0, op1,
3625
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3626
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
3627
    }
3628
 
3629
  return target;
3630
}
3631
 
3632
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
3633
   scalar floating point mode.  Return NULL if we do not know how to
3634
   expand the operation inline.  */
3635
 
3636
rtx
3637
expand_copysign (rtx op0, rtx op1, rtx target)
3638
{
3639
  enum machine_mode mode = GET_MODE (op0);
3640
  const struct real_format *fmt;
3641
  bool op0_is_abs;
3642
  rtx temp;
3643
 
3644
  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3645
  gcc_assert (GET_MODE (op1) == mode);
3646
 
3647
  /* First try to do it with a special instruction.  */
3648
  temp = expand_binop (mode, copysign_optab, op0, op1,
3649
                       target, 0, OPTAB_DIRECT);
3650
  if (temp)
3651
    return temp;
3652
 
3653
  fmt = REAL_MODE_FORMAT (mode);
3654
  if (fmt == NULL || !fmt->has_signed_zero)
3655
    return NULL_RTX;
3656
 
3657
  op0_is_abs = false;
3658
  if (GET_CODE (op0) == CONST_DOUBLE)
3659
    {
3660
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3661
        op0 = simplify_unary_operation (ABS, mode, op0, mode);
3662
      op0_is_abs = true;
3663
    }
3664
 
3665
  if (fmt->signbit_ro >= 0
3666
      && (GET_CODE (op0) == CONST_DOUBLE
3667
          || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
3668
              && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
3669
    {
3670
      temp = expand_copysign_absneg (mode, op0, op1, target,
3671
                                     fmt->signbit_ro, op0_is_abs);
3672
      if (temp)
3673
        return temp;
3674
    }
3675
 
3676
  if (fmt->signbit_rw < 0)
3677
    return NULL_RTX;
3678
  return expand_copysign_bit (mode, op0, op1, target,
3679
                              fmt->signbit_rw, op0_is_abs);
3680
}
3681
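/* Illustrative sketch (not part of GCC): the integer-bitmask copysign
   performed by expand_copysign_bit for a single-word mode, shown for
   IEEE single precision.  The result combines the magnitude bits of X
   with the sign bit of Y: (x & ~signmask) | (y & signmask).  Guarded
   with #if 0; assumes float and uint32_t have the same size, and uses
   memcpy to avoid aliasing problems.  */
#if 0
#include <stdint.h>
#include <string.h>

static float
sketch_copysign_bit (float x, float y)
{
  uint32_t xb, yb;
  uint32_t signmask = (uint32_t) 1 << 31;

  memcpy (&xb, &x, sizeof xb);
  memcpy (&yb, &y, sizeof yb);
  xb = (xb & ~signmask) | (yb & signmask);  /* Magnitude of X, sign of Y.  */
  memcpy (&x, &xb, sizeof xb);
  return x;
}
#endif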
 
3682
/* Generate an instruction whose insn-code is INSN_CODE,
3683
   with two operands: an output TARGET and an input OP0.
3684
   TARGET *must* be nonzero, and the output is always stored there.
3685
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
3686
   the value that is stored into TARGET.
3687
 
3688
   Return false if expansion failed.  */
3689
 
3690
bool
3691
maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
3692
                      enum rtx_code code)
3693
{
3694
  struct expand_operand ops[2];
3695
  rtx pat;
3696
 
3697
  create_output_operand (&ops[0], target, GET_MODE (target));
3698
  create_input_operand (&ops[1], op0, GET_MODE (op0));
3699
  pat = maybe_gen_insn (icode, 2, ops);
3700
  if (!pat)
3701
    return false;
3702
 
3703
  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3704
    add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);
3705
 
3706
  emit_insn (pat);
3707
 
3708
  if (ops[0].value != target)
3709
    emit_move_insn (target, ops[0].value);
3710
  return true;
3711
}
3712
/* Generate an instruction whose insn-code is INSN_CODE,
3713
   with two operands: an output TARGET and an input OP0.
3714
   TARGET *must* be nonzero, and the output is always stored there.
3715
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
3716
   the value that is stored into TARGET.  */
3717
 
3718
void
3719
emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
3720
{
3721
  bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3722
  gcc_assert (ok);
3723
}
3724
 
3725
struct no_conflict_data
3726
{
3727
  rtx target, first, insn;
3728
  bool must_stay;
3729
};
3730
 
3731
/* Called via note_stores by emit_libcall_block.  Set P->must_stay if
3732
   the currently examined clobber / store has to stay in the list of
3733
   insns that constitute the actual libcall block.  */
3734
static void
3735
no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3736
{
3737
  struct no_conflict_data *p= (struct no_conflict_data *) p0;
3738
 
3739
  /* If this insn directly contributes to setting the target, it must stay.  */
3740
  if (reg_overlap_mentioned_p (p->target, dest))
3741
    p->must_stay = true;
3742
  /* If we haven't committed to keeping any other insns in the list yet,
3743
     there is nothing more to check.  */
3744
  else if (p->insn == p->first)
3745
    return;
3746
  /* If this insn sets / clobbers a register that feeds one of the insns
3747
     already in the list, this insn has to stay too.  */
3748
  else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3749
           || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3750
           || reg_used_between_p (dest, p->first, p->insn)
3751
           /* Likewise if this insn depends on a register set by a previous
3752
              insn in the list, or if it sets a result (presumably a hard
3753
              register) that is set or clobbered by a previous insn.
3754
              N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3755
              SET_DEST perform the former check on the address, and the latter
3756
              check on the MEM.  */
3757
           || (GET_CODE (set) == SET
3758
               && (modified_in_p (SET_SRC (set), p->first)
3759
                   || modified_in_p (SET_DEST (set), p->first)
3760
                   || modified_between_p (SET_SRC (set), p->first, p->insn)
3761
                   || modified_between_p (SET_DEST (set), p->first, p->insn))))
3762
    p->must_stay = true;
3763
}
3764
 
3765
 
3766
/* Emit code to make a call to a constant function or a library call.
3767
 
3768
   INSNS is a list containing all insns emitted in the call.
3769
   These insns leave the result in RESULT.  Our job is to copy RESULT
3770
   to TARGET, which is logically equivalent to EQUIV.
3771
 
3772
   We first emit any insns that set a pseudo on the assumption that these are
3773
   loading constants into registers; doing so allows them to be safely cse'ed
3774
   between blocks.  Then we emit all the other insns in the block, followed by
3775
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
3776
   note with an operand of EQUIV.  */
3777
 
3778
void
3779
emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3780
{
3781
  rtx final_dest = target;
3782
  rtx next, last, insn;
3783
 
3784
  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3785
     into a MEM later.  Protect the libcall block from this change.  */
3786
  if (! REG_P (target) || REG_USERVAR_P (target))
3787
    target = gen_reg_rtx (GET_MODE (target));
3788
 
3789
  /* If we're using non-call exceptions, a libcall corresponding to an
3790
     operation that may trap may also trap.  */
3791
  /* ??? See the comment in front of make_reg_eh_region_note.  */
3792
  if (cfun->can_throw_non_call_exceptions && may_trap_p (equiv))
3793
    {
3794
      for (insn = insns; insn; insn = NEXT_INSN (insn))
3795
        if (CALL_P (insn))
3796
          {
3797
            rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3798
            if (note)
3799
              {
3800
                int lp_nr = INTVAL (XEXP (note, 0));
3801
                if (lp_nr == 0 || lp_nr == INT_MIN)
3802
                  remove_note (insn, note);
3803
              }
3804
          }
3805
    }
3806
  else
3807
    {
3808
      /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3809
         reg note to indicate that this call cannot throw or execute a nonlocal
3810
         goto (unless there is already a REG_EH_REGION note, in which case
3811
         we update it).  */
3812
      for (insn = insns; insn; insn = NEXT_INSN (insn))
3813
        if (CALL_P (insn))
3814
          make_reg_eh_region_note_nothrow_nononlocal (insn);
3815
    }
3816
 
3817
  /* First emit all insns that set pseudos.  Remove them from the list as
3818
     we go.  Avoid insns that set pseudos which were referenced in previous
3819
     insns.  These can be generated by move_by_pieces, for example,
3820
     to update an address.  Similarly, avoid insns that reference things
3821
     set in previous insns.  */
3822
 
3823
  for (insn = insns; insn; insn = next)
3824
    {
3825
      rtx set = single_set (insn);
3826
 
3827
      next = NEXT_INSN (insn);
3828
 
3829
      if (set != 0 && REG_P (SET_DEST (set))
3830
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3831
        {
3832
          struct no_conflict_data data;
3833
 
3834
          data.target = const0_rtx;
3835
          data.first = insns;
3836
          data.insn = insn;
3837
          data.must_stay = 0;
3838
          note_stores (PATTERN (insn), no_conflict_move_test, &data);
3839
          if (! data.must_stay)
3840
            {
3841
              if (PREV_INSN (insn))
3842
                NEXT_INSN (PREV_INSN (insn)) = next;
3843
              else
3844
                insns = next;
3845
 
3846
              if (next)
3847
                PREV_INSN (next) = PREV_INSN (insn);
3848
 
3849
              add_insn (insn);
3850
            }
3851
        }
3852
 
3853
      /* Some ports use a loop to copy large arguments onto the stack.
3854
         Don't move anything outside such a loop.  */
3855
      if (LABEL_P (insn))
3856
        break;
3857
    }
3858
 
3859
  /* Write the remaining insns followed by the final copy.  */
3860
  for (insn = insns; insn; insn = next)
3861
    {
3862
      next = NEXT_INSN (insn);
3863
 
3864
      add_insn (insn);
3865
    }
3866
 
3867
  last = emit_move_insn (target, result);
3868
  set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
3869
 
3870
  if (final_dest != target)
3871
    emit_move_insn (final_dest, target);
3872
}
3873
 
3874
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3875
   PURPOSE describes how this comparison will be used.  CODE is the rtx
3876
   comparison code we will be using.
3877
 
3878
   ??? Actually, CODE is slightly weaker than that.  A target is still
3879
   required to implement all of the normal bcc operations, but not
3880
   required to implement all (or any) of the unordered bcc operations.  */
3881
 
3882
int
3883
can_compare_p (enum rtx_code code, enum machine_mode mode,
3884
               enum can_compare_purpose purpose)
3885
{
3886
  rtx test;
3887
  test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
3888
  do
3889
    {
3890
      enum insn_code icode;
3891
 
3892
      if (purpose == ccp_jump
3893
          && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
3894
          && insn_operand_matches (icode, 0, test))
3895
        return 1;
3896
      if (purpose == ccp_store_flag
3897
          && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
3898
          && insn_operand_matches (icode, 1, test))
3899
        return 1;
3900
      if (purpose == ccp_cmov
3901
          && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
3902
        return 1;
3903
 
3904
      mode = GET_MODE_WIDER_MODE (mode);
3905
      PUT_MODE (test, mode);
3906
    }
3907
  while (mode != VOIDmode);
3908
 
3909
  return 0;
3910
}
3911
 
3912
/* This function is called when we are going to emit a compare instruction that
3913
   compares the values found in X and Y, using the rtl operator COMPARISON.
3914
 
3915
   *PMODE is the mode of the inputs (in case they are const_int).
3916
   UNSIGNEDP nonzero says that the operands are unsigned;
3917
   this matters if they need to be widened (as given by METHODS).
3918
 
3919
   If they have mode BLKmode, then SIZE specifies the size of both operands.
3920
 
3921
   This function performs all the setup necessary so that the caller only has
3922
   to emit a single comparison insn.  This setup can involve doing a BLKmode
3923
   comparison or emitting a library call to perform the comparison if no insn
3924
   is available to handle it.
3925
   The values which are passed in through pointers can be modified; the caller
3926
   should perform the comparison on the modified values.  Constant
3927
   comparisons must have already been folded.  */
3928
 
3929
static void
3930
prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3931
                  int unsignedp, enum optab_methods methods,
3932
                  rtx *ptest, enum machine_mode *pmode)
3933
{
3934
  enum machine_mode mode = *pmode;
3935
  rtx libfunc, test;
3936
  enum machine_mode cmp_mode;
3937
  enum mode_class mclass;
3938
 
3939
  /* The other methods are not needed.  */
3940
  gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
3941
              || methods == OPTAB_LIB_WIDEN);
3942
 
3943
  /* If we are optimizing, force expensive constants into a register.  */
3944
  if (CONSTANT_P (x) && optimize
3945
      && (rtx_cost (x, COMPARE, 0, optimize_insn_for_speed_p ())
3946
          > COSTS_N_INSNS (1)))
3947
    x = force_reg (mode, x);
3948
 
3949
  if (CONSTANT_P (y) && optimize
3950
      && (rtx_cost (y, COMPARE, 1, optimize_insn_for_speed_p ())
3951
          > COSTS_N_INSNS (1)))
3952
    y = force_reg (mode, y);
3953
 
3954
#ifdef HAVE_cc0
3955
  /* Make sure we have a canonical comparison.  The RTL
3956
     documentation states that canonical comparisons are required only
3957
     for targets which have cc0.  */
3958
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3959
#endif
3960
 
3961
  /* Don't let both operands fail to indicate the mode.  */
3962
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3963
    x = force_reg (mode, x);
3964
  if (mode == VOIDmode)
3965
    mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
3966
 
3967
  /* Handle all BLKmode compares.  */
3968
 
3969
  if (mode == BLKmode)
3970
    {
3971
      enum machine_mode result_mode;
3972
      enum insn_code cmp_code;
3973
      tree length_type;
3974
      rtx libfunc;
3975
      rtx result;
3976
      rtx opalign
3977
        = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3978
 
3979
      gcc_assert (size);
3980
 
3981
      /* Try to use a memory block compare insn - either cmpstr
3982
         or cmpmem will do.  */
3983
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3984
           cmp_mode != VOIDmode;
3985
           cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3986
        {
3987
          cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
3988
          if (cmp_code == CODE_FOR_nothing)
3989
            cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
3990
          if (cmp_code == CODE_FOR_nothing)
3991
            cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
3992
          if (cmp_code == CODE_FOR_nothing)
3993
            continue;
3994
 
3995
          /* Must make sure the size fits the insn's mode.  */
3996
          if ((CONST_INT_P (size)
3997
               && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3998
              || (GET_MODE_BITSIZE (GET_MODE (size))
3999
                  > GET_MODE_BITSIZE (cmp_mode)))
4000
            continue;
4001
 
4002
          result_mode = insn_data[cmp_code].operand[0].mode;
4003
          result = gen_reg_rtx (result_mode);
4004
          size = convert_to_mode (cmp_mode, size, 1);
4005
          emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
4006
 
4007
          *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
4008
          *pmode = result_mode;
4009
          return;
4010
        }
4011
 
4012
      if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
4013
        goto fail;
4014
 
4015
      /* Otherwise call a library function, memcmp.  */
4016
      libfunc = memcmp_libfunc;
4017
      length_type = sizetype;
4018
      result_mode = TYPE_MODE (integer_type_node);
4019
      cmp_mode = TYPE_MODE (length_type);
4020
      size = convert_to_mode (TYPE_MODE (length_type), size,
4021
                              TYPE_UNSIGNED (length_type));
4022
 
4023
      result = emit_library_call_value (libfunc, 0, LCT_PURE,
4024
                                        result_mode, 3,
4025
                                        XEXP (x, 0), Pmode,
4026
                                        XEXP (y, 0), Pmode,
4027
                                        size, cmp_mode);
4028
 
4029
      *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
4030
      *pmode = result_mode;
4031
      return;
4032
    }
4033
 
4034
  /* Don't allow operands to the compare to trap, as that can put the
4035
     compare and branch in different basic blocks.  */
4036
  if (cfun->can_throw_non_call_exceptions)
4037
    {
4038
      if (may_trap_p (x))
4039
        x = force_reg (mode, x);
4040
      if (may_trap_p (y))
4041
        y = force_reg (mode, y);
4042
    }
4043
 
4044
  if (GET_MODE_CLASS (mode) == MODE_CC)
4045
    {
4046
      gcc_assert (can_compare_p (comparison, CCmode, ccp_jump));
4047
      *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
4048
      return;
4049
    }
4050
 
4051
  mclass = GET_MODE_CLASS (mode);
4052
  test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
4053
  cmp_mode = mode;
4054
  do
4055
   {
4056
      enum insn_code icode;
4057
      icode = optab_handler (cbranch_optab, cmp_mode);
4058
      if (icode != CODE_FOR_nothing
4059
          && insn_operand_matches (icode, 0, test))
4060
        {
4061
          rtx last = get_last_insn ();
4062
          rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
4063
          rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
4064
          if (op0 && op1
4065
              && insn_operand_matches (icode, 1, op0)
4066
              && insn_operand_matches (icode, 2, op1))
4067
            {
4068
              XEXP (test, 0) = op0;
4069
              XEXP (test, 1) = op1;
4070
              *ptest = test;
4071
              *pmode = cmp_mode;
4072
              return;
4073
            }
4074
          delete_insns_since (last);
4075
        }
4076
 
4077
      if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
4078
        break;
4079
      cmp_mode = GET_MODE_WIDER_MODE (cmp_mode);
4080
    }
4081
  while (cmp_mode != VOIDmode);
4082
 
4083
  if (methods != OPTAB_LIB_WIDEN)
4084
    goto fail;
4085
 
4086
  if (!SCALAR_FLOAT_MODE_P (mode))
4087
    {
4088
      rtx result;
4089
 
4090
      /* Handle a libcall just for the mode we are using.  */
4091
      libfunc = optab_libfunc (cmp_optab, mode);
4092
      gcc_assert (libfunc);
4093
 
4094
      /* If we want unsigned, and this mode has a distinct unsigned
4095
         comparison routine, use that.  */
4096
      if (unsignedp)
4097
        {
4098
          rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
4099
          if (ulibfunc)
4100
            libfunc = ulibfunc;
4101
        }
4102
 
4103
      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4104
                                        targetm.libgcc_cmp_return_mode (),
4105
                                        2, x, mode, y, mode);
4106
 
4107
      /* There are two kinds of comparison routines. Biased routines
4108
         return 0/1/2, and unbiased routines return -1/0/1. Other parts
4109
         of gcc expect that the comparison operation is equivalent
4110
         to the modified comparison. For signed comparisons compare the
4111
         result against 1 in the biased case, and zero in the unbiased
4112
         case. For unsigned comparisons always compare against 1 after
4113
         biasing the unbiased result by adding 1. This gives us a way to
4114
         represent LTU.
4115
         The comparisons in the fixed-point helper library are always
4116
         biased.  */
4117
      x = result;
4118
      y = const1_rtx;
4119
 
4120
      if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
4121
        {
4122
          if (unsignedp)
4123
            x = plus_constant (result, 1);
4124
          else
4125
            y = const0_rtx;
4126
        }
4127
 
4128
      *pmode = word_mode;
4129
      prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
4130
                        ptest, pmode);
4131
    }
4132
  else
4133
    prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
4134
 
4135
  return;
4136
 
4137
 fail:
4138
  *ptest = NULL_RTX;
4139
}
4140
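/* Illustrative sketch (not part of GCC): the biased/unbiased libcall
   convention described in prepare_cmp_insn above, shown for a signed
   "less than" test.  A biased helper returns 0/1/2 for LT/EQ/GT and is
   compared against 1; an unbiased helper returns -1/0/1 and is
   compared against 0.  (For unsigned comparisons the code above first
   biases the unbiased result by adding 1 so that LTU can be expressed
   as an unsigned compare against 1.)  Guarded with #if 0.  */
#if 0
static int
sketch_lt_from_libcall (int raw, int lib_is_biased)
{
  if (lib_is_biased)
    return raw < 1;             /* 0/1/2 encodes LT/EQ/GT.  */
  else
    return raw < 0;             /* -1/0/1 encodes LT/EQ/GT.  */
}
#endif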
 
4141
/* Before emitting an insn with code ICODE, make sure that X, which is going
4142
   to be used for operand OPNUM of the insn, is converted from mode MODE to
4143
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4144
   that it is accepted by the operand predicate.  Return the new value.  */
4145
 
4146
rtx
4147
prepare_operand (enum insn_code icode, rtx x, int opnum, enum machine_mode mode,
4148
                 enum machine_mode wider_mode, int unsignedp)
4149
{
4150
  if (mode != wider_mode)
4151
    x = convert_modes (wider_mode, mode, x, unsignedp);
4152
 
4153
  if (!insn_operand_matches (icode, opnum, x))
4154
    {
4155
      if (reload_completed)
4156
        return NULL_RTX;
4157
      x = copy_to_mode_reg (insn_data[(int) icode].operand[opnum].mode, x);
4158
    }
4159
 
4160
  return x;
4161
}
4162
 
4163
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4164
   we can do the branch.  */
4165
 
4166
static void
4167
emit_cmp_and_jump_insn_1 (rtx test, enum machine_mode mode, rtx label)
4168
{
4169
  enum machine_mode optab_mode;
4170
  enum mode_class mclass;
4171
  enum insn_code icode;
4172
 
4173
  mclass = GET_MODE_CLASS (mode);
4174
  optab_mode = (mclass == MODE_CC) ? CCmode : mode;
4175
  icode = optab_handler (cbranch_optab, optab_mode);
4176
 
4177
  gcc_assert (icode != CODE_FOR_nothing);
4178
  gcc_assert (insn_operand_matches (icode, 0, test));
4179
  emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0), XEXP (test, 1), label));
4180
}
4181
 
4182
/* Generate code to compare X with Y so that the condition codes are
4183
   set and to jump to LABEL if the condition is true.  If X is a
4184
   constant and Y is not a constant, then the comparison is swapped to
4185
   ensure that the comparison RTL has the canonical form.
4186
 
4187
   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4188
   need to be widened.  UNSIGNEDP is also used to select the proper
4189
   branch condition code.
4190
 
4191
   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4192
 
4193
   MODE is the mode of the inputs (in case they are const_int).
4194
 
4195
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4196
   It will be potentially converted into an unsigned variant based on
4197
   UNSIGNEDP to select a proper jump instruction.  */
4198
 
4199
void
4200
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4201
                         enum machine_mode mode, int unsignedp, rtx label)
4202
{
4203
  rtx op0 = x, op1 = y;
4204
  rtx test;
4205
 
4206
  /* Swap operands and condition to ensure canonical RTL.  */
4207
  if (swap_commutative_operands_p (x, y)
4208
      && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4209
    {
4210
      op0 = y, op1 = x;
4211
      comparison = swap_condition (comparison);
4212
    }
4213
 
4214
  /* If OP0 is still a constant, then both X and Y must be constants
4215
     or the opposite comparison is not supported.  Force X into a register
4216
     to create canonical RTL.  */
4217
  if (CONSTANT_P (op0))
4218
    op0 = force_reg (mode, op0);
4219
 
4220
  if (unsignedp)
4221
    comparison = unsigned_condition (comparison);
4222
 
4223
  prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4224
                    &test, &mode);
4225
  emit_cmp_and_jump_insn_1 (test, mode, label);
4226
}
4227
 
4228
 
4229
/* Emit a library call comparison between floating point X and Y.
4230
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */
4231
 
4232
static void
4233
prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4234
                       rtx *ptest, enum machine_mode *pmode)
4235
{
4236
  enum rtx_code swapped = swap_condition (comparison);
4237
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4238
  enum machine_mode orig_mode = GET_MODE (x);
4239
  enum machine_mode mode, cmp_mode;
4240
  rtx true_rtx, false_rtx;
4241
  rtx value, target, insns, equiv;
4242
  rtx libfunc = 0;
4243
  bool reversed_p = false;
4244
  cmp_mode = targetm.libgcc_cmp_return_mode ();
4245
 
4246
  for (mode = orig_mode;
4247
       mode != VOIDmode;
4248
       mode = GET_MODE_WIDER_MODE (mode))
4249
    {
4250
      if (code_to_optab[comparison]
4251
          && (libfunc = optab_libfunc (code_to_optab[comparison], mode)))
4252
        break;
4253
 
4254
      if (code_to_optab[swapped]
4255
          && (libfunc = optab_libfunc (code_to_optab[swapped], mode)))
4256
        {
4257
          rtx tmp;
4258
          tmp = x; x = y; y = tmp;
4259
          comparison = swapped;
4260
          break;
4261
        }
4262
 
4263
      if (code_to_optab[reversed]
4264
          && (libfunc = optab_libfunc (code_to_optab[reversed], mode)))
4265
        {
4266
          comparison = reversed;
4267
          reversed_p = true;
4268
          break;
4269
        }
4270
    }
4271
 
4272
  gcc_assert (mode != VOIDmode);
4273
 
4274
  if (mode != orig_mode)
4275
    {
4276
      x = convert_to_mode (mode, x, 0);
4277
      y = convert_to_mode (mode, y, 0);
4278
    }
4279
 
4280
  /* Attach a REG_EQUAL note describing the semantics of the libcall to
4281
     the RTL.  This allows the RTL optimizers to delete the libcall if the
4282
     condition can be determined at compile-time.  */
4283
  if (comparison == UNORDERED
4284
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4285
    {
4286
      true_rtx = const_true_rtx;
4287
      false_rtx = const0_rtx;
4288
    }
4289
  else
4290
    {
4291
      switch (comparison)
4292
        {
4293
        case EQ:
4294
          true_rtx = const0_rtx;
4295
          false_rtx = const_true_rtx;
4296
          break;
4297
 
4298
        case NE:
4299
          true_rtx = const_true_rtx;
4300
          false_rtx = const0_rtx;
4301
          break;
4302
 
4303
        case GT:
4304
          true_rtx = const1_rtx;
4305
          false_rtx = const0_rtx;
4306
          break;
4307
 
4308
        case GE:
4309
          true_rtx = const0_rtx;
4310
          false_rtx = constm1_rtx;
4311
          break;
4312
 
4313
        case LT:
4314
          true_rtx = constm1_rtx;
4315
          false_rtx = const0_rtx;
4316
          break;
4317
 
4318
        case LE:
4319
          true_rtx = const0_rtx;
4320
          false_rtx = const1_rtx;
4321
          break;
4322
 
4323
        default:
4324
          gcc_unreachable ();
4325
        }
4326
    }
4327
 
4328
  if (comparison == UNORDERED)
4329
    {
4330
      rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4331
      equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4332
      equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4333
                                    temp, const_true_rtx, equiv);
4334
    }
4335
  else
4336
    {
4337
      equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4338
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4339
        equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4340
                                      equiv, true_rtx, false_rtx);
4341
    }
4342
 
4343
  start_sequence ();
4344
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4345
                                   cmp_mode, 2, x, mode, y, mode);
4346
  insns = get_insns ();
4347
  end_sequence ();
4348
 
4349
  target = gen_reg_rtx (cmp_mode);
4350
  emit_libcall_block (insns, target, value, equiv);
4351
 
4352
  if (comparison == UNORDERED
4353
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4354
      || reversed_p)
4355
    *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4356
  else
4357
    *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4358
 
4359
  *pmode = cmp_mode;
4360
}
4361
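/* Illustrative sketch (not part of GCC): the REG_EQUAL equivalence
   built above for an UNORDERED comparison.  Two values are unordered
   exactly when at least one of them is a NaN, and a NaN is the only
   value that compares unequal to itself, so the libcall result is
   equivalent to (X != X) || (Y != Y).  Guarded with #if 0; assumes
   IEEE comparison semantics.  */
#if 0
static int
sketch_unordered (double x, double y)
{
  return x != x || y != y;      /* True iff X or Y is a NaN.  */
}
#endif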
 
4362
/* Generate code to indirectly jump to a location given in the rtx LOC.  */
4363
 
4364
void
4365
emit_indirect_jump (rtx loc)
4366
{
4367
  struct expand_operand ops[1];
4368
 
4369
  create_address_operand (&ops[0], loc);
4370
  expand_jump_insn (CODE_FOR_indirect_jump, 1, ops);
4371
  emit_barrier ();
4372
}
4373
 
4374
#ifdef HAVE_conditional_move
4375
 
4376
/* Emit a conditional move instruction if the machine supports one for that
4377
   condition and machine mode.
4378
 
4379
   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
4380
   the mode to use should they be constants.  If it is VOIDmode, they cannot
4381
   both be constants.
4382
 
4383
   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4384
   should be stored there.  MODE is the mode to use should they be constants.
4385
   If it is VOIDmode, they cannot both be constants.
4386
 
4387
   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4388
   is not supported.  */
4389
 
4390
rtx
4391
emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4392
                       enum machine_mode cmode, rtx op2, rtx op3,
4393
                       enum machine_mode mode, int unsignedp)
4394
{
4395
  rtx tem, comparison, last;
4396
  enum insn_code icode;
4397
  enum rtx_code reversed;
4398
 
4399
  /* If one operand is constant, make it the second one.  Only do this
4400
     if the other operand is not constant as well.  */
4401
 
4402
  if (swap_commutative_operands_p (op0, op1))
4403
    {
4404
      tem = op0;
4405
      op0 = op1;
4406
      op1 = tem;
4407
      code = swap_condition (code);
4408
    }
4409
 
4410
  /* get_condition will prefer to generate LT and GT even if the old
4411
     comparison was against zero, so undo that canonicalization here since
4412
     comparisons against zero are cheaper.  */
4413
  if (code == LT && op1 == const1_rtx)
4414
    code = LE, op1 = const0_rtx;
4415
  else if (code == GT && op1 == constm1_rtx)
4416
    code = GE, op1 = const0_rtx;
4417
 
4418
  if (cmode == VOIDmode)
4419
    cmode = GET_MODE (op0);
4420
 
4421
  if (swap_commutative_operands_p (op2, op3)
4422
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4423
          != UNKNOWN))
4424
    {
4425
      tem = op2;
4426
      op2 = op3;
4427
      op3 = tem;
4428
      code = reversed;
4429
    }
4430
 
4431
  if (mode == VOIDmode)
4432
    mode = GET_MODE (op2);
4433
 
4434
  icode = direct_optab_handler (movcc_optab, mode);
4435
 
4436
  if (icode == CODE_FOR_nothing)
4437
    return 0;
4438
 
4439
  if (!target)
4440
    target = gen_reg_rtx (mode);
4441
 
4442
  code = unsignedp ? unsigned_condition (code) : code;
4443
  comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4444
 
4445
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
4446
     return NULL and let the caller figure out how best to deal with this
4447
     situation.  */
4448
  if (!COMPARISON_P (comparison))
4449
    return NULL_RTX;
4450
 
4451
  do_pending_stack_adjust ();
4452
  last = get_last_insn ();
4453
  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4454
                    GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4455
                    &comparison, &cmode);
4456
  if (comparison)
4457
    {
4458
      struct expand_operand ops[4];
4459
 
4460
      create_output_operand (&ops[0], target, mode);
4461
      create_fixed_operand (&ops[1], comparison);
4462
      create_input_operand (&ops[2], op2, mode);
4463
      create_input_operand (&ops[3], op3, mode);
4464
      if (maybe_expand_insn (icode, 4, ops))
4465
        {
4466
          if (ops[0].value != target)
4467
            convert_move (target, ops[0].value, false);
4468
          return target;
4469
        }
4470
    }
4471
  delete_insns_since (last);
4472
  return NULL_RTX;
4473
}
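
/* Illustrative usage sketch (an addition, not part of the original file):
   a caller wanting r = (a < b) ? x : y, with all operands already valid
   SImode rtxes, might write

     rtx r = emit_conditional_move (NULL_RTX, LT, a, b, SImode,
                                    x, y, SImode, 0);
     if (r == NULL_RTX)
       ... fall back to an explicit compare-and-branch sequence ...

   The names a, b, x, y and r are hypothetical; only the function and its
   signature are taken from the code above.  */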
4474
 
4475
/* Return nonzero if a conditional move of mode MODE is supported.
4476
 
4477
   This function is for combine so it can tell whether an insn that looks
4478
   like a conditional move is actually supported by the hardware.  If we
4479
   guess wrong we lose a bit on optimization, but that's it.  */
4480
/* ??? sparc64 supports conditionally moving integer values based on fp
4481
   comparisons, and vice versa.  How do we handle them?  */
4482
 
4483
int
4484
can_conditionally_move_p (enum machine_mode mode)
4485
{
4486
  if (direct_optab_handler (movcc_optab, mode) != CODE_FOR_nothing)
4487
    return 1;
4488
 
4489
  return 0;
4490
}
4491
 
4492
#endif /* HAVE_conditional_move */
4493
 
4494
/* Emit a conditional addition instruction if the machine supports one for that
4495
   condition and machine mode.
4496
 
4497
   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
4498
   the mode to use should they be constants.  If it is VOIDmode, they cannot
4499
   both be constants.
4500
 
4501
   OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4502
   should be stored there.  MODE is the mode to use should they be constants.
4503
   If it is VOIDmode, they cannot both be constants.
4504
 
4505
   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4506
   is not supported.  */
4507
 
4508
rtx
4509
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4510
                      enum machine_mode cmode, rtx op2, rtx op3,
4511
                      enum machine_mode mode, int unsignedp)
4512
{
4513
  rtx tem, comparison, last;
4514
  enum insn_code icode;
4515
  enum rtx_code reversed;
4516
 
4517
  /* If one operand is constant, make it the second one.  Only do this
4518
     if the other operand is not constant as well.  */
4519
 
4520
  if (swap_commutative_operands_p (op0, op1))
4521
    {
4522
      tem = op0;
4523
      op0 = op1;
4524
      op1 = tem;
4525
      code = swap_condition (code);
4526
    }
4527
 
4528
  /* get_condition will prefer to generate LT and GT even if the old
4529
     comparison was against zero, so undo that canonicalization here since
4530
     comparisons against zero are cheaper.  */
4531
  if (code == LT && op1 == const1_rtx)
4532
    code = LE, op1 = const0_rtx;
4533
  else if (code == GT && op1 == constm1_rtx)
4534
    code = GE, op1 = const0_rtx;
4535
 
4536
  if (cmode == VOIDmode)
4537
    cmode = GET_MODE (op0);
4538
 
4539
  if (swap_commutative_operands_p (op2, op3)
4540
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4541
          != UNKNOWN))
4542
    {
4543
      tem = op2;
4544
      op2 = op3;
4545
      op3 = tem;
4546
      code = reversed;
4547
    }
4548
 
4549
  if (mode == VOIDmode)
4550
    mode = GET_MODE (op2);
4551
 
4552
  icode = optab_handler (addcc_optab, mode);
4553
 
4554
  if (icode == CODE_FOR_nothing)
4555
    return 0;
4556
 
4557
  if (!target)
4558
    target = gen_reg_rtx (mode);
4559
 
4560
  code = unsignedp ? unsigned_condition (code) : code;
4561
  comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4562
 
4563
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
4564
     return NULL and let the caller figure out how best to deal with this
4565
     situation.  */
4566
  if (!COMPARISON_P (comparison))
4567
    return NULL_RTX;
4568
 
4569
  do_pending_stack_adjust ();
4570
  last = get_last_insn ();
4571
  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4572
                    GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4573
                    &comparison, &cmode);
4574
  if (comparison)
4575
    {
4576
      struct expand_operand ops[4];
4577
 
4578
      create_output_operand (&ops[0], target, mode);
4579
      create_fixed_operand (&ops[1], comparison);
4580
      create_input_operand (&ops[2], op2, mode);
4581
      create_input_operand (&ops[3], op3, mode);
4582
      if (maybe_expand_insn (icode, 4, ops))
4583
        {
4584
          if (ops[0].value != target)
4585
            convert_move (target, ops[0].value, false);
4586
          return target;
4587
        }
4588
    }
4589
  delete_insns_since (last);
4590
  return NULL_RTX;
4591
}
4592
 
4593
/* These functions attempt to generate an insn body, rather than
4594
   emitting the insn, but if the gen function already emits them, we
4595
   make no attempt to turn them back into naked patterns.  */
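
/* For instance (illustrative addition), a caller that wants the two-operand
   form X := X + Y typically writes

     emit_insn (gen_add2_insn (x, y));

   letting gen_add2_insn build the pattern and emit_insn place it in the
   instruction stream.  */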
4596
 
4597
/* Generate and return an insn body to add Y to X.  */
4598
 
4599
rtx
4600
gen_add2_insn (rtx x, rtx y)
4601
{
4602
  enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
4603
 
4604
  gcc_assert (insn_operand_matches (icode, 0, x));
4605
  gcc_assert (insn_operand_matches (icode, 1, x));
4606
  gcc_assert (insn_operand_matches (icode, 2, y));
4607
 
4608
  return GEN_FCN (icode) (x, x, y);
4609
}
4610
 
4611
/* Generate and return an insn body to add r1 and c,
4612
   storing the result in r0.  */
4613
 
4614
rtx
4615
gen_add3_insn (rtx r0, rtx r1, rtx c)
4616
{
4617
  enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
4618
 
4619
  if (icode == CODE_FOR_nothing
4620
      || !insn_operand_matches (icode, 0, r0)
4621
      || !insn_operand_matches (icode, 1, r1)
4622
      || !insn_operand_matches (icode, 2, c))
4623
    return NULL_RTX;
4624
 
4625
  return GEN_FCN (icode) (r0, r1, c);
4626
}
4627
 
4628
int
4629
have_add2_insn (rtx x, rtx y)
4630
{
4631
  enum insn_code icode;
4632
 
4633
  gcc_assert (GET_MODE (x) != VOIDmode);
4634
 
4635
  icode = optab_handler (add_optab, GET_MODE (x));
4636
 
4637
  if (icode == CODE_FOR_nothing)
4638
    return 0;
4639
 
4640
  if (!insn_operand_matches (icode, 0, x)
4641
      || !insn_operand_matches (icode, 1, x)
4642
      || !insn_operand_matches (icode, 2, y))
4643
    return 0;
4644
 
4645
  return 1;
4646
}
4647
 
4648
/* Generate and return an insn body to subtract Y from X.  */
4649
 
4650
rtx
4651
gen_sub2_insn (rtx x, rtx y)
4652
{
4653
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
4654
 
4655
  gcc_assert (insn_operand_matches (icode, 0, x));
4656
  gcc_assert (insn_operand_matches (icode, 1, x));
4657
  gcc_assert (insn_operand_matches (icode, 2, y));
4658
 
4659
  return GEN_FCN (icode) (x, x, y);
4660
}
4661
 
4662
/* Generate and return an insn body to subtract r1 and c,
4663
   storing the result in r0.  */
4664
 
4665
rtx
4666
gen_sub3_insn (rtx r0, rtx r1, rtx c)
4667
{
4668
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
4669
 
4670
  if (icode == CODE_FOR_nothing
4671
      || !insn_operand_matches (icode, 0, r0)
4672
      || !insn_operand_matches (icode, 1, r1)
4673
      || !insn_operand_matches (icode, 2, c))
4674
    return NULL_RTX;
4675
 
4676
  return GEN_FCN (icode) (r0, r1, c);
4677
}
4678
 
4679
int
4680
have_sub2_insn (rtx x, rtx y)
4681
{
4682
  enum insn_code icode;
4683
 
4684
  gcc_assert (GET_MODE (x) != VOIDmode);
4685
 
4686
  icode = optab_handler (sub_optab, GET_MODE (x));
4687
 
4688
  if (icode == CODE_FOR_nothing)
4689
    return 0;
4690
 
4691
  if (!insn_operand_matches (icode, 0, x)
4692
      || !insn_operand_matches (icode, 1, x)
4693
      || !insn_operand_matches (icode, 2, y))
4694
    return 0;
4695
 
4696
  return 1;
4697
}
4698
 
4699
/* Generate the body of an instruction to copy Y into X.
4700
   It may be a list of insns, if one insn isn't enough.  */
4701
 
4702
rtx
4703
gen_move_insn (rtx x, rtx y)
4704
{
4705
  rtx seq;
4706
 
4707
  start_sequence ();
4708
  emit_move_insn_1 (x, y);
4709
  seq = get_insns ();
4710
  end_sequence ();
4711
  return seq;
4712
}
4713
 
4714
/* Return the insn code used to extend FROM_MODE to TO_MODE.
4715
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
4716
   no such operation exists, CODE_FOR_nothing will be returned.  */
4717
 
4718
enum insn_code
4719
can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4720
              int unsignedp)
4721
{
4722
  convert_optab tab;
4723
#ifdef HAVE_ptr_extend
4724
  if (unsignedp < 0)
4725
    return CODE_FOR_ptr_extend;
4726
#endif
4727
 
4728
  tab = unsignedp ? zext_optab : sext_optab;
4729
  return convert_optab_handler (tab, to_mode, from_mode);
4730
}
4731
 
4732
/* Generate the body of an insn to extend Y (with mode MFROM)
4733
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */
4734
 
4735
rtx
4736
gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4737
                 enum machine_mode mfrom, int unsignedp)
4738
{
4739
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4740
  return GEN_FCN (icode) (x, y);
4741
}
4742
 
4743
/* can_fix_p and can_float_p say whether the target machine
4744
   can directly convert a given fixed point type to
4745
   a given floating point type, or vice versa.
4746
   The returned value is the CODE_FOR_... value to use,
4747
   or CODE_FOR_nothing if these modes cannot be directly converted.
4748
 
4749
   *TRUNCP_PTR is set to 1 if it is necessary to output
4750
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */
4751
 
4752
static enum insn_code
4753
can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4754
           int unsignedp, int *truncp_ptr)
4755
{
4756
  convert_optab tab;
4757
  enum insn_code icode;
4758
 
4759
  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4760
  icode = convert_optab_handler (tab, fixmode, fltmode);
4761
  if (icode != CODE_FOR_nothing)
4762
    {
4763
      *truncp_ptr = 0;
4764
      return icode;
4765
    }
4766
 
4767
  /* FIXME: This requires a port to define both FIX and FTRUNC patterns
4768
     for this to work. We need to rework the fix* and ftrunc* patterns
4769
     and documentation.  */
4770
  tab = unsignedp ? ufix_optab : sfix_optab;
4771
  icode = convert_optab_handler (tab, fixmode, fltmode);
4772
  if (icode != CODE_FOR_nothing
4773
      && optab_handler (ftrunc_optab, fltmode) != CODE_FOR_nothing)
4774
    {
4775
      *truncp_ptr = 1;
4776
      return icode;
4777
    }
4778
 
4779
  *truncp_ptr = 0;
4780
  return CODE_FOR_nothing;
4781
}
4782
 
4783
enum insn_code
4784
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4785
             int unsignedp)
4786
{
4787
  convert_optab tab;
4788
 
4789
  tab = unsignedp ? ufloat_optab : sfloat_optab;
4790
  return convert_optab_handler (tab, fltmode, fixmode);
4791
}
4792
 
4793
/* Function supportable_convert_operation
4794
 
4795
   Check whether an operation represented by the code CODE is a
4796
   convert operation that is supported by the target platform in
4797
   vector form (i.e., when operating on arguments of type VECTYPE_IN
4798
   producing a result of type VECTYPE_OUT).
4799
 
4800
   Convert operations we currently support directly are FIX_TRUNC and FLOAT.
4801
   This function checks if these operations are supported
4802
   by the target platform either directly (via vector tree-codes), or via
4803
   target builtins.
4804
 
4805
   Output:
4806
   - CODE1 is code of vector operation to be used when
4807
   vectorizing the operation, if available.
4808
   - DECL is decl of target builtin functions to be used
4809
   when vectorizing the operation, if available.  In this case,
4810
   CODE1 is CALL_EXPR.  */
4811
 
4812
bool
4813
supportable_convert_operation (enum tree_code code,
4814
                                    tree vectype_out, tree vectype_in,
4815
                                    tree *decl, enum tree_code *code1)
4816
{
4817
  enum machine_mode m1,m2;
4818
  int truncp;
4819
 
4820
  m1 = TYPE_MODE (vectype_out);
4821
  m2 = TYPE_MODE (vectype_in);
4822
 
4823
  /* First check whether we can do the conversion directly.  */
4824
  if ((code == FIX_TRUNC_EXPR
4825
       && can_fix_p (m1,m2,TYPE_UNSIGNED (vectype_out), &truncp)
4826
          != CODE_FOR_nothing)
4827
      || (code == FLOAT_EXPR
4828
          && can_float_p (m1,m2,TYPE_UNSIGNED (vectype_in))
4829
             != CODE_FOR_nothing))
4830
    {
4831
      *code1 = code;
4832
      return true;
4833
    }
4834
 
4835
  /* Now check for builtin.  */
4836
  if (targetm.vectorize.builtin_conversion
4837
      && targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
4838
    {
4839
      *code1 = CALL_EXPR;
4840
      *decl = targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in);
4841
      return true;
4842
    }
4843
  return false;
4844
}
4845
 
4846
 
4847
/* Generate code to convert FROM to floating point
4848
   and store in TO.  FROM must be fixed point and not VOIDmode.
4849
   UNSIGNEDP nonzero means regard FROM as unsigned.
4850
   Normally this is done by correcting the final value
4851
   if it is negative.  */
4852
 
4853
void
4854
expand_float (rtx to, rtx from, int unsignedp)
4855
{
4856
  enum insn_code icode;
4857
  rtx target = to;
4858
  enum machine_mode fmode, imode;
4859
  bool can_do_signed = false;
4860
 
4861
  /* Crash now, because we won't be able to decide which mode to use.  */
4862
  gcc_assert (GET_MODE (from) != VOIDmode);
4863
 
4864
  /* Look for an insn to do the conversion.  Do it in the specified
4865
     modes if possible; otherwise convert either input, output or both to
4866
     wider mode.  If the integer mode is wider than the mode of FROM,
4867
     we can do the conversion signed even if the input is unsigned.  */
4868
 
4869
  for (fmode = GET_MODE (to); fmode != VOIDmode;
4870
       fmode = GET_MODE_WIDER_MODE (fmode))
4871
    for (imode = GET_MODE (from); imode != VOIDmode;
4872
         imode = GET_MODE_WIDER_MODE (imode))
4873
      {
4874
        int doing_unsigned = unsignedp;
4875
 
4876
        if (fmode != GET_MODE (to)
4877
            && significand_size (fmode) < GET_MODE_PRECISION (GET_MODE (from)))
4878
          continue;
4879
 
4880
        icode = can_float_p (fmode, imode, unsignedp);
4881
        if (icode == CODE_FOR_nothing && unsignedp)
4882
          {
4883
            enum insn_code scode = can_float_p (fmode, imode, 0);
4884
            if (scode != CODE_FOR_nothing)
4885
              can_do_signed = true;
4886
            if (imode != GET_MODE (from))
4887
              icode = scode, doing_unsigned = 0;
4888
          }
4889
 
4890
        if (icode != CODE_FOR_nothing)
4891
          {
4892
            if (imode != GET_MODE (from))
4893
              from = convert_to_mode (imode, from, unsignedp);
4894
 
4895
            if (fmode != GET_MODE (to))
4896
              target = gen_reg_rtx (fmode);
4897
 
4898
            emit_unop_insn (icode, target, from,
4899
                            doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4900
 
4901
            if (target != to)
4902
              convert_move (to, target, 0);
4903
            return;
4904
          }
4905
      }
4906
 
4907
  /* Unsigned integer, and no way to convert directly.  Convert as signed,
4908
     then unconditionally adjust the result.  */
4909
  if (unsignedp && can_do_signed)
4910
    {
4911
      rtx label = gen_label_rtx ();
4912
      rtx temp;
4913
      REAL_VALUE_TYPE offset;
4914
 
4915
      /* Look for a usable floating mode FMODE wider than the source and at
4916
         least as wide as the target.  Using FMODE will avoid rounding woes
4917
         with unsigned values greater than the signed maximum value.  */
4918
 
4919
      for (fmode = GET_MODE (to);  fmode != VOIDmode;
4920
           fmode = GET_MODE_WIDER_MODE (fmode))
4921
        if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4922
            && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4923
          break;
4924
 
4925
      if (fmode == VOIDmode)
4926
        {
4927
          /* There is no such mode.  Pretend the target is wide enough.  */
4928
          fmode = GET_MODE (to);
4929
 
4930
          /* Avoid double-rounding when TO is narrower than FROM.  */
4931
          if ((significand_size (fmode) + 1)
4932
              < GET_MODE_PRECISION (GET_MODE (from)))
4933
            {
4934
              rtx temp1;
4935
              rtx neglabel = gen_label_rtx ();
4936
 
4937
              /* Don't use TARGET if it isn't a register, is a hard register,
4938
                 or is the wrong mode.  */
4939
              if (!REG_P (target)
4940
                  || REGNO (target) < FIRST_PSEUDO_REGISTER
4941
                  || GET_MODE (target) != fmode)
4942
                target = gen_reg_rtx (fmode);
4943
 
4944
              imode = GET_MODE (from);
4945
              do_pending_stack_adjust ();
4946
 
4947
              /* Test whether the sign bit is set.  */
4948
              emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4949
                                       0, neglabel);
4950
 
4951
              /* The sign bit is not set.  Convert as signed.  */
4952
              expand_float (target, from, 0);
4953
              emit_jump_insn (gen_jump (label));
4954
              emit_barrier ();
4955
 
4956
              /* The sign bit is set.
4957
                 Convert to a usable (positive signed) value by shifting right
4958
                 one bit, while remembering if a nonzero bit was shifted
4959
                 out; i.e., compute  (from & 1) | (from >> 1).  */
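
              /* Added note: folding the shifted-out bit back into bit 0
                 makes it a "sticky" bit, so that under round-to-nearest the
                 halve/convert/double sequence below yields the same result
                 a direct conversion of FROM would.  */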
4960
 
4961
              emit_label (neglabel);
4962
              temp = expand_binop (imode, and_optab, from, const1_rtx,
4963
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
4964
              temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
4965
              temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4966
                                   OPTAB_LIB_WIDEN);
4967
              expand_float (target, temp, 0);
4968
 
4969
              /* Multiply by 2 to undo the shift above.  */
4970
              temp = expand_binop (fmode, add_optab, target, target,
4971
                                   target, 0, OPTAB_LIB_WIDEN);
4972
              if (temp != target)
4973
                emit_move_insn (target, temp);
4974
 
4975
              do_pending_stack_adjust ();
4976
              emit_label (label);
4977
              goto done;
4978
            }
4979
        }
4980
 
4981
      /* If we are about to do some arithmetic to correct for an
4982
         unsigned operand, do it in a pseudo-register.  */
4983
 
4984
      if (GET_MODE (to) != fmode
4985
          || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4986
        target = gen_reg_rtx (fmode);
4987
 
4988
      /* Convert as signed integer to floating.  */
4989
      expand_float (target, from, 0);
4990
 
4991
      /* If FROM is negative (and therefore TO is negative),
4992
         correct its value by 2**bitwidth.  */
4993
 
4994
      do_pending_stack_adjust ();
4995
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4996
                               0, label);
4997
 
4998
 
4999
      real_2expN (&offset, GET_MODE_PRECISION (GET_MODE (from)), fmode);
5000
      temp = expand_binop (fmode, add_optab, target,
5001
                           CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
5002
                           target, 0, OPTAB_LIB_WIDEN);
5003
      if (temp != target)
5004
        emit_move_insn (target, temp);
5005
 
5006
      do_pending_stack_adjust ();
5007
      emit_label (label);
5008
      goto done;
5009
    }
5010
 
5011
  /* No hardware instruction available; call a library routine.  */
5012
    {
5013
      rtx libfunc;
5014
      rtx insns;
5015
      rtx value;
5016
      convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
5017
 
5018
      if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
5019
        from = convert_to_mode (SImode, from, unsignedp);
5020
 
5021
      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5022
      gcc_assert (libfunc);
5023
 
5024
      start_sequence ();
5025
 
5026
      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5027
                                       GET_MODE (to), 1, from,
5028
                                       GET_MODE (from));
5029
      insns = get_insns ();
5030
      end_sequence ();
5031
 
5032
      emit_libcall_block (insns, target, value,
5033
                          gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
5034
                                         GET_MODE (to), from));
5035
    }
5036
 
5037
 done:
5038
 
5039
  /* Copy result to requested destination
5040
     if we have been computing in a temp location.  */
5041
 
5042
  if (target != to)
5043
    {
5044
      if (GET_MODE (target) == GET_MODE (to))
5045
        emit_move_insn (to, target);
5046
      else
5047
        convert_move (to, target, 0);
5048
    }
5049
}
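
/* Illustrative sketch (an addition, not part of the original file): the
   halve-and-double trick used above, written in plain C for a 64-bit
   unsigned value on a machine that only converts signed integers; the
   function name and types are hypothetical.

     double
     u64_to_double (unsigned long long u)
     {
       if (u < 0x8000000000000000ULL)
         return (double) (long long) u;                  // sign bit clear
       unsigned long long half = (u >> 1) | (u & 1);     // keep a sticky bit
       return 2.0 * (double) (long long) half;           // undo the halving
     }
*/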
5050
 
5051
/* Generate code to convert FROM to fixed point and store in TO.  FROM
5052
   must be floating point.  */
5053
 
5054
void
5055
expand_fix (rtx to, rtx from, int unsignedp)
5056
{
5057
  enum insn_code icode;
5058
  rtx target = to;
5059
  enum machine_mode fmode, imode;
5060
  int must_trunc = 0;
5061
 
5062
  /* We first try to find a pair of modes, one real and one integer, at
5063
     least as wide as FROM and TO, respectively, in which we can open-code
5064
     this conversion.  If the integer mode is wider than the mode of TO,
5065
     we can do the conversion either signed or unsigned.  */
5066
 
5067
  for (fmode = GET_MODE (from); fmode != VOIDmode;
5068
       fmode = GET_MODE_WIDER_MODE (fmode))
5069
    for (imode = GET_MODE (to); imode != VOIDmode;
5070
         imode = GET_MODE_WIDER_MODE (imode))
5071
      {
5072
        int doing_unsigned = unsignedp;
5073
 
5074
        icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
5075
        if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5076
          icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5077
 
5078
        if (icode != CODE_FOR_nothing)
5079
          {
5080
            rtx last = get_last_insn ();
5081
            if (fmode != GET_MODE (from))
5082
              from = convert_to_mode (fmode, from, 0);
5083
 
5084
            if (must_trunc)
5085
              {
5086
                rtx temp = gen_reg_rtx (GET_MODE (from));
5087
                from = expand_unop (GET_MODE (from), ftrunc_optab, from,
5088
                                    temp, 0);
5089
              }
5090
 
5091
            if (imode != GET_MODE (to))
5092
              target = gen_reg_rtx (imode);
5093
 
5094
            if (maybe_emit_unop_insn (icode, target, from,
5095
                                      doing_unsigned ? UNSIGNED_FIX : FIX))
5096
              {
5097
                if (target != to)
5098
                  convert_move (to, target, unsignedp);
5099
                return;
5100
              }
5101
            delete_insns_since (last);
5102
          }
5103
      }
5104
 
5105
  /* For an unsigned conversion, there is one more way to do it.
5106
     If only a signed conversion is available, we generate code that compares
5107
     the real value to the largest representable positive number.  If it
5108
     is smaller, the conversion is done normally.  Otherwise, subtract
5109
     one plus the highest signed number, convert, and add it back.
5110
 
5111
     We only need to check all real modes, since we know we didn't find
5112
     anything with a wider integer mode.
5113
 
5114
     This code used to extend the FP value into a mode wider than the destination.
5115
     This is needed for decimal float modes which cannot accurately
5116
     represent one plus the highest signed number of the same size, but
5117
     not for binary modes.  Consider, for instance, conversion from SFmode
5118
     into DImode.
5119
 
5120
     The hot path through the code is dealing with inputs smaller than 2^63
5121
     and doing just the conversion, so there are no bits to lose.
5122
 
5123
     In the other path we know the value is positive in the range 2^63..2^64-1
5124
     inclusive.  (For any other input, overflow happens and the result is undefined.)
5125
     So we know that the most significant bit set in the mantissa corresponds to
5126
     2^63.  The subtraction of 2^63 should not generate any rounding as it
5127
     simply clears out that bit.  The rest is trivial.  */
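
  /* Added worked example: converting the double value 2^63 + 2048 to a
     64-bit unsigned integer.  It is >= 2^63, so we subtract 2^63 (exact,
     since that only clears the top mantissa bit), giving 2048.0; the signed
     fix yields 2048; XORing in 1 << 63 restores the final result
     0x8000000000000800.  */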
5128
 
5129
  if (unsignedp && GET_MODE_PRECISION (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
5130
    for (fmode = GET_MODE (from); fmode != VOIDmode;
5131
         fmode = GET_MODE_WIDER_MODE (fmode))
5132
      if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
5133
          && (!DECIMAL_FLOAT_MODE_P (fmode)
5134
              || GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (GET_MODE (to))))
5135
        {
5136
          int bitsize;
5137
          REAL_VALUE_TYPE offset;
5138
          rtx limit, lab1, lab2, insn;
5139
 
5140
          bitsize = GET_MODE_PRECISION (GET_MODE (to));
5141
          real_2expN (&offset, bitsize - 1, fmode);
5142
          limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
5143
          lab1 = gen_label_rtx ();
5144
          lab2 = gen_label_rtx ();
5145
 
5146
          if (fmode != GET_MODE (from))
5147
            from = convert_to_mode (fmode, from, 0);
5148
 
5149
          /* See if we need to do the subtraction.  */
5150
          do_pending_stack_adjust ();
5151
          emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5152
                                   0, lab1);
5153
 
5154
          /* If not, do the signed "fix" and branch around fixup code.  */
5155
          expand_fix (to, from, 0);
5156
          emit_jump_insn (gen_jump (lab2));
5157
          emit_barrier ();
5158
 
5159
          /* Otherwise, subtract 2**(N-1), convert to signed number,
5160
             then add 2**(N-1).  Do the addition using XOR since this
5161
             will often generate better code.  */
5162
          emit_label (lab1);
5163
          target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5164
                                 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5165
          expand_fix (to, target, 0);
5166
          target = expand_binop (GET_MODE (to), xor_optab, to,
5167
                                 gen_int_mode
5168
                                 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5169
                                  GET_MODE (to)),
5170
                                 to, 1, OPTAB_LIB_WIDEN);
5171
 
5172
          if (target != to)
5173
            emit_move_insn (to, target);
5174
 
5175
          emit_label (lab2);
5176
 
5177
          if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing)
5178
            {
5179
              /* Make a place for a REG_NOTE and add it.  */
5180
              insn = emit_move_insn (to, to);
5181
              set_dst_reg_note (insn, REG_EQUAL,
5182
                                gen_rtx_fmt_e (UNSIGNED_FIX, GET_MODE (to),
5183
                                               copy_rtx (from)),
5184
                                to);
5185
            }
5186
 
5187
          return;
5188
        }
5189
 
5190
  /* We can't do it with an insn, so use a library call.  But first ensure
5191
     that the mode of TO is at least as wide as SImode, since those are the
5192
     only library calls we know about.  */
5193
 
5194
  if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5195
    {
5196
      target = gen_reg_rtx (SImode);
5197
 
5198
      expand_fix (target, from, unsignedp);
5199
    }
5200
  else
5201
    {
5202
      rtx insns;
5203
      rtx value;
5204
      rtx libfunc;
5205
 
5206
      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5207
      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5208
      gcc_assert (libfunc);
5209
 
5210
      start_sequence ();
5211
 
5212
      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5213
                                       GET_MODE (to), 1, from,
5214
                                       GET_MODE (from));
5215
      insns = get_insns ();
5216
      end_sequence ();
5217
 
5218
      emit_libcall_block (insns, target, value,
5219
                          gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5220
                                         GET_MODE (to), from));
5221
    }
5222
 
5223
  if (target != to)
5224
    {
5225
      if (GET_MODE (to) == GET_MODE (target))
5226
        emit_move_insn (to, target);
5227
      else
5228
        convert_move (to, target, 0);
5229
    }
5230
}
5231
 
5232
/* Generate code to convert FROM to TO, where at least one is a fixed-point value.
5233
   If UINTP is true, either TO or FROM is an unsigned integer.
5234
   If SATP is true, we need to saturate the result.  */
5235
 
5236
void
5237
expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5238
{
5239
  enum machine_mode to_mode = GET_MODE (to);
5240
  enum machine_mode from_mode = GET_MODE (from);
5241
  convert_optab tab;
5242
  enum rtx_code this_code;
5243
  enum insn_code code;
5244
  rtx insns, value;
5245
  rtx libfunc;
5246
 
5247
  if (to_mode == from_mode)
5248
    {
5249
      emit_move_insn (to, from);
5250
      return;
5251
    }
5252
 
5253
  if (uintp)
5254
    {
5255
      tab = satp ? satfractuns_optab : fractuns_optab;
5256
      this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5257
    }
5258
  else
5259
    {
5260
      tab = satp ? satfract_optab : fract_optab;
5261
      this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5262
    }
5263
  code = convert_optab_handler (tab, to_mode, from_mode);
5264
  if (code != CODE_FOR_nothing)
5265
    {
5266
      emit_unop_insn (code, to, from, this_code);
5267
      return;
5268
    }
5269
 
5270
  libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5271
  gcc_assert (libfunc);
5272
 
5273
  start_sequence ();
5274
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5275
                                   1, from, from_mode);
5276
  insns = get_insns ();
5277
  end_sequence ();
5278
 
5279
  emit_libcall_block (insns, to, value,
5280
                      gen_rtx_fmt_e (tab->code, to_mode, from));
5281
}
5282
 
5283
/* Generate code to convert FROM to fixed point and store in TO.  FROM
5284
   must be floating point, TO must be signed.  Use the conversion optab
5285
   TAB to do the conversion.  */
5286
 
5287
bool
5288
expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5289
{
5290
  enum insn_code icode;
5291
  rtx target = to;
5292
  enum machine_mode fmode, imode;
5293
 
5294
  /* We first try to find a pair of modes, one real and one integer, at
5295
     least as wide as FROM and TO, respectively, in which we can open-code
5296
     this conversion.  If the integer mode is wider than the mode of TO,
5297
     we can do the conversion either signed or unsigned.  */
5298
 
5299
  for (fmode = GET_MODE (from); fmode != VOIDmode;
5300
       fmode = GET_MODE_WIDER_MODE (fmode))
5301
    for (imode = GET_MODE (to); imode != VOIDmode;
5302
         imode = GET_MODE_WIDER_MODE (imode))
5303
      {
5304
        icode = convert_optab_handler (tab, imode, fmode);
5305
        if (icode != CODE_FOR_nothing)
5306
          {
5307
            rtx last = get_last_insn ();
5308
            if (fmode != GET_MODE (from))
5309
              from = convert_to_mode (fmode, from, 0);
5310
 
5311
            if (imode != GET_MODE (to))
5312
              target = gen_reg_rtx (imode);
5313
 
5314
            if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5315
              {
5316
                delete_insns_since (last);
5317
                continue;
5318
              }
5319
            if (target != to)
5320
              convert_move (to, target, 0);
5321
            return true;
5322
          }
5323
      }
5324
 
5325
  return false;
5326
}
5327
 
5328
/* Report whether we have an instruction to perform the operation
5329
   specified by CODE on operands of mode MODE.  */
5330
int
5331
have_insn_for (enum rtx_code code, enum machine_mode mode)
5332
{
5333
  return (code_to_optab[(int) code] != 0
5334
          && (optab_handler (code_to_optab[(int) code], mode)
5335
              != CODE_FOR_nothing));
5336
}
5337
 
5338
/* Set all insn_code fields to CODE_FOR_nothing.  */
5339
 
5340
static void
5341
init_insn_codes (void)
5342
{
5343
  memset (optab_table, 0, sizeof (optab_table));
5344
  memset (convert_optab_table, 0, sizeof (convert_optab_table));
5345
  memset (direct_optab_table, 0, sizeof (direct_optab_table));
5346
}
5347
 
5348
/* Initialize OP's code to CODE, and write it into the code_to_optab table.  */
5349
static inline void
5350
init_optab (optab op, enum rtx_code code)
5351
{
5352
  op->code = code;
5353
  code_to_optab[(int) code] = op;
5354
}
5355
 
5356
/* Same, but fill in its code as CODE, and do _not_ write it into
5357
   the code_to_optab table.  */
5358
static inline void
5359
init_optabv (optab op, enum rtx_code code)
5360
{
5361
  op->code = code;
5362
}
5363
 
5364
/* Conversion optabs never go in the code_to_optab table.  */
5365
static void
5366
init_convert_optab (convert_optab op, enum rtx_code code)
5367
{
5368
  op->code = code;
5369
}
5370
 
5371
/* Initialize the libfunc fields of an entire group of entries in some
5372
   optab.  Each entry is set equal to a string consisting of a leading
5373
   pair of underscores followed by a generic operation name followed by
5374
   a mode name (downshifted to lowercase) followed by a single character
5375
   representing the number of operands for the given operation (which is
5376
   usually one of the characters '2', '3', or '4').
5377
 
5378
   OPTABLE is the table in which libfunc fields are to be initialized.
5379
   OPNAME is the generic (string) name of the operation.
5380
   SUFFIX is the character which specifies the number of operands for
5381
     the given generic operation.
5382
   MODE is the mode to generate for.
5383
*/
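
/* For example (illustrative addition): gen_libfunc for add_optab with
   OPNAME "add", SUFFIX '3' and MODE SImode produces the name "__addsi3",
   or "__gnu_addsi3" when targetm.libfunc_gnu_prefix is set.  */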
5384
 
5385
static void
5386
gen_libfunc (optab optable, const char *opname, int suffix, enum machine_mode mode)
5387
{
5388
  unsigned opname_len = strlen (opname);
5389
  const char *mname = GET_MODE_NAME (mode);
5390
  unsigned mname_len = strlen (mname);
5391
  int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
5392
  int len = prefix_len + opname_len + mname_len + 1 + 1;
5393
  char *libfunc_name = XALLOCAVEC (char, len);
5394
  char *p;
5395
  const char *q;
5396
 
5397
  p = libfunc_name;
5398
  *p++ = '_';
5399
  *p++ = '_';
5400
  if (targetm.libfunc_gnu_prefix)
5401
    {
5402
      *p++ = 'g';
5403
      *p++ = 'n';
5404
      *p++ = 'u';
5405
      *p++ = '_';
5406
    }
5407
  for (q = opname; *q; )
5408
    *p++ = *q++;
5409
  for (q = mname; *q; q++)
5410
    *p++ = TOLOWER (*q);
5411
  *p++ = suffix;
5412
  *p = '\0';
5413
 
5414
  set_optab_libfunc (optable, mode,
5415
                     ggc_alloc_string (libfunc_name, p - libfunc_name));
5416
}
5417
 
5418
/* Like gen_libfunc, but verify that integer operation is involved.  */
5419
 
5420
static void
5421
gen_int_libfunc (optab optable, const char *opname, char suffix,
5422
                 enum machine_mode mode)
5423
{
5424
  int maxsize = 2 * BITS_PER_WORD;
5425
 
5426
  if (GET_MODE_CLASS (mode) != MODE_INT)
5427
    return;
5428
  if (maxsize < LONG_LONG_TYPE_SIZE)
5429
    maxsize = LONG_LONG_TYPE_SIZE;
5430
  if (GET_MODE_CLASS (mode) != MODE_INT
5431
      || mode < word_mode || GET_MODE_BITSIZE (mode) > maxsize)
5432
    return;
5433
  gen_libfunc (optable, opname, suffix, mode);
5434
}
5435
 
5436
/* Like gen_libfunc, but verify that an FP operation is involved and set
   the decimal prefix if needed.  */
5437
 
5438
static void
5439
gen_fp_libfunc (optab optable, const char *opname, char suffix,
5440
                enum machine_mode mode)
5441
{
5442
  char *dec_opname;
5443
 
5444
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5445
    gen_libfunc (optable, opname, suffix, mode);
5446
  if (DECIMAL_FLOAT_MODE_P (mode))
5447
    {
5448
      dec_opname = XALLOCAVEC (char, sizeof (DECIMAL_PREFIX) + strlen (opname));
5449
      /* For BID support, change the name to have either a bid_ or dpd_ prefix
5450
         depending on the low level floating format used.  */
5451
      memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5452
      strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5453
      gen_libfunc (optable, dec_opname, suffix, mode);
5454
    }
5455
}
5456
 
5457
/* Like gen_libfunc, but verify that fixed-point operation is involved.  */
5458
 
5459
static void
5460
gen_fixed_libfunc (optab optable, const char *opname, char suffix,
5461
                   enum machine_mode mode)
5462
{
5463
  if (!ALL_FIXED_POINT_MODE_P (mode))
5464
    return;
5465
  gen_libfunc (optable, opname, suffix, mode);
5466
}
5467
 
5468
/* Like gen_libfunc, but verify that signed fixed-point operation is
5469
   involved.  */
5470
 
5471
static void
5472
gen_signed_fixed_libfunc (optab optable, const char *opname, char suffix,
5473
                          enum machine_mode mode)
5474
{
5475
  if (!SIGNED_FIXED_POINT_MODE_P (mode))
5476
    return;
5477
  gen_libfunc (optable, opname, suffix, mode);
5478
}
5479
 
5480
/* Like gen_libfunc, but verify that unsigned fixed-point operation is
5481
   involved.  */
5482
 
5483
static void
5484
gen_unsigned_fixed_libfunc (optab optable, const char *opname, char suffix,
5485
                            enum machine_mode mode)
5486
{
5487
  if (!UNSIGNED_FIXED_POINT_MODE_P (mode))
5488
    return;
5489
  gen_libfunc (optable, opname, suffix, mode);
5490
}
5491
 
5492
/* Like gen_libfunc, but verify that FP or INT operation is involved.  */
5493
 
5494
static void
5495
gen_int_fp_libfunc (optab optable, const char *name, char suffix,
5496
                    enum machine_mode mode)
5497
{
5498
  if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5499
    gen_fp_libfunc (optable, name, suffix, mode);
5500
  if (INTEGRAL_MODE_P (mode))
5501
    gen_int_libfunc (optable, name, suffix, mode);
5502
}
5503
 
5504
/* Like gen_libfunc, but verify that FP or INT operation is involved
5505
   and add 'v' suffix for integer operation.  */
5506
 
5507
static void
5508
gen_intv_fp_libfunc (optab optable, const char *name, char suffix,
5509
                     enum machine_mode mode)
5510
{
5511
  if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5512
    gen_fp_libfunc (optable, name, suffix, mode);
5513
  if (GET_MODE_CLASS (mode) == MODE_INT)
5514
    {
5515
      int len = strlen (name);
5516
      char *v_name = XALLOCAVEC (char, len + 2);
5517
      strcpy (v_name, name);
5518
      v_name[len] = 'v';
5519
      v_name[len + 1] = 0;
5520
      gen_int_libfunc (optable, v_name, suffix, mode);
5521
    }
5522
}
5523
 
5524
/* Like gen_libfunc, but verify that FP or INT or FIXED operation is
5525
   involved.  */
5526
 
5527
static void
5528
gen_int_fp_fixed_libfunc (optab optable, const char *name, char suffix,
5529
                          enum machine_mode mode)
5530
{
5531
  if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5532
    gen_fp_libfunc (optable, name, suffix, mode);
5533
  if (INTEGRAL_MODE_P (mode))
5534
    gen_int_libfunc (optable, name, suffix, mode);
5535
  if (ALL_FIXED_POINT_MODE_P (mode))
5536
    gen_fixed_libfunc (optable, name, suffix, mode);
5537
}
5538
 
5539
/* Like gen_libfunc, but verify that FP or INT or signed FIXED operation is
5540
   involved.  */
5541
 
5542
static void
5543
gen_int_fp_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5544
                                 enum machine_mode mode)
5545
{
5546
  if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5547
    gen_fp_libfunc (optable, name, suffix, mode);
5548
  if (INTEGRAL_MODE_P (mode))
5549
    gen_int_libfunc (optable, name, suffix, mode);
5550
  if (SIGNED_FIXED_POINT_MODE_P (mode))
5551
    gen_signed_fixed_libfunc (optable, name, suffix, mode);
5552
}
5553
 
5554
/* Like gen_libfunc, but verify that INT or FIXED operation is
5555
   involved.  */
5556
 
5557
static void
5558
gen_int_fixed_libfunc (optab optable, const char *name, char suffix,
5559
                       enum machine_mode mode)
5560
{
5561
  if (INTEGRAL_MODE_P (mode))
5562
    gen_int_libfunc (optable, name, suffix, mode);
5563
  if (ALL_FIXED_POINT_MODE_P (mode))
5564
    gen_fixed_libfunc (optable, name, suffix, mode);
5565
}
5566
 
5567
/* Like gen_libfunc, but verify that INT or signed FIXED operation is
5568
   involved.  */
5569
 
5570
static void
5571
gen_int_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5572
                              enum machine_mode mode)
5573
{
5574
  if (INTEGRAL_MODE_P (mode))
5575
    gen_int_libfunc (optable, name, suffix, mode);
5576
  if (SIGNED_FIXED_POINT_MODE_P (mode))
5577
    gen_signed_fixed_libfunc (optable, name, suffix, mode);
5578
}
5579
 
5580
/* Like gen_libfunc, but verify that INT or unsigned FIXED operation is
5581
   involved.  */
5582
 
5583
static void
5584
gen_int_unsigned_fixed_libfunc (optab optable, const char *name, char suffix,
5585
                                enum machine_mode mode)
5586
{
5587
  if (INTEGRAL_MODE_P (mode))
5588
    gen_int_libfunc (optable, name, suffix, mode);
5589
  if (UNSIGNED_FIXED_POINT_MODE_P (mode))
5590
    gen_unsigned_fixed_libfunc (optable, name, suffix, mode);
5591
}
5592
 
5593
/* Initialize the libfunc fields of an entire group of entries of an
5594
   inter-mode-class conversion optab.  The string formation rules are
5595
   similar to the ones for init_libfuncs, above, but instead of having
5596
   a mode name and an operand count these functions have two mode names
5597
   and no operand count.  */
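
/* For example (illustrative addition): the sfix_optab libfunc from DFmode
   to SImode is named "__fixdfsi" -- underscores, the operation name "fix",
   then the two mode names in from/to order, with no operand count.  */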
5598
 
5599
static void
5600
gen_interclass_conv_libfunc (convert_optab tab,
5601
                             const char *opname,
5602
                             enum machine_mode tmode,
5603
                             enum machine_mode fmode)
5604
{
5605
  size_t opname_len = strlen (opname);
5606
  size_t mname_len = 0;
5607
 
5608
  const char *fname, *tname;
5609
  const char *q;
5610
  int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
5611
  char *libfunc_name, *suffix;
5612
  char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5613
  char *p;
5614
 
5615
  /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5616
     depends on which underlying decimal floating point format is used.  */
5617
  const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5618
 
5619
  mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5620
 
5621
  nondec_name = XALLOCAVEC (char, prefix_len + opname_len + mname_len + 1 + 1);
5622
  nondec_name[0] = '_';
5623
  nondec_name[1] = '_';
5624
  if (targetm.libfunc_gnu_prefix)
5625
    {
5626
      nondec_name[2] = 'g';
5627
      nondec_name[3] = 'n';
5628
      nondec_name[4] = 'u';
5629
      nondec_name[5] = '_';
5630
    }
5631
 
5632
  memcpy (&nondec_name[prefix_len], opname, opname_len);
5633
  nondec_suffix = nondec_name + opname_len + prefix_len;
5634
 
5635
  dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
5636
  dec_name[0] = '_';
5637
  dec_name[1] = '_';
5638
  memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5639
  memcpy (&dec_name[2+dec_len], opname, opname_len);
5640
  dec_suffix = dec_name + dec_len + opname_len + 2;
5641
 
5642
  fname = GET_MODE_NAME (fmode);
5643
  tname = GET_MODE_NAME (tmode);
5644
 
5645
  if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5646
    {
5647
      libfunc_name = dec_name;
5648
      suffix = dec_suffix;
5649
    }
5650
  else
5651
    {
5652
      libfunc_name = nondec_name;
5653
      suffix = nondec_suffix;
5654
    }
5655
 
5656
  p = suffix;
5657
  for (q = fname; *q; p++, q++)
5658
    *p = TOLOWER (*q);
5659
  for (q = tname; *q; p++, q++)
5660
    *p = TOLOWER (*q);
5661
 
5662
  *p = '\0';
5663
 
5664
  set_conv_libfunc (tab, tmode, fmode,
5665
                    ggc_alloc_string (libfunc_name, p - libfunc_name));
5666
}
5667
 
5668
/* Same as gen_interclass_conv_libfunc but verify that we are producing
5669
   int->fp conversion.  */
5670
 
5671
static void
5672
gen_int_to_fp_conv_libfunc (convert_optab tab,
5673
                            const char *opname,
5674
                            enum machine_mode tmode,
5675
                            enum machine_mode fmode)
5676
{
5677
  if (GET_MODE_CLASS (fmode) != MODE_INT)
5678
    return;
5679
  if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5680
    return;
5681
  gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5682
}
5683
 
5684
/* ufloat_optab is special: it uses "floatun" for binary FP and "floatuns"
5685
   for the decimal FP naming scheme.  */
5686
 
5687
static void
5688
gen_ufloat_conv_libfunc (convert_optab tab,
5689
                         const char *opname ATTRIBUTE_UNUSED,
5690
                         enum machine_mode tmode,
5691
                         enum machine_mode fmode)
5692
{
5693
  if (DECIMAL_FLOAT_MODE_P (tmode))
5694
    gen_int_to_fp_conv_libfunc (tab, "floatuns", tmode, fmode);
5695
  else
5696
    gen_int_to_fp_conv_libfunc (tab, "floatun", tmode, fmode);
5697
}
5698
 
5699
/* Same as gen_interclass_conv_libfunc but verify that we are producing
5700
   int->fp conversion with no decimal floating point involved.  */
5701
 
5702
static void
5703
gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab,
5704
                                       const char *opname,
5705
                                       enum machine_mode tmode,
5706
                                       enum machine_mode fmode)
5707
{
5708
  if (GET_MODE_CLASS (fmode) != MODE_INT)
5709
    return;
5710
  if (GET_MODE_CLASS (tmode) != MODE_FLOAT)
5711
    return;
5712
  gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5713
}
5714
 
5715
/* Same as gen_interclass_conv_libfunc but verify that we are producing
5716
   fp->int conversion.  */
5717
 
5718
static void
5719
gen_fp_to_int_conv_libfunc (convert_optab tab,
5720
                            const char *opname,
5721
                            enum machine_mode tmode,
5722
                            enum machine_mode fmode)
5723
{
5724
  if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5725
    return;
5726
  if (GET_MODE_CLASS (tmode) != MODE_INT)
5727
    return;
5728
  gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5729
}
5730
 
5731
/* Initialize the libfunc fields of an intra-mode-class conversion optab.
5732
   The string formation rules are
5733
   similar to the ones for init_libfuncs, above.  */
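
/* For example (illustrative addition): the trunc_optab libfunc from DFmode
   to SFmode is named "__truncdfsf2" -- here a trailing '2' operand count is
   appended, unlike in the interclass case above.  */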
5734
 
5735
static void
5736
gen_intraclass_conv_libfunc (convert_optab tab, const char *opname,
5737
                             enum machine_mode tmode, enum machine_mode fmode)
5738
{
5739
  size_t opname_len = strlen (opname);
5740
  size_t mname_len = 0;
5741
 
5742
  const char *fname, *tname;
5743
  const char *q;
5744
  int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
5745
  char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5746
  char *libfunc_name, *suffix;
5747
  char *p;
5748
 
5749
  /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5750
     depends on which underlying decimal floating point format is used.  */
5751
  const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5752
 
5753
  mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5754
 
5755
  nondec_name = XALLOCAVEC (char, prefix_len + opname_len + mname_len + 1 + 1);
5756
  nondec_name[0] = '_';
5757
  nondec_name[1] = '_';
5758
  if (targetm.libfunc_gnu_prefix)
5759
    {
5760
      nondec_name[2] = 'g';
5761
      nondec_name[3] = 'n';
5762
      nondec_name[4] = 'u';
5763
      nondec_name[5] = '_';
5764
    }
5765
  memcpy (&nondec_name[prefix_len], opname, opname_len);
5766
  nondec_suffix = nondec_name + opname_len + prefix_len;
5767
 
5768
  dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
5769
  dec_name[0] = '_';
5770
  dec_name[1] = '_';
5771
  memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5772
  memcpy (&dec_name[2 + dec_len], opname, opname_len);
5773
  dec_suffix = dec_name + dec_len + opname_len + 2;
5774
 
5775
  fname = GET_MODE_NAME (fmode);
5776
  tname = GET_MODE_NAME (tmode);
5777
 
5778
  if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5779
    {
5780
      libfunc_name = dec_name;
5781
      suffix = dec_suffix;
5782
    }
5783
  else
5784
    {
5785
      libfunc_name = nondec_name;
5786
      suffix = nondec_suffix;
5787
    }
5788
 
5789
  p = suffix;
5790
  for (q = fname; *q; p++, q++)
5791
    *p = TOLOWER (*q);
5792
  for (q = tname; *q; p++, q++)
5793
    *p = TOLOWER (*q);
5794
 
5795
  *p++ = '2';
5796
  *p = '\0';
5797
 
5798
  set_conv_libfunc (tab, tmode, fmode,
5799
                    ggc_alloc_string (libfunc_name, p - libfunc_name));
5800
}
5801
 
5802
/* Pick proper libcall for trunc_optab.  We need to choose whether we do
5803
   truncation or extension, and whether it is interclass or intraclass.  */
5804
 
5805
static void
5806
gen_trunc_conv_libfunc (convert_optab tab,
5807
                         const char *opname,
5808
                         enum machine_mode tmode,
5809
                         enum machine_mode fmode)
5810
{
5811
  if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5812
    return;
5813
  if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5814
    return;
5815
  if (tmode == fmode)
5816
    return;
5817
 
5818
  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5819
      || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5820
     gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5821
 
5822
  if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
5823
    return;
5824
 
5825
  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5826
       && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5827
      || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5828
    gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5829
}
5830
 
5831
/* Pick proper libcall for extend_optab.  We need to choose whether we do
5832
   truncation or extension, and whether it is interclass or intraclass.  */
5833
 
5834
static void
5835
gen_extend_conv_libfunc (convert_optab tab,
5836
                         const char *opname ATTRIBUTE_UNUSED,
5837
                         enum machine_mode tmode,
5838
                         enum machine_mode fmode)
5839
{
5840
  if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5841
    return;
5842
  if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5843
    return;
5844
  if (tmode == fmode)
5845
    return;
5846
 
5847
  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5848
      || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5849
     gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5850
 
5851
  if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
5852
    return;
5853
 
5854
  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5855
       && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5856
      || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5857
    gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5858
}
5859
 
5860
/* Pick proper libcall for fract_optab.  We need to choose whether the
5861
   conversion is interclass or intraclass.  */
5862
 
5863
static void
5864
gen_fract_conv_libfunc (convert_optab tab,
5865
                        const char *opname,
5866
                        enum machine_mode tmode,
5867
                        enum machine_mode fmode)
5868
{
5869
  if (tmode == fmode)
5870
    return;
5871
  if (!(ALL_FIXED_POINT_MODE_P (tmode) || ALL_FIXED_POINT_MODE_P (fmode)))
5872
    return;
5873
 
5874
  if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
5875
    gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5876
  else
5877
    gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5878
}
5879
 
5880
/* Pick proper libcall for fractuns_optab.  */
5881
 
5882
static void
5883
gen_fractuns_conv_libfunc (convert_optab tab,
5884
                           const char *opname,
5885
                           enum machine_mode tmode,
5886
                           enum machine_mode fmode)
5887
{
5888
  if (tmode == fmode)
5889
    return;
5890
  /* One mode must be a fixed-point mode, and the other must be an integer
5891
     mode. */
5892
  if (!((ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT)
5893
        || (ALL_FIXED_POINT_MODE_P (fmode)
5894
            && GET_MODE_CLASS (tmode) == MODE_INT)))
5895
    return;
5896
 
5897
  gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5898
}
5899
 
5900
/* Pick proper libcall for satfract_optab.  We need to choose whether the
5901
   conversion is interclass or intraclass.  */
5902
 
5903
static void
5904
gen_satfract_conv_libfunc (convert_optab tab,
5905
                           const char *opname,
5906
                           enum machine_mode tmode,
5907
                           enum machine_mode fmode)
5908
{
5909
  if (tmode == fmode)
5910
    return;
5911
  /* TMODE must be a fixed-point mode.  */
5912
  if (!ALL_FIXED_POINT_MODE_P (tmode))
5913
    return;
5914
 
5915
  if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
5916
    gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5917
  else
5918
    gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5919
}
5920
 
5921
/* Pick proper libcall for satfractuns_optab.  */
5922
 
5923
static void
5924
gen_satfractuns_conv_libfunc (convert_optab tab,
5925
                              const char *opname,
5926
                              enum machine_mode tmode,
5927
                              enum machine_mode fmode)
5928
{
5929
  if (tmode == fmode)
5930
    return;
5931
  /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
5932
  if (!(ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT))
5933
    return;
5934
 
5935
  gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5936
}
5937
 
5938
/* A table of previously-created libfuncs, hashed by name.  */
5939
static GTY ((param_is (union tree_node))) htab_t libfunc_decls;
5940
 
5941
/* Hashtable callbacks for libfunc_decls.  */
5942
 
5943
static hashval_t
5944
libfunc_decl_hash (const void *entry)
5945
{
5946
  return IDENTIFIER_HASH_VALUE (DECL_NAME ((const_tree) entry));
5947
}
5948
 
5949
static int
5950
libfunc_decl_eq (const void *entry1, const void *entry2)
5951
{
5952
  return DECL_NAME ((const_tree) entry1) == (const_tree) entry2;
5953
}
5954
 
5955
/* Build a decl for a libfunc named NAME. */
5956
 
5957
tree
5958
build_libfunc_function (const char *name)
5959
{
5960
  tree decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
5961
                          get_identifier (name),
5962
                          build_function_type (integer_type_node, NULL_TREE));
5963
  /* ??? We don't have any type information except that this is
5964
     a function.  Pretend this is "int foo()".  */
5965
  DECL_ARTIFICIAL (decl) = 1;
5966
  DECL_EXTERNAL (decl) = 1;
5967
  TREE_PUBLIC (decl) = 1;
5968
  gcc_assert (DECL_ASSEMBLER_NAME (decl));
5969
 
5970
  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
5971
     are the flags assigned by targetm.encode_section_info.  */
5972
  SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);
5973
 
5974
  return decl;
5975
}
5976
 
5977
rtx
5978
init_one_libfunc (const char *name)
5979
{
5980
  tree id, decl;
5981
  void **slot;
5982
  hashval_t hash;
5983
 
5984
  if (libfunc_decls == NULL)
5985
    libfunc_decls = htab_create_ggc (37, libfunc_decl_hash,
5986
                                     libfunc_decl_eq, NULL);
5987
 
5988
  /* See if we have already created a libfunc decl for this function.  */
5989
  id = get_identifier (name);
5990
  hash = IDENTIFIER_HASH_VALUE (id);
5991
  slot = htab_find_slot_with_hash (libfunc_decls, id, hash, INSERT);
5992
  decl = (tree) *slot;
5993
  if (decl == NULL)
5994
    {
5995
      /* Create a new decl, so that it can be passed to
5996
         targetm.encode_section_info.  */
5997
      decl = build_libfunc_function (name);
5998
      *slot = decl;
5999
    }
6000
  return XEXP (DECL_RTL (decl), 0);
6001
}
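/* Editorial illustration, not part of optabs.c: because init_one_libfunc
   caches the FUNCTION_DECL in libfunc_decls keyed by identifier, repeated
   calls with the same name should hand back the same SYMBOL_REF:

     rtx a = init_one_libfunc ("memcpy");
     rtx b = init_one_libfunc ("memcpy");
     gcc_assert (a == b);

   Both values come from the single cached decl's DECL_RTL, so pointer
   equality is expected to hold; this is an inference from the code above,
   not a documented guarantee.  */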
6002
 
6003
/* Adjust the assembler name of libfunc NAME to ASMSPEC.  */
6004
 
6005
rtx
6006
set_user_assembler_libfunc (const char *name, const char *asmspec)
6007
{
6008
  tree id, decl;
6009
  void **slot;
6010
  hashval_t hash;
6011
 
6012
  id = get_identifier (name);
6013
  hash = IDENTIFIER_HASH_VALUE (id);
6014
  slot = htab_find_slot_with_hash (libfunc_decls, id, hash, NO_INSERT);
6015
  gcc_assert (slot);
6016
  decl = (tree) *slot;
6017
  set_user_assembler_name (decl, asmspec);
6018
  return XEXP (DECL_RTL (decl), 0);
6019
}
6020
 
6021
/* Call this to reset the function entry for one optab (OPTABLE) in mode
6022
   MODE to NAME, which should be either 0 or a string constant.  */
6023
void
6024
set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
6025
{
6026
  rtx val;
6027
  struct libfunc_entry e;
6028
  struct libfunc_entry **slot;
6029
  e.optab = (size_t) (optable - &optab_table[0]);
6030
  e.mode1 = mode;
6031
  e.mode2 = VOIDmode;
6032
 
6033
  if (name)
6034
    val = init_one_libfunc (name);
6035
  else
6036
    val = 0;
6037
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6038
  if (*slot == NULL)
6039
    *slot = ggc_alloc_libfunc_entry ();
6040
  (*slot)->optab = (size_t) (optable - &optab_table[0]);
6041
  (*slot)->mode1 = mode;
6042
  (*slot)->mode2 = VOIDmode;
6043
  (*slot)->libfunc = val;
6044
}
6045
 
6046
/* Call this to reset the function entry for one conversion optab
6047
   (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
6048
   either 0 or a string constant.  */
6049
void
6050
set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
6051
                  enum machine_mode fmode, const char *name)
6052
{
6053
  rtx val;
6054
  struct libfunc_entry e;
6055
  struct libfunc_entry **slot;
6056
  e.optab = (size_t) (optable - &convert_optab_table[0]);
6057
  e.mode1 = tmode;
6058
  e.mode2 = fmode;
6059
 
6060
  if (name)
6061
    val = init_one_libfunc (name);
6062
  else
6063
    val = 0;
6064
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6065
  if (*slot == NULL)
6066
    *slot = ggc_alloc_libfunc_entry ();
6067
  (*slot)->optab = (size_t) (optable - &convert_optab_table[0]);
6068
  (*slot)->mode1 = tmode;
6069
  (*slot)->mode2 = fmode;
6070
  (*slot)->libfunc = val;
6071
}
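/* Hedged usage sketch, not part of optabs.c: a target's init_libfuncs hook
   (targetm.init_libfuncs, invoked at the end of init_optabs below) would
   typically override individual entries like this; the libgcc names are the
   conventional ones and serve purely as examples:

     set_optab_libfunc (smul_optab, SImode, "__mulsi3");
     set_conv_libfunc (sfloat_optab, DFmode, SImode, "__floatsidf");

   Passing a null NAME instead stores a zero libfunc, which removes any
   previously registered libcall for that entry.  */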
6072
 
6073
/* Call this to initialize the contents of the optabs
6074
   appropriately for the current target machine.  */
6075
 
6076
void
6077
init_optabs (void)
6078
{
6079
  if (libfunc_hash)
6080
    {
6081
      htab_empty (libfunc_hash);
6082
      /* We statically initialize the insn_codes with the equivalent of
6083
         CODE_FOR_nothing.  Repeat the process if reinitialising.  */
6084
      init_insn_codes ();
6085
    }
6086
  else
6087
    libfunc_hash = htab_create_ggc (10, hash_libfunc, eq_libfunc, NULL);
6088
 
6089
  init_optab (add_optab, PLUS);
6090
  init_optabv (addv_optab, PLUS);
6091
  init_optab (sub_optab, MINUS);
6092
  init_optabv (subv_optab, MINUS);
6093
  init_optab (ssadd_optab, SS_PLUS);
6094
  init_optab (usadd_optab, US_PLUS);
6095
  init_optab (sssub_optab, SS_MINUS);
6096
  init_optab (ussub_optab, US_MINUS);
6097
  init_optab (smul_optab, MULT);
6098
  init_optab (ssmul_optab, SS_MULT);
6099
  init_optab (usmul_optab, US_MULT);
6100
  init_optabv (smulv_optab, MULT);
6101
  init_optab (smul_highpart_optab, UNKNOWN);
6102
  init_optab (umul_highpart_optab, UNKNOWN);
6103
  init_optab (smul_widen_optab, UNKNOWN);
6104
  init_optab (umul_widen_optab, UNKNOWN);
6105
  init_optab (usmul_widen_optab, UNKNOWN);
6106
  init_optab (smadd_widen_optab, UNKNOWN);
6107
  init_optab (umadd_widen_optab, UNKNOWN);
6108
  init_optab (ssmadd_widen_optab, UNKNOWN);
6109
  init_optab (usmadd_widen_optab, UNKNOWN);
6110
  init_optab (smsub_widen_optab, UNKNOWN);
6111
  init_optab (umsub_widen_optab, UNKNOWN);
6112
  init_optab (ssmsub_widen_optab, UNKNOWN);
6113
  init_optab (usmsub_widen_optab, UNKNOWN);
6114
  init_optab (sdiv_optab, DIV);
6115
  init_optab (ssdiv_optab, SS_DIV);
6116
  init_optab (usdiv_optab, US_DIV);
6117
  init_optabv (sdivv_optab, DIV);
6118
  init_optab (sdivmod_optab, UNKNOWN);
6119
  init_optab (udiv_optab, UDIV);
6120
  init_optab (udivmod_optab, UNKNOWN);
6121
  init_optab (smod_optab, MOD);
6122
  init_optab (umod_optab, UMOD);
6123
  init_optab (fmod_optab, UNKNOWN);
6124
  init_optab (remainder_optab, UNKNOWN);
6125
  init_optab (ftrunc_optab, UNKNOWN);
6126
  init_optab (and_optab, AND);
6127
  init_optab (ior_optab, IOR);
6128
  init_optab (xor_optab, XOR);
6129
  init_optab (ashl_optab, ASHIFT);
6130
  init_optab (ssashl_optab, SS_ASHIFT);
6131
  init_optab (usashl_optab, US_ASHIFT);
6132
  init_optab (ashr_optab, ASHIFTRT);
6133
  init_optab (lshr_optab, LSHIFTRT);
6134
  init_optabv (vashl_optab, ASHIFT);
6135
  init_optabv (vashr_optab, ASHIFTRT);
6136
  init_optabv (vlshr_optab, LSHIFTRT);
6137
  init_optab (rotl_optab, ROTATE);
6138
  init_optab (rotr_optab, ROTATERT);
6139
  init_optab (smin_optab, SMIN);
6140
  init_optab (smax_optab, SMAX);
6141
  init_optab (umin_optab, UMIN);
6142
  init_optab (umax_optab, UMAX);
6143
  init_optab (pow_optab, UNKNOWN);
6144
  init_optab (atan2_optab, UNKNOWN);
6145
  init_optab (fma_optab, FMA);
6146
  init_optab (fms_optab, UNKNOWN);
6147
  init_optab (fnma_optab, UNKNOWN);
6148
  init_optab (fnms_optab, UNKNOWN);
6149
 
6150
  /* These three have codes assigned exclusively for the sake of
6151
     have_insn_for.  */
6152
  init_optab (mov_optab, SET);
6153
  init_optab (movstrict_optab, STRICT_LOW_PART);
6154
  init_optab (cbranch_optab, COMPARE);
6155
 
6156
  init_optab (cmov_optab, UNKNOWN);
6157
  init_optab (cstore_optab, UNKNOWN);
6158
  init_optab (ctrap_optab, UNKNOWN);
6159
 
6160
  init_optab (storent_optab, UNKNOWN);
6161
 
6162
  init_optab (cmp_optab, UNKNOWN);
6163
  init_optab (ucmp_optab, UNKNOWN);
6164
 
6165
  init_optab (eq_optab, EQ);
6166
  init_optab (ne_optab, NE);
6167
  init_optab (gt_optab, GT);
6168
  init_optab (ge_optab, GE);
6169
  init_optab (lt_optab, LT);
6170
  init_optab (le_optab, LE);
6171
  init_optab (unord_optab, UNORDERED);
6172
 
6173
  init_optab (neg_optab, NEG);
6174
  init_optab (ssneg_optab, SS_NEG);
6175
  init_optab (usneg_optab, US_NEG);
6176
  init_optabv (negv_optab, NEG);
6177
  init_optab (abs_optab, ABS);
6178
  init_optabv (absv_optab, ABS);
6179
  init_optab (addcc_optab, UNKNOWN);
6180
  init_optab (one_cmpl_optab, NOT);
6181
  init_optab (bswap_optab, BSWAP);
6182
  init_optab (ffs_optab, FFS);
6183
  init_optab (clz_optab, CLZ);
6184
  init_optab (ctz_optab, CTZ);
6185
  init_optab (clrsb_optab, CLRSB);
6186
  init_optab (popcount_optab, POPCOUNT);
6187
  init_optab (parity_optab, PARITY);
6188
  init_optab (sqrt_optab, SQRT);
6189
  init_optab (floor_optab, UNKNOWN);
6190
  init_optab (ceil_optab, UNKNOWN);
6191
  init_optab (round_optab, UNKNOWN);
6192
  init_optab (btrunc_optab, UNKNOWN);
6193
  init_optab (nearbyint_optab, UNKNOWN);
6194
  init_optab (rint_optab, UNKNOWN);
6195
  init_optab (sincos_optab, UNKNOWN);
6196
  init_optab (sin_optab, UNKNOWN);
6197
  init_optab (asin_optab, UNKNOWN);
6198
  init_optab (cos_optab, UNKNOWN);
6199
  init_optab (acos_optab, UNKNOWN);
6200
  init_optab (exp_optab, UNKNOWN);
6201
  init_optab (exp10_optab, UNKNOWN);
6202
  init_optab (exp2_optab, UNKNOWN);
6203
  init_optab (expm1_optab, UNKNOWN);
6204
  init_optab (ldexp_optab, UNKNOWN);
6205
  init_optab (scalb_optab, UNKNOWN);
6206
  init_optab (significand_optab, UNKNOWN);
6207
  init_optab (logb_optab, UNKNOWN);
6208
  init_optab (ilogb_optab, UNKNOWN);
6209
  init_optab (log_optab, UNKNOWN);
6210
  init_optab (log10_optab, UNKNOWN);
6211
  init_optab (log2_optab, UNKNOWN);
6212
  init_optab (log1p_optab, UNKNOWN);
6213
  init_optab (tan_optab, UNKNOWN);
6214
  init_optab (atan_optab, UNKNOWN);
6215
  init_optab (copysign_optab, UNKNOWN);
6216
  init_optab (signbit_optab, UNKNOWN);
6217
 
6218
  init_optab (isinf_optab, UNKNOWN);
6219
 
6220
  init_optab (strlen_optab, UNKNOWN);
6221
  init_optab (push_optab, UNKNOWN);
6222
 
6223
  init_optab (reduc_smax_optab, UNKNOWN);
6224
  init_optab (reduc_umax_optab, UNKNOWN);
6225
  init_optab (reduc_smin_optab, UNKNOWN);
6226
  init_optab (reduc_umin_optab, UNKNOWN);
6227
  init_optab (reduc_splus_optab, UNKNOWN);
6228
  init_optab (reduc_uplus_optab, UNKNOWN);
6229
 
6230
  init_optab (ssum_widen_optab, UNKNOWN);
6231
  init_optab (usum_widen_optab, UNKNOWN);
6232
  init_optab (sdot_prod_optab, UNKNOWN);
6233
  init_optab (udot_prod_optab, UNKNOWN);
6234
 
6235
  init_optab (vec_extract_optab, UNKNOWN);
6236
  init_optab (vec_set_optab, UNKNOWN);
6237
  init_optab (vec_init_optab, UNKNOWN);
6238
  init_optab (vec_shl_optab, UNKNOWN);
6239
  init_optab (vec_shr_optab, UNKNOWN);
6240
  init_optab (vec_realign_load_optab, UNKNOWN);
6241
  init_optab (movmisalign_optab, UNKNOWN);
6242
  init_optab (vec_widen_umult_hi_optab, UNKNOWN);
6243
  init_optab (vec_widen_umult_lo_optab, UNKNOWN);
6244
  init_optab (vec_widen_smult_hi_optab, UNKNOWN);
6245
  init_optab (vec_widen_smult_lo_optab, UNKNOWN);
6246
  init_optab (vec_widen_ushiftl_hi_optab, UNKNOWN);
6247
  init_optab (vec_widen_ushiftl_lo_optab, UNKNOWN);
6248
  init_optab (vec_widen_sshiftl_hi_optab, UNKNOWN);
6249
  init_optab (vec_widen_sshiftl_lo_optab, UNKNOWN);
6250
  init_optab (vec_unpacks_hi_optab, UNKNOWN);
6251
  init_optab (vec_unpacks_lo_optab, UNKNOWN);
6252
  init_optab (vec_unpacku_hi_optab, UNKNOWN);
6253
  init_optab (vec_unpacku_lo_optab, UNKNOWN);
6254
  init_optab (vec_unpacks_float_hi_optab, UNKNOWN);
6255
  init_optab (vec_unpacks_float_lo_optab, UNKNOWN);
6256
  init_optab (vec_unpacku_float_hi_optab, UNKNOWN);
6257
  init_optab (vec_unpacku_float_lo_optab, UNKNOWN);
6258
  init_optab (vec_pack_trunc_optab, UNKNOWN);
6259
  init_optab (vec_pack_usat_optab, UNKNOWN);
6260
  init_optab (vec_pack_ssat_optab, UNKNOWN);
6261
  init_optab (vec_pack_ufix_trunc_optab, UNKNOWN);
6262
  init_optab (vec_pack_sfix_trunc_optab, UNKNOWN);
6263
 
6264
  init_optab (powi_optab, UNKNOWN);
6265
 
6266
  /* Conversions.  */
6267
  init_convert_optab (sext_optab, SIGN_EXTEND);
6268
  init_convert_optab (zext_optab, ZERO_EXTEND);
6269
  init_convert_optab (trunc_optab, TRUNCATE);
6270
  init_convert_optab (sfix_optab, FIX);
6271
  init_convert_optab (ufix_optab, UNSIGNED_FIX);
6272
  init_convert_optab (sfixtrunc_optab, UNKNOWN);
6273
  init_convert_optab (ufixtrunc_optab, UNKNOWN);
6274
  init_convert_optab (sfloat_optab, FLOAT);
6275
  init_convert_optab (ufloat_optab, UNSIGNED_FLOAT);
6276
  init_convert_optab (lrint_optab, UNKNOWN);
6277
  init_convert_optab (lround_optab, UNKNOWN);
6278
  init_convert_optab (lfloor_optab, UNKNOWN);
6279
  init_convert_optab (lceil_optab, UNKNOWN);
6280
 
6281
  init_convert_optab (fract_optab, FRACT_CONVERT);
6282
  init_convert_optab (fractuns_optab, UNSIGNED_FRACT_CONVERT);
6283
  init_convert_optab (satfract_optab, SAT_FRACT);
6284
  init_convert_optab (satfractuns_optab, UNSIGNED_SAT_FRACT);
6285
 
6286
  /* Fill in the optabs with the insns we support.  */
6287
  init_all_optabs ();
6288
 
6289
  /* Initialize the optabs with the names of the library functions.  */
6290
  add_optab->libcall_basename = "add";
6291
  add_optab->libcall_suffix = '3';
6292
  add_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6293
  addv_optab->libcall_basename = "add";
6294
  addv_optab->libcall_suffix = '3';
6295
  addv_optab->libcall_gen = gen_intv_fp_libfunc;
6296
  ssadd_optab->libcall_basename = "ssadd";
6297
  ssadd_optab->libcall_suffix = '3';
6298
  ssadd_optab->libcall_gen = gen_signed_fixed_libfunc;
6299
  usadd_optab->libcall_basename = "usadd";
6300
  usadd_optab->libcall_suffix = '3';
6301
  usadd_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6302
  sub_optab->libcall_basename = "sub";
6303
  sub_optab->libcall_suffix = '3';
6304
  sub_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6305
  subv_optab->libcall_basename = "sub";
6306
  subv_optab->libcall_suffix = '3';
6307
  subv_optab->libcall_gen = gen_intv_fp_libfunc;
6308
  sssub_optab->libcall_basename = "sssub";
6309
  sssub_optab->libcall_suffix = '3';
6310
  sssub_optab->libcall_gen = gen_signed_fixed_libfunc;
6311
  ussub_optab->libcall_basename = "ussub";
6312
  ussub_optab->libcall_suffix = '3';
6313
  ussub_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6314
  smul_optab->libcall_basename = "mul";
6315
  smul_optab->libcall_suffix = '3';
6316
  smul_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6317
  smulv_optab->libcall_basename = "mul";
6318
  smulv_optab->libcall_suffix = '3';
6319
  smulv_optab->libcall_gen = gen_intv_fp_libfunc;
6320
  ssmul_optab->libcall_basename = "ssmul";
6321
  ssmul_optab->libcall_suffix = '3';
6322
  ssmul_optab->libcall_gen = gen_signed_fixed_libfunc;
6323
  usmul_optab->libcall_basename = "usmul";
6324
  usmul_optab->libcall_suffix = '3';
6325
  usmul_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6326
  sdiv_optab->libcall_basename = "div";
6327
  sdiv_optab->libcall_suffix = '3';
6328
  sdiv_optab->libcall_gen = gen_int_fp_signed_fixed_libfunc;
6329
  sdivv_optab->libcall_basename = "divv";
6330
  sdivv_optab->libcall_suffix = '3';
6331
  sdivv_optab->libcall_gen = gen_int_libfunc;
6332
  ssdiv_optab->libcall_basename = "ssdiv";
6333
  ssdiv_optab->libcall_suffix = '3';
6334
  ssdiv_optab->libcall_gen = gen_signed_fixed_libfunc;
6335
  udiv_optab->libcall_basename = "udiv";
6336
  udiv_optab->libcall_suffix = '3';
6337
  udiv_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6338
  usdiv_optab->libcall_basename = "usdiv";
6339
  usdiv_optab->libcall_suffix = '3';
6340
  usdiv_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6341
  sdivmod_optab->libcall_basename = "divmod";
6342
  sdivmod_optab->libcall_suffix = '4';
6343
  sdivmod_optab->libcall_gen = gen_int_libfunc;
6344
  udivmod_optab->libcall_basename = "udivmod";
6345
  udivmod_optab->libcall_suffix = '4';
6346
  udivmod_optab->libcall_gen = gen_int_libfunc;
6347
  smod_optab->libcall_basename = "mod";
6348
  smod_optab->libcall_suffix = '3';
6349
  smod_optab->libcall_gen = gen_int_libfunc;
6350
  umod_optab->libcall_basename = "umod";
6351
  umod_optab->libcall_suffix = '3';
6352
  umod_optab->libcall_gen = gen_int_libfunc;
6353
  ftrunc_optab->libcall_basename = "ftrunc";
6354
  ftrunc_optab->libcall_suffix = '2';
6355
  ftrunc_optab->libcall_gen = gen_fp_libfunc;
6356
  and_optab->libcall_basename = "and";
6357
  and_optab->libcall_suffix = '3';
6358
  and_optab->libcall_gen = gen_int_libfunc;
6359
  ior_optab->libcall_basename = "ior";
6360
  ior_optab->libcall_suffix = '3';
6361
  ior_optab->libcall_gen = gen_int_libfunc;
6362
  xor_optab->libcall_basename = "xor";
6363
  xor_optab->libcall_suffix = '3';
6364
  xor_optab->libcall_gen = gen_int_libfunc;
6365
  ashl_optab->libcall_basename = "ashl";
6366
  ashl_optab->libcall_suffix = '3';
6367
  ashl_optab->libcall_gen = gen_int_fixed_libfunc;
6368
  ssashl_optab->libcall_basename = "ssashl";
6369
  ssashl_optab->libcall_suffix = '3';
6370
  ssashl_optab->libcall_gen = gen_signed_fixed_libfunc;
6371
  usashl_optab->libcall_basename = "usashl";
6372
  usashl_optab->libcall_suffix = '3';
6373
  usashl_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6374
  ashr_optab->libcall_basename = "ashr";
6375
  ashr_optab->libcall_suffix = '3';
6376
  ashr_optab->libcall_gen = gen_int_signed_fixed_libfunc;
6377
  lshr_optab->libcall_basename = "lshr";
6378
  lshr_optab->libcall_suffix = '3';
6379
  lshr_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6380
  smin_optab->libcall_basename = "min";
6381
  smin_optab->libcall_suffix = '3';
6382
  smin_optab->libcall_gen = gen_int_fp_libfunc;
6383
  smax_optab->libcall_basename = "max";
6384
  smax_optab->libcall_suffix = '3';
6385
  smax_optab->libcall_gen = gen_int_fp_libfunc;
6386
  umin_optab->libcall_basename = "umin";
6387
  umin_optab->libcall_suffix = '3';
6388
  umin_optab->libcall_gen = gen_int_libfunc;
6389
  umax_optab->libcall_basename = "umax";
6390
  umax_optab->libcall_suffix = '3';
6391
  umax_optab->libcall_gen = gen_int_libfunc;
6392
  neg_optab->libcall_basename = "neg";
6393
  neg_optab->libcall_suffix = '2';
6394
  neg_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6395
  ssneg_optab->libcall_basename = "ssneg";
6396
  ssneg_optab->libcall_suffix = '2';
6397
  ssneg_optab->libcall_gen = gen_signed_fixed_libfunc;
6398
  usneg_optab->libcall_basename = "usneg";
6399
  usneg_optab->libcall_suffix = '2';
6400
  usneg_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6401
  negv_optab->libcall_basename = "neg";
6402
  negv_optab->libcall_suffix = '2';
6403
  negv_optab->libcall_gen = gen_intv_fp_libfunc;
6404
  one_cmpl_optab->libcall_basename = "one_cmpl";
6405
  one_cmpl_optab->libcall_suffix = '2';
6406
  one_cmpl_optab->libcall_gen = gen_int_libfunc;
6407
  ffs_optab->libcall_basename = "ffs";
6408
  ffs_optab->libcall_suffix = '2';
6409
  ffs_optab->libcall_gen = gen_int_libfunc;
6410
  clz_optab->libcall_basename = "clz";
6411
  clz_optab->libcall_suffix = '2';
6412
  clz_optab->libcall_gen = gen_int_libfunc;
6413
  ctz_optab->libcall_basename = "ctz";
6414
  ctz_optab->libcall_suffix = '2';
6415
  ctz_optab->libcall_gen = gen_int_libfunc;
6416
  clrsb_optab->libcall_basename = "clrsb";
6417
  clrsb_optab->libcall_suffix = '2';
6418
  clrsb_optab->libcall_gen = gen_int_libfunc;
6419
  popcount_optab->libcall_basename = "popcount";
6420
  popcount_optab->libcall_suffix = '2';
6421
  popcount_optab->libcall_gen = gen_int_libfunc;
6422
  parity_optab->libcall_basename = "parity";
6423
  parity_optab->libcall_suffix = '2';
6424
  parity_optab->libcall_gen = gen_int_libfunc;
6425
 
6426
  /* Comparison libcalls for integers MUST come in pairs,
6427
     signed/unsigned.  */
6428
  cmp_optab->libcall_basename = "cmp";
6429
  cmp_optab->libcall_suffix = '2';
6430
  cmp_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6431
  ucmp_optab->libcall_basename = "ucmp";
6432
  ucmp_optab->libcall_suffix = '2';
6433
  ucmp_optab->libcall_gen = gen_int_libfunc;
6434
 
6435
  /* EQ etc are floating point only.  */
6436
  eq_optab->libcall_basename = "eq";
6437
  eq_optab->libcall_suffix = '2';
6438
  eq_optab->libcall_gen = gen_fp_libfunc;
6439
  ne_optab->libcall_basename = "ne";
6440
  ne_optab->libcall_suffix = '2';
6441
  ne_optab->libcall_gen = gen_fp_libfunc;
6442
  gt_optab->libcall_basename = "gt";
6443
  gt_optab->libcall_suffix = '2';
6444
  gt_optab->libcall_gen = gen_fp_libfunc;
6445
  ge_optab->libcall_basename = "ge";
6446
  ge_optab->libcall_suffix = '2';
6447
  ge_optab->libcall_gen = gen_fp_libfunc;
6448
  lt_optab->libcall_basename = "lt";
6449
  lt_optab->libcall_suffix = '2';
6450
  lt_optab->libcall_gen = gen_fp_libfunc;
6451
  le_optab->libcall_basename = "le";
6452
  le_optab->libcall_suffix = '2';
6453
  le_optab->libcall_gen = gen_fp_libfunc;
6454
  unord_optab->libcall_basename = "unord";
6455
  unord_optab->libcall_suffix = '2';
6456
  unord_optab->libcall_gen = gen_fp_libfunc;
6457
 
6458
  powi_optab->libcall_basename = "powi";
6459
  powi_optab->libcall_suffix = '2';
6460
  powi_optab->libcall_gen = gen_fp_libfunc;
6461
 
6462
  /* Conversions.  */
6463
  sfloat_optab->libcall_basename = "float";
6464
  sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
6465
  ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
6466
  sfix_optab->libcall_basename = "fix";
6467
  sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6468
  ufix_optab->libcall_basename = "fixuns";
6469
  ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6470
  lrint_optab->libcall_basename = "lrint";
6471
  lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6472
  lround_optab->libcall_basename = "lround";
6473
  lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6474
  lfloor_optab->libcall_basename = "lfloor";
6475
  lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6476
  lceil_optab->libcall_basename = "lceil";
6477
  lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6478
 
6479
  /* sext_optab is also used for FLOAT_EXTEND.  */
6480
  sext_optab->libcall_basename = "extend";
6481
  sext_optab->libcall_gen = gen_extend_conv_libfunc;
6482
  trunc_optab->libcall_basename = "trunc";
6483
  trunc_optab->libcall_gen = gen_trunc_conv_libfunc;
6484
 
6485
  /* Conversions for fixed-point modes and other modes.  */
6486
  fract_optab->libcall_basename = "fract";
6487
  fract_optab->libcall_gen = gen_fract_conv_libfunc;
6488
  satfract_optab->libcall_basename = "satfract";
6489
  satfract_optab->libcall_gen = gen_satfract_conv_libfunc;
6490
  fractuns_optab->libcall_basename = "fractuns";
6491
  fractuns_optab->libcall_gen = gen_fractuns_conv_libfunc;
6492
  satfractuns_optab->libcall_basename = "satfractuns";
6493
  satfractuns_optab->libcall_gen = gen_satfractuns_conv_libfunc;
6494
 
6495
  /* The ffs function operates on `int'.  Fall back on it if we do not
6496
     have a libgcc2 function for that width.  */
6497
  if (INT_TYPE_SIZE < BITS_PER_WORD)
6498
    set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
6499
                       "ffs");
6500
 
6501
  /* Explicitly initialize the bswap libfuncs since we need them to be
6502
     valid for things other than word_mode.  */
6503
  if (targetm.libfunc_gnu_prefix)
6504
    {
6505
      set_optab_libfunc (bswap_optab, SImode, "__gnu_bswapsi2");
6506
      set_optab_libfunc (bswap_optab, DImode, "__gnu_bswapdi2");
6507
    }
6508
  else
6509
    {
6510
      set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
6511
      set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
6512
    }
6513
 
6514
  /* Use cabs for double complex abs, since systems generally have cabs.
6515
     Don't define any libcall for float complex, so that cabs will be used.  */
6516
  if (complex_double_type_node)
6517
    set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");
6518
 
6519
  abort_libfunc = init_one_libfunc ("abort");
6520
  memcpy_libfunc = init_one_libfunc ("memcpy");
6521
  memmove_libfunc = init_one_libfunc ("memmove");
6522
  memcmp_libfunc = init_one_libfunc ("memcmp");
6523
  memset_libfunc = init_one_libfunc ("memset");
6524
  setbits_libfunc = init_one_libfunc ("__setbits");
6525
 
6526
#ifndef DONT_USE_BUILTIN_SETJMP
6527
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
6528
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
6529
#else
6530
  setjmp_libfunc = init_one_libfunc ("setjmp");
6531
  longjmp_libfunc = init_one_libfunc ("longjmp");
6532
#endif
6533
  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
6534
  unwind_sjlj_unregister_libfunc
6535
    = init_one_libfunc ("_Unwind_SjLj_Unregister");
6536
 
6537
  /* For function entry/exit instrumentation.  */
6538
  profile_function_entry_libfunc
6539
    = init_one_libfunc ("__cyg_profile_func_enter");
6540
  profile_function_exit_libfunc
6541
    = init_one_libfunc ("__cyg_profile_func_exit");
6542
 
6543
  gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
6544
 
6545
  /* Allow the target to add more libcalls or rename some, etc.  */
6546
  targetm.init_libfuncs ();
6547
}
6548
 
6549
/* A helper function for init_sync_libfuncs.  Using the basename BASE,
6550
   install libfuncs into TAB for BASE_N for 1 <= N <= MAX.  */
6551
 
6552
static void
6553
init_sync_libfuncs_1 (optab tab, const char *base, int max)
6554
{
6555
  enum machine_mode mode;
6556
  char buf[64];
6557
  size_t len = strlen (base);
6558
  int i;
6559
 
6560
  gcc_assert (max <= 8);
6561
  gcc_assert (len + 3 < sizeof (buf));
6562
 
6563
  memcpy (buf, base, len);
6564
  buf[len] = '_';
6565
  buf[len + 1] = '0';
6566
  buf[len + 2] = '\0';
6567
 
6568
  mode = QImode;
6569
  for (i = 1; i <= max; i *= 2)
6570
    {
6571
      buf[len + 1] = '0' + i;
6572
      set_optab_libfunc (tab, mode, buf);
6573
      mode = GET_MODE_2XWIDER_MODE (mode);
6574
    }
6575
}
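/* Editorial note, not part of optabs.c: the loop above steps MODE through
   QImode, HImode, SImode, DImode (via GET_MODE_2XWIDER_MODE) and patches a
   single digit into BUF, so for BASE "__sync_fetch_and_add" and MAX 8 it
   registers __sync_fetch_and_add_1, _2, _4 and _8.  A standalone sketch of
   just the name generation (single-digit sizes only, matching the
   gcc_assert (max <= 8) above):

     char buf[64];
     int i;
     for (i = 1; i <= 8; i *= 2)
       snprintf (buf, sizeof buf, "%s_%d", "__sync_fetch_and_add", i);
*/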
6576
 
6577
void
6578
init_sync_libfuncs (int max)
6579
{
6580
  init_sync_libfuncs_1 (sync_compare_and_swap_optab,
6581
                        "__sync_val_compare_and_swap", max);
6582
  init_sync_libfuncs_1 (sync_lock_test_and_set_optab,
6583
                        "__sync_lock_test_and_set", max);
6584
 
6585
  init_sync_libfuncs_1 (sync_old_add_optab, "__sync_fetch_and_add", max);
6586
  init_sync_libfuncs_1 (sync_old_sub_optab, "__sync_fetch_and_sub", max);
6587
  init_sync_libfuncs_1 (sync_old_ior_optab, "__sync_fetch_and_or", max);
6588
  init_sync_libfuncs_1 (sync_old_and_optab, "__sync_fetch_and_and", max);
6589
  init_sync_libfuncs_1 (sync_old_xor_optab, "__sync_fetch_and_xor", max);
6590
  init_sync_libfuncs_1 (sync_old_nand_optab, "__sync_fetch_and_nand", max);
6591
 
6592
  init_sync_libfuncs_1 (sync_new_add_optab, "__sync_add_and_fetch", max);
6593
  init_sync_libfuncs_1 (sync_new_sub_optab, "__sync_sub_and_fetch", max);
6594
  init_sync_libfuncs_1 (sync_new_ior_optab, "__sync_or_and_fetch", max);
6595
  init_sync_libfuncs_1 (sync_new_and_optab, "__sync_and_and_fetch", max);
6596
  init_sync_libfuncs_1 (sync_new_xor_optab, "__sync_xor_and_fetch", max);
6597
  init_sync_libfuncs_1 (sync_new_nand_optab, "__sync_nand_and_fetch", max);
6598
}
6599
 
6600
/* Print information about the current contents of the optabs on
6601
   STDERR.  */
6602
 
6603
DEBUG_FUNCTION void
6604
debug_optab_libfuncs (void)
6605
{
6606
  int i;
6607
  int j;
6608
  int k;
6609
 
6610
  /* Dump the arithmetic optabs.  */
6611
  for (i = 0; i != (int) OTI_MAX; i++)
6612
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
6613
      {
6614
        optab o;
6615
        rtx l;
6616
 
6617
        o = &optab_table[i];
6618
        l = optab_libfunc (o, (enum machine_mode) j);
6619
        if (l)
6620
          {
6621
            gcc_assert (GET_CODE (l) == SYMBOL_REF);
6622
            fprintf (stderr, "%s\t%s:\t%s\n",
6623
                     GET_RTX_NAME (o->code),
6624
                     GET_MODE_NAME (j),
6625
                     XSTR (l, 0));
6626
          }
6627
      }
6628
 
6629
  /* Dump the conversion optabs.  */
6630
  for (i = 0; i < (int) COI_MAX; ++i)
6631
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
6632
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
6633
        {
6634
          convert_optab o;
6635
          rtx l;
6636
 
6637
          o = &convert_optab_table[i];
6638
          l = convert_optab_libfunc (o, (enum machine_mode) j,
6639
                                     (enum machine_mode) k);
6640
          if (l)
6641
            {
6642
              gcc_assert (GET_CODE (l) == SYMBOL_REF);
6643
              fprintf (stderr, "%s\t%s\t%s:\t%s\n",
6644
                       GET_RTX_NAME (o->code),
6645
                       GET_MODE_NAME (j),
6646
                       GET_MODE_NAME (k),
6647
                       XSTR (l, 0));
6648
            }
6649
        }
6650
}
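/* Editorial note, not part of optabs.c: given the "%s\t%s:\t%s\n" format
   above, a typical dumped line for add_optab in SImode would look roughly
   like

     plus    SI:     __addsi3

   assuming the standard libgcc name was registered for that entry; this is
   an illustration, not captured output.  */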
6651
 
6652
 
6653
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6654
   CODE.  Return 0 on failure.  */
6655
 
6656
rtx
6657
gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
6658
{
6659
  enum machine_mode mode = GET_MODE (op1);
6660
  enum insn_code icode;
6661
  rtx insn;
6662
  rtx trap_rtx;
6663
 
6664
  if (mode == VOIDmode)
6665
    return 0;
6666
 
6667
  icode = optab_handler (ctrap_optab, mode);
6668
  if (icode == CODE_FOR_nothing)
6669
    return 0;
6670
 
6671
  /* Some targets only accept a zero trap code.  */
6672
  if (!insn_operand_matches (icode, 3, tcode))
6673
    return 0;
6674
 
6675
  do_pending_stack_adjust ();
6676
  start_sequence ();
6677
  prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
6678
                    &trap_rtx, &mode);
6679
  if (!trap_rtx)
6680
    insn = NULL_RTX;
6681
  else
6682
    insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
6683
                            tcode);
6684
 
6685
  /* If that failed, then give up.  */
6686
  if (insn == 0)
6687
    {
6688
      end_sequence ();
6689
      return 0;
6690
    }
6691
 
6692
  emit_insn (insn);
6693
  insn = get_insns ();
6694
  end_sequence ();
6695
  return insn;
6696
}
6697
 
6698
/* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6699
   or unsigned operation code.  */
6700
 
6701
static enum rtx_code
6702
get_rtx_code (enum tree_code tcode, bool unsignedp)
6703
{
6704
  enum rtx_code code;
6705
  switch (tcode)
6706
    {
6707
    case EQ_EXPR:
6708
      code = EQ;
6709
      break;
6710
    case NE_EXPR:
6711
      code = NE;
6712
      break;
6713
    case LT_EXPR:
6714
      code = unsignedp ? LTU : LT;
6715
      break;
6716
    case LE_EXPR:
6717
      code = unsignedp ? LEU : LE;
6718
      break;
6719
    case GT_EXPR:
6720
      code = unsignedp ? GTU : GT;
6721
      break;
6722
    case GE_EXPR:
6723
      code = unsignedp ? GEU : GE;
6724
      break;
6725
 
6726
    case UNORDERED_EXPR:
6727
      code = UNORDERED;
6728
      break;
6729
    case ORDERED_EXPR:
6730
      code = ORDERED;
6731
      break;
6732
    case UNLT_EXPR:
6733
      code = UNLT;
6734
      break;
6735
    case UNLE_EXPR:
6736
      code = UNLE;
6737
      break;
6738
    case UNGT_EXPR:
6739
      code = UNGT;
6740
      break;
6741
    case UNGE_EXPR:
6742
      code = UNGE;
6743
      break;
6744
    case UNEQ_EXPR:
6745
      code = UNEQ;
6746
      break;
6747
    case LTGT_EXPR:
6748
      code = LTGT;
6749
      break;
6750
 
6751
    default:
6752
      gcc_unreachable ();
6753
    }
6754
  return code;
6755
}
6756
 
6757
/* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
6758
   unsigned operators.  Do not generate a compare instruction.  */
6759
 
6760
static rtx
6761
vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6762
{
6763
  struct expand_operand ops[2];
6764
  enum rtx_code rcode;
6765
  tree t_op0, t_op1;
6766
  rtx rtx_op0, rtx_op1;
6767
 
6768
  /* This is unlikely.  While generating VEC_COND_EXPR, the auto-vectorizer
6769
     ensures that the condition is a relational operation.  */
6770
  gcc_assert (COMPARISON_CLASS_P (cond));
6771
 
6772
  rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6773
  t_op0 = TREE_OPERAND (cond, 0);
6774
  t_op1 = TREE_OPERAND (cond, 1);
6775
 
6776
  /* Expand operands.  */
6777
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6778
                         EXPAND_STACK_PARM);
6779
  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
6780
                         EXPAND_STACK_PARM);
6781
 
6782
  create_input_operand (&ops[0], rtx_op0, GET_MODE (rtx_op0));
6783
  create_input_operand (&ops[1], rtx_op1, GET_MODE (rtx_op1));
6784
  if (!maybe_legitimize_operands (icode, 4, 2, ops))
6785
    gcc_unreachable ();
6786
  return gen_rtx_fmt_ee (rcode, VOIDmode, ops[0].value, ops[1].value);
6787
}
6788
 
6789
/* Return true if VEC_PERM_EXPR can be expanded using SIMD extensions
6790
   of the CPU.  SEL may be NULL, which stands for an unknown constant.  */
6791
 
6792
bool
6793
can_vec_perm_p (enum machine_mode mode, bool variable,
6794
                const unsigned char *sel)
6795
{
6796
  enum machine_mode qimode;
6797
 
6798
  /* If the target doesn't implement a vector mode for the vector type,
6799
     then no operations are supported.  */
6800
  if (!VECTOR_MODE_P (mode))
6801
    return false;
6802
 
6803
  if (!variable)
6804
    {
6805
      if (direct_optab_handler (vec_perm_const_optab, mode) != CODE_FOR_nothing
6806
          && (sel == NULL
6807
              || targetm.vectorize.vec_perm_const_ok == NULL
6808
              || targetm.vectorize.vec_perm_const_ok (mode, sel)))
6809
        return true;
6810
    }
6811
 
6812
  if (direct_optab_handler (vec_perm_optab, mode) != CODE_FOR_nothing)
6813
    return true;
6814
 
6815
  /* We allow fallback to a QI vector mode, and adjust the mask.  */
6816
  if (GET_MODE_INNER (mode) == QImode)
6817
    return false;
6818
  qimode = mode_for_vector (QImode, GET_MODE_SIZE (mode));
6819
  if (!VECTOR_MODE_P (qimode))
6820
    return false;
6821
 
6822
  /* ??? For completeness, we ought to check the QImode version of
6823
      vec_perm_const_optab.  But all users of this implicit lowering
6824
      feature implement the variable vec_perm_optab.  */
6825
  if (direct_optab_handler (vec_perm_optab, qimode) == CODE_FOR_nothing)
6826
    return false;
6827
 
6828
  /* In order to support the lowering of variable permutations,
6829
     we need to support shifts and adds.  */
6830
  if (variable)
6831
    {
6832
      if (GET_MODE_UNIT_SIZE (mode) > 2
6833
          && optab_handler (ashl_optab, mode) == CODE_FOR_nothing
6834
          && optab_handler (vashl_optab, mode) == CODE_FOR_nothing)
6835
        return false;
6836
      if (optab_handler (add_optab, qimode) == CODE_FOR_nothing)
6837
        return false;
6838
    }
6839
 
6840
  return true;
6841
}
6842
 
6843
/* A subroutine of expand_vec_perm for expanding one vec_perm insn.  */
6844
 
6845
static rtx
6846
expand_vec_perm_1 (enum insn_code icode, rtx target,
6847
                   rtx v0, rtx v1, rtx sel)
6848
{
6849
  enum machine_mode tmode = GET_MODE (target);
6850
  enum machine_mode smode = GET_MODE (sel);
6851
  struct expand_operand ops[4];
6852
 
6853
  create_output_operand (&ops[0], target, tmode);
6854
  create_input_operand (&ops[3], sel, smode);
6855
 
6856
  /* Make an effort to preserve v0 == v1.  The target expander is able to
6857
     rely on this to determine if we're permuting a single input operand.  */
6858
  if (rtx_equal_p (v0, v1))
6859
    {
6860
      if (!insn_operand_matches (icode, 1, v0))
6861
        v0 = force_reg (tmode, v0);
6862
      gcc_checking_assert (insn_operand_matches (icode, 1, v0));
6863
      gcc_checking_assert (insn_operand_matches (icode, 2, v0));
6864
 
6865
      create_fixed_operand (&ops[1], v0);
6866
      create_fixed_operand (&ops[2], v0);
6867
    }
6868
  else
6869
    {
6870
      create_input_operand (&ops[1], v0, tmode);
6871
      create_input_operand (&ops[2], v1, tmode);
6872
    }
6873
 
6874
  if (maybe_expand_insn (icode, 4, ops))
6875
    return ops[0].value;
6876
  return NULL_RTX;
6877
}
6878
 
6879
/* Generate instructions for vec_perm optab given its mode
6880
   and three operands.  */
6881
 
6882
rtx
6883
expand_vec_perm (enum machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
6884
{
6885
  enum insn_code icode;
6886
  enum machine_mode qimode;
6887
  unsigned int i, w, e, u;
6888
  rtx tmp, sel_qi = NULL;
6889
  rtvec vec;
6890
 
6891
  if (!target || GET_MODE (target) != mode)
6892
    target = gen_reg_rtx (mode);
6893
 
6894
  w = GET_MODE_SIZE (mode);
6895
  e = GET_MODE_NUNITS (mode);
6896
  u = GET_MODE_UNIT_SIZE (mode);
6897
 
6898
  /* Set QIMODE to a different vector mode with byte elements.
6899
     If no such mode, or if MODE already has byte elements, use VOIDmode.  */
6900
  qimode = VOIDmode;
6901
  if (GET_MODE_INNER (mode) != QImode)
6902
    {
6903
      qimode = mode_for_vector (QImode, w);
6904
      if (!VECTOR_MODE_P (qimode))
6905
        qimode = VOIDmode;
6906
    }
6907
 
6908
  /* If the input is a constant, expand it specially.  */
6909
  gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
6910
  if (GET_CODE (sel) == CONST_VECTOR)
6911
    {
6912
      icode = direct_optab_handler (vec_perm_const_optab, mode);
6913
      if (icode != CODE_FOR_nothing)
6914
        {
6915
          tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
6916
          if (tmp)
6917
            return tmp;
6918
        }
6919
 
6920
      /* Fall back to a constant byte-based permutation.  */
6921
      if (qimode != VOIDmode)
6922
        {
6923
          vec = rtvec_alloc (w);
6924
          for (i = 0; i < e; ++i)
6925
            {
6926
              unsigned int j, this_e;
6927
 
6928
              this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
6929
              this_e &= 2 * e - 1;
6930
              this_e *= u;
6931
 
6932
              for (j = 0; j < u; ++j)
6933
                RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
6934
            }
6935
          sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);
6936
 
6937
          icode = direct_optab_handler (vec_perm_const_optab, qimode);
6938
          if (icode != CODE_FOR_nothing)
6939
            {
6940
              tmp = expand_vec_perm_1 (icode, gen_lowpart (qimode, target),
6941
                                       gen_lowpart (qimode, v0),
6942
                                       gen_lowpart (qimode, v1), sel_qi);
6943
              if (tmp)
6944
                return gen_lowpart (mode, tmp);
6945
            }
6946
        }
6947
    }
6948
 
6949
  /* Otherwise expand as a fully variable permutation.  */
6950
  icode = direct_optab_handler (vec_perm_optab, mode);
6951
  if (icode != CODE_FOR_nothing)
6952
    {
6953
      tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
6954
      if (tmp)
6955
        return tmp;
6956
    }
6957
 
6958
  /* As a special case to aid several targets, lower the element-based
6959
     permutation to a byte-based permutation and try again.  */
6960
  if (qimode == VOIDmode)
6961
    return NULL_RTX;
6962
  icode = direct_optab_handler (vec_perm_optab, qimode);
6963
  if (icode == CODE_FOR_nothing)
6964
    return NULL_RTX;
6965
 
6966
  if (sel_qi == NULL)
6967
    {
6968
      /* Multiply each element by its byte size.  */
6969
      enum machine_mode selmode = GET_MODE (sel);
6970
      if (u == 2)
6971
        sel = expand_simple_binop (selmode, PLUS, sel, sel,
6972
                                   sel, 0, OPTAB_DIRECT);
6973
      else
6974
        sel = expand_simple_binop (selmode, ASHIFT, sel,
6975
                                   GEN_INT (exact_log2 (u)),
6976
                                   sel, 0, OPTAB_DIRECT);
6977
      gcc_assert (sel != NULL);
6978
 
6979
      /* Broadcast the low byte of each element into each of its bytes.  */
6980
      vec = rtvec_alloc (w);
6981
      for (i = 0; i < w; ++i)
6982
        {
6983
          int this_e = i / u * u;
6984
          if (BYTES_BIG_ENDIAN)
6985
            this_e += u - 1;
6986
          RTVEC_ELT (vec, i) = GEN_INT (this_e);
6987
        }
6988
      tmp = gen_rtx_CONST_VECTOR (qimode, vec);
6989
      sel = gen_lowpart (qimode, sel);
6990
      sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
6991
      gcc_assert (sel != NULL);
6992
 
6993
      /* Add the byte offset to each byte element.  */
6994
      /* Note that the definition of the indices here is memory ordering,
6995
         so there should be no difference between big and little endian.  */
6996
      vec = rtvec_alloc (w);
6997
      for (i = 0; i < w; ++i)
6998
        RTVEC_ELT (vec, i) = GEN_INT (i % u);
6999
      tmp = gen_rtx_CONST_VECTOR (qimode, vec);
7000
      sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
7001
                                    sel, 0, OPTAB_DIRECT);
7002
      gcc_assert (sel_qi != NULL);
7003
    }
7004
 
7005
  tmp = expand_vec_perm_1 (icode, gen_lowpart (qimode, target),
7006
                           gen_lowpart (qimode, v0),
7007
                           gen_lowpart (qimode, v1), sel_qi);
7008
  if (tmp)
7009
    tmp = gen_lowpart (mode, tmp);
7010
  return tmp;
7011
}
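/* Worked example (editorial, not part of optabs.c) of the constant
   byte-based lowering above: for V4SImode, e = 4 elements, u = 4 bytes per
   element, w = 16 bytes.  An element selector {1,0,3,2} is rewritten,
   element by element, into the byte selector

     {4,5,6,7,  0,1,2,3,  12,13,14,15,  8,9,10,11}

   since each index is masked with 2*e - 1 = 7, scaled by u = 4, and then
   the u consecutive byte offsets 0..3 are appended, exactly as the loops
   over i and j in the CONST_VECTOR branch do.  */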
7012
 
7013
/* Return insn code for a conditional operator with a comparison in
7014
   mode CMODE, unsigned if UNS is true, resulting in a value of mode VMODE.  */
7015
 
7016
static inline enum insn_code
7017
get_vcond_icode (enum machine_mode vmode, enum machine_mode cmode, bool uns)
7018
{
7019
  enum insn_code icode = CODE_FOR_nothing;
7020
  if (uns)
7021
    icode = convert_optab_handler (vcondu_optab, vmode, cmode);
7022
  else
7023
    icode = convert_optab_handler (vcond_optab, vmode, cmode);
7024
  return icode;
7025
}
7026
 
7027
/* Return TRUE iff appropriate vector insns are available
7028
   for vector cond expr with vector type VALUE_TYPE and a comparison
7029
   with operand vector types in CMP_OP_TYPE.  */
7030
 
7031
bool
7032
expand_vec_cond_expr_p (tree value_type, tree cmp_op_type)
7033
{
7034
  enum machine_mode value_mode = TYPE_MODE (value_type);
7035
  enum machine_mode cmp_op_mode = TYPE_MODE (cmp_op_type);
7036
  if (GET_MODE_SIZE (value_mode) != GET_MODE_SIZE (cmp_op_mode)
7037
      || GET_MODE_NUNITS (value_mode) != GET_MODE_NUNITS (cmp_op_mode)
7038
      || get_vcond_icode (TYPE_MODE (value_type), TYPE_MODE (cmp_op_type),
7039
                          TYPE_UNSIGNED (cmp_op_type)) == CODE_FOR_nothing)
7040
    return false;
7041
  return true;
7042
}
7043
 
7044
/* Generate insns for a VEC_COND_EXPR, given its TYPE and its
7045
   three operands.  */
7046
 
7047
rtx
7048
expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
7049
                      rtx target)
7050
{
7051
  struct expand_operand ops[6];
7052
  enum insn_code icode;
7053
  rtx comparison, rtx_op1, rtx_op2;
7054
  enum machine_mode mode = TYPE_MODE (vec_cond_type);
7055
  enum machine_mode cmp_op_mode;
7056
  bool unsignedp;
7057
 
7058
  gcc_assert (COMPARISON_CLASS_P (op0));
7059
 
7060
  unsignedp = TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op0, 0)));
7061
  cmp_op_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (op0, 0)));
7062
 
7063
  gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
7064
              && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));
7065
 
7066
  icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
7067
  if (icode == CODE_FOR_nothing)
7068
    return 0;
7069
 
7070
  comparison = vector_compare_rtx (op0, unsignedp, icode);
7071
  rtx_op1 = expand_normal (op1);
7072
  rtx_op2 = expand_normal (op2);
7073
 
7074
  create_output_operand (&ops[0], target, mode);
7075
  create_input_operand (&ops[1], rtx_op1, mode);
7076
  create_input_operand (&ops[2], rtx_op2, mode);
7077
  create_fixed_operand (&ops[3], comparison);
7078
  create_fixed_operand (&ops[4], XEXP (comparison, 0));
7079
  create_fixed_operand (&ops[5], XEXP (comparison, 1));
7080
  expand_insn (icode, 6, ops);
7081
  return ops[0].value;
7082
}
7083
 
7084
 
7085
/* Return true if there is a compare_and_swap pattern.  */
7086
 
7087
bool
7088
can_compare_and_swap_p (enum machine_mode mode, bool allow_libcall)
7089
{
7090
  enum insn_code icode;
7091
 
7092
  /* Check for __atomic_compare_and_swap.  */
7093
  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
7094
  if (icode != CODE_FOR_nothing)
7095
    return true;
7096
 
7097
  /* Check for __sync_compare_and_swap.  */
7098
  icode = optab_handler (sync_compare_and_swap_optab, mode);
7099
  if (icode != CODE_FOR_nothing)
7100
    return true;
7101
  if (allow_libcall && optab_libfunc (sync_compare_and_swap_optab, mode))
7102
    return true;
7103
 
7104
  /* No inline compare and swap.  */
7105
  return false;
7106
}
7107
 
7108
/* Return true if an atomic exchange can be performed.  */
7109
 
7110
bool
7111
can_atomic_exchange_p (enum machine_mode mode, bool allow_libcall)
7112
{
7113
  enum insn_code icode;
7114
 
7115
  /* Check for __atomic_exchange.  */
7116
  icode = direct_optab_handler (atomic_exchange_optab, mode);
7117
  if (icode != CODE_FOR_nothing)
7118
    return true;
7119
 
7120
  /* Don't check __sync_test_and_set, as on some platforms that
7121
     has reduced functionality.  Targets that really do support
7122
     a proper exchange should simply be updated to the __atomics.  */
7123
 
7124
  return can_compare_and_swap_p (mode, allow_libcall);
7125
}
7126
 
7127
 
7128
/* Helper function to find the MODE_CC set in a sync_compare_and_swap
7129
   pattern.  */
7130
 
7131
static void
7132
find_cc_set (rtx x, const_rtx pat, void *data)
7133
{
7134
  if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
7135
      && GET_CODE (pat) == SET)
7136
    {
7137
      rtx *p_cc_reg = (rtx *) data;
7138
      gcc_assert (!*p_cc_reg);
7139
      *p_cc_reg = x;
7140
    }
7141
}
7142
 
7143
/* This is a helper function for the other atomic operations.  This function
7144
   emits a loop that contains SEQ that iterates until a compare-and-swap
7145
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
7146
   a set of instructions that takes a value from OLD_REG as an input and
7147
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
7148
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
7149
   attempt to update MEM with NEW_REG.  The function returns true when the
7150
   loop was generated successfully.  */
7151
 
7152
static bool
7153
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
7154
{
7155
  enum machine_mode mode = GET_MODE (mem);
7156
  rtx label, cmp_reg, success, oldval;
7157
 
7158
  /* The loop we want to generate looks like
7159
 
7160
        cmp_reg = mem;
7161
      label:
7162
        old_reg = cmp_reg;
7163
        seq;
7164
        (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
7165
        if (!success)
7166
          goto label;
7167
 
7168
     Note that we only do the plain load from memory once.  Subsequent
7169
     iterations use the value loaded by the compare-and-swap pattern.  */
7170
 
7171
  label = gen_label_rtx ();
7172
  cmp_reg = gen_reg_rtx (mode);
7173
 
7174
  emit_move_insn (cmp_reg, mem);
7175
  emit_label (label);
7176
  emit_move_insn (old_reg, cmp_reg);
7177
  if (seq)
7178
    emit_insn (seq);
7179
 
7180
  success = NULL_RTX;
7181
  oldval = cmp_reg;
7182
  if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
7183
                                       new_reg, false, MEMMODEL_SEQ_CST,
7184
                                       MEMMODEL_RELAXED))
7185
    return false;
7186
 
7187
  if (oldval != cmp_reg)
7188
    emit_move_insn (cmp_reg, oldval);
7189
 
7190
  /* ??? Mark this jump predicted not taken?  */
7191
  emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
7192
                           GET_MODE (success), 1, label);
7193
  return true;
7194
}
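/* Editorial sketch, not part of optabs.c: at the source level the RTL loop
   emitted above behaves roughly like the following C, written with the
   __atomic builtins.  The "+ 1" stands in for whatever SEQ computes from
   OLD_REG; the plain load happens exactly once; on failure
   __atomic_compare_exchange_n refreshes old_val, matching the
   "cmp_reg = oldval" move; and the memory models mirror the
   MEMMODEL_SEQ_CST / MEMMODEL_RELAXED pair passed above:

     int old_val = *mem;
     int new_val;
     do
       new_val = old_val + 1;
     while (!__atomic_compare_exchange_n (mem, &old_val, new_val, false,
                                          __ATOMIC_SEQ_CST,
                                          __ATOMIC_RELAXED));
*/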
7195
 
7196
 
7197
/* This function tries to emit an atomic_exchange instruction.  VAL is written
7198
   to *MEM using memory model MODEL. The previous contents of *MEM are returned,
7199
   using TARGET if possible.  */
7200
 
7201
static rtx
7202
maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
7203
{
7204
  enum machine_mode mode = GET_MODE (mem);
7205
  enum insn_code icode;
7206
 
7207
  /* If the target supports the exchange directly, great.  */
7208
  icode = direct_optab_handler (atomic_exchange_optab, mode);
7209
  if (icode != CODE_FOR_nothing)
7210
    {
7211
      struct expand_operand ops[4];
7212
 
7213
      create_output_operand (&ops[0], target, mode);
7214
      create_fixed_operand (&ops[1], mem);
7215
      /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
7216
      create_convert_operand_to (&ops[2], val, mode, true);
7217
      create_integer_operand (&ops[3], model);
7218
      if (maybe_expand_insn (icode, 4, ops))
7219
        return ops[0].value;
7220
    }
7221
 
7222
  return NULL_RTX;
7223
}
7224
 
7225
/* This function tries to implement an atomic exchange operation using
7226
   __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
7227
   The previous contents of *MEM are returned, using TARGET if possible.
7228
   Since this instruction is an acquire barrier only, stronger memory
7229
   models may require additional barriers to be emitted.  */
7230
 
7231
static rtx
7232
maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
7233
                                   enum memmodel model)
7234
{
7235
  enum machine_mode mode = GET_MODE (mem);
7236
  enum insn_code icode;
7237
  rtx last_insn = get_last_insn ();
7238
 
7239
  icode = optab_handler (sync_lock_test_and_set_optab, mode);
7240
 
7241
  /* Legacy sync_lock_test_and_set is an acquire barrier.  If the pattern
7242
     exists, and the memory model is stronger than acquire, add a release
7243
     barrier before the instruction.  */
7244
 
7245
  if (model == MEMMODEL_SEQ_CST
7246
      || model == MEMMODEL_RELEASE
7247
      || model == MEMMODEL_ACQ_REL)
7248
    expand_mem_thread_fence (model);
7249
 
7250
  if (icode != CODE_FOR_nothing)
7251
    {
7252
      struct expand_operand ops[3];
7253
      create_output_operand (&ops[0], target, mode);
7254
      create_fixed_operand (&ops[1], mem);
7255
      /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
7256
      create_convert_operand_to (&ops[2], val, mode, true);
7257
      if (maybe_expand_insn (icode, 3, ops))
7258
        return ops[0].value;
7259
    }
7260
 
7261
  /* If an external test-and-set libcall is provided, use that instead of
7262
     any external compare-and-swap that we might get from the compare-and-
7263
     swap-loop expansion later.  */
7264
  if (!can_compare_and_swap_p (mode, false))
7265
    {
7266
      rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
7267
      if (libfunc != NULL)
7268
        {
7269
          rtx addr;
7270
 
7271
          addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
7272
          return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
7273
                                          mode, 2, addr, ptr_mode,
7274
                                          val, mode);
7275
        }
7276
    }
7277
 
7278
  /* If the test_and_set can't be emitted, eliminate any barrier that might
7279
     have been emitted.  */
7280
  delete_insns_since (last_insn);
7281
  return NULL_RTX;
7282
}
7283
 
7284
/* This function tries to implement an atomic exchange operation using a
7285
   compare_and_swap loop. VAL is written to *MEM.  The previous contents of
7286
   *MEM are returned, using TARGET if possible.  No memory model is required
7287
   since a compare_and_swap loop is seq-cst.  */
7288
 
7289
static rtx
7290
maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
7291
{
7292
  enum machine_mode mode = GET_MODE (mem);
7293
 
7294
  if (can_compare_and_swap_p (mode, true))
7295
    {
7296
      if (!target || !register_operand (target, mode))
7297
        target = gen_reg_rtx (mode);
7298
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7299
        val = convert_modes (mode, GET_MODE (val), val, 1);
7300
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
7301
        return target;
7302
    }
7303
 
7304
  return NULL_RTX;
7305
}
7306
 
7307
/* This function tries to implement an atomic test-and-set operation
7308
   using the atomic_test_and_set instruction pattern.  A boolean value
7309
   is returned from the operation, using TARGET if possible.  */
7310
 
7311
#ifndef HAVE_atomic_test_and_set
7312
#define HAVE_atomic_test_and_set 0
7313
#define CODE_FOR_atomic_test_and_set CODE_FOR_nothing
7314
#endif
7315
 
7316
static rtx
7317
maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
7318
{
7319
  enum machine_mode pat_bool_mode;
7320
  struct expand_operand ops[3];
7321
 
7322
  if (!HAVE_atomic_test_and_set)
7323
    return NULL_RTX;
7324
 
7325
  /* While we always get QImode from __atomic_test_and_set, we get
7326
     other memory modes from __sync_lock_test_and_set.  Note that we
7327
     use no endian adjustment here.  This matches the 4.6 behavior
7328
     in the Sparc backend.  */
7329
  gcc_checking_assert
7330
    (insn_data[CODE_FOR_atomic_test_and_set].operand[1].mode == QImode);
7331
  if (GET_MODE (mem) != QImode)
7332
    mem = adjust_address_nv (mem, QImode, 0);
7333
 
7334
  pat_bool_mode = insn_data[CODE_FOR_atomic_test_and_set].operand[0].mode;
7335
  create_output_operand (&ops[0], target, pat_bool_mode);
7336
  create_fixed_operand (&ops[1], mem);
7337
  create_integer_operand (&ops[2], model);
7338
 
7339
  if (maybe_expand_insn (CODE_FOR_atomic_test_and_set, 3, ops))
7340
    return ops[0].value;
7341
  return NULL_RTX;
7342
}
7343
 
7344
/* This function expands the legacy __sync_lock_test_and_set operation, which is
7345
   generally an atomic exchange.  Some limited targets only allow the
7346
   constant 1 to be stored.  This is an ACQUIRE operation.
7347
 
7348
   TARGET is an optional place to stick the return value.
7349
   MEM is where VAL is stored.  */
7350
 
7351
rtx
7352
expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
7353
{
7354
  rtx ret;
7355
 
7356
  /* Try an atomic_exchange first.  */
7357
  ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_ACQUIRE);
7358
  if (ret)
7359
    return ret;
7360
 
7361
  ret = maybe_emit_sync_lock_test_and_set (target, mem, val, MEMMODEL_ACQUIRE);
7362
  if (ret)
7363
    return ret;
7364
 
7365
  ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
7366
  if (ret)
7367
    return ret;
7368
 
7369
  /* If there are no other options, try atomic_test_and_set if the value
7370
     being stored is 1.  */
7371
  if (val == const1_rtx)
7372
    ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_ACQUIRE);
7373
 
7374
  return ret;
7375
}
7376
 
7377
/* This function expands the atomic test_and_set operation:
7378
   atomically store a boolean TRUE into MEM and return the previous value.
7379
 
7380
   MEMMODEL is the memory model variant to use.
7381
   TARGET is an optional place to stick the return value.  */
7382
 
7383
rtx
7384
expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
7385
{
7386
  enum machine_mode mode = GET_MODE (mem);
7387
  rtx ret;
7388
 
7389
  ret = maybe_emit_atomic_test_and_set (target, mem, model);
7390
  if (ret)
7391
    return ret;
7392
 
7393
  if (target == NULL_RTX)
7394
    target = gen_reg_rtx (mode);
7395
 
7396
  /* If there is no test and set, try exchange, then a compare_and_swap loop,
7397
     then __sync_lock_test_and_set.  */
7398
  ret = maybe_emit_atomic_exchange (target, mem, const1_rtx, model);
7399
  if (ret)
7400
    return ret;
7401
 
7402
  ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, const1_rtx);
7403
  if (ret)
7404
    return ret;
7405
 
7406
  ret = maybe_emit_sync_lock_test_and_set (target, mem, const1_rtx, model);
7407
  if (ret)
7408
    return ret;
7409
 
7410
  /* Failing all else, assume a single threaded environment and simply perform
7411
     the operation.  */
7412
  emit_move_insn (target, mem);
7413
  emit_move_insn (mem, const1_rtx);
7414
  return target;
7415
}
7416
 
7417
/* This function expands the atomic exchange operation:
7418
   atomically store VAL in MEM and return the previous value in MEM.
7419
 
7420
   MEMMODEL is the memory model variant to use.
7421
   TARGET is an optional place to stick the return value.  */
7422
 
7423
rtx
7424
expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
7425
{
7426
  rtx ret;
7427
 
7428
  ret = maybe_emit_atomic_exchange (target, mem, val, model);
7429
 
7430
  /* Next try a compare-and-swap loop for the exchange.  */
7431
  if (!ret)
7432
    ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
7433
 
7434
  return ret;
7435
}
7436
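
/* Editor's sketch (not part of optabs.c): the builtin whose expansion goes
   through expand_atomic_exchange.  The helper name is hypothetical.  */

static int
example_exchange (int *p, int newval)
{
  /* Atomically store NEWVAL and return the previous contents of *p.  */
  return __atomic_exchange_n (p, newval, __ATOMIC_SEQ_CST);
}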
 
7437
/* This function expands the atomic compare exchange operation:
7438
 
7439
   *PTARGET_BOOL is an optional place to store the boolean success/failure.
7440
   *PTARGET_OVAL is an optional place to store the old value from memory.
7441
   Both target parameters may be NULL to indicate that we do not care about
7442
   that return value.  Both target parameters are updated on success to
7443
   the actual location of the corresponding result.
7444
 
7445
   MEMMODEL is the memory model variant to use.
7446
 
7447
   The return value of the function is true for success.  */
7448
 
7449
bool
7450
expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
7451
                                rtx mem, rtx expected, rtx desired,
7452
                                bool is_weak, enum memmodel succ_model,
7453
                                enum memmodel fail_model)
7454
{
7455
  enum machine_mode mode = GET_MODE (mem);
7456
  struct expand_operand ops[8];
7457
  enum insn_code icode;
7458
  rtx target_oval, target_bool = NULL_RTX;
7459
  rtx libfunc;
7460
 
7461
  /* Load expected into a register for the compare and swap.  */
7462
  if (MEM_P (expected))
7463
    expected = copy_to_reg (expected);
7464
 
7465
  /* Make sure we always have some place to put the return oldval.
7466
     Further, make sure that place is distinct from the input expected,
7467
     just in case we need that path down below.  */
7468
  if (ptarget_oval == NULL
7469
      || (target_oval = *ptarget_oval) == NULL
7470
      || reg_overlap_mentioned_p (expected, target_oval))
7471
    target_oval = gen_reg_rtx (mode);
7472
 
7473
  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
7474
  if (icode != CODE_FOR_nothing)
7475
    {
7476
      enum machine_mode bool_mode = insn_data[icode].operand[0].mode;
7477
 
7478
      /* Make sure we always have a place for the bool operand.  */
7479
      if (ptarget_bool == NULL
7480
          || (target_bool = *ptarget_bool) == NULL
7481
          || GET_MODE (target_bool) != bool_mode)
7482
        target_bool = gen_reg_rtx (bool_mode);
7483
 
7484
      /* Emit the compare_and_swap.  */
7485
      create_output_operand (&ops[0], target_bool, bool_mode);
7486
      create_output_operand (&ops[1], target_oval, mode);
7487
      create_fixed_operand (&ops[2], mem);
7488
      create_convert_operand_to (&ops[3], expected, mode, true);
7489
      create_convert_operand_to (&ops[4], desired, mode, true);
7490
      create_integer_operand (&ops[5], is_weak);
7491
      create_integer_operand (&ops[6], succ_model);
7492
      create_integer_operand (&ops[7], fail_model);
7493
      expand_insn (icode, 8, ops);
7494
 
7495
      /* Return success/failure.  */
7496
      target_bool = ops[0].value;
7497
      target_oval = ops[1].value;
7498
      goto success;
7499
    }
7500
 
7501
  /* Otherwise fall back to the original __sync_val_compare_and_swap
7502
     which is always seq-cst.  */
7503
  icode = optab_handler (sync_compare_and_swap_optab, mode);
7504
  if (icode != CODE_FOR_nothing)
7505
    {
7506
      rtx cc_reg;
7507
 
7508
      create_output_operand (&ops[0], target_oval, mode);
7509
      create_fixed_operand (&ops[1], mem);
7510
      create_convert_operand_to (&ops[2], expected, mode, true);
7511
      create_convert_operand_to (&ops[3], desired, mode, true);
7512
      if (!maybe_expand_insn (icode, 4, ops))
7513
        return false;
7514
 
7515
      target_oval = ops[0].value;
7516
 
7517
      /* If the caller isn't interested in the boolean return value,
7518
         skip the computation of it.  */
7519
      if (ptarget_bool == NULL)
7520
        goto success;
7521
 
7522
      /* Otherwise, work out if the compare-and-swap succeeded.  */
7523
      cc_reg = NULL_RTX;
7524
      if (have_insn_for (COMPARE, CCmode))
7525
        note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
7526
      if (cc_reg)
7527
        {
7528
          target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
7529
                                               const0_rtx, VOIDmode, 0, 1);
7530
          goto success;
7531
        }
7532
      goto success_bool_from_val;
7533
    }
7534
 
7535
  /* Also check for library support for __sync_val_compare_and_swap.  */
7536
  libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
7537
  if (libfunc != NULL)
7538
    {
7539
      rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
7540
      target_oval = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
7541
                                             mode, 3, addr, ptr_mode,
7542
                                             expected, mode, desired, mode);
7543
 
7544
      /* Compute the boolean return value only if requested.  */
7545
      if (ptarget_bool)
7546
        goto success_bool_from_val;
7547
      else
7548
        goto success;
7549
    }
7550
 
7551
  /* Failure.  */
7552
  return false;
7553
 
7554
 success_bool_from_val:
7555
   target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
7556
                                        expected, VOIDmode, 1, 1);
7557
 success:
7558
  /* Make sure that the oval output winds up where the caller asked.  */
7559
  if (ptarget_oval)
7560
    *ptarget_oval = target_oval;
7561
  if (ptarget_bool)
7562
    *ptarget_bool = target_bool;
7563
  return true;
7564
}
7565
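
/* Editor's sketch (not part of optabs.c): the builtin whose two results
   correspond to *PTARGET_BOOL and *PTARGET_OVAL above.  The helper name is
   hypothetical.  */

static int
example_compare_exchange (int *p, int expected, int desired)
{
  /* On failure `expected' is overwritten with the value actually found,
     mirroring how the old value is handed back through *PTARGET_OVAL.  */
  return __atomic_compare_exchange_n (p, &expected, desired,
                                      0 /* is_weak */,
                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}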
 
7566
/* Generate asm volatile("" : : : "memory") as the memory barrier.  */
7567
 
7568
static void
7569
expand_asm_memory_barrier (void)
7570
{
7571
  rtx asm_op, clob;
7572
 
7573
  asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, empty_string, empty_string, 0,
7574
                                 rtvec_alloc (0), rtvec_alloc (0),
7575
                                 rtvec_alloc (0), UNKNOWN_LOCATION);
7576
  MEM_VOLATILE_P (asm_op) = 1;
7577
 
7578
  clob = gen_rtx_SCRATCH (VOIDmode);
7579
  clob = gen_rtx_MEM (BLKmode, clob);
7580
  clob = gen_rtx_CLOBBER (VOIDmode, clob);
7581
 
7582
  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
7583
}
7584
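
/* Editor's sketch (not part of optabs.c): the source-level equivalent of the
   RTL constructed above -- an empty volatile asm that clobbers memory, which
   constrains the compiler but emits no machine instruction.  */

static void
example_compiler_barrier (void)
{
  __asm__ __volatile__ ("" : : : "memory");
}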
 
7585
/* This routine will either emit the mem_thread_fence pattern or issue a
7586
   sync_synchronize to generate a fence for memory model MEMMODEL.  */
7587
 
7588
#ifndef HAVE_mem_thread_fence
7589
# define HAVE_mem_thread_fence 0
7590
# define gen_mem_thread_fence(x) (gcc_unreachable (), NULL_RTX)
7591
#endif
7592
#ifndef HAVE_memory_barrier
7593
# define HAVE_memory_barrier 0
7594
# define gen_memory_barrier()  (gcc_unreachable (), NULL_RTX)
7595
#endif
7596
 
7597
void
7598
expand_mem_thread_fence (enum memmodel model)
7599
{
7600
  if (HAVE_mem_thread_fence)
7601
    emit_insn (gen_mem_thread_fence (GEN_INT (model)));
7602
  else if (model != MEMMODEL_RELAXED)
7603
    {
7604
      if (HAVE_memory_barrier)
7605
        emit_insn (gen_memory_barrier ());
7606
      else if (synchronize_libfunc != NULL_RTX)
7607
        emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
7608
      else
7609
        expand_asm_memory_barrier ();
7610
    }
7611
}
7612
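
/* Editor's sketch (not part of optabs.c): the builtin that typically reaches
   expand_mem_thread_fence.  A relaxed fence normally expands to nothing;
   stronger models emit a real barrier, a libcall, or the asm fallback.  */

static void
example_thread_fences (void)
{
  __atomic_thread_fence (__ATOMIC_RELAXED);  /* usually no code */
  __atomic_thread_fence (__ATOMIC_SEQ_CST);  /* full barrier */
}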
 
7613
/* This routine will either emit the mem_signal_fence pattern or issue a
7614
   compiler memory barrier for memory model MEMMODEL.  */
7615
 
7616
#ifndef HAVE_mem_signal_fence
7617
# define HAVE_mem_signal_fence 0
7618
# define gen_mem_signal_fence(x) (gcc_unreachable (), NULL_RTX)
7619
#endif
7620
 
7621
void
7622
expand_mem_signal_fence (enum memmodel model)
7623
{
7624
  if (HAVE_mem_signal_fence)
7625
    emit_insn (gen_mem_signal_fence (GEN_INT (model)));
7626
  else if (model != MEMMODEL_RELAXED)
7627
    {
7628
      /* By default targets are coherent between a thread and the signal
7629
         handler running on the same thread.  Thus this really becomes a
7630
         compiler barrier, in that stores must not be sunk past
7631
         (or raised above) a given point.  */
7632
      expand_asm_memory_barrier ();
7633
    }
7634
}
7635
 
7636
/* This function expands the atomic load operation:
7637
   return the atomically loaded value in MEM.
7638
 
7639
   MEMMODEL is the memory model variant to use.
7640
   TARGET is an optional place to stick the return value.  */
7641
 
7642
rtx
7643
expand_atomic_load (rtx target, rtx mem, enum memmodel model)
7644
{
7645
  enum machine_mode mode = GET_MODE (mem);
7646
  enum insn_code icode;
7647
 
7648
  /* If the target supports the load directly, great.  */
7649
  icode = direct_optab_handler (atomic_load_optab, mode);
7650
  if (icode != CODE_FOR_nothing)
7651
    {
7652
      struct expand_operand ops[3];
7653
 
7654
      create_output_operand (&ops[0], target, mode);
7655
      create_fixed_operand (&ops[1], mem);
7656
      create_integer_operand (&ops[2], model);
7657
      if (maybe_expand_insn (icode, 3, ops))
7658
        return ops[0].value;
7659
    }
7660
 
7661
  /* If the size of the object is greater than word size on this target,
7662
     then we assume that a load will not be atomic.  */
7663
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
7664
    {
7665
      /* Issue val = compare_and_swap (mem, 0, 0).
7666
         This may cause the occasional harmless store of 0 when the value is
7667
         already 0, but this appears to be permitted by the standard.  */
7668
      if (expand_atomic_compare_and_swap (NULL, &target, mem, const0_rtx,
7669
                                          const0_rtx, false, model, model))
7670
        return target;
7671
      else
7672
        /* Otherwise there is no atomic load; leave it to the library call.  */
7673
        return NULL_RTX;
7674
    }
7675
 
7676
  /* Otherwise assume loads are atomic, and emit the proper barriers.  */
7677
  if (!target || target == const0_rtx)
7678
    target = gen_reg_rtx (mode);
7679
 
7680
  /* Emit the appropriate barrier before the load.  */
7681
  expand_mem_thread_fence (model);
7682
 
7683
  emit_move_insn (target, mem);
7684
 
7685
  /* For SEQ_CST, also emit a barrier after the load.  */
7686
  if (model == MEMMODEL_SEQ_CST)
7687
    expand_mem_thread_fence (model);
7688
 
7689
  return target;
7690
}
7691
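
/* Editor's sketch (not part of optabs.c): the builtin served by
   expand_atomic_load.  On the word-size fallback path above, a seq-cst load
   becomes fence; move; fence.  The helper name is hypothetical.  */

static int
example_atomic_load (const int *p)
{
  return __atomic_load_n (p, __ATOMIC_SEQ_CST);
}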
 
7692
/* This function expands the atomic store operation:
7693
   Atomically store VAL in MEM.
7694
   MEMMODEL is the memory model variant to use.
7695
   USE_RELEASE is true if __sync_lock_release can be used as a fall back.
7696
   The function returns const0_rtx if a pattern was emitted.  */
7697
 
7698
rtx
7699
expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
7700
{
7701
  enum machine_mode mode = GET_MODE (mem);
7702
  enum insn_code icode;
7703
  struct expand_operand ops[3];
7704
 
7705
  /* If the target supports the store directly, great.  */
7706
  icode = direct_optab_handler (atomic_store_optab, mode);
7707
  if (icode != CODE_FOR_nothing)
7708
    {
7709
      create_fixed_operand (&ops[0], mem);
7710
      create_input_operand (&ops[1], val, mode);
7711
      create_integer_operand (&ops[2], model);
7712
      if (maybe_expand_insn (icode, 3, ops))
7713
        return const0_rtx;
7714
    }
7715
 
7716
  /* If using __sync_lock_release is a viable alternative, try it.  */
7717
  if (use_release)
7718
    {
7719
      icode = direct_optab_handler (sync_lock_release_optab, mode);
7720
      if (icode != CODE_FOR_nothing)
7721
        {
7722
          create_fixed_operand (&ops[0], mem);
7723
          create_input_operand (&ops[1], const0_rtx, mode);
7724
          if (maybe_expand_insn (icode, 2, ops))
7725
            {
7726
              /* lock_release is only a release barrier.  */
7727
              if (model == MEMMODEL_SEQ_CST)
7728
                expand_mem_thread_fence (model);
7729
              return const0_rtx;
7730
            }
7731
        }
7732
    }
7733
 
7734
  /* If the size of the object is greater than word size on this target,
7735
     a default store will not be atomic.  Try a mem_exchange and throw away
7736
     the result.  If that doesn't work, don't do anything.  */
7737
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
7738
    {
7739
      rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
7740
      if (!target)
7741
        target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val);
7742
      if (target)
7743
        return const0_rtx;
7744
      else
7745
        return NULL_RTX;
7746
    }
7747
 
7748
  /* If there is no mem_store, default to a move with barriers.  */
7749
  if (model == MEMMODEL_SEQ_CST || model == MEMMODEL_RELEASE)
7750
    expand_mem_thread_fence (model);
7751
 
7752
  emit_move_insn (mem, val);
7753
 
7754
  /* For SEQ_CST, also emit a barrier after the store.  */
7755
  if (model == MEMMODEL_SEQ_CST)
7756
    expand_mem_thread_fence (model);
7757
 
7758
  return const0_rtx;
7759
}
7760
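
/* Editor's sketch (not part of optabs.c): the builtin served by
   expand_atomic_store.  On the fallback path above, a release or seq-cst
   store becomes fence; move (with a trailing fence for seq-cst).  The helper
   name is hypothetical.  */

static void
example_atomic_store (int *p, int v)
{
  __atomic_store_n (p, v, __ATOMIC_RELEASE);
}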
 
7761
 
7762
/* Structure containing the pointers and values required to process the
7763
   various forms of the atomic_fetch_op and atomic_op_fetch builtins.  */
7764
 
7765
struct atomic_op_functions
7766
{
7767
  direct_optab mem_fetch_before;
7768
  direct_optab mem_fetch_after;
7769
  direct_optab mem_no_result;
7770
  optab fetch_before;
7771
  optab fetch_after;
7772
  direct_optab no_result;
7773
  enum rtx_code reverse_code;
7774
};
7775
 
7776
 
7777
/* Fill in structure pointed to by OP with the various optab entries for an
7778
   operation of type CODE.  */
7779
 
7780
static void
7781
get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
7782
{
7783
  gcc_assert (op != NULL);
7784
 
7785
  /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
7786
     in the source code during compilation, and the optab entries are not
7787
     computable until runtime.  Fill in the values at runtime.  */
7788
  switch (code)
7789
    {
7790
    case PLUS:
7791
      op->mem_fetch_before = atomic_fetch_add_optab;
7792
      op->mem_fetch_after = atomic_add_fetch_optab;
7793
      op->mem_no_result = atomic_add_optab;
7794
      op->fetch_before = sync_old_add_optab;
7795
      op->fetch_after = sync_new_add_optab;
7796
      op->no_result = sync_add_optab;
7797
      op->reverse_code = MINUS;
7798
      break;
7799
    case MINUS:
7800
      op->mem_fetch_before = atomic_fetch_sub_optab;
7801
      op->mem_fetch_after = atomic_sub_fetch_optab;
7802
      op->mem_no_result = atomic_sub_optab;
7803
      op->fetch_before = sync_old_sub_optab;
7804
      op->fetch_after = sync_new_sub_optab;
7805
      op->no_result = sync_sub_optab;
7806
      op->reverse_code = PLUS;
7807
      break;
7808
    case XOR:
7809
      op->mem_fetch_before = atomic_fetch_xor_optab;
7810
      op->mem_fetch_after = atomic_xor_fetch_optab;
7811
      op->mem_no_result = atomic_xor_optab;
7812
      op->fetch_before = sync_old_xor_optab;
7813
      op->fetch_after = sync_new_xor_optab;
7814
      op->no_result = sync_xor_optab;
7815
      op->reverse_code = XOR;
7816
      break;
7817
    case AND:
7818
      op->mem_fetch_before = atomic_fetch_and_optab;
7819
      op->mem_fetch_after = atomic_and_fetch_optab;
7820
      op->mem_no_result = atomic_and_optab;
7821
      op->fetch_before = sync_old_and_optab;
7822
      op->fetch_after = sync_new_and_optab;
7823
      op->no_result = sync_and_optab;
7824
      op->reverse_code = UNKNOWN;
7825
      break;
7826
    case IOR:
7827
      op->mem_fetch_before = atomic_fetch_or_optab;
7828
      op->mem_fetch_after = atomic_or_fetch_optab;
7829
      op->mem_no_result = atomic_or_optab;
7830
      op->fetch_before = sync_old_ior_optab;
7831
      op->fetch_after = sync_new_ior_optab;
7832
      op->no_result = sync_ior_optab;
7833
      op->reverse_code = UNKNOWN;
7834
      break;
7835
    case NOT:
7836
      op->mem_fetch_before = atomic_fetch_nand_optab;
7837
      op->mem_fetch_after = atomic_nand_fetch_optab;
7838
      op->mem_no_result = atomic_nand_optab;
7839
      op->fetch_before = sync_old_nand_optab;
7840
      op->fetch_after = sync_new_nand_optab;
7841
      op->no_result = sync_nand_optab;
7842
      op->reverse_code = UNKNOWN;
7843
      break;
7844
    default:
7845
      gcc_unreachable ();
7846
    }
7847
}
7848
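
/* Editor's sketch (not part of optabs.c, compiled out): how the table above
   is typically consumed -- fill the structure for one rtx code and query a
   handler for a given mode.  The helper is hypothetical.  */
#if 0
static enum insn_code
example_lookup_fetch_add (enum machine_mode mode)
{
  struct atomic_op_functions ops;

  get_atomic_op_for_code (&ops, PLUS);
  /* Handler for the __atomic fetch-before (fetch_add) pattern, if any.  */
  return direct_optab_handler (ops.mem_fetch_before, mode);
}
#endif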
 
7849
/* See if there is a more efficient way to implement the operation "*MEM CODE VAL"
7850
   using memory order MODEL.  If AFTER is true the operation needs to return
7851
   the value of *MEM after the operation, otherwise the previous value.
7852
   TARGET is an optional place to store the result.  The result is unused if
7853
   it is const0_rtx.
7854
   Return the result if there is a better sequence, otherwise NULL_RTX.  */
7855
 
7856
static rtx
7857
maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
7858
                         enum memmodel model, bool after)
7859
{
7860
  /* If the value is prefetched, or not used, it may be possible to replace
7861
     the sequence with a native exchange operation.  */
7862
  if (!after || target == const0_rtx)
7863
    {
7864
      /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m).  */
7865
      if (code == AND && val == const0_rtx)
7866
        {
7867
          if (target == const0_rtx)
7868
            target = gen_reg_rtx (GET_MODE (mem));
7869
          return maybe_emit_atomic_exchange (target, mem, val, model);
7870
        }
7871
 
7872
      /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m).  */
7873
      if (code == IOR && val == constm1_rtx)
7874
        {
7875
          if (target == const0_rtx)
7876
            target = gen_reg_rtx (GET_MODE (mem));
7877
          return maybe_emit_atomic_exchange (target, mem, val, model);
7878
        }
7879
    }
7880
 
7881
  return NULL_RTX;
7882
}
7883
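
/* Editor's sketch (not part of optabs.c): the source-level forms that the two
   special cases above turn into a plain atomic exchange.  Helper names are
   hypothetical.  */

static int
example_fetch_and_zero (int *p)
{
  /* fetch_and (&x, 0): every bit is cleared, so this is exchange (&x, 0).  */
  return __atomic_fetch_and (p, 0, __ATOMIC_SEQ_CST);
}

static int
example_fetch_or_ones (int *p)
{
  /* fetch_or (&x, -1): every bit is set, so this is exchange (&x, -1).  */
  return __atomic_fetch_or (p, -1, __ATOMIC_SEQ_CST);
}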
 
7884
/* Try to emit an instruction for a specific operation variation.
7885
   OPTAB contains the OP functions.
7886
   TARGET is an optional place to return the result. const0_rtx means unused.
7887
   MEM is the memory location to operate on.
7888
   VAL is the value to use in the operation.
7889
   USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
7890
   MODEL is the memory model, if used.
7891
   AFTER is true if the returned result is the value after the operation.  */
7892
 
7893
static rtx
7894
maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
7895
               rtx val, bool use_memmodel, enum memmodel model, bool after)
7896
{
7897
  enum machine_mode mode = GET_MODE (mem);
7898
  struct expand_operand ops[4];
7899
  enum insn_code icode;
7900
  int op_counter = 0;
7901
  int num_ops;
7902
 
7903
  /* Check to see if there is a result returned.  */
7904
  if (target == const0_rtx)
7905
    {
7906
      if (use_memmodel)
7907
        {
7908
          icode = direct_optab_handler (optab->mem_no_result, mode);
7909
          create_integer_operand (&ops[2], model);
7910
          num_ops = 3;
7911
        }
7912
      else
7913
        {
7914
          icode = direct_optab_handler (optab->no_result, mode);
7915
          num_ops = 2;
7916
        }
7917
    }
7918
  /* Otherwise, we need to generate a result.  */
7919
  else
7920
    {
7921
      if (use_memmodel)
7922
        {
7923
          icode = direct_optab_handler (after ? optab->mem_fetch_after
7924
                                        : optab->mem_fetch_before, mode);
7925
          create_integer_operand (&ops[3], model);
7926
          num_ops = 4;
7927
        }
7928
      else
7929
        {
7930
          icode = optab_handler (after ? optab->fetch_after
7931
                                 : optab->fetch_before, mode);
7932
          num_ops = 3;
7933
        }
7934
      create_output_operand (&ops[op_counter++], target, mode);
7935
    }
7936
  if (icode == CODE_FOR_nothing)
7937
    return NULL_RTX;
7938
 
7939
  create_fixed_operand (&ops[op_counter++], mem);
7940
  /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
7941
  create_convert_operand_to (&ops[op_counter++], val, mode, true);
7942
 
7943
  if (maybe_expand_insn (icode, num_ops, ops))
7944
    return (target == const0_rtx ? const0_rtx : ops[0].value);
7945
 
7946
  return NULL_RTX;
7947
}
7948
 
7949
 
7950
/* This function expands an atomic fetch_OP or OP_fetch operation:
7951
   TARGET is an optional place to stick the return value.  const0_rtx indicates
7952
   the result is unused.
7953
   atomically fetch MEM, perform the operation with VAL, and store the result back in MEM.
7954
   CODE is the operation being performed (OP)
7955
   MEMMODEL is the memory model variant to use.
7956
   AFTER is true to return the result of the operation (OP_fetch).
7957
   AFTER is false to return the value before the operation (fetch_OP).  */
7958
rtx
7959
expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
7960
                        enum memmodel model, bool after)
7961
{
7962
  enum machine_mode mode = GET_MODE (mem);
7963
  struct atomic_op_functions optab;
7964
  rtx result;
7965
  bool unused_result = (target == const0_rtx);
7966
 
7967
  get_atomic_op_for_code (&optab, code);
7968
 
7969
  /* Check to see if there are any better instructions.  */
7970
  result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
7971
  if (result)
7972
    return result;
7973
 
7974
  /* Check for the case where the result isn't used and try those patterns.  */
7975
  if (unused_result)
7976
    {
7977
      /* Try the memory model variant first.  */
7978
      result = maybe_emit_op (&optab, target, mem, val, true, model, true);
7979
      if (result)
7980
        return result;
7981
 
7982
      /* Next try the old style without a memory model.  */
7983
      result = maybe_emit_op (&optab, target, mem, val, false, model, true);
7984
      if (result)
7985
        return result;
7986
 
7987
      /* There is no no-result pattern, so try patterns with a result.  */
7988
      target = NULL_RTX;
7989
    }
7990
 
7991
  /* Try the __atomic version.  */
7992
  result = maybe_emit_op (&optab, target, mem, val, true, model, after);
7993
  if (result)
7994
    return result;
7995
 
7996
  /* Try the older __sync version.  */
7997
  result = maybe_emit_op (&optab, target, mem, val, false, model, after);
7998
  if (result)
7999
    return result;
8000
 
8001
  /* If the fetch value can be calculated from the other variation of fetch,
8002
     try that operation.  */
8003
  if (after || unused_result || optab.reverse_code != UNKNOWN)
8004
    {
8005
      /* Try the __atomic version, then the older __sync version.  */
8006
      result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
8007
      if (!result)
8008
        result = maybe_emit_op (&optab, target, mem, val, false, model, !after);
8009
 
8010
      if (result)
8011
        {
8012
          /* If the result isn't used, no need to do compensation code.  */
8013
          if (unused_result)
8014
            return result;
8015
 
8016
          /* Issue compensation code.  Fetch_after  == fetch_before OP val.
8017
             Fetch_before == after REVERSE_OP val.  */
8018
          if (!after)
8019
            code = optab.reverse_code;
8020
          if (code == NOT)
8021
            {
8022
              result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
8023
                                            true, OPTAB_LIB_WIDEN);
8024
              result = expand_simple_unop (mode, NOT, result, target, true);
8025
            }
8026
          else
8027
            result = expand_simple_binop (mode, code, result, val, target,
8028
                                          true, OPTAB_LIB_WIDEN);
8029
          return result;
8030
        }
8031
    }
8032
 
8033
  /* Try the __sync libcalls only if we can't do compare-and-swap inline.  */
8034
  if (!can_compare_and_swap_p (mode, false))
8035
    {
8036
      rtx libfunc;
8037
      bool fixup = false;
8038
 
8039
      libfunc = optab_libfunc (after ? optab.fetch_after
8040
                               : optab.fetch_before, mode);
8041
      if (libfunc == NULL
8042
          && (after || unused_result || optab.reverse_code != UNKNOWN))
8043
        {
8044
          fixup = true;
8045
          if (!after)
8046
            code = optab.reverse_code;
8047
          libfunc = optab_libfunc (after ? optab.fetch_before
8048
                                   : optab.fetch_after, mode);
8049
        }
8050
      if (libfunc != NULL)
8051
        {
8052
          rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
8053
          result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
8054
                                            2, addr, ptr_mode, val, mode);
8055
 
8056
          if (!unused_result && fixup)
8057
            result = expand_simple_binop (mode, code, result, val, target,
8058
                                          true, OPTAB_LIB_WIDEN);
8059
          return result;
8060
        }
8061
    }
8062
 
8063
  /* If nothing else has succeeded, default to a compare and swap loop.  */
8064
  if (can_compare_and_swap_p (mode, true))
8065
    {
8066
      rtx insn;
8067
      rtx t0 = gen_reg_rtx (mode), t1;
8068
 
8069
      start_sequence ();
8070
 
8071
      /* If the result is used, get a register for it.  */
8072
      if (!unused_result)
8073
        {
8074
          if (!target || !register_operand (target, mode))
8075
            target = gen_reg_rtx (mode);
8076
          /* If fetch_before, copy the value now.  */
8077
          if (!after)
8078
            emit_move_insn (target, t0);
8079
        }
8080
      else
8081
        target = const0_rtx;
8082
 
8083
      t1 = t0;
8084
      if (code == NOT)
8085
        {
8086
          t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
8087
                                    true, OPTAB_LIB_WIDEN);
8088
          t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
8089
        }
8090
      else
8091
        t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
8092
                                  OPTAB_LIB_WIDEN);
8093
 
8094
      /* For after, copy the value now.  */
8095
      if (!unused_result && after)
8096
        emit_move_insn (target, t1);
8097
      insn = get_insns ();
8098
      end_sequence ();
8099
 
8100
      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
8101
        return target;
8102
    }
8103
 
8104
  return NULL_RTX;
8105
}
8106
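
/* Editor's sketch (not part of optabs.c): the compensation identities used
   above when only the "other" flavour of a pattern exists, written out at
   the source level.  Helper names are hypothetical.  */

static int
example_add_fetch_via_fetch_add (int *p, int v)
{
  /* OP_fetch == fetch_OP, then apply OP once more to the result.  */
  return __atomic_fetch_add (p, v, __ATOMIC_SEQ_CST) + v;
}

static int
example_nand_fetch_via_fetch_nand (int *p, int v)
{
  /* NAND (the NOT case above) needs AND followed by NOT: ~(old & v).  */
  return ~(__atomic_fetch_nand (p, v, __ATOMIC_SEQ_CST) & v);
}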
 
8107
/* Return true if OPERAND is suitable for operand number OPNO of
8108
   instruction ICODE.  */
8109
 
8110
bool
8111
insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
8112
{
8113
  return (!insn_data[(int) icode].operand[opno].predicate
8114
          || (insn_data[(int) icode].operand[opno].predicate
8115
              (operand, insn_data[(int) icode].operand[opno].mode)));
8116
}
8117
 
8118
/* TARGET is a target of a multiword operation that we are going to
8119
   implement as a series of word-mode operations.  Return true if
8120
   TARGET is suitable for this purpose.  */
8121
 
8122
bool
8123
valid_multiword_target_p (rtx target)
8124
{
8125
  enum machine_mode mode;
8126
  int i;
8127
 
8128
  mode = GET_MODE (target);
8129
  for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
8130
    if (!validate_subreg (word_mode, mode, target, i))
8131
      return false;
8132
  return true;
8133
}
8134
 
8135
/* Like maybe_legitimize_operand, but do not change the code of the
8136
   current rtx value.  */
8137
 
8138
static bool
8139
maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
8140
                                    struct expand_operand *op)
8141
{
8142
  /* See if the operand matches in its current form.  */
8143
  if (insn_operand_matches (icode, opno, op->value))
8144
    return true;
8145
 
8146
  /* If the operand is a memory whose address has no side effects,
8147
     try forcing the address into a non-virtual pseudo register.
8148
     The check for side effects is important because copy_to_mode_reg
8149
     cannot handle things like auto-modified addresses.  */
8150
  if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
8151
    {
8152
      rtx addr, mem;
8153
 
8154
      mem = op->value;
8155
      addr = XEXP (mem, 0);
8156
      if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
8157
          && !side_effects_p (addr))
8158
        {
8159
          rtx last;
8160
          enum machine_mode mode;
8161
 
8162
          last = get_last_insn ();
8163
          mode = targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
8164
          mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
8165
          if (insn_operand_matches (icode, opno, mem))
8166
            {
8167
              op->value = mem;
8168
              return true;
8169
            }
8170
          delete_insns_since (last);
8171
        }
8172
    }
8173
 
8174
  return false;
8175
}
8176
 
8177
/* Try to make OP match operand OPNO of instruction ICODE.  Return true
8178
   on success, storing the new operand value back in OP.  */
8179
 
8180
static bool
8181
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
8182
                          struct expand_operand *op)
8183
{
8184
  enum machine_mode mode, imode;
8185
  bool old_volatile_ok, result;
8186
 
8187
  mode = op->mode;
8188
  switch (op->type)
8189
    {
8190
    case EXPAND_FIXED:
8191
      old_volatile_ok = volatile_ok;
8192
      volatile_ok = true;
8193
      result = maybe_legitimize_operand_same_code (icode, opno, op);
8194
      volatile_ok = old_volatile_ok;
8195
      return result;
8196
 
8197
    case EXPAND_OUTPUT:
8198
      gcc_assert (mode != VOIDmode);
8199
      if (op->value
8200
          && op->value != const0_rtx
8201
          && GET_MODE (op->value) == mode
8202
          && maybe_legitimize_operand_same_code (icode, opno, op))
8203
        return true;
8204
 
8205
      op->value = gen_reg_rtx (mode);
8206
      break;
8207
 
8208
    case EXPAND_INPUT:
8209
    input:
8210
      gcc_assert (mode != VOIDmode);
8211
      gcc_assert (GET_MODE (op->value) == VOIDmode
8212
                  || GET_MODE (op->value) == mode);
8213
      if (maybe_legitimize_operand_same_code (icode, opno, op))
8214
        return true;
8215
 
8216
      op->value = copy_to_mode_reg (mode, op->value);
8217
      break;
8218
 
8219
    case EXPAND_CONVERT_TO:
8220
      gcc_assert (mode != VOIDmode);
8221
      op->value = convert_to_mode (mode, op->value, op->unsigned_p);
8222
      goto input;
8223
 
8224
    case EXPAND_CONVERT_FROM:
8225
      if (GET_MODE (op->value) != VOIDmode)
8226
        mode = GET_MODE (op->value);
8227
      else
8228
        /* The caller must tell us what mode this value has.  */
8229
        gcc_assert (mode != VOIDmode);
8230
 
8231
      imode = insn_data[(int) icode].operand[opno].mode;
8232
      if (imode != VOIDmode && imode != mode)
8233
        {
8234
          op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
8235
          mode = imode;
8236
        }
8237
      goto input;
8238
 
8239
    case EXPAND_ADDRESS:
8240
      gcc_assert (mode != VOIDmode);
8241
      op->value = convert_memory_address (mode, op->value);
8242
      goto input;
8243
 
8244
    case EXPAND_INTEGER:
8245
      mode = insn_data[(int) icode].operand[opno].mode;
8246
      if (mode != VOIDmode && const_int_operand (op->value, mode))
8247
        goto input;
8248
      break;
8249
    }
8250
  return insn_operand_matches (icode, opno, op->value);
8251
}
8252
 
8253
/* Make OP describe an input operand that should have the same value
8254
   as VALUE, after any mode conversion that the target might request.
8255
   TYPE is the type of VALUE.  */
8256
 
8257
void
8258
create_convert_operand_from_type (struct expand_operand *op,
8259
                                  rtx value, tree type)
8260
{
8261
  create_convert_operand_from (op, value, TYPE_MODE (type),
8262
                               TYPE_UNSIGNED (type));
8263
}
8264
 
8265
/* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
8266
   of instruction ICODE.  Return true on success, leaving the new operand
8267
   values in the OPS themselves.  Emit no code on failure.  */
8268
 
8269
bool
8270
maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
8271
                           unsigned int nops, struct expand_operand *ops)
8272
{
8273
  rtx last;
8274
  unsigned int i;
8275
 
8276
  last = get_last_insn ();
8277
  for (i = 0; i < nops; i++)
8278
    if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
8279
      {
8280
        delete_insns_since (last);
8281
        return false;
8282
      }
8283
  return true;
8284
}
8285
 
8286
/* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
8287
   as its operands.  Return the instruction pattern on success,
8288
   and emit any necessary set-up code.  Return null and emit no
8289
   code on failure.  */
8290
 
8291
rtx
8292
maybe_gen_insn (enum insn_code icode, unsigned int nops,
8293
                struct expand_operand *ops)
8294
{
8295
  gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
8296
  if (!maybe_legitimize_operands (icode, 0, nops, ops))
8297
    return NULL_RTX;
8298
 
8299
  switch (nops)
8300
    {
8301
    case 1:
8302
      return GEN_FCN (icode) (ops[0].value);
8303
    case 2:
8304
      return GEN_FCN (icode) (ops[0].value, ops[1].value);
8305
    case 3:
8306
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
8307
    case 4:
8308
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8309
                              ops[3].value);
8310
    case 5:
8311
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8312
                              ops[3].value, ops[4].value);
8313
    case 6:
8314
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8315
                              ops[3].value, ops[4].value, ops[5].value);
8316
    case 7:
8317
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8318
                              ops[3].value, ops[4].value, ops[5].value,
8319
                              ops[6].value);
8320
    case 8:
8321
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8322
                              ops[3].value, ops[4].value, ops[5].value,
8323
                              ops[6].value, ops[7].value);
8324
    }
8325
  gcc_unreachable ();
8326
}
8327
 
8328
/* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
8329
   as its operands.  Return true on success and emit no code on failure.  */
8330
 
8331
bool
8332
maybe_expand_insn (enum insn_code icode, unsigned int nops,
8333
                   struct expand_operand *ops)
8334
{
8335
  rtx pat = maybe_gen_insn (icode, nops, ops);
8336
  if (pat)
8337
    {
8338
      emit_insn (pat);
8339
      return true;
8340
    }
8341
  return false;
8342
}
8343
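
/* Editor's sketch (not part of optabs.c, compiled out): the usual idiom for
   the operand helpers above -- describe each operand, then let
   maybe_expand_insn legitimize them and emit the pattern, falling back when
   it refuses.  ICODE, TARGET, MEM and MODEL are hypothetical here.  */
#if 0
static rtx
example_expand_three_operand (enum insn_code icode, rtx target, rtx mem,
                              enum memmodel model)
{
  struct expand_operand ops[3];

  create_output_operand (&ops[0], target,
                         insn_data[(int) icode].operand[0].mode);
  create_fixed_operand (&ops[1], mem);
  create_integer_operand (&ops[2], model);
  if (maybe_expand_insn (icode, 3, ops))
    return ops[0].value;
  /* Caller would fall back to a libcall or a compare-and-swap loop.  */
  return NULL_RTX;
}
#endif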
 
8344
/* Like maybe_expand_insn, but for jumps.  */
8345
 
8346
bool
8347
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
8348
                        struct expand_operand *ops)
8349
{
8350
  rtx pat = maybe_gen_insn (icode, nops, ops);
8351
  if (pat)
8352
    {
8353
      emit_jump_insn (pat);
8354
      return true;
8355
    }
8356
  return false;
8357
}
8358
 
8359
/* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
8360
   as its operands.  */
8361
 
8362
void
8363
expand_insn (enum insn_code icode, unsigned int nops,
8364
             struct expand_operand *ops)
8365
{
8366
  if (!maybe_expand_insn (icode, nops, ops))
8367
    gcc_unreachable ();
8368
}
8369
 
8370
/* Like expand_insn, but for jumps.  */
8371
 
8372
void
8373
expand_jump_insn (enum insn_code icode, unsigned int nops,
8374
                  struct expand_operand *ops)
8375
{
8376
  if (!maybe_expand_jump_insn (icode, nops, ops))
8377
    gcc_unreachable ();
8378
}
8379
 
8380
#include "gt-optabs.h"
