/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011  Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
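/* Illustrative example, not in the original source: with a 64-bit
   HOST_WIDE_INT, HWI_SIGN_EXTEND (0x8000000000000000) yields
   (HOST_WIDE_INT) -1 because the value is negative when viewed as a
   signed wide int, while HWI_SIGN_EXTEND (0x1) yields 0 -- exactly the
   high half obtained by sign extending LOW into a (low, high) pair.  */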

static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
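/* Illustrative example, not in the original source: in SImode the most
   negative constant is -0x80000000, and negating it overflows, wrapping
   back to -0x80000000; gen_int_mode performs exactly that truncation to
   the mode, so the result is always a valid SImode CONST_INT.  */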

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
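/* Illustrative example, not in the original source: for SImode this
   accepts exactly the constant whose value is 1 << 31.  The final
   masking step matters on a 64-bit host, where the CONST_INT carries
   the sign-extended pattern 0xffffffff80000000; masked to 32 bits it
   compares equal to 0x80000000.  */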

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
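/* Illustrative example, not in the original source:
   simplify_gen_binary (PLUS, SImode, (const_int 2), (reg R)) first
   tries to fold the addition; a register plus a constant does not
   fold, so the commutative operands are swapped to put the constant
   second, giving the canonical (plus:SI (reg R) (const_int 2)).  */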

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp = 0, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !host_integerp (toffset, 0)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += TREE_INT_CST_LOW (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X; if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
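/* Usage sketch, not in the original source (REG here is a hypothetical
   register rtx):

     rtx sum = gen_rtx_PLUS (SImode, reg, const1_rtx);
     rtx res = simplify_replace_rtx (sum, reg, GEN_INT (7));

   yields (const_int 8): the occurrence of REG is replaced by 7 and the
   rebuilt PLUS is folded through simplify_gen_binary.  */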

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);


      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);


      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
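      /* Illustrative example, not in the original source: for
         (not (and X (not Y))) this yields (ior (not X) Y); and for
         (not (and (not X) Y)) the swap below moves the single
         remaining NOT to the front, yielding (ior (not Y) X).  */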

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_INT_P (XEXP (op, 1))
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
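      /* Illustrative example, not in the original source: with
         STORE_FLAG_VALUE == 1 and X in SImode, (neg (lt X 0)) becomes
         (ashiftrt:SI X 31), which replicates the sign bit across the
         word: -1 when X is negative, 0 otherwise.  */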
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes; we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
                                - GET_MODE_PRECISION (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /*  (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned)significand_size (GET_MODE (op))
                      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /*  (float_extend (float_extend x)) is (float_extend x)

          (float_extend (float x)) is (float x) assuming that double
          rounding can't happen.
          */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              enum machine_mode lmode = GET_MODE (lhs);
              enum machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }
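      /* Illustrative example, not in the original source: extending
         (mult:SI (sign_extend:SI x:HI) (sign_extend:SI y:HI)) to DImode
         gives bits = 16 + 16 = 32 <= 32, so the SImode product cannot
         overflow and the expression becomes the wider
         (mult:DI (sign_extend:DI x:HI) (sign_extend:DI y:HI)).  */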

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is a mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                         ? SIGN_EXTEND : ZERO_EXTEND,
                                         mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              enum machine_mode lmode = GET_MODE (lhs);
              enum machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }
1208
 
1209
      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1210
      if (GET_CODE (op) == ZERO_EXTEND)
1211
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1212
                                   GET_MODE (XEXP (op, 0)));
1213
 
1214
      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1215
         is (zero_extend:M (subreg:O <X>)) if there is mode with
1216
         GET_MODE_BITSIZE (N) - I bits.  */
1217
      if (GET_CODE (op) == LSHIFTRT
1218
          && GET_CODE (XEXP (op, 0)) == ASHIFT
1219
          && CONST_INT_P (XEXP (op, 1))
1220
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1221
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1222
        {
1223
          enum machine_mode tmode
1224
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1225
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1226
          if (tmode != BLKmode)
1227
            {
1228
              rtx inner =
1229
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1230
              return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1231
            }
1232
        }
1233
 
1234
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1235
      /* As we do not know which address space the pointer is refering to,
1236
         we can do this only if the target does not support different pointer
1237
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  unsigned int op_width = GET_MODE_PRECISION (op_mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
      {
        if (!VECTOR_MODE_P (GET_MODE (op)))
          gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
        else
          gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER (GET_MODE (op)));
      }
      if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_PRECISION (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          arg0 &= GET_MODE_MASK (mode);
          val = ffs_hwi (arg0);
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
          break;

        case CLRSB:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            val = GET_MODE_PRECISION (mode) - 1;
          else if (arg0 >= 0)
            val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
          else if (arg0 < 0)
            val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_PRECISION (mode);
            }
          else
            val = ctz_hwi (arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
1434
          while (arg0)
1435
            val++, arg0 &= arg0 - 1;
1436
          break;
1437
 
1438
        case PARITY:
1439
          arg0 &= GET_MODE_MASK (mode);
1440
          val = 0;
1441
          while (arg0)
1442
            val++, arg0 &= arg0 - 1;
1443
          val &= 1;
1444
          break;
1445
 
1446
        case BSWAP:
1447
          {
1448
            unsigned int s;
1449
 
1450
            val = 0;
1451
            for (s = 0; s < width; s += 8)
1452
              {
1453
                unsigned int d = width - s - 8;
1454
                unsigned HOST_WIDE_INT byte;
1455
                byte = (arg0 >> s) & 0xff;
1456
                val |= byte << d;
1457
              }
1458
          }
1459
          break;
1460
 
1461
        case TRUNCATE:
1462
          val = arg0;
1463
          break;
1464
 
1465
        case ZERO_EXTEND:
1466
          /* When zero-extending a CONST_INT, we need to know its
1467
             original mode.  */
1468
          gcc_assert (op_mode != VOIDmode);
1469
          if (op_width == HOST_BITS_PER_WIDE_INT)
1470
            {
1471
              /* If we were really extending the mode,
1472
                 we would have to distinguish between zero-extension
1473
                 and sign-extension.  */
1474
              gcc_assert (width == op_width);
1475
              val = arg0;
1476
            }
1477
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1478
            val = arg0 & GET_MODE_MASK (op_mode);
1479
          else
1480
            return 0;
1481
          break;
1482
 
1483
        case SIGN_EXTEND:
1484
          if (op_mode == VOIDmode)
1485
            op_mode = mode;
1486
          op_width = GET_MODE_PRECISION (op_mode);
1487
          if (op_width == HOST_BITS_PER_WIDE_INT)
1488
            {
1489
              /* If we were really extending the mode,
1490
                 we would have to distinguish between zero-extension
1491
                 and sign-extension.  */
1492
              gcc_assert (width == op_width);
1493
              val = arg0;
1494
            }
1495
          else if (op_width < HOST_BITS_PER_WIDE_INT)
1496
            {
1497
              val = arg0 & GET_MODE_MASK (op_mode);
1498
              if (val_signbit_known_set_p (op_mode, val))
1499
                val |= ~GET_MODE_MASK (op_mode);
1500
            }
1501
          else
1502
            return 0;
1503
          break;
1504
 
1505
        case SQRT:
1506
        case FLOAT_EXTEND:
1507
        case FLOAT_TRUNCATE:
1508
        case SS_TRUNCATE:
1509
        case US_TRUNCATE:
1510
        case SS_NEG:
1511
        case US_NEG:
1512
        case SS_ABS:
1513
          return 0;
1514
 
1515
        default:
1516
          gcc_unreachable ();
1517
        }
1518
 
1519
      return gen_int_mode (val, mode);
1520
    }
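
  /* For example, with 32-bit SImode the cases above fold
     (bswap:SI (const_int 0x12345678)) to (const_int 0x78563412),
     (clz:SI (const_int 0x00010000)) to (const_int 15) and
     (popcount:SI (const_int 0xf0f0)) to (const_int 8).  */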

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 != 0)
            lv = ffs_hwi (l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
          else
            lv = 0;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_PRECISION (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = ctz_hwi (l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_PRECISION (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (op_width > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || op_width > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (val_signbit_known_set_p (op_mode, lv))
                lv |= ~GET_MODE_MASK (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
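
  /* For example, with a 32-bit HOST_WIDE_INT and DImode, a CONST_DOUBLE
     whose low word L1 is 0 and whose high word H1 is 1 represents 2^32,
     so the FFS case above folds it to HOST_BITS_PER_WIDE_INT + 1 = 33.  */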

  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode, unless changing
             mode class.  */
          if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
            real_convert (&d, mode, &d);
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
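
  /* For example, (neg:DF (const_double:DF 1.5)) folds to -1.5 here, and
     (float_truncate:SF (const_double:DF d)) rounds d to SFmode precision;
     SQRT of a signalling NaN is deliberately left unfolded when
     HONOR_SNANS.  */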

  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (unsigned HOST_WIDE_INT) (-1)
                   << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return NULL_RTX;
}
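
/* For example, with 32-bit SImode the FIX case above saturates
   out-of-range values: (fix:SI (const_double:DF 1.0e10)) folds to
   (const_int 2147483647), and a NaN operand folds to (const_int 0).  */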

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "(((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
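
/* For example, (and:SI (and:SI x (const_int 12)) (const_int 10)) is
   simplified here by trying "a op (b op c)": the constant operands fold
   to 12 & 10 == 8, giving (and:SI x (const_int 8)).  */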


/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
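
/* Illustrative only -- a sketch of how a caller might use this entry
   point, kept under "#if 0" because it is just an example.  */
#if 0
static void
example_fold (void)
{
  /* 2 + 3 folds outright via simplify_const_binary_operation.  */
  rtx five = simplify_binary_operation (PLUS, SImode, GEN_INT (2),
                                        GEN_INT (3));
  /* x + 0 returns the register itself via the PLUS case below.  */
  rtx reg = gen_rtx_REG (SImode, FIRST_PSEUDO_REGISTER);
  rtx same = simplify_binary_operation (PLUS, SImode, reg, const0_rtx);
}
#endif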

/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C + X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply or shift.  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          double_int coeff0, coeff1;
          rtx lhs = op0, rhs = op1;

          coeff0 = double_int_one;
          coeff1 = double_int_one;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = double_int_minus_one;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1 = double_int_minus_one;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              double_int val;
              bool speed = optimize_function_for_speed_p (cfun);

              val = double_int_add (coeff0, coeff1);
              coeff = immed_double_int_const (val, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
                ? tem : 0;
            }
        }
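
      /* For example, (plus:SI (mult:SI x (const_int 2)) x) combines the
         coefficients 2 and 1 into (mult:SI x (const_int 3)), provided
         the result is not costlier than the original.  */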

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (CONST_INT_P (XEXP (op0, 1))
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
            if (REG_P (xop00) && REG_P (xop10)
                && GET_MODE (xop00) == GET_MODE (xop10)
                && REGNO (xop00) == REGNO (xop10)
                && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
              return xop00;
        }
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          double_int coeff0, negcoeff1;
          rtx lhs = op0, rhs = op1;

          coeff0 = double_int_one;
          negcoeff1 = double_int_minus_one;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = double_int_minus_one;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1 = double_int_one;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1 = double_int_setbit (double_int_zero,
                                             INTVAL (XEXP (rhs, 1)));
              negcoeff1 = double_int_neg (negcoeff1);
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              double_int val;
              bool speed = optimize_function_for_speed_p (cfun);

              val = double_int_add (coeff0, negcoeff1);
              coeff = immed_double_int_const (val, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
                ? tem : 0;
            }
        }
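
      /* For example, (minus:SI (mult:SI x (const_int 3)) x) combines the
         coefficients 3 and -1 into (mult:SI x (const_int 2)), again
         subject to the cost check.  */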

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_INT_P (op1)
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
          /* If op1 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could, through simplify_associative_operation, move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op1) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op1, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
        }
      if (GET_CODE (op1) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
          /* If op0 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could, through simplify_associative_operation, move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op0) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op0, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
        }

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && (GET_MODE (trueop1) == VOIDmode
              || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case IOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
        return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }
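
      /* For example, (ior:SI (and:SI x (const_int 0x0f)) (const_int 0x05))
         drops the bits of C1 that C2 already covers, giving
         (ior:SI (and:SI x (const_int 0x0a)) (const_int 0x05)).  */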

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && CONST_INT_P (XEXP (opleft, 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
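
      /* For example, in 32-bit SImode
         (ior:SI (ashift:SI x (const_int 24)) (lshiftrt:SI x (const_int 8)))
         has 24 + 8 == 32, so it becomes (rotate:SI x (const_int 24)).  */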

      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and X C1) C2), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
          && (HWI_COMPUTABLE_MODE_P (mode)
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && CONST_INT_P (XEXP (op0, 1))
          && CONST_INT_P (op1)
          && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
        return simplify_gen_binary (IOR, mode,
                                    simplify_gen_binary
                                          (AND, mode, XEXP (op0, 0),
                                           GEN_INT (UINTVAL (XEXP (op0, 1))
                                                    & ~UINTVAL (op1))),
                                    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (XEXP (op0, 0), mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case XOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (CONST_INT_P (XEXP (op0, 1))
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode, op0, op1),
                                     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */

      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
         we can transform like this:
            (A&B)^C == ~(A&B)&C | ~C&(A&B)
                    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
                    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
         Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (op1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          rtx a = XEXP (op0, 0);
          rtx b = XEXP (op0, 1);
          rtx c = op1;
          HOST_WIDE_INT bval = INTVAL (b);
          HOST_WIDE_INT cval = INTVAL (c);

          rtx na_c
            = simplify_binary_operation (AND, mode,
                                         simplify_gen_unary (NOT, mode, a, mode),
                                         c);
          if ((~cval & bval) == 0)
            {
              /* Try to simplify ~A&C | ~B&C.  */
              if (na_c != NULL_RTX)
                return simplify_gen_binary (IOR, mode, na_c,
                                            GEN_INT (~bval & cval));
            }
          else
            {
              /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
              if (na_c == const0_rtx)
                {
                  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
                                                    GEN_INT (~cval & bval));
                  return simplify_gen_binary (IOR, mode, a_nc_b,
                                              GEN_INT (~bval & cval));
                }
            }
        }
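
      /* For example, with B = 3 and C = 15 we have (~C & B) == 0, so if
         ~A&C itself simplifies (NA_C above is non-null), the whole
         expression becomes (ior NA_C (const_int 12)), since ~B&C == 12.  */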
2684
 
2685
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2686
         comparison if STORE_FLAG_VALUE is 1.  */
2687
      if (STORE_FLAG_VALUE == 1
2688
          && trueop1 == const1_rtx
2689
          && COMPARISON_P (op0)
2690
          && (reversed = reversed_comparison (op0, mode)))
2691
        return reversed;
2692
 
2693
      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2694
         is (lt foo (const_int 0)), so we can perform the above
2695
         simplification if STORE_FLAG_VALUE is 1.  */
2696
 
2697
      if (STORE_FLAG_VALUE == 1
2698
          && trueop1 == const1_rtx
2699
          && GET_CODE (op0) == LSHIFTRT
2700
          && CONST_INT_P (XEXP (op0, 1))
2701
          && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2702
        return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2703
 
2704
      /* (xor (comparison foo bar) (const_int sign-bit))
2705
         when STORE_FLAG_VALUE is the sign bit.  */
2706
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
2707
          && trueop1 == const_true_rtx
2708
          && COMPARISON_P (op0)
2709
          && (reversed = reversed_comparison (op0, mode)))
2710
        return reversed;
2711
 
2712
      tem = simplify_associative_operation (code, mode, op0, op1);
2713
      if (tem)
2714
        return tem;
2715
      break;
2716
 
2717
    case AND:
2718
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2719
        return trueop1;
2720
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2721
        return op0;
2722
      if (HWI_COMPUTABLE_MODE_P (mode))
2723
        {
2724
          HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2725
          HOST_WIDE_INT nzop1;
2726
          if (CONST_INT_P (trueop1))
2727
            {
2728
              HOST_WIDE_INT val1 = INTVAL (trueop1);
2729
              /* If we are turning off bits already known off in OP0, we need
2730
                 not do an AND.  */
2731
              if ((nzop0 & ~val1) == 0)
2732
                return op0;
2733
            }
2734
          nzop1 = nonzero_bits (trueop1, mode);
2735
          /* If we are clearing all the nonzero bits, the result is zero.  */
2736
          if ((nzop1 & nzop0) == 0
2737
              && !side_effects_p (op0) && !side_effects_p (op1))
2738
            return CONST0_RTX (mode);
2739
        }
2740
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2741
          && GET_MODE_CLASS (mode) != MODE_CC)
2742
        return op0;
2743
      /* A & (~A) -> 0 */
2744
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2745
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2746
          && ! side_effects_p (op0)
2747
          && GET_MODE_CLASS (mode) != MODE_CC)
2748
        return CONST0_RTX (mode);
2749
 
2750
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2751
         there are no nonzero bits of C outside of X's mode.  */
2752
      if ((GET_CODE (op0) == SIGN_EXTEND
2753
           || GET_CODE (op0) == ZERO_EXTEND)
2754
          && CONST_INT_P (trueop1)
2755
          && HWI_COMPUTABLE_MODE_P (mode)
2756
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2757
              & UINTVAL (trueop1)) == 0)
2758
        {
2759
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2760
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2761
                                     gen_int_mode (INTVAL (trueop1),
2762
                                                   imode));
2763
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2764
        }
2765
 
2766
      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
2767
         we might be able to further simplify the AND with X and potentially
2768
         remove the truncation altogether.  */
2769
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2770
        {
2771
          rtx x = XEXP (op0, 0);
2772
          enum machine_mode xmode = GET_MODE (x);
2773
          tem = simplify_gen_binary (AND, xmode, x,
2774
                                     gen_int_mode (INTVAL (trueop1), xmode));
2775
          return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2776
        }
2777
 
2778
      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
2779
      if (GET_CODE (op0) == IOR
2780
          && CONST_INT_P (trueop1)
2781
          && CONST_INT_P (XEXP (op0, 1)))
2782
        {
2783
          HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2784
          return simplify_gen_binary (IOR, mode,
2785
                                      simplify_gen_binary (AND, mode,
2786
                                                           XEXP (op0, 0), op1),
2787
                                      gen_int_mode (tmp, mode));
2788
        }
2789
 
      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;
 
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.
         Also, if (N & M) == 0, then
         (A +- N) & M -> A & M.  */
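      /* For example, with M == 0xff and N == 0xffff00 we have
         (N & M) == 0, so ((A | 0xffff00) + B) & 0xff becomes
         (A + B) & 0xff: the IOR only touches bits above the mask,
         and carries never propagate downward into the masked bits.  */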
      if (CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && ~UINTVAL (trueop1)
          && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          if (CONST_INT_P (pmop[1])
              && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
            return simplify_gen_binary (AND, mode, pmop[0], op1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
                      == UINTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }
 
      /* (and X (ior (not X) Y)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
          && GET_CODE (XEXP (op1, 0)) == NOT
          && op0 == XEXP (XEXP (op1, 0), 0))
        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
          && GET_CODE (XEXP (op0, 0)) == NOT
          && op1 == XEXP (XEXP (op0, 0), 0))
        return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
 
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
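      /* e.g. (udiv x 8) becomes (lshiftrt x 3).  */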
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;
 
    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0.  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -freciprocal-math.  */
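              /* e.g. x/4.0 becomes x*0.25.  The reciprocal is computed
                 at compile time and may round when the divisor is not a
                 power of two, hence the flag.  */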
              if (flag_reciprocal_math
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else if (SCALAR_INT_MODE_P (mode))
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode)
              && !cfun->can_throw_non_call_exceptions)
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            return rtl_hooks.gen_lowpart_no_emit (mode, op0);
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;
 
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
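      /* e.g. (umod x 16) becomes (and x 15).  */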
      if (CONST_INT_P (trueop1)
          && exact_log2 (UINTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
      break;
 
    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;
 
    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
          && UINTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;
    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
        {
          val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
          if (val != INTVAL (op1))
            return simplify_gen_binary (code, mode, op0, GEN_INT (val));
        }
      break;
 
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      goto canonicalize_shift;
 
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
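      /* For a 32-bit operand whose CLZ at zero is defined as 32, the
         CLZ result lies in [0, 32] and only the value 32 has bit 5
         set, so (lshiftrt (clz X) 5) is 1 exactly when X == 0.  The
         checks below verify this precondition.  */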
      if (GET_CODE (op0) == CLZ
          && CONST_INT_P (trueop1)
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < (HOST_WIDE_INT)width)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
              && zero_val == GET_MODE_PRECISION (imode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, imode,
                                            XEXP (op0, 0), const0_rtx);
        }
      goto canonicalize_shift;
 
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && mode_signbit_p (mode, trueop1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (trueop1)
          && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;
 
    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));

          /* Extract a scalar element from a nested VEC_SELECT expression
             (with optional nested VEC_CONCAT expression).  Some targets
             (i386) extract a scalar element from a vector using a chain
             of nested VEC_SELECT expressions.  When the input operand is
             a memory operand, this operation can be simplified to a
             simple scalar load from an offset memory address.  */
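          /* For example, the nested selection
               (vec_select:SF (vec_select:V2SF X (parallel [1 0]))
                              (parallel [1]))
             picks element 1 of (X[1], X[0]), i.e. X[0], and so
             collapses to (vec_select:SF X (parallel [0])).  */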
          if (GET_CODE (trueop0) == VEC_SELECT)
            {
              rtx op0 = XEXP (trueop0, 0);
              rtx op1 = XEXP (trueop0, 1);

              enum machine_mode opmode = GET_MODE (op0);
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
              int n_elts = GET_MODE_SIZE (opmode) / elt_size;

              int i = INTVAL (XVECEXP (trueop1, 0, 0));
              int elem;

              rtvec vec;
              rtx tmp_op, tmp;

              gcc_assert (GET_CODE (op1) == PARALLEL);
              gcc_assert (i < n_elts);

              /* Select the element pointed to by the nested selector.  */
              elem = INTVAL (XVECEXP (op1, 0, i));

              /* Handle the case when the nested VEC_SELECT wraps a
                 VEC_CONCAT.  */
              if (GET_CODE (op0) == VEC_CONCAT)
                {
                  rtx op00 = XEXP (op0, 0);
                  rtx op01 = XEXP (op0, 1);

                  enum machine_mode mode00, mode01;
                  int n_elts00, n_elts01;

                  mode00 = GET_MODE (op00);
                  mode01 = GET_MODE (op01);

                  /* Find out the number of elements of each operand.  */
                  if (VECTOR_MODE_P (mode00))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
                      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
                    }
                  else
                    n_elts00 = 1;

                  if (VECTOR_MODE_P (mode01))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
                      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
                    }
                  else
                    n_elts01 = 1;

                  gcc_assert (n_elts == n_elts00 + n_elts01);

                  /* Select the correct operand of the VEC_CONCAT
                     and adjust the selector.  */
                  if (elem < n_elts01)
                    tmp_op = op00;
                  else
                    {
                      tmp_op = op01;
                      elem -= n_elts00;
                    }
                }
              else
                tmp_op = op0;

              vec = rtvec_alloc (1);
              RTVEC_ELT (vec, 0) = GEN_INT (elem);

              tmp = gen_rtx_fmt_ee (code, mode,
                                    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
              return tmp;
            }
          if (GET_CODE (trueop0) == VEC_DUPLICATE
              && GET_MODE (XEXP (trueop0, 0)) == mode)
            return XEXP (trueop0, 0);
        }
      else
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            {
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
              unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
              for (i = 0; i < n_elts; i++)
                {
                  rtx x = XVECEXP (trueop1, 0, i);

                  gcc_assert (CONST_INT_P (x));
                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                       INTVAL (x));
                }

              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }

      if (XVECLEN (trueop1, 0) == 1
          && CONST_INT_P (XVECEXP (trueop1, 0, 0))
          && GET_CODE (trueop0) == VEC_CONCAT)
        {
          rtx vec = trueop0;
          int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

          /* Try to find the element in the VEC_CONCAT.  */
          while (GET_MODE (vec) != mode
                 && GET_CODE (vec) == VEC_CONCAT)
            {
              HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
              if (offset < vec_size)
                vec = XEXP (vec, 0);
              else
                {
                  offset -= vec_size;
                  vec = XEXP (vec, 1);
                }
              vec = avoid_constant_pool_reference (vec);
            }

          if (GET_MODE (vec) == mode)
            return vec;
        }

      return 0;
    case VEC_CONCAT:
      {
        enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                      ? GET_MODE (trueop0)
                                      : GET_MODE_INNER (mode));
        enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                      ? GET_MODE (trueop1)
                                      : GET_MODE_INNER (mode));

        gcc_assert (VECTOR_MODE_P (mode));
        gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    == GET_MODE_SIZE (mode));

        if (VECTOR_MODE_P (op0_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op0_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op0_mode);

        if (VECTOR_MODE_P (op1_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op1_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op1_mode);

        if ((GET_CODE (trueop0) == CONST_VECTOR
             || CONST_INT_P (trueop0)
             || GET_CODE (trueop0) == CONST_DOUBLE)
            && (GET_CODE (trueop1) == CONST_VECTOR
                || CONST_INT_P (trueop1)
                || GET_CODE (trueop1) == CONST_DOUBLE))
          {
            int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
            unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
            rtvec v = rtvec_alloc (n_elts);
            unsigned int i;
            unsigned in_n_elts = 1;

            if (VECTOR_MODE_P (op0_mode))
              in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
            for (i = 0; i < n_elts; i++)
              {
                if (i < in_n_elts)
                  {
                    if (!VECTOR_MODE_P (op0_mode))
                      RTVEC_ELT (v, i) = trueop0;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                  }
                else
                  {
                    if (!VECTOR_MODE_P (op1_mode))
                      RTVEC_ELT (v, i) = trueop1;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                           i - in_n_elts);
                  }
              }

            return gen_rtx_CONST_VECTOR (mode, v);
          }
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}
 
rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
                                 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (op0, i),
                                             CONST_VECTOR_ELT (op1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
 
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_INT_P (op0)
          || GET_CODE (op0) == CONST_DOUBLE
          || GET_CODE (op0) == CONST_FIXED)
      && (CONST_INT_P (op1)
          || GET_CODE (op1) == CONST_DOUBLE
          || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
        {
          gcc_assert (GET_CODE (op0) != CONST_VECTOR);
          gcc_assert (GET_CODE (op1) != CONST_VECTOR);

          RTVEC_ELT (v, 0) = op0;
          RTVEC_ELT (v, 1) = op1;
        }
      else
        {
          unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
          unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
          unsigned i;

          gcc_assert (GET_CODE (op0) == CONST_VECTOR);
          gcc_assert (GET_CODE (op1) == CONST_VECTOR);
          gcc_assert (op0_n_elts + op1_n_elts == n_elts);

          for (i = 0; i < op0_n_elts; ++i)
            RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
          for (i = 0; i < op1_n_elts; ++i)
            RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
 
  if (SCALAR_FLOAT_MODE_P (mode)
      && GET_CODE (op0) == CONST_DOUBLE
      && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
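          /* These fold on the target bit image of the constants; e.g.
             (ior:SF 1.0 -0.0) yields -1.0, since IOR sets the sign bit
             in the representation of 1.0.  */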
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              switch (code)
                {
                case AND:
                  tmp0[i] &= tmp1[i];
                  break;
                case IOR:
                  tmp0[i] |= tmp1[i];
                  break;
                case XOR:
                  tmp0[i] ^= tmp1[i];
                  break;
                default:
                  gcc_unreachable ();
                }
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value, result;
          bool inexact;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
          real_convert (&f0, mode, &f0);
          real_convert (&f1, mode, &f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          inexact = real_arithmetic (&value, rtx_to_tree_code (code),
                                     &f0, &f1);
          real_convert (&result, mode, &value);

          /* Don't constant fold this floating point operation if
             the result has overflowed and flag_trapping_math is set.  */

          if (flag_trapping_math
              && MODE_HAS_INFINITIES (mode)
              && REAL_VALUE_ISINF (result)
              && !REAL_VALUE_ISINF (f0)
              && !REAL_VALUE_ISINF (f1))
            /* Overflow plus exception.  */
            return 0;

          /* Don't constant fold this floating point operation if the
             result may depend upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */

          if ((flag_rounding_math
               || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
              && (inexact || !real_identical (&result, &value)))
            return NULL_RTX;

          return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
        }
    }
 
  /* We can fold some multi-word operations.  */
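  /* Via double_int, e.g. a DImode addition can still be folded on a
     host whose HOST_WIDE_INT is only 32 bits wide.  */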
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_DOUBLE_INT
      && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
      && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
    {
      double_int o0, o1, res, tmp;

      o0 = rtx_to_double_int (op0);
      o1 = rtx_to_double_int (op1);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          o1 = double_int_neg (o1);

          /* Fall through....  */

        case PLUS:
          res = double_int_add (o0, o1);
          break;

        case MULT:
          res = double_int_mul (o0, o1);
          break;

        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0,
                                    o0.low, o0.high, o1.low, o1.high,
                                    &res.low, &res.high,
                                    &tmp.low, &tmp.high))
            return 0;
          break;

        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0,
                                    o0.low, o0.high, o1.low, o1.high,
                                    &tmp.low, &tmp.high,
                                    &res.low, &res.high))
            return 0;
          break;

        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1,
                                    o0.low, o0.high, o1.low, o1.high,
                                    &res.low, &res.high,
                                    &tmp.low, &tmp.high))
            return 0;
          break;

        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1,
                                    o0.low, o0.high, o1.low, o1.high,
                                    &tmp.low, &tmp.high,
                                    &res.low, &res.high))
            return 0;
          break;

        case AND:
          res = double_int_and (o0, o1);
          break;

        case IOR:
          res = double_int_ior (o0, o1);
          break;

        case XOR:
          res = double_int_xor (o0, o1);
          break;

        case SMIN:
          res = double_int_smin (o0, o1);
          break;

        case SMAX:
          res = double_int_smax (o0, o1);
          break;

        case UMIN:
          res = double_int_umin (o0, o1);
          break;

        case UMAX:
          res = double_int_umax (o0, o1);
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          {
            unsigned HOST_WIDE_INT cnt;

            if (SHIFT_COUNT_TRUNCATED)
              o1 = double_int_zext (o1, GET_MODE_PRECISION (mode));

            if (!double_int_fits_in_uhwi_p (o1)
                || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
              return 0;

            cnt = double_int_to_uhwi (o1);

            if (code == LSHIFTRT || code == ASHIFTRT)
              res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
                                       code == ASHIFTRT);
            else if (code == ASHIFT)
              res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
                                       true);
            else if (code == ROTATE)
              res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
            else /* code == ROTATERT */
              res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
          }
          break;

        default:
          return 0;
        }

      return immed_double_int_const (res, mode);
    }
 
  if (CONST_INT_P (op0) && CONST_INT_P (op1)
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
         zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
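      /* e.g. for QImode 0xff, ARG0 is 0xff while ARG0S is -1, letting
         the unsigned and signed cases below each pick the right view.  */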
3670
 
3671
      arg0 = INTVAL (op0);
3672
      arg1 = INTVAL (op1);
3673
 
3674
      if (width < HOST_BITS_PER_WIDE_INT)
3675
        {
3676
          arg0 &= GET_MODE_MASK (mode);
3677
          arg1 &= GET_MODE_MASK (mode);
3678
 
3679
          arg0s = arg0;
3680
          if (val_signbit_known_set_p (mode, arg0s))
3681
            arg0s |= ~GET_MODE_MASK (mode);
3682
 
3683
          arg1s = arg1;
3684
          if (val_signbit_known_set_p (mode, arg1s))
3685
            arg1s |= ~GET_MODE_MASK (mode);
3686
        }
3687
      else
3688
        {
3689
          arg0s = arg0;
3690
          arg1s = arg1;
3691
        }
3692
 
3693
      /* Compute the value of the arithmetic.  */
3694
 
3695
      switch (code)
3696
        {
3697
        case PLUS:
3698
          val = arg0s + arg1s;
3699
          break;
3700
 
3701
        case MINUS:
3702
          val = arg0s - arg1s;
3703
          break;
3704
 
3705
        case MULT:
3706
          val = arg0s * arg1s;
3707
          break;
3708
 
3709
        case DIV:
3710
          if (arg1s == 0
3711
              || ((unsigned HOST_WIDE_INT) arg0s
3712
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3713
                  && arg1s == -1))
3714
            return 0;
3715
          val = arg0s / arg1s;
3716
          break;
3717
 
3718
        case MOD:
3719
          if (arg1s == 0
3720
              || ((unsigned HOST_WIDE_INT) arg0s
3721
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3722
                  && arg1s == -1))
3723
            return 0;
3724
          val = arg0s % arg1s;
3725
          break;
3726
 
3727
        case UDIV:
3728
          if (arg1 == 0
3729
              || ((unsigned HOST_WIDE_INT) arg0s
3730
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3731
                  && arg1s == -1))
3732
            return 0;
3733
          val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3734
          break;
3735
 
3736
        case UMOD:
3737
          if (arg1 == 0
3738
              || ((unsigned HOST_WIDE_INT) arg0s
3739
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3740
                  && arg1s == -1))
3741
            return 0;
3742
          val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3743
          break;
3744
 
3745
        case AND:
3746
          val = arg0 & arg1;
3747
          break;
3748
 
3749
        case IOR:
3750
          val = arg0 | arg1;
3751
          break;
3752
 
3753
        case XOR:
3754
          val = arg0 ^ arg1;
3755
          break;
3756
 
3757
        case LSHIFTRT:
3758
        case ASHIFT:
3759
        case ASHIFTRT:
3760
          /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3761
             the value is in range.  We can't return any old value for
3762
             out-of-range arguments because either the middle-end (via
3763
             shift_truncation_mask) or the back-end might be relying on
3764
             target-specific knowledge.  Nor can we rely on
3765
             shift_truncation_mask, since the shift might not be part of an
3766
             ashlM3, lshrM3 or ashrM3 instruction.  */
3767
          if (SHIFT_COUNT_TRUNCATED)
3768
            arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3769
          else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3770
            return 0;
3771
 
3772
          val = (code == ASHIFT
3773
                 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3774
                 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3775
 
3776
          /* Sign-extend the result for arithmetic right shifts.  */
3777
          if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3778
            val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3779
          break;
3780
 
3781
        case ROTATERT:
3782
          if (arg1 < 0)
3783
            return 0;
3784
 
3785
          arg1 %= width;
3786
          val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3787
                 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3788
          break;
3789
 
3790
        case ROTATE:
3791
          if (arg1 < 0)
3792
            return 0;
3793
 
3794
          arg1 %= width;
3795
          val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3796
                 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3797
          break;
3798
 
3799
        case COMPARE:
3800
          /* Do nothing here.  */
3801
          return 0;
3802
 
3803
        case SMIN:
3804
          val = arg0s <= arg1s ? arg0s : arg1s;
3805
          break;
3806
 
3807
        case UMIN:
3808
          val = ((unsigned HOST_WIDE_INT) arg0
3809
                 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3810
          break;
3811
 
3812
        case SMAX:
3813
          val = arg0s > arg1s ? arg0s : arg1s;
3814
          break;
3815
 
3816
        case UMAX:
3817
          val = ((unsigned HOST_WIDE_INT) arg0
3818
                 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3819
          break;
3820
 
3821
        case SS_PLUS:
3822
        case US_PLUS:
3823
        case SS_MINUS:
3824
        case US_MINUS:
3825
        case SS_MULT:
3826
        case US_MULT:
3827
        case SS_DIV:
3828
        case US_DIV:
3829
        case SS_ASHIFT:
3830
        case US_ASHIFT:
3831
          /* ??? There are simplifications that can be done.  */
3832
          return 0;
3833
 
3834
        default:
3835
          gcc_unreachable ();
3836
        }
3837
 
3838
      return gen_int_mode (val, mode);
3839
    }
3840
 
3841
  return NULL_RTX;
3842
}



/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
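
/* For example, (a - b) + (b - c) expands to the list
   {+a, -b, +b, -c}; the matching -b/+b pair cancels, and the result
   is rebuilt as (minus a c).  */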

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
            - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}
 
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == 7)
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              input_ops++;
              changed = 1;
              canonicalized |= this_neg;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              canonicalized = 1;
              break;

            case CONST:
              if (n_ops < 7
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != 7)
                {
                  ops[n_ops].op = CONSTM1_RTX (mode);
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            case CONST_INT:
              n_constants++;
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
         the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
        {
          lhs = gen_rtx_NEG (mode, ops[0].op);
          rhs = ops[1].op;
        }
      else if (ops[0].neg)
        {
          lhs = ops[1].op;
          rhs = ops[0].op;
        }
      else
        {
          lhs = ops[0].op;
          rhs = ops[1].op;
        }

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for an eight-element array.  */
      for (i = 1; i < n_ops; i++)
        {
          struct simplify_plus_minus_op_data save;
          j = i - 1;
          if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
            continue;

          canonicalized = 1;
          save = ops[i];
          do
            ops[j + 1] = ops[j];
          while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
          ops[j + 1] = save;
        }

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
        for (j = i - 1; j >= 0; j--)
          {
            rtx lhs = ops[j].op, rhs = ops[i].op;
            int lneg = ops[j].neg, rneg = ops[i].neg;

            if (lhs != 0 && rhs != 0)
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
                    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
                  {
                    rtx tem_lhs, tem_rhs;

                    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
                    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
                    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);

                    if (tem && !CONSTANT_P (tem))
                      tem = gen_rtx_CONST (GET_MODE (tem), tem);
                  }
                else
                  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (CONST_INT_P (tem) && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                    canonicalized = 1;
                  }
              }
          }

      /* If nothing changed, fail.  */
      if (!canonicalized)
        return NULL_RTX;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
        if (ops[j].op)
          {
            ops[i] = ops[j];
            i++;
          }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return result;
}
 
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
 
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands
   must not both be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it
   is the mode of the operands.  If CMP_MODE is VOIDmode, it is taken
   from the operands or, if both are VOIDmode, the operands are
   compared in "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
                               enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
          {
            REAL_VALUE_TYPE val;
            val = FLOAT_STORE_FLAG_VALUE (mode);
            return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
          }
#else
          return NULL_RTX;
#endif
        }
      if (VECTOR_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
          {
            int i, units;
            rtvec v;

            rtx val = VECTOR_STORE_FLAG_VALUE (mode);
            if (val == NULL_RTX)
              return NULL_RTX;
            if (val == const1_rtx)
              return CONST1_RTX (mode);

            units = GET_MODE_NUNITS (mode);
            v = rtvec_alloc (units);
            for (i = 0; i < units; i++)
              RTVEC_ELT (v, i) = val;
            return gen_rtx_raw_CONST_VECTOR (mode, v);
          }
#else
          return NULL_RTX;
#endif
        }

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
                                          trueop0, trueop1);
}
 
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
 
static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
                                 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
         from it.  */
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return simplify_rtx (op0);
          else
            return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
          if (new_code != UNKNOWN)
            return simplify_gen_relational (new_code, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }
 
  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
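  /* e.g. (ltu (plus a 4) 4) becomes (geu a -4); a + 4 wraps around
     exactly when a is one of the four largest unsigned values.  */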
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
          || rtx_equal_p (op1, XEXP (op0, 1))))
    {
      rtx new_cmp
        = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
                                      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
                                    copy_rtx (XEXP (op0, 0)));
 
4303
  if (op1 == const0_rtx)
4304
    {
4305
      /* Canonicalize (GTU x 0) as (NE x 0).  */
4306
      if (code == GTU)
4307
        return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4308
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
4309
      if (code == LEU)
4310
        return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4311
    }
4312
  else if (op1 == const1_rtx)
4313
    {
4314
      switch (code)
4315
        {
4316
        case GE:
4317
          /* Canonicalize (GE x 1) as (GT x 0).  */
4318
          return simplify_gen_relational (GT, mode, cmp_mode,
4319
                                          op0, const0_rtx);
4320
        case GEU:
4321
          /* Canonicalize (GEU x 1) as (NE x 0).  */
4322
          return simplify_gen_relational (NE, mode, cmp_mode,
4323
                                          op0, const0_rtx);
4324
        case LT:
4325
          /* Canonicalize (LT x 1) as (LE x 0).  */
4326
          return simplify_gen_relational (LE, mode, cmp_mode,
4327
                                          op0, const0_rtx);
4328
        case LTU:
4329
          /* Canonicalize (LTU x 1) as (EQ x 0).  */
4330
          return simplify_gen_relational (EQ, mode, cmp_mode,
4331
                                          op0, const0_rtx);
4332
        default:
4333
          break;
4334
        }
4335
    }
4336
  else if (op1 == constm1_rtx)
4337
    {
4338
      /* Canonicalize (LE x -1) as (LT x 0).  */
4339
      if (code == LE)
4340
        return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4341
      /* Canonicalize (GT x -1) as (GE x 0).  */
4342
      if (code == GT)
4343
        return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4344
    }
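  /* These canonicalizations collapse equivalent forms, e.g. (gtu x 0)
     and (geu x 1) both become (ne x 0), so later passes need only match
     one shape per predicate.  */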
4345
 
4346
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
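  /* E.g. (eq (plus x 3) 10) becomes (eq x 7).  */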
4347
  if ((code == EQ || code == NE)
4348
      && (op0code == PLUS || op0code == MINUS)
4349
      && CONSTANT_P (op1)
4350
      && CONSTANT_P (XEXP (op0, 1))
4351
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4352
    {
4353
      rtx x = XEXP (op0, 0);
4354
      rtx c = XEXP (op0, 1);
4355
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4356
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4357
 
4358
      /* Detect an infinitely recursive condition, where we would oscillate
4359
         in this simplification case between:
4360
            A + B == C  <--->  C - B == A,
4361
         where A, B, and C are all constants with non-simplifiable expressions,
4362
         usually SYMBOL_REFs.  */
4363
      if (GET_CODE (tem) == invcode
4364
          && CONSTANT_P (x)
4365
          && rtx_equal_p (c, XEXP (tem, 1)))
4366
        return NULL_RTX;
4367
 
4368
      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4369
    }
4370
 
4371
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4372
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
4373
  if (code == NE
4374
      && op1 == const0_rtx
4375
      && GET_MODE_CLASS (mode) == MODE_INT
4376
      && cmp_mode != VOIDmode
4377
      /* ??? Work-around BImode bugs in the ia64 backend.  */
4378
      && mode != BImode
4379
      && cmp_mode != BImode
4380
      && nonzero_bits (op0, cmp_mode) == 1
4381
      && STORE_FLAG_VALUE == 1)
4382
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4383
           ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4384
           : lowpart_subreg (mode, op0, cmp_mode);
4385
 
4386
  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
4387
  if ((code == EQ || code == NE)
4388
      && op1 == const0_rtx
4389
      && op0code == XOR)
4390
    return simplify_gen_relational (code, mode, cmp_mode,
4391
                                    XEXP (op0, 0), XEXP (op0, 1));
4392
 
4393
  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
4394
  if ((code == EQ || code == NE)
4395
      && op0code == XOR
4396
      && rtx_equal_p (XEXP (op0, 0), op1)
4397
      && !side_effects_p (XEXP (op0, 0)))
4398
    return simplify_gen_relational (code, mode, cmp_mode,
4399
                                    XEXP (op0, 1), const0_rtx);
4400
 
4401
  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
4402
  if ((code == EQ || code == NE)
4403
      && op0code == XOR
4404
      && rtx_equal_p (XEXP (op0, 1), op1)
4405
      && !side_effects_p (XEXP (op0, 1)))
4406
    return simplify_gen_relational (code, mode, cmp_mode,
4407
                                    XEXP (op0, 0), const0_rtx);
4408
 
4409
  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
4410
  if ((code == EQ || code == NE)
4411
      && op0code == XOR
4412
      && (CONST_INT_P (op1)
4413
          || GET_CODE (op1) == CONST_DOUBLE)
4414
      && (CONST_INT_P (XEXP (op0, 1))
4415
          || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4416
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4417
                                    simplify_gen_binary (XOR, cmp_mode,
4418
                                                         XEXP (op0, 1), op1));
4419
 
4420
  if (op0code == POPCOUNT && op1 == const0_rtx)
4421
    switch (code)
4422
      {
4423
      case EQ:
4424
      case LE:
4425
      case LEU:
4426
        /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
4427
        return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4428
                                        XEXP (op0, 0), const0_rtx);
4429
 
4430
      case NE:
4431
      case GT:
4432
      case GTU:
4433
        /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
4434
        return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4435
                                        XEXP (op0, 0), const0_rtx);
4436
 
4437
      default:
4438
        break;
4439
      }
4440
 
4441
  return NULL_RTX;
4442
}
4443
 
4444
enum
4445
{
4446
  CMP_EQ = 1,
4447
  CMP_LT = 2,
4448
  CMP_GT = 4,
4449
  CMP_LTU = 8,
4450
  CMP_GTU = 16
4451
};
4452
 
4453
 
4454
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4455
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4456
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4457
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4458
   For floating-point comparisons, assume that the operands were ordered.  */
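/* For example, with KNOWN_RESULTS == (CMP_LT | CMP_LTU), a request for
   LE yields const_true_rtx, because CMP_GT is not set.  */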
4459
 
4460
static rtx
4461
comparison_result (enum rtx_code code, int known_results)
4462
{
4463
  switch (code)
4464
    {
4465
    case EQ:
4466
    case UNEQ:
4467
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4468
    case NE:
4469
    case LTGT:
4470
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4471
 
4472
    case LT:
4473
    case UNLT:
4474
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4475
    case GE:
4476
    case UNGE:
4477
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4478
 
4479
    case GT:
4480
    case UNGT:
4481
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4482
    case LE:
4483
    case UNLE:
4484
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4485
 
4486
    case LTU:
4487
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4488
    case GEU:
4489
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4490
 
4491
    case GTU:
4492
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4493
    case LEU:
4494
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4495
 
4496
    case ORDERED:
4497
      return const_true_rtx;
4498
    case UNORDERED:
4499
      return const0_rtx;
4500
    default:
4501
      gcc_unreachable ();
4502
    }
4503
}
4504
 
4505
/* Check if the given comparison (done in the given MODE) is actually a
4506
   tautology or a contradiction.
4507
   If no simplification is possible, this function returns zero.
4508
   Otherwise, it returns either const_true_rtx or const0_rtx.  */
4509
 
4510
rtx
4511
simplify_const_relational_operation (enum rtx_code code,
4512
                                     enum machine_mode mode,
4513
                                     rtx op0, rtx op1)
4514
{
4515
  rtx tem;
4516
  rtx trueop0;
4517
  rtx trueop1;
4518
 
4519
  gcc_assert (mode != VOIDmode
4520
              || (GET_MODE (op0) == VOIDmode
4521
                  && GET_MODE (op1) == VOIDmode));
4522
 
4523
  /* If op0 is a compare, extract the comparison arguments from it.  */
4524
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4525
    {
4526
      op1 = XEXP (op0, 1);
4527
      op0 = XEXP (op0, 0);
4528
 
4529
      if (GET_MODE (op0) != VOIDmode)
4530
        mode = GET_MODE (op0);
4531
      else if (GET_MODE (op1) != VOIDmode)
4532
        mode = GET_MODE (op1);
4533
      else
4534
        return 0;
4535
    }
4536
 
4537
  /* We can't simplify MODE_CC values since we don't know what the
4538
     actual comparison is.  */
4539
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4540
    return 0;
4541
 
4542
  /* Make sure the constant is second.  */
4543
  if (swap_commutative_operands_p (op0, op1))
4544
    {
4545
      tem = op0, op0 = op1, op1 = tem;
4546
      code = swap_condition (code);
4547
    }
4548
 
4549
  trueop0 = avoid_constant_pool_reference (op0);
4550
  trueop1 = avoid_constant_pool_reference (op1);
4551
 
4552
  /* For integer comparisons of A and B maybe we can simplify A - B and can
4553
     then simplify a comparison of that with zero.  If A and B are both either
4554
     a register or a CONST_INT, this can't help; testing for these cases will
4555
     prevent infinite recursion here and speed things up.
4556
 
4557
     We can only do this for EQ and NE comparisons, as otherwise we may
4558
     lose or introduce overflow that we cannot disregard as undefined,
4559
     because we do not know the signedness of the operation on either the
4560
     left or the right hand side of the comparison.  */
4561
 
4562
  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4563
      && (code == EQ || code == NE)
4564
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4565
            && (REG_P (op1) || CONST_INT_P (trueop1)))
4566
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4567
      /* We cannot do this if tem is a nonzero address.  */
4568
      && ! nonzero_address_p (tem))
4569
    return simplify_const_relational_operation (signed_condition (code),
4570
                                                mode, tem, const0_rtx);
4571
 
4572
  if (! HONOR_NANS (mode) && code == ORDERED)
4573
    return const_true_rtx;
4574
 
4575
  if (! HONOR_NANS (mode) && code == UNORDERED)
4576
    return const0_rtx;
4577
 
4578
  /* For modes without NaNs, if the two operands are equal, we know the
4579
     result except if they have side-effects.  Even with NaNs we know
4580
     the result of unordered comparisons and, if signaling NaNs are
4581
     irrelevant, also the result of LT/GT/LTGT.  */
4582
  if ((! HONOR_NANS (GET_MODE (trueop0))
4583
       || code == UNEQ || code == UNLE || code == UNGE
4584
       || ((code == LT || code == GT || code == LTGT)
4585
           && ! HONOR_SNANS (GET_MODE (trueop0))))
4586
      && rtx_equal_p (trueop0, trueop1)
4587
      && ! side_effects_p (trueop0))
4588
    return comparison_result (code, CMP_EQ);
4589
 
4590
  /* If the operands are floating-point constants, see if we can fold
4591
     the result.  */
4592
  if (GET_CODE (trueop0) == CONST_DOUBLE
4593
      && GET_CODE (trueop1) == CONST_DOUBLE
4594
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4595
    {
4596
      REAL_VALUE_TYPE d0, d1;
4597
 
4598
      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4599
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4600
 
4601
      /* Comparisons are unordered iff at least one of the values is NaN.  */
4602
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4603
        switch (code)
4604
          {
4605
          case UNEQ:
4606
          case UNLT:
4607
          case UNGT:
4608
          case UNLE:
4609
          case UNGE:
4610
          case NE:
4611
          case UNORDERED:
4612
            return const_true_rtx;
4613
          case EQ:
4614
          case LT:
4615
          case GT:
4616
          case LE:
4617
          case GE:
4618
          case LTGT:
4619
          case ORDERED:
4620
            return const0_rtx;
4621
          default:
4622
            return 0;
4623
          }
4624
 
4625
      return comparison_result (code,
4626
                                (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4627
                                 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4628
    }
4629
 
4630
  /* Otherwise, see if the operands are both integers.  */
4631
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4632
       && (GET_CODE (trueop0) == CONST_DOUBLE
4633
           || CONST_INT_P (trueop0))
4634
       && (GET_CODE (trueop1) == CONST_DOUBLE
4635
           || CONST_INT_P (trueop1)))
4636
    {
4637
      int width = GET_MODE_PRECISION (mode);
4638
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
4639
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4640
 
4641
      /* Get the two words comprising each integer constant.  */
4642
      if (GET_CODE (trueop0) == CONST_DOUBLE)
4643
        {
4644
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4645
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4646
        }
4647
      else
4648
        {
4649
          l0u = l0s = INTVAL (trueop0);
4650
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
4651
        }
4652
 
4653
      if (GET_CODE (trueop1) == CONST_DOUBLE)
4654
        {
4655
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4656
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4657
        }
4658
      else
4659
        {
4660
          l1u = l1s = INTVAL (trueop1);
4661
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
4662
        }
4663
 
4664
      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4665
         we have to sign or zero-extend the values.  */
4666
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4667
        {
4668
          l0u &= GET_MODE_MASK (mode);
4669
          l1u &= GET_MODE_MASK (mode);
4670
 
4671
          if (val_signbit_known_set_p (mode, l0s))
4672
            l0s |= ~GET_MODE_MASK (mode);
4673
 
4674
          if (val_signbit_known_set_p (mode, l1s))
4675
            l1s |= ~GET_MODE_MASK (mode);
4676
        }
4677
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4678
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4679
 
4680
      if (h0u == h1u && l0u == l1u)
4681
        return comparison_result (code, CMP_EQ);
4682
      else
4683
        {
4684
          int cr;
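          /* Order the (high, low) pairs both as signed and as unsigned
             values; e.g. high -1 / low 5 versus high 0 / low 3 compares
             CMP_LT as signed but CMP_GTU as unsigned.  */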
4685
          cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4686
          cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4687
          return comparison_result (code, cr);
4688
        }
4689
    }
4690
 
4691
  /* Optimize comparisons with upper and lower bounds.  */
4692
  if (HWI_COMPUTABLE_MODE_P (mode)
4693
      && CONST_INT_P (trueop1))
4694
    {
4695
      int sign;
4696
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4697
      HOST_WIDE_INT val = INTVAL (trueop1);
4698
      HOST_WIDE_INT mmin, mmax;
4699
 
4700
      if (code == GEU
4701
          || code == LEU
4702
          || code == GTU
4703
          || code == LTU)
4704
        sign = 0;
4705
      else
4706
        sign = 1;
4707
 
4708
      /* Get a reduced range if the sign bit is zero.  */
4709
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4710
        {
4711
          mmin = 0;
4712
          mmax = nonzero;
4713
        }
4714
      else
4715
        {
4716
          rtx mmin_rtx, mmax_rtx;
4717
          get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4718
 
4719
          mmin = INTVAL (mmin_rtx);
4720
          mmax = INTVAL (mmax_rtx);
4721
          if (sign)
4722
            {
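              /* Each sign-bit copy beyond the first halves the usable
                 range; e.g. an SImode value with 25 sign-bit copies fits
                 in 8 bits, so the bounds shrink to [-128, 127].  */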
4723
              unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4724
 
4725
              mmin >>= (sign_copies - 1);
4726
              mmax >>= (sign_copies - 1);
4727
            }
4728
        }
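      /* With [mmin, mmax] known, comparisons against VAL can fold
         outright; e.g. if trueop0 is known to lie in [0, 3], then
         (gtu trueop0 3) is const0_rtx and (leu trueop0 3) is
         const_true_rtx.  */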
4729
 
4730
      switch (code)
4731
        {
4732
        /* x >= y is always true for y <= mmin, always false for y > mmax.  */
4733
        case GEU:
4734
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4735
            return const_true_rtx;
4736
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4737
            return const0_rtx;
4738
          break;
4739
        case GE:
4740
          if (val <= mmin)
4741
            return const_true_rtx;
4742
          if (val > mmax)
4743
            return const0_rtx;
4744
          break;
4745
 
4746
        /* x <= y is always true for y >= mmax, always false for y < mmin.  */
4747
        case LEU:
4748
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4749
            return const_true_rtx;
4750
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4751
            return const0_rtx;
4752
          break;
4753
        case LE:
4754
          if (val >= mmax)
4755
            return const_true_rtx;
4756
          if (val < mmin)
4757
            return const0_rtx;
4758
          break;
4759
 
4760
        case EQ:
4761
          /* x == y is always false for y out of range.  */
4762
          if (val < mmin || val > mmax)
4763
            return const0_rtx;
4764
          break;
4765
 
4766
        /* x > y is always false for y >= mmax, always true for y < mmin.  */
4767
        case GTU:
4768
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4769
            return const0_rtx;
4770
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4771
            return const_true_rtx;
4772
          break;
4773
        case GT:
4774
          if (val >= mmax)
4775
            return const0_rtx;
4776
          if (val < mmin)
4777
            return const_true_rtx;
4778
          break;
4779
 
4780
        /* x < y is always false for y <= mmin, always true for y > mmax.  */
4781
        case LTU:
4782
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4783
            return const0_rtx;
4784
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4785
            return const_true_rtx;
4786
          break;
4787
        case LT:
4788
          if (val <= mmin)
4789
            return const0_rtx;
4790
          if (val > mmax)
4791
            return const_true_rtx;
4792
          break;
4793
 
4794
        case NE:
4795
          /* x != y is always true for y out of range.  */
4796
          if (val < mmin || val > mmax)
4797
            return const_true_rtx;
4798
          break;
4799
 
4800
        default:
4801
          break;
4802
        }
4803
    }
4804
 
4805
  /* Optimize integer comparisons with zero.  */
4806
  if (trueop1 == const0_rtx)
4807
    {
4808
      /* Some addresses are known to be nonzero.  We don't know
4809
         their sign, but equality comparisons are known.  */
4810
      if (nonzero_address_p (trueop0))
4811
        {
4812
          if (code == EQ || code == LEU)
4813
            return const0_rtx;
4814
          if (code == NE || code == GTU)
4815
            return const_true_rtx;
4816
        }
4817
 
4818
      /* See if the first operand is an IOR with a constant.  If so, we
4819
         may be able to determine the result of this comparison.  */
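      /* E.g. (lt (ior x 0x80000000) 0) in SImode is known true, since
         the IOR forces the sign bit on.  */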
4820
      if (GET_CODE (op0) == IOR)
4821
        {
4822
          rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4823
          if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4824
            {
4825
              int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4826
              int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4827
                              && (UINTVAL (inner_const)
4828
                                  & ((unsigned HOST_WIDE_INT) 1
4829
                                     << sign_bitnum)));
4830
 
4831
              switch (code)
4832
                {
4833
                case EQ:
4834
                case LEU:
4835
                  return const0_rtx;
4836
                case NE:
4837
                case GTU:
4838
                  return const_true_rtx;
4839
                case LT:
4840
                case LE:
4841
                  if (has_sign)
4842
                    return const_true_rtx;
4843
                  break;
4844
                case GT:
4845
                case GE:
4846
                  if (has_sign)
4847
                    return const0_rtx;
4848
                  break;
4849
                default:
4850
                  break;
4851
                }
4852
            }
4853
        }
4854
    }
4855
 
4856
  /* Optimize comparison of ABS with zero.  */
4857
  if (trueop1 == CONST0_RTX (mode)
4858
      && (GET_CODE (trueop0) == ABS
4859
          || (GET_CODE (trueop0) == FLOAT_EXTEND
4860
              && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4861
    {
4862
      switch (code)
4863
        {
4864
        case LT:
4865
          /* Optimize abs(x) < 0.0.  */
4866
          if (!HONOR_SNANS (mode)
4867
              && (!INTEGRAL_MODE_P (mode)
4868
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4869
            {
4870
              if (INTEGRAL_MODE_P (mode)
4871
                  && (issue_strict_overflow_warning
4872
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4873
                warning (OPT_Wstrict_overflow,
4874
                         ("assuming signed overflow does not occur when "
4875
                          "assuming abs (x) < 0 is false"));
4876
               return const0_rtx;
4877
            }
4878
          break;
4879
 
4880
        case GE:
4881
          /* Optimize abs(x) >= 0.0.  */
4882
          if (!HONOR_NANS (mode)
4883
              && (!INTEGRAL_MODE_P (mode)
4884
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4885
            {
4886
              if (INTEGRAL_MODE_P (mode)
4887
                  && (issue_strict_overflow_warning
4888
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4889
                warning (OPT_Wstrict_overflow,
4890
                         ("assuming signed overflow does not occur when "
4891
                          "assuming abs (x) >= 0 is true"));
4892
              return const_true_rtx;
4893
            }
4894
          break;
4895
 
4896
        case UNGE:
4897
          /* Optimize ! (abs(x) < 0.0).  */
4898
          return const_true_rtx;
4899
 
4900
        default:
4901
          break;
4902
        }
4903
    }
4904
 
4905
  return 0;
4906
}
4907
 
4908
/* Simplify CODE, an operation with result mode MODE and three operands,
4909
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
4910
   a constant.  Return 0 if no simplification is possible.  */
4911
 
4912
rtx
4913
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4914
                            enum machine_mode op0_mode, rtx op0, rtx op1,
4915
                            rtx op2)
4916
{
4917
  unsigned int width = GET_MODE_PRECISION (mode);
4918
  bool any_change = false;
4919
  rtx tem;
4920
 
4921
  /* VOIDmode means "infinite" precision.  */
4922
  if (width == 0)
4923
    width = HOST_BITS_PER_WIDE_INT;
4924
 
4925
  switch (code)
4926
    {
4927
    case FMA:
4928
      /* Simplify negations around the multiplication.  */
4929
      /* -a * -b + c  =>  a * b + c.  */
4930
      if (GET_CODE (op0) == NEG)
4931
        {
4932
          tem = simplify_unary_operation (NEG, mode, op1, mode);
4933
          if (tem)
4934
            op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4935
        }
4936
      else if (GET_CODE (op1) == NEG)
4937
        {
4938
          tem = simplify_unary_operation (NEG, mode, op0, mode);
4939
          if (tem)
4940
            op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4941
        }
4942
 
4943
      /* Canonicalize the two multiplication operands.  */
4944
      /* a * -b + c  =>  -b * a + c.  */
4945
      if (swap_commutative_operands_p (op0, op1))
4946
        tem = op0, op0 = op1, op1 = tem, any_change = true;
4947
 
4948
      if (any_change)
4949
        return gen_rtx_FMA (mode, op0, op1, op2);
4950
      return NULL_RTX;
4951
 
4952
    case SIGN_EXTRACT:
4953
    case ZERO_EXTRACT:
4954
      if (CONST_INT_P (op0)
4955
          && CONST_INT_P (op1)
4956
          && CONST_INT_P (op2)
4957
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4958
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4959
        {
4960
          /* Extracting a bit-field from a constant.  */
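          /* E.g. with little-endian bit numbering, a 4-bit field at bit
             position 4 of the constant 0xf0 yields 15 for ZERO_EXTRACT
             and -1 for SIGN_EXTRACT, the extracted field being all
             ones.  */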
4961
          unsigned HOST_WIDE_INT val = UINTVAL (op0);
4962
          HOST_WIDE_INT op1val = INTVAL (op1);
4963
          HOST_WIDE_INT op2val = INTVAL (op2);
4964
          if (BITS_BIG_ENDIAN)
4965
            val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
4966
          else
4967
            val >>= op2val;
4968
 
4969
          if (HOST_BITS_PER_WIDE_INT != op1val)
4970
            {
4971
              /* First zero-extend.  */
4972
              val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
4973
              /* If desired, propagate sign bit.  */
4974
              if (code == SIGN_EXTRACT
4975
                  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
4976
                     != 0)
4977
                val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
4978
            }
4979
 
4980
          return gen_int_mode (val, mode);
4981
        }
4982
      break;
4983
 
4984
    case IF_THEN_ELSE:
4985
      if (CONST_INT_P (op0))
4986
        return op0 != const0_rtx ? op1 : op2;
4987
 
4988
      /* Convert c ? a : a into "a".  */
4989
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4990
        return op1;
4991
 
4992
      /* Convert a != b ? a : b into "a".  */
4993
      if (GET_CODE (op0) == NE
4994
          && ! side_effects_p (op0)
4995
          && ! HONOR_NANS (mode)
4996
          && ! HONOR_SIGNED_ZEROS (mode)
4997
          && ((rtx_equal_p (XEXP (op0, 0), op1)
4998
               && rtx_equal_p (XEXP (op0, 1), op2))
4999
              || (rtx_equal_p (XEXP (op0, 0), op2)
5000
                  && rtx_equal_p (XEXP (op0, 1), op1))))
5001
        return op1;
5002
 
5003
      /* Convert a == b ? a : b into "b".  */
5004
      if (GET_CODE (op0) == EQ
5005
          && ! side_effects_p (op0)
5006
          && ! HONOR_NANS (mode)
5007
          && ! HONOR_SIGNED_ZEROS (mode)
5008
          && ((rtx_equal_p (XEXP (op0, 0), op1)
5009
               && rtx_equal_p (XEXP (op0, 1), op2))
5010
              || (rtx_equal_p (XEXP (op0, 0), op2)
5011
                  && rtx_equal_p (XEXP (op0, 1), op1))))
5012
        return op2;
5013
 
5014
      if (COMPARISON_P (op0) && ! side_effects_p (op0))
5015
        {
5016
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5017
                                        ? GET_MODE (XEXP (op0, 1))
5018
                                        : GET_MODE (XEXP (op0, 0)));
5019
          rtx temp;
5020
 
5021
          /* Look for happy constants in op1 and op2.  */
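          /* E.g. with STORE_FLAG_VALUE of 1, (if_then_else (lt a b)
             (const_int 1) (const_int 0)) collapses to (lt a b) itself.  */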
5022
          if (CONST_INT_P (op1) && CONST_INT_P (op2))
5023
            {
5024
              HOST_WIDE_INT t = INTVAL (op1);
5025
              HOST_WIDE_INT f = INTVAL (op2);
5026
 
5027
              if (t == STORE_FLAG_VALUE && f == 0)
5028
                code = GET_CODE (op0);
5029
              else if (t == 0 && f == STORE_FLAG_VALUE)
5030
                {
5031
                  enum rtx_code tmp;
5032
                  tmp = reversed_comparison_code (op0, NULL_RTX);
5033
                  if (tmp == UNKNOWN)
5034
                    break;
5035
                  code = tmp;
5036
                }
5037
              else
5038
                break;
5039
 
5040
              return simplify_gen_relational (code, mode, cmp_mode,
5041
                                              XEXP (op0, 0), XEXP (op0, 1));
5042
            }
5043
 
5044
          if (cmp_mode == VOIDmode)
5045
            cmp_mode = op0_mode;
5046
          temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5047
                                                cmp_mode, XEXP (op0, 0),
5048
                                                XEXP (op0, 1));
5049
 
5050
          /* See if any simplifications were possible.  */
5051
          if (temp)
5052
            {
5053
              if (CONST_INT_P (temp))
5054
                return temp == const0_rtx ? op2 : op1;
5055
              else if (temp)
5056
                return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5057
            }
5058
        }
5059
      break;
5060
 
5061
    case VEC_MERGE:
5062
      gcc_assert (GET_MODE (op0) == mode);
5063
      gcc_assert (GET_MODE (op1) == mode);
5064
      gcc_assert (VECTOR_MODE_P (mode));
5065
      op2 = avoid_constant_pool_reference (op2);
5066
      if (CONST_INT_P (op2))
5067
        {
5068
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5069
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5070
          int mask = (1 << n_elts) - 1;
5071
 
5072
          if (!(INTVAL (op2) & mask))
5073
            return op1;
5074
          if ((INTVAL (op2) & mask) == mask)
5075
            return op0;
5076
 
5077
          op0 = avoid_constant_pool_reference (op0);
5078
          op1 = avoid_constant_pool_reference (op1);
5079
          if (GET_CODE (op0) == CONST_VECTOR
5080
              && GET_CODE (op1) == CONST_VECTOR)
5081
            {
5082
              rtvec v = rtvec_alloc (n_elts);
5083
              unsigned int i;
5084
 
5085
              for (i = 0; i < n_elts; i++)
5086
                RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5087
                                    ? CONST_VECTOR_ELT (op0, i)
5088
                                    : CONST_VECTOR_ELT (op1, i));
5089
              return gen_rtx_CONST_VECTOR (mode, v);
5090
            }
5091
        }
5092
      break;
5093
 
5094
    default:
5095
      gcc_unreachable ();
5096
    }
5097
 
5098
  return 0;
5099
}
5100
 
5101
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5102
   or CONST_VECTOR,
5103
   returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5104
 
5105
   Works by unpacking OP into a collection of 8-bit values
5106
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
5107
   and then repacking them again for OUTERMODE.  */
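/* E.g. on a little-endian target, the SImode subreg at byte 0 of the
   DFmode constant 1.0 selects the low four bytes of the IEEE double
   image 0x3ff0000000000000 and repacks them as (const_int 0).  */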
5108
 
5109
static rtx
5110
simplify_immed_subreg (enum machine_mode outermode, rtx op,
5111
                       enum machine_mode innermode, unsigned int byte)
5112
{
5113
  /* We support up to 512-bit values (for V8DFmode).  */
5114
  enum {
5115
    max_bitsize = 512,
5116
    value_bit = 8,
5117
    value_mask = (1 << value_bit) - 1
5118
  };
5119
  unsigned char value[max_bitsize / value_bit];
5120
  int value_start;
5121
  int i;
5122
  int elem;
5123
 
5124
  int num_elem;
5125
  rtx * elems;
5126
  int elem_bitsize;
5127
  rtx result_s;
5128
  rtvec result_v = NULL;
5129
  enum mode_class outer_class;
5130
  enum machine_mode outer_submode;
5131
 
5132
  /* Some ports misuse CCmode.  */
5133
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5134
    return op;
5135
 
5136
  /* We have no way to represent a complex constant at the rtl level.  */
5137
  if (COMPLEX_MODE_P (outermode))
5138
    return NULL_RTX;
5139
 
5140
  /* Unpack the value.  */
5141
 
5142
  if (GET_CODE (op) == CONST_VECTOR)
5143
    {
5144
      num_elem = CONST_VECTOR_NUNITS (op);
5145
      elems = &CONST_VECTOR_ELT (op, 0);
5146
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5147
    }
5148
  else
5149
    {
5150
      num_elem = 1;
5151
      elems = &op;
5152
      elem_bitsize = max_bitsize;
5153
    }
5154
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
5155
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
5156
  /* I don't know how to handle endianness of sub-units.  */
5157
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5158
 
5159
  for (elem = 0; elem < num_elem; elem++)
5160
    {
5161
      unsigned char * vp;
5162
      rtx el = elems[elem];
5163
 
5164
      /* Vectors are kept in target memory order.  (This is probably
5165
         a mistake.)  */
5166
      {
5167
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5168
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5169
                          / BITS_PER_UNIT);
5170
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5171
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5172
        unsigned bytele = (subword_byte % UNITS_PER_WORD
5173
                         + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5174
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5175
      }
5176
 
5177
      switch (GET_CODE (el))
5178
        {
5179
        case CONST_INT:
5180
          for (i = 0;
5181
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5182
               i += value_bit)
5183
            *vp++ = INTVAL (el) >> i;
5184
          /* CONST_INTs are always logically sign-extended.  */
5185
          for (; i < elem_bitsize; i += value_bit)
5186
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
5187
          break;
5188
 
5189
        case CONST_DOUBLE:
5190
          if (GET_MODE (el) == VOIDmode)
5191
            {
5192
              /* If this triggers, someone should have generated a
5193
                 CONST_INT instead.  */
5194
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5195
 
5196
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5197
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
5198
              while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
5199
                {
5200
                  *vp++
5201
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5202
                  i += value_bit;
5203
                }
5204
              /* It shouldn't matter what's done here, so fill it with
5205
                 zero.  */
5206
              for (; i < elem_bitsize; i += value_bit)
5207
                *vp++ = 0;
5208
            }
5209
          else
5210
            {
5211
              long tmp[max_bitsize / 32];
5212
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5213
 
5214
              gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5215
              gcc_assert (bitsize <= elem_bitsize);
5216
              gcc_assert (bitsize % value_bit == 0);
5217
 
5218
              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5219
                              GET_MODE (el));
5220
 
5221
              /* real_to_target produces its result in words affected by
5222
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
5223
                 and use WORDS_BIG_ENDIAN instead; see the documentation
5224
                 of SUBREG in rtl.texi.  */
5225
              for (i = 0; i < bitsize; i += value_bit)
5226
                {
5227
                  int ibase;
5228
                  if (WORDS_BIG_ENDIAN)
5229
                    ibase = bitsize - 1 - i;
5230
                  else
5231
                    ibase = i;
5232
                  *vp++ = tmp[ibase / 32] >> i % 32;
5233
                }
5234
 
5235
              /* It shouldn't matter what's done here, so fill it with
5236
                 zero.  */
5237
              for (; i < elem_bitsize; i += value_bit)
5238
                *vp++ = 0;
5239
            }
5240
          break;
5241
 
5242
        case CONST_FIXED:
5243
          if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5244
            {
5245
              for (i = 0; i < elem_bitsize; i += value_bit)
5246
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5247
            }
5248
          else
5249
            {
5250
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5251
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5252
              for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5253
                   i += value_bit)
5254
                *vp++ = CONST_FIXED_VALUE_HIGH (el)
5255
                        >> (i - HOST_BITS_PER_WIDE_INT);
5256
              for (; i < elem_bitsize; i += value_bit)
5257
                *vp++ = 0;
5258
            }
5259
          break;
5260
 
5261
        default:
5262
          gcc_unreachable ();
5263
        }
5264
    }
5265
 
5266
  /* Now, pick the right byte to start with.  */
5267
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
5268
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
5269
     will already have offset 0.  */
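  /* E.g. on a big-endian target with 32-bit words, the SImode subreg at
     byte 4 of a DImode value renumbers BYTE to 0: the second memory word
     holds the least significant half.  */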
5270
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5271
    {
5272
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5273
                        - byte);
5274
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5275
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5276
      byte = (subword_byte % UNITS_PER_WORD
5277
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5278
    }
5279
 
5280
  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
5281
     so if it's become negative it will instead be very large.)  */
5282
  gcc_assert (byte < GET_MODE_SIZE (innermode));
5283
 
5284
  /* Convert from bytes to chunks of size value_bit.  */
5285
  value_start = byte * (BITS_PER_UNIT / value_bit);
5286
 
5287
  /* Re-pack the value.  */
5288
 
5289
  if (VECTOR_MODE_P (outermode))
5290
    {
5291
      num_elem = GET_MODE_NUNITS (outermode);
5292
      result_v = rtvec_alloc (num_elem);
5293
      elems = &RTVEC_ELT (result_v, 0);
5294
      outer_submode = GET_MODE_INNER (outermode);
5295
    }
5296
  else
5297
    {
5298
      num_elem = 1;
5299
      elems = &result_s;
5300
      outer_submode = outermode;
5301
    }
5302
 
5303
  outer_class = GET_MODE_CLASS (outer_submode);
5304
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5305
 
5306
  gcc_assert (elem_bitsize % value_bit == 0);
5307
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5308
 
5309
  for (elem = 0; elem < num_elem; elem++)
5310
    {
5311
      unsigned char *vp;
5312
 
5313
      /* Vectors are stored in target memory order.  (This is probably
5314
         a mistake.)  */
5315
      {
5316
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5317
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5318
                          / BITS_PER_UNIT);
5319
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5320
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5321
        unsigned bytele = (subword_byte % UNITS_PER_WORD
5322
                         + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5323
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5324
      }
5325
 
5326
      switch (outer_class)
5327
        {
5328
        case MODE_INT:
5329
        case MODE_PARTIAL_INT:
5330
          {
5331
            unsigned HOST_WIDE_INT hi = 0, lo = 0;
5332
 
5333
            for (i = 0;
5334
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5335
                 i += value_bit)
5336
              lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5337
            for (; i < elem_bitsize; i += value_bit)
5338
              hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5339
                     << (i - HOST_BITS_PER_WIDE_INT);
5340
 
5341
            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
5342
               know why.  */
5343
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5344
              elems[elem] = gen_int_mode (lo, outer_submode);
5345
            else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5346
              elems[elem] = immed_double_const (lo, hi, outer_submode);
5347
            else
5348
              return NULL_RTX;
5349
          }
5350
          break;
5351
 
5352
        case MODE_FLOAT:
5353
        case MODE_DECIMAL_FLOAT:
5354
          {
5355
            REAL_VALUE_TYPE r;
5356
            long tmp[max_bitsize / 32];
5357
 
5358
            /* real_from_target wants its input in words affected by
5359
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
5360
               and use WORDS_BIG_ENDIAN instead; see the documentation
5361
               of SUBREG in rtl.texi.  */
5362
            for (i = 0; i < max_bitsize / 32; i++)
5363
              tmp[i] = 0;
5364
            for (i = 0; i < elem_bitsize; i += value_bit)
5365
              {
5366
                int ibase;
5367
                if (WORDS_BIG_ENDIAN)
5368
                  ibase = elem_bitsize - 1 - i;
5369
                else
5370
                  ibase = i;
5371
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5372
              }
5373
 
5374
            real_from_target (&r, tmp, outer_submode);
5375
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5376
          }
5377
          break;
5378
 
5379
        case MODE_FRACT:
5380
        case MODE_UFRACT:
5381
        case MODE_ACCUM:
5382
        case MODE_UACCUM:
5383
          {
5384
            FIXED_VALUE_TYPE f;
5385
            f.data.low = 0;
5386
            f.data.high = 0;
5387
            f.mode = outer_submode;
5388
 
5389
            for (i = 0;
5390
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5391
                 i += value_bit)
5392
              f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5393
            for (; i < elem_bitsize; i += value_bit)
5394
              f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5395
                             << (i - HOST_BITS_PER_WIDE_INT));
5396
 
5397
            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5398
          }
5399
          break;
5400
 
5401
        default:
5402
          gcc_unreachable ();
5403
        }
5404
    }
5405
  if (VECTOR_MODE_P (outermode))
5406
    return gen_rtx_CONST_VECTOR (outermode, result_v);
5407
  else
5408
    return result_s;
5409
}
5410
 
5411
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5412
   Return 0 if no simplifications are possible.  */
5413
rtx
5414
simplify_subreg (enum machine_mode outermode, rtx op,
5415
                 enum machine_mode innermode, unsigned int byte)
5416
{
5417
  /* Little bit of sanity checking.  */
5418
  gcc_assert (innermode != VOIDmode);
5419
  gcc_assert (outermode != VOIDmode);
5420
  gcc_assert (innermode != BLKmode);
5421
  gcc_assert (outermode != BLKmode);
5422
 
5423
  gcc_assert (GET_MODE (op) == innermode
5424
              || GET_MODE (op) == VOIDmode);
5425
 
5426
  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5427
  gcc_assert (byte < GET_MODE_SIZE (innermode));
5428
 
5429
  if (outermode == innermode && !byte)
5430
    return op;
5431
 
5432
  if (CONST_INT_P (op)
5433
      || GET_CODE (op) == CONST_DOUBLE
5434
      || GET_CODE (op) == CONST_FIXED
5435
      || GET_CODE (op) == CONST_VECTOR)
5436
    return simplify_immed_subreg (outermode, op, innermode, byte);
5437
 
5438
  /* Changing mode twice with SUBREG => just change it once,
5439
     or not at all if changing back to op's starting mode.  */
5440
  if (GET_CODE (op) == SUBREG)
5441
    {
5442
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5443
      int final_offset = byte + SUBREG_BYTE (op);
5444
      rtx newx;
5445
 
5446
      if (outermode == innermostmode
5447
          && byte == 0 && SUBREG_BYTE (op) == 0)
5448
        return SUBREG_REG (op);
5449
 
5450
      /* The SUBREG_BYTE represents the offset, as if the value were stored
5451
         in memory.  An irritating exception is a paradoxical subreg, where
5452
         we define SUBREG_BYTE to be 0.  On big endian machines, this
5453
         value should be negative.  For a moment, undo this exception.  */
5454
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5455
        {
5456
          int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5457
          if (WORDS_BIG_ENDIAN)
5458
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5459
          if (BYTES_BIG_ENDIAN)
5460
            final_offset += difference % UNITS_PER_WORD;
5461
        }
5462
      if (SUBREG_BYTE (op) == 0
5463
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5464
        {
5465
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5466
          if (WORDS_BIG_ENDIAN)
5467
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5468
          if (BYTES_BIG_ENDIAN)
5469
            final_offset += difference % UNITS_PER_WORD;
5470
        }
5471
 
5472
      /* See whether resulting subreg will be paradoxical.  */
5473
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5474
        {
5475
          /* In nonparadoxical subregs we can't handle negative offsets.  */
5476
          if (final_offset < 0)
5477
            return NULL_RTX;
5478
          /* Bail out in case resulting subreg would be incorrect.  */
5479
          if (final_offset % GET_MODE_SIZE (outermode)
5480
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5481
            return NULL_RTX;
5482
        }
5483
      else
5484
        {
5485
          int offset = 0;
5486
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5487
 
5488
          /* In a paradoxical subreg, see if we are still looking at the
5489
             lower part.  If so, our SUBREG_BYTE will be 0.  */
5490
          if (WORDS_BIG_ENDIAN)
5491
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5492
          if (BYTES_BIG_ENDIAN)
5493
            offset += difference % UNITS_PER_WORD;
5494
          if (offset == final_offset)
5495
            final_offset = 0;
5496
          else
5497
            return NULL_RTX;
5498
        }
5499
 
5500
      /* Recurse for further possible simplifications.  */
5501
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5502
                              final_offset);
5503
      if (newx)
5504
        return newx;
5505
      if (validate_subreg (outermode, innermostmode,
5506
                           SUBREG_REG (op), final_offset))
5507
        {
5508
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5509
          if (SUBREG_PROMOTED_VAR_P (op)
5510
              && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5511
              && GET_MODE_CLASS (outermode) == MODE_INT
5512
              && IN_RANGE (GET_MODE_SIZE (outermode),
5513
                           GET_MODE_SIZE (innermode),
5514
                           GET_MODE_SIZE (innermostmode))
5515
              && subreg_lowpart_p (newx))
5516
            {
5517
              SUBREG_PROMOTED_VAR_P (newx) = 1;
5518
              SUBREG_PROMOTED_UNSIGNED_SET
5519
                (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5520
            }
5521
          return newx;
5522
        }
5523
      return NULL_RTX;
5524
    }
5525
 
5526
  /* Merge implicit and explicit truncations.  */
5527
 
5528
  if (GET_CODE (op) == TRUNCATE
5529
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5530
      && subreg_lowpart_offset (outermode, innermode) == byte)
5531
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5532
                               GET_MODE (XEXP (op, 0)));
5533
 
5534
  /* SUBREG of a hard register => just change the register number
5535
     and/or mode.  If the hard register is not valid in that mode,
5536
     suppress this simplification.  If the hard register is the stack,
5537
     frame, or argument pointer, leave this as a SUBREG.  */
5538
 
5539
  if (REG_P (op) && HARD_REGISTER_P (op))
5540
    {
5541
      unsigned int regno, final_regno;
5542
 
5543
      regno = REGNO (op);
5544
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5545
      if (HARD_REGISTER_NUM_P (final_regno))
5546
        {
5547
          rtx x;
5548
          int final_offset = byte;
5549
 
5550
          /* Adjust offset for paradoxical subregs.  */
5551
          if (byte == 0
5552
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5553
            {
5554
              int difference = (GET_MODE_SIZE (innermode)
5555
                                - GET_MODE_SIZE (outermode));
5556
              if (WORDS_BIG_ENDIAN)
5557
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5558
              if (BYTES_BIG_ENDIAN)
5559
                final_offset += difference % UNITS_PER_WORD;
5560
            }
5561
 
5562
          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5563
 
5564
          /* Propagate original regno.  We don't have any way to specify
5565
             the offset inside the original regno, so do this only for the
5566
             lowpart.  The information is used only by alias analysis, which
5567
             cannot grok partial registers anyway.  */
5568
 
5569
          if (subreg_lowpart_offset (outermode, innermode) == byte)
5570
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5571
          return x;
5572
        }
5573
    }
5574
 
5575
  /* If we have a SUBREG of a register that we are replacing and we are
5576
     replacing it with a MEM, make a new MEM and try replacing the
5577
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
5578
     or if we would be widening it.  */
5579
 
5580
  if (MEM_P (op)
5581
      && ! mode_dependent_address_p (XEXP (op, 0))
5582
      /* Allow splitting of volatile memory references in case we don't
5583
         have an instruction to move the whole thing.  */
5584
      && (! MEM_VOLATILE_P (op)
5585
          || ! have_insn_for (SET, innermode))
5586
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5587
    return adjust_address_nv (op, outermode, byte);
5588
 
5589
  /* Handle complex values represented as CONCAT
5590
     of real and imaginary part.  */
5591
  if (GET_CODE (op) == CONCAT)
5592
    {
5593
      unsigned int part_size, final_offset;
5594
      rtx part, res;
5595
 
5596
      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5597
      if (byte < part_size)
5598
        {
5599
          part = XEXP (op, 0);
5600
          final_offset = byte;
5601
        }
5602
      else
5603
        {
5604
          part = XEXP (op, 1);
5605
          final_offset = byte - part_size;
5606
        }
5607
 
5608
      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5609
        return NULL_RTX;
5610
 
5611
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5612
      if (res)
5613
        return res;
5614
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5615
        return gen_rtx_SUBREG (outermode, part, final_offset);
5616
      return NULL_RTX;
5617
    }
5618
 
5619
  /* Optimize SUBREG truncations of zero and sign extended values.  */
5620
  if ((GET_CODE (op) == ZERO_EXTEND
5621
       || GET_CODE (op) == SIGN_EXTEND)
5622
      && SCALAR_INT_MODE_P (innermode)
5623
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
5624
    {
5625
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5626
 
5627
      /* If we're requesting the lowpart of a zero or sign extension,
5628
         there are three possibilities.  If the outermode is the same
5629
         as the origmode, we can omit both the extension and the subreg.
5630
         If the outermode is not larger than the origmode, we can apply
5631
         the truncation without the extension.  Finally, if the outermode
5632
         is larger than the origmode, but both are integer modes, we
5633
         can just extend to the appropriate mode.  */
5634
      if (bitpos == 0)
5635
        {
5636
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5637
          if (outermode == origmode)
5638
            return XEXP (op, 0);
5639
          if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
5640
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5641
                                        subreg_lowpart_offset (outermode,
5642
                                                               origmode));
5643
          if (SCALAR_INT_MODE_P (outermode))
5644
            return simplify_gen_unary (GET_CODE (op), outermode,
5645
                                       XEXP (op, 0), origmode);
5646
        }
5647
 
5648
      /* A SUBREG resulting from a zero extension may fold to zero if
5649
         it extracts higher bits than the ZERO_EXTEND's source provides.  */
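      /* E.g. (subreg:QI (zero_extend:SI (x:QI)) 1) on a little-endian
         target selects bits 8-15, which the extension zeroed.  */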
5650
      if (GET_CODE (op) == ZERO_EXTEND
5651
          && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5652
        return CONST0_RTX (outermode);
5653
    }
5654
 
5655
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5656
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5657
     the outer subreg is effectively a truncation to the original mode.  */
5658
  if ((GET_CODE (op) == LSHIFTRT
5659
       || GET_CODE (op) == ASHIFTRT)
5660
      && SCALAR_INT_MODE_P (outermode)
5661
      && SCALAR_INT_MODE_P (innermode)
5662
      /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5663
         to avoid the possibility that an outer LSHIFTRT shifts by more
5664
         than the sign extension's sign_bit_copies and introduces zeros
5665
         into the high bits of the result.  */
5666
      && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
5667
      && CONST_INT_P (XEXP (op, 1))
5668
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5669
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5670
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5671
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
5672
    return simplify_gen_binary (ASHIFTRT, outermode,
5673
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5674
 
5675
  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5676
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5677
     the outer subreg is effectively a truncation to the original mode.  */
5678
  if ((GET_CODE (op) == LSHIFTRT
5679
       || GET_CODE (op) == ASHIFTRT)
5680
      && SCALAR_INT_MODE_P (outermode)
5681
      && SCALAR_INT_MODE_P (innermode)
5682
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5683
      && CONST_INT_P (XEXP (op, 1))
5684
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5685
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5686
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5687
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
5688
    return simplify_gen_binary (LSHIFTRT, outermode,
5689
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5690
 
5691
  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5692
     (ashift:QI (x:QI) C), where C is a suitable small constant and
5693
     the outer subreg is effectively a truncation to the original mode.  */
5694
  if (GET_CODE (op) == ASHIFT
5695
      && SCALAR_INT_MODE_P (outermode)
5696
      && SCALAR_INT_MODE_P (innermode)
5697
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5698
      && CONST_INT_P (XEXP (op, 1))
5699
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5700
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5701
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5702
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5703
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
5704
    return simplify_gen_binary (ASHIFT, outermode,
5705
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5706
 
5707
  /* Recognize a word extraction from a multi-word subreg.  */
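  /* E.g. on a little-endian target with 32-bit words,
     (subreg:SI (lshiftrt:DI x 32) 0) becomes (subreg:SI x 4),
     a direct extraction of the high word.  */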
5708
  if ((GET_CODE (op) == LSHIFTRT
5709
       || GET_CODE (op) == ASHIFTRT)
5710
      && SCALAR_INT_MODE_P (innermode)
5711
      && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
5712
      && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
5713
      && CONST_INT_P (XEXP (op, 1))
5714
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
5715
      && INTVAL (XEXP (op, 1)) >= 0
5716
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
5717
      && byte == subreg_lowpart_offset (outermode, innermode))
5718
    {
5719
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5720
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5721
                                  (WORDS_BIG_ENDIAN
5722
                                   ? byte - shifted_bytes
5723
                                   : byte + shifted_bytes));
5724
    }
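  /* For instance, on a hypothetical 32-bit little-endian target
     (BITS_PER_WORD == 32 and !WORDS_BIG_ENDIAN), extracting the low
     word of a doubleword shifted right by a whole word selects the
     high word directly:

       (subreg:SI (lshiftrt:DI (reg:DI r) (const_int 32)) 0)
         ==> (subreg:SI (reg:DI r) 4)  */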

  /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
     and try replacing the SUBREG and shift with it.  Don't do this if
     the MEM has a mode-dependent address or if we would be widening it.  */

  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (innermode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && byte == subreg_lowpart_offset (outermode, innermode)
      && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), outermode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }
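  /* E.g., on the same hypothetical little-endian target, and provided
     ADDR is not mode-dependent and the MEM is not volatile:

       (subreg:SI (lshiftrt:DI (mem:DI addr) (const_int 32)) 0)

     becomes, via adjust_address_nv, roughly

       (mem:SI (plus addr (const_int 4)))

     i.e. a narrow load of just the four bytes the shift selects.  */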

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
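/* A minimal usage sketch (X is hypothetical -- any DImode rtx a caller
   already holds): request the lowpart word of X in SImode:

     rtx lowpart = simplify_gen_subreg (SImode, x, DImode,
                                        subreg_lowpart_offset (SImode,
                                                               DImode));

   Constant and register operands usually fold outright through
   simplify_subreg above; the gen_rtx_SUBREG path only runs when OP
   does not simplify but the requested SUBREG is still valid.  A NULL
   return means no equivalent rtx could be formed at all.  */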

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which do not depend on pass-specific state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;
}
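/* Sketch of the intended calling convention (X is a hypothetical
   local): try the fold and keep the original expression when nothing
   simplifies, since a NULL return signals "no simplification found",
   not an error:

     rtx folded = simplify_rtx (x);
     if (folded != NULL_RTX)
       x = folded;  */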
