/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
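
/* For example, on a typical host with a 64-bit HOST_WIDE_INT,
   HWI_SIGN_EXTEND (0x8000000000000000) yields (HOST_WIDE_INT) -1 since
   the low word is negative when viewed as signed, while
   HWI_SIGN_EXTEND (5) yields 0.  */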

static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
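
/* For instance, in QImode the most negative value is -128; negating it
   gives 128, which does not fit, so gen_int_mode truncates the result
   back to (const_int -128).  */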

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
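
/* For example, in HImode (16 bits) the sign bit is 0x8000, so this
   returns true for the canonical (const_int -32768): masking by the
   mode width reduces it to 0x8000, the most significant bit.  */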

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
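
/* As an illustration, simplify_gen_binary (PLUS, SImode, const1_rtx,
   (reg R)) first tries constant folding (which fails here) and then
   commutes the operands, producing (plus (reg R) (const_int 1)).  */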

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
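
/* Illustration (with a made-up pool label): if x is
   (mem (symbol_ref LC0)) and the pool entry for LC0 holds the DFmode
   constant 3.14, this returns the corresponding (const_double ...) so
   that later folding can use the value directly.  */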

/* Return true if X is a MEM referencing the constant pool.  */

bool
constant_pool_reference_p (rtx x)
{
  return avoid_constant_pool_reference (x) != x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
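
/* For instance, replacing (reg R) with (const_int 3) in
   (plus (reg R) (const_int 4)) rebuilds the PLUS through
   simplify_gen_binary, which folds it to (const_int 7).  */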

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.   */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);


      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);


      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
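      /* For example: (not (ior A B)) becomes (and (not A) (not B)),
         and (not (and A B)) becomes (ior (not A) (not B)).  */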

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }
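
      /* E.g. (neg (plus (reg A) (const_int 12))) becomes
         (minus (const_int -12) (reg A)), and with a non-constant B,
         (neg (plus A B)) becomes (minus (neg A) B).  */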

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
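
      /* Illustration: in SImode with STORE_FLAG_VALUE == 1,
         (neg (lt X 0)) becomes (ashiftrt X 31), which is -1 when X is
         negative and 0 otherwise.  */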
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes; we just have to apply a different definition of
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                 GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
                                                            0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /*  (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /*  (float_extend (float_extend x)) is (float_extend x)

          (float_extend (float x)) is (float x) assuming that double
          rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
    case PARITY:
      /* (pop* (zero_extend <X>)) = (pop* <X>) */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (code, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
      {
        if (!VECTOR_MODE_P (GET_MODE (op)))
          gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
        else
          gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                (GET_MODE (op)));
      }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
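
  /* For example, (vec_duplicate:V4SI (const_int 1)) folds here to
     (const_vector:V4SI [1 1 1 1]).  */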

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
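          /* E.g. arg0 == 12 (binary 1100): arg0 & -arg0 == 4,
             exact_log2 (4) == 2, so val == 3, the 1-based position of
             the lowest set bit.  */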
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
        case SS_NEG:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }
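
  /* For instance, folding (fix:SI (const_double 1e30)) saturates to the
     signed upper bound 0x7fffffff here, while NaN inputs fold to
     const0_rtx, matching the middle-end's constant-folding convention.  */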

  return NULL_RTX;
}

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
1397
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
1398
      if (GET_CODE (op0) == code)
1399
        {
1400
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1401
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1402
        }
1403
 
1404
      /* "a op (b op c)" becomes "(b op c) op a".  */
1405
      if (! swap_commutative_operands_p (op1, op0))
1406
        return simplify_gen_binary (code, mode, op1, op0);
1407
 
1408
      tem = op0;
1409
      op0 = op1;
1410
      op1 = tem;
1411
    }
1412
 
1413
  if (GET_CODE (op0) == code)
1414
    {
1415
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
1416
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1417
        {
1418
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1419
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1420
        }
1421
 
1422
      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
1423
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1424
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1425
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1426
      if (tem != 0)
1427
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1428
 
1429
      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
1430
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1431
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1432
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1433
      if (tem != 0)
1434
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1435
    }
1436
 
1437
  return 0;
1438
}
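
/* Worked example: for (plus (plus (reg A) (const_int 3)) (const_int 4)),
   the "(a op b) op c" -> "a op (b op c)" attempt folds the two
   constants, yielding (plus (reg A) (const_int 7)).  */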


/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}

/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;
1501
 
1502
      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
1503
         transformations are safe even for IEEE.  */
1504
      if (GET_CODE (op0) == NEG)
1505
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1506
      else if (GET_CODE (op1) == NEG)
1507
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1508
 
1509
      /* (~a) + 1 -> -a */
1510
      if (INTEGRAL_MODE_P (mode)
1511
          && GET_CODE (op0) == NOT
1512
          && trueop1 == const1_rtx)
1513
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
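
      /* Editorial example (illustrative only): this is the two's
         complement identity -a == ~a + 1, so in SImode
         (plus (not x) (const_int 1)) becomes (neg x).  */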

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
          && GET_CODE (op1) == CONST_INT)
        return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
               && GET_CODE (op0) == CONST_INT)
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }
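
      /* Editorial example (illustrative only): for x + (x << 2) the
         coefficients 1 and 4 sum to 5, giving (mult x (const_int 5)),
         which is kept only if rtx_cost says the MULT is no more
         expensive than the original PLUS.  */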

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
          && flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case COMPARE:
#ifdef HAVE_cc0
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
         using cc0, in which case we want to leave it as a COMPARE
         so we can distinguish it from a register-register-copy.

         In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
           || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && trueop1 == CONST0_RTX (mode))
        return op0;
#endif

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
            if (REG_P (xop00) && REG_P (xop10)
                && GET_MODE (xop00) == GET_MODE (xop10)
                && REGNO (xop00) == REGNO (xop10)
                && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
              return xop00;
        }
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -funsafe-math-optimizations.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (GET_CODE (op1) == CONST_INT
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }
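
      /* Editorial example (illustrative only): with a constant mask,
         (minus x (and x (const_int 15))) becomes
         (and x (const_int -16)), i.e. x rounded down to a multiple
         of 16.  */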

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
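
      /* Editorial example (illustrative only): x * 8 has
         exact_log2 (8) == 3 and so becomes (ashift x (const_int 3)).  */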

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && (GET_MODE (trueop1) == VOIDmode
              || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case IOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (GET_CODE (op1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
        return op1;

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && GET_CODE (XEXP (opleft, 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
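
      /* Editorial example (illustrative only): in SImode,
         (ior (ashift a (const_int 24)) (lshiftrt a (const_int 8)))
         has 24 + 8 == 32 == GET_MODE_BITSIZE (SImode) and so becomes
         (rotate a (const_int 24)).  */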

      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and X C1) C2), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (GET_CODE (op1) == CONST_INT
          && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
        return simplify_gen_binary (IOR, mode,
                                    simplify_gen_binary
                                          (AND, mode, XEXP (op0, 0),
                                           GEN_INT (INTVAL (XEXP (op0, 1))
                                                    & ~INTVAL (op1))),
                                    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (XEXP (op0, 0), mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case XOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
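
      /* Editorial example (illustrative only): in QImode the sign bit
         is 0x80, so (xor x (const_int -128)) and
         (plus x (const_int -128)) agree, the carry out of the top bit
         being discarded.  */
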
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode, op0, op1),
                                     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */

      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
         comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
         is (lt foo (const_int 0)), so we can perform the above
         simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && GET_CODE (op0) == LSHIFTRT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
              == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      /* If we are turning off bits already known off in OP0, we need
         not do an AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
        return op0;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & INTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ~INTVAL (trueop1)
          && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
                      == INTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
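
      /* Editorial example (illustrative only): unsigned x / 16 has
         exact_log2 (16) == 4 and so becomes
         (lshiftrt x (const_int 4)).  */
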
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0.  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -funsafe-math-optimizations.  */
              if (flag_unsafe_math_optimizations
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
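
          /* Editorial example (illustrative only): with
             -funsafe-math-optimizations, x / 4.0 becomes x * 0.25.
             The reciprocal 1/4.0 happens to be exact; for a divisor
             such as 3.0 it would round, which is why this is guarded
             by the unsafe-math flag.  */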
        }
      else
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode))
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            return rtl_hooks.gen_lowpart_no_emit (mode, op0);
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && exact_log2 (INTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
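
      /* Editorial example (illustrative only): unsigned x % 8 becomes
         (and x (const_int 7)), the remainder being just the low three
         bits.  */
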
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;

    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
          && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;
      break;

    case ASHIFT:
    case SS_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      break;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && GET_CODE (trueop1) == CONST_INT
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < (HOST_WIDE_INT)width)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
              && zero_val == GET_MODE_BITSIZE (imode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, imode,
                                            XEXP (op0, 0), const0_rtx);
        }
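
      /* Editorial note (illustrative only): when CLZ is defined to
         return the mode bitsize for a zero input, e.g. 32 in SImode,
         (lshiftrt (clz x) (const_int 5)) is 1 exactly when x == 0,
         hence (eq x (const_int 0)) given STORE_FLAG_VALUE == 1.  */
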
      break;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
              == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));
        }
      else
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            {
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
              unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
              for (i = 0; i < n_elts; i++)
                {
                  rtx x = XVECEXP (trueop1, 0, i);

                  gcc_assert (GET_CODE (x) == CONST_INT);
                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                       INTVAL (x));
                }

              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }

      if (XVECLEN (trueop1, 0) == 1
          && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
          && GET_CODE (trueop0) == VEC_CONCAT)
        {
          rtx vec = trueop0;
          int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

          /* Try to find the element in the VEC_CONCAT.  */
          while (GET_MODE (vec) != mode
                 && GET_CODE (vec) == VEC_CONCAT)
            {
              HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
              if (offset < vec_size)
                vec = XEXP (vec, 0);
              else
                {
                  offset -= vec_size;
                  vec = XEXP (vec, 1);
                }
              vec = avoid_constant_pool_reference (vec);
            }

          if (GET_MODE (vec) == mode)
            return vec;
        }

      return 0;
    case VEC_CONCAT:
      {
        enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                      ? GET_MODE (trueop0)
                                      : GET_MODE_INNER (mode));
        enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                      ? GET_MODE (trueop1)
                                      : GET_MODE_INNER (mode));

        gcc_assert (VECTOR_MODE_P (mode));
        gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    == GET_MODE_SIZE (mode));

        if (VECTOR_MODE_P (op0_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op0_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op0_mode);

        if (VECTOR_MODE_P (op1_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op1_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op1_mode);

        if ((GET_CODE (trueop0) == CONST_VECTOR
             || GET_CODE (trueop0) == CONST_INT
             || GET_CODE (trueop0) == CONST_DOUBLE)
            && (GET_CODE (trueop1) == CONST_VECTOR
                || GET_CODE (trueop1) == CONST_INT
                || GET_CODE (trueop1) == CONST_DOUBLE))
          {
            int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
            unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
            rtvec v = rtvec_alloc (n_elts);
            unsigned int i;
            unsigned in_n_elts = 1;

            if (VECTOR_MODE_P (op0_mode))
              in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
            for (i = 0; i < n_elts; i++)
              {
                if (i < in_n_elts)
                  {
                    if (!VECTOR_MODE_P (op0_mode))
                      RTVEC_ELT (v, i) = trueop0;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                  }
                else
                  {
                    if (!VECTOR_MODE_P (op1_mode))
                      RTVEC_ELT (v, i) = trueop1;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                           i - in_n_elts);
                  }
              }

            return gen_rtx_CONST_VECTOR (mode, v);
          }
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}

rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
                                 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (op0, i),
                                             CONST_VECTOR_ELT (op1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
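
  /* Editorial note (illustrative only): adding two V4SImode
     CONST_VECTORs folds each of the four SImode lanes independently
     through the recursive simplify_binary_operation call above.  */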
2709
 
2710
  if (VECTOR_MODE_P (mode)
2711
      && code == VEC_CONCAT
2712
      && CONSTANT_P (op0) && CONSTANT_P (op1))
2713
    {
2714
      unsigned n_elts = GET_MODE_NUNITS (mode);
2715
      rtvec v = rtvec_alloc (n_elts);
2716
 
2717
      gcc_assert (n_elts >= 2);
2718
      if (n_elts == 2)
2719
        {
2720
          gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2721
          gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2722
 
2723
          RTVEC_ELT (v, 0) = op0;
2724
          RTVEC_ELT (v, 1) = op1;
2725
        }
2726
      else
2727
        {
2728
          unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2729
          unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2730
          unsigned i;
2731
 
2732
          gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2733
          gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2734
          gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2735
 
2736
          for (i = 0; i < op0_n_elts; ++i)
2737
            RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2738
          for (i = 0; i < op1_n_elts; ++i)
2739
            RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2740
        }
2741
 
2742
      return gen_rtx_CONST_VECTOR (mode, v);
2743
    }
2744
 
2745
  if (SCALAR_FLOAT_MODE_P (mode)
2746
      && GET_CODE (op0) == CONST_DOUBLE
2747
      && GET_CODE (op1) == CONST_DOUBLE
2748
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2749
    {
2750
      if (code == AND
2751
          || code == IOR
2752
          || code == XOR)
2753
        {
2754
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              switch (code)
              {
              case AND:
                tmp0[i] &= tmp1[i];
                break;
              case IOR:
                tmp0[i] |= tmp1[i];
                break;
              case XOR:
                tmp0[i] ^= tmp1[i];
                break;
              default:
                gcc_unreachable ();
              }
            }
           real_from_target (&r, tmp0, mode);
           return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value, result;
          bool inexact;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
          real_convert (&f0, mode, &f0);
          real_convert (&f1, mode, &f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }
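          /* Illustrative example: with -ftrapping-math, (minus:DF Inf Inf)
             is deliberately left unfolded by the checks above, since at
             run time it would raise an invalid-operation exception.  */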

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          inexact = real_arithmetic (&value, rtx_to_tree_code (code),
                                     &f0, &f1);
          real_convert (&result, mode, &value);

          /* Don't constant fold this floating point operation if
             the result has overflowed and flag_trapping_math is set.  */

          if (flag_trapping_math
              && MODE_HAS_INFINITIES (mode)
              && REAL_VALUE_ISINF (result)
              && !REAL_VALUE_ISINF (f0)
              && !REAL_VALUE_ISINF (f1))
            /* Overflow plus exception.  */
            return 0;

          /* Don't constant fold this floating point operation if the
             result may depend upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */

          if ((flag_rounding_math
               || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
                   && !flag_unsafe_math_optimizations))
              && (inexact || !real_identical (&result, &value)))
            return NULL_RTX;

          return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
        }
    }
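  /* Illustrative example of the rounding-mode rule above: 1.0/3.0 is
     inexact in any binary format, so with -frounding-math it is left
     unfolded because its value depends on the run-time rounding mode.  */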

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (op0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
      else
        l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (op1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
      else
        l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;
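          /* add_double sums the two low words and propagates any carry
             out of the low word into the sum of the high words, giving
             the full double-word result.  */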

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
         zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
        {
          arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
          arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

          arg0s = arg0;
          if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            arg0s |= ((HOST_WIDE_INT) (-1) << width);

          arg1s = arg1;
          if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            arg1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      else
        {
          arg0s = arg0;
          arg1s = arg1;
        }
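      /* Illustrative example: for width == 8 and INTVAL (op0) == -1,
         ARG0 is masked to 0xff (255) while ARG0S is sign-extended back
         to -1, so unsigned and signed cases can be folded separately.  */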

      /* Compute the value of the arithmetic.  */

      switch (code)
        {
        case PLUS:
          val = arg0s + arg1s;
          break;

        case MINUS:
          val = arg0s - arg1s;
          break;

        case MULT:
          val = arg0s * arg1s;
          break;

        case DIV:
          if (arg1s == 0
              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = arg0s / arg1s;
          break;

        case MOD:
          if (arg1s == 0
              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = arg0s % arg1s;
          break;

        case UDIV:
          if (arg1 == 0
              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = (unsigned HOST_WIDE_INT) arg0 / arg1;
          break;

        case UMOD:
          if (arg1 == 0
              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = (unsigned HOST_WIDE_INT) arg0 % arg1;
          break;

        case AND:
          val = arg0 & arg1;
          break;

        case IOR:
          val = arg0 | arg1;
          break;

        case XOR:
          val = arg0 ^ arg1;
          break;

        case LSHIFTRT:
        case ASHIFT:
        case ASHIFTRT:
          /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
             the value is in range.  We can't return any old value for
             out-of-range arguments because either the middle-end (via
             shift_truncation_mask) or the back-end might be relying on
             target-specific knowledge.  Nor can we rely on
             shift_truncation_mask, since the shift might not be part of an
             ashlM3, lshrM3 or ashrM3 instruction.  */
          if (SHIFT_COUNT_TRUNCATED)
            arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
          else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
            return 0;

          val = (code == ASHIFT
                 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
                 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

          /* Sign-extend the result for arithmetic right shifts.  */
          if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
            val |= ((HOST_WIDE_INT) -1) << (width - arg1);
          break;
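          /* Illustrative example: an 8-bit ASHIFTRT of 0x80 (-128) by 1
             first shifts to 0x40 and then ORs in the sign copies, giving
             0xc0, which gen_int_mode turns into -64.  */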

        case ROTATERT:
          if (arg1 < 0)
            return 0;

          arg1 %= width;
          val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
                 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
          break;

        case ROTATE:
          if (arg1 < 0)
            return 0;

          arg1 %= width;
          val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
                 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
          break;
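          /* Illustrative example: an 8-bit ROTATE of 0x81 by 1 computes
             (0x81 << 1) | (0x81 >> 7) = 0x103, which gen_int_mode
             truncates to 0x03.  */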

        case COMPARE:
          /* Do nothing here.  */
          return 0;

        case SMIN:
          val = arg0s <= arg1s ? arg0s : arg1s;
          break;

        case UMIN:
          val = ((unsigned HOST_WIDE_INT) arg0
                 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
          break;

        case SMAX:
          val = arg0s > arg1s ? arg0s : arg1s;
          break;

        case UMAX:
          val = ((unsigned HOST_WIDE_INT) arg0
                 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
        case SS_ASHIFT:
          /* ??? There are simplifications that can be done.  */
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}



/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};
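/* Illustrative example: simplify_plus_minus (MINUS, mode, a, (plus b c))
   starts from the entries {a, +} and {(plus b c), -} and expands the
   second into {b, -} and {c, -}, i.e. the flattened form a - b - c.  */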

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;
  int result;

  result = (commutative_operand_precedence (d2->op)
            - commutative_operand_precedence (d1->op));
  if (result)
    return result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (d1->op) && REG_P (d2->op))
    return REGNO (d1->op) - REGNO (d2->op);
  else
    return 0;
}

static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == 7)
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              input_ops++;
              changed = 1;
              canonicalized |= this_neg;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              canonicalized = 1;
              break;

            case CONST:
              if (n_ops < 7
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != 7)
                {
                  ops[n_ops].op = constm1_rtx;
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            case CONST_INT:
              n_constants++;
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
         the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
        {
          lhs = gen_rtx_NEG (mode, ops[0].op);
          rhs = ops[1].op;
        }
      else if (ops[0].neg)
        {
          lhs = ops[1].op;
          rhs = ops[0].op;
        }
      else
        {
          lhs = ops[0].op;
          rhs = ops[1].op;
        }

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for an eight-element array.  */
      for (i = 1; i < n_ops; i++)
        {
          struct simplify_plus_minus_op_data save;
          j = i - 1;
          if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
            continue;

          canonicalized = 1;
          save = ops[i];
          do
            ops[j + 1] = ops[j];
          while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
          ops[j + 1] = save;
        }

      /* This is only useful the first time through.  */
      if (!canonicalized)
        return NULL_RTX;

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
        for (j = i - 1; j >= 0; j--)
          {
            rtx lhs = ops[j].op, rhs = ops[i].op;
            int lneg = ops[j].neg, rneg = ops[i].neg;

            if (lhs != 0 && rhs != 0)
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
                    && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
                  {
                    rtx tem_lhs, tem_rhs;

                    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
                    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
                    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);

                    if (tem && !CONSTANT_P (tem))
                      tem = gen_rtx_CONST (GET_MODE (tem), tem);
                  }
                else
                  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (GET_CODE (tem) == CONST_INT && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                  }
              }
          }

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
        if (ops[j].op)
          {
            ops[i] = ops[j];
            i++;
          }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && GET_CODE (ops[1].op) == CONST_INT
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return result;
}

/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
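/* For example, (const (plus (symbol_ref X) (const_int 4))) satisfies
   plus_minus_operand_p, since the CONST wraps a PLUS of two constant
   subexpressions.  */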

/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands
   must not both be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
                               enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
          {
            REAL_VALUE_TYPE val;
            val = FLOAT_STORE_FLAG_VALUE (mode);
            return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
          }
#else
          return NULL_RTX;
#endif
        }
      if (VECTOR_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
          {
            int i, units;
            rtvec v;

            rtx val = VECTOR_STORE_FLAG_VALUE (mode);
            if (val == NULL_RTX)
              return NULL_RTX;
            if (val == const1_rtx)
              return CONST1_RTX (mode);

            units = GET_MODE_NUNITS (mode);
            v = rtvec_alloc (units);
            for (i = 0; i < units; i++)
              RTVEC_ELT (v, i) = val;
            return gen_rtx_raw_CONST_VECTOR (mode, v);
          }
#else
          return NULL_RTX;
#endif
        }

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_relational_operation (code, mode, VOIDmode,
                                          XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
                                          trueop0, trueop1);
}

/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
                                 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (GET_CODE (op1) == CONST_INT)
    {
      if (INTVAL (op1) == 0 && COMPARISON_P (op0))
        {
          /* If op0 is a comparison, extract the comparison arguments
             from it.  */
          if (code == NE)
            {
              if (GET_MODE (op0) == mode)
                return simplify_rtx (op0);
              else
                return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                                XEXP (op0, 0), XEXP (op0, 1));
            }
          else if (code == EQ)
            {
              enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
              if (new_code != UNKNOWN)
                return simplify_gen_relational (new_code, mode, VOIDmode,
                                                XEXP (op0, 0), XEXP (op0, 1));
            }
        }
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);

      c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
                               cmp_mode, op1, c);
      return simplify_gen_relational (code, mode, cmp_mode, x, c);
    }
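  /* Illustrative example: (eq (plus x (const_int 3)) (const_int 7))
     becomes (eq x (const_int 4)) via the transformation above.  */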

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
           ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
           : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && (GET_CODE (op1) == CONST_INT
          || GET_CODE (op1) == CONST_DOUBLE)
      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
          || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, cmp_mode,
                                                         XEXP (op0, 1), op1));
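  /* Illustrative example of the last rule: (eq (xor x (const_int 5))
     (const_int 3)) becomes (eq x (const_int 6)), since 5 ^ 3 == 6.  */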

  return NULL_RTX;
}

/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
                                     enum machine_mode mode,
                                     rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
              || (GET_MODE (op0) == VOIDmode
                  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
        mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
        mode = GET_MODE (op1);
      else
        return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and
     then simplify a comparison of that with zero.  If A and B are both
     either a register or a CONST_INT, this can't help; testing for these
     cases will prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
            && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
           && GET_CODE (trueop1) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
           && (GET_CODE (trueop0) == CONST_DOUBLE
               || GET_CODE (trueop0) == CONST_INT)
           && (GET_CODE (trueop1) == CONST_DOUBLE
               || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((HOST_WIDE_INT) (-1) << width);

          if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      /* Optimize comparisons with upper and lower bounds.  */
      if (SCALAR_INT_MODE_P (mode)
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
        {
          rtx mmin, mmax;
          int sign;

          if (code == GEU
              || code == LEU
              || code == GTU
              || code == LTU)
            sign = 0;
          else
            sign = 1;

          get_mode_bounds (mode, sign, mode, &mmin, &mmax);

          tem = NULL_RTX;
          switch (code)
            {
            case GEU:
            case GE:
              /* x >= min is always true.  */
              if (rtx_equal_p (trueop1, mmin))
                tem = const_true_rtx;
              break;

            case LEU:
            case LE:
              /* x <= max is always true.  */
              if (rtx_equal_p (trueop1, mmax))
                tem = const_true_rtx;
              break;

            case GTU:
            case GT:
              /* x > max is always false.  */
              if (rtx_equal_p (trueop1, mmax))
                tem = const0_rtx;
              break;

            case LTU:
            case LT:
              /* x < min is always false.  */
              if (rtx_equal_p (trueop1, mmin))
                tem = const0_rtx;
              break;

            default:
              break;
            }
          if (tem == const0_rtx
              || tem == const_true_rtx)
            return tem;
        }
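      /* Illustrative example of the bounds checks above: (ltu x
         (const_int 0)) folds to const0_rtx, since no unsigned value is
         less than the unsigned minimum.  */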

      switch (code)
        {
        case EQ:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const0_rtx;
          break;

        case NE:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const_true_rtx;
          break;

        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (trueop1 == CONST0_RTX (mode)
              && !HONOR_SNANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                {
                  if (INTEGRAL_MODE_P (mode)
                      && (issue_strict_overflow_warning
                          (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                    warning (OPT_Wstrict_overflow,
                             ("assuming signed overflow does not occur when "
                              "assuming abs (x) < 0 is false"));
                  return const0_rtx;
                }
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (trueop1 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                {
                  if (INTEGRAL_MODE_P (mode)
                      && (issue_strict_overflow_warning
                          (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                    warning (OPT_Wstrict_overflow,
                             ("assuming signed overflow does not occur when "
                              "assuming abs (x) >= 0 is true"));
                  return const_true_rtx;
                }
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          if (trueop1 == CONST0_RTX (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const_true_rtx;
            }
          break;

        default:
          break;
        }

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}

/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && GET_CODE (op2) == CONST_INT
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant */
          HOST_WIDE_INT val = INTVAL (op0);

          if (BITS_BIG_ENDIAN)
            val >>= (GET_MODE_BITSIZE (op0_mode)
                     - INTVAL (op2) - INTVAL (op1));
          else
            val >>= INTVAL (op2);

          if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
            {
              /* First zero-extend.  */
              val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
                val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
            }

          /* Clear the bits that don't belong in our mode,
             unless they and our sign bit are all one.
             So we get either a reasonable negative value or a reasonable
             unsigned value for this mode.  */
          if (width < HOST_BITS_PER_WIDE_INT
              && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
                  != ((HOST_WIDE_INT) (-1) << (width - 1))))
            val &= ((HOST_WIDE_INT) 1 << width) - 1;

          return gen_int_mode (val, mode);
        }
      break;
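      /* Illustrative example, assuming !BITS_BIG_ENDIAN: extracting 4 bits
         at position 1 from (const_int 0x5a) shifts to 0x2d and masks to
         0xd, so the ZERO_EXTRACT folds to (const_int 13).  */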

    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          /* Look for happy constants in op1 and op2.  */
          if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return simplify_gen_relational (code, mode, cmp_mode,
                                              XEXP (op0, 0), XEXP (op0, 1));
            }
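          /* Illustrative example: when STORE_FLAG_VALUE == 1,
             (if_then_else (lt a b) (const_int 1) (const_int 0)) collapses
             to the comparison (lt a b) itself.  */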

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
                                                cmp_mode, XEXP (op0, 0),
                                                XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp)
            {
              if (GET_CODE (temp) == CONST_INT)
                return temp == const0_rtx ? op2 : op1;
              else if (temp)
                return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
            }
        }
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          int mask = (1 << n_elts) - 1;

          if (!(INTVAL (op2) & mask))
            return op1;
          if ((INTVAL (op2) & mask) == mask)
            return op0;

          op0 = avoid_constant_pool_reference (op0);
          op1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (op0) == CONST_VECTOR
              && GET_CODE (op1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
                                    ? CONST_VECTOR_ELT (op0, i)
                                    : CONST_VECTOR_ELT (op1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      break;
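      /* Illustrative example: for a four-element vector, a VEC_MERGE mask
         of 0x5 (binary 0101) in op2 selects elements 0 and 2 from op0 and
         elements 1 and 3 from op1.  */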
4200
 
4201
    default:
4202
      gcc_unreachable ();
4203
    }
4204
 
4205
  return 0;
4206
}
4207
 
4208
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4209
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4210
 
4211
   Works by unpacking OP into a collection of 8-bit values
4212
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
4213
   and then repacking them again for OUTERMODE.  */
4214
 
4215
static rtx
4216
simplify_immed_subreg (enum machine_mode outermode, rtx op,
4217
                       enum machine_mode innermode, unsigned int byte)
4218
{
4219
  /* We support up to 512-bit values (for V8DFmode).  */
4220
  enum {
4221
    max_bitsize = 512,
4222
    value_bit = 8,
4223
    value_mask = (1 << value_bit) - 1
4224
  };
4225
  unsigned char value[max_bitsize / value_bit];
4226
  int value_start;
4227
  int i;
4228
  int elem;
4229
 
4230
  int num_elem;
4231
  rtx * elems;
4232
  int elem_bitsize;
4233
  rtx result_s;
4234
  rtvec result_v = NULL;
4235
  enum mode_class outer_class;
4236
  enum machine_mode outer_submode;
4237
 
4238
  /* Some ports misuse CCmode.  */
4239
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4240
    return op;
4241
 
4242
  /* We have no way to represent a complex constant at the rtl level.  */
4243
  if (COMPLEX_MODE_P (outermode))
4244
    return NULL_RTX;
4245
 
4246
  /* Unpack the value.  */
4247
 
4248
  if (GET_CODE (op) == CONST_VECTOR)
4249
    {
4250
      num_elem = CONST_VECTOR_NUNITS (op);
4251
      elems = &CONST_VECTOR_ELT (op, 0);
4252
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4253
    }
4254
  else
4255
    {
4256
      num_elem = 1;
4257
      elems = &op;
4258
      elem_bitsize = max_bitsize;
4259
    }
4260
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
4261
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
4262
  /* I don't know how to handle endianness of sub-units.  */
4263
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4264
 
4265
  for (elem = 0; elem < num_elem; elem++)
4266
    {
4267
      unsigned char * vp;
4268
      rtx el = elems[elem];
4269
 
4270
      /* Vectors are kept in target memory order.  (This is probably
4271
         a mistake.)  */
4272
      {
4273
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4274
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4275
                          / BITS_PER_UNIT);
4276
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4277
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4278
        unsigned bytele = (subword_byte % UNITS_PER_WORD
4279
                         + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4280
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4281
      }
4282
 
4283
      switch (GET_CODE (el))
4284
        {
4285
        case CONST_INT:
4286
          for (i = 0;
4287
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4288
               i += value_bit)
4289
            *vp++ = INTVAL (el) >> i;
4290
          /* CONST_INTs are always logically sign-extended.  */
4291
          for (; i < elem_bitsize; i += value_bit)
4292
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
4293
          break;
4294
 
4295
        case CONST_DOUBLE:
4296
          if (GET_MODE (el) == VOIDmode)
4297
            {
4298
              /* If this triggers, someone should have generated a
4299
                 CONST_INT instead.  */
4300
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4301
 
4302
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4303
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
4304
              while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4305
                {
4306
                  *vp++
4307
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4308
                  i += value_bit;
4309
                }
4310
              /* It shouldn't matter what's done here, so fill it with
4311
                 zero.  */
4312
              for (; i < elem_bitsize; i += value_bit)
4313
                *vp++ = 0;
4314
            }
4315
          else
4316
            {
4317
              long tmp[max_bitsize / 32];
4318
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4319
 
4320
              gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4321
              gcc_assert (bitsize <= elem_bitsize);
4322
              gcc_assert (bitsize % value_bit == 0);
4323
 
4324
              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4325
                              GET_MODE (el));
4326
 
4327
              /* real_to_target produces its result in words affected by
4328
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
4329
                 and use WORDS_BIG_ENDIAN instead; see the documentation
4330
                 of SUBREG in rtl.texi.  */
4331
              for (i = 0; i < bitsize; i += value_bit)
4332
                {
4333
                  int ibase;
4334
                  if (WORDS_BIG_ENDIAN)
4335
                    ibase = bitsize - 1 - i;
4336
                  else
4337
                    ibase = i;
4338
                  *vp++ = tmp[ibase / 32] >> i % 32;
4339
                }
4340
 
4341
              /* It shouldn't matter what's done here, so fill it with
4342
                 zero.  */
4343
              for (; i < elem_bitsize; i += value_bit)
4344
                *vp++ = 0;
4345
            }
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }
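  /* For example, with a 4-byte INNERMODE and a 1-byte OUTERMODE on a
     fully big-endian target, memory offset BYTE == 3 names the
     least-significant byte, so it is renumbered to 0 here.  */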

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                         + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
                     << (i - HOST_BITS_PER_WIDE_INT));

            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
              elems[elem] = immed_double_const (lo, hi, outer_submode);
            else
              return NULL_RTX;
          }
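          /* For example, with value_bit == 8, bytes 0x34 then 0x12
             (least-significant first) reassemble to lo == 0x1234, which
             gen_int_mode turns into (const_int 4660) for a 16-bit
             outer_submode.  */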
          break;

        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
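
/* Illustrative sketch (not part of the original file): a caller that
   wants the low byte of an SImode constant should ask for the lowpart
   offset appropriate to the target's endianness, e.g.

       unsigned int off = subreg_lowpart_offset (QImode, SImode);
       rtx low = simplify_subreg (QImode, GEN_INT (0x1234), SImode, off);

   which yields (const_int 0x34) on either endianness; passing byte 0
   directly picks the low byte only on a little-endian target.  */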

/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
         in memory.  An irritating exception is the paradoxical subreg,
         where we define SUBREG_BYTE to be 0; on big-endian machines the
         value would otherwise be negative.  For a moment, undo this
         exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case the resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
      return NULL_RTX;
    }
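  /* For instance, (subreg:QI (subreg:HI (reg:SI x) 0) 0) on a
     little-endian target folds to (subreg:QI (reg:SI x) 0) here.  */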

  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
             ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
                                        byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
        = regno + subreg_regno_offset (regno, innermode, byte, outermode);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (regno, innermode))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate the original regno.  We don't have any way to specify
             the offset inside the original regno, so do so only for the
             lowpart.  The information is used only by alias analysis,
             which cannot grok partial registers anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
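  /* E.g. (subreg:SI (reg:DI 0) 0) can become (reg:SI 0) outright,
     provided the target allows hard register 0 in both modes and
     subreg_regno_offset comes out 0 for this offset.  */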

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
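  /* For example, (subreg:QI (mem:SI (reg:SI p)) 3) becomes a QImode
     reference to the byte at p + 3 via adjust_address_nv.  */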

  /* Handle complex values represented as CONCAT
     of real and imaginary parts.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int inner_size, final_offset;
      rtx part, res;

      inner_size = GET_MODE_UNIT_SIZE (innermode);
      part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
      final_offset = byte % inner_size;
      if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
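  /* With 4-byte SFmode, (subreg:SF (concat:SC a b) 4) selects the
     imaginary half: PART is b and FINAL_OFFSET is 0, so the recursive
     call returns b itself.  */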

  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source provides.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
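  /* E.g. taking the lowpart (subreg:HI (zero_extend:SI (reg:HI x)) 0)
     on a little-endian target returns (reg:HI x): bitpos is 0 and
     outermode equals the original mode of x.  */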

  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
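  /* Concretely: on a little-endian target the lowpart
     (subreg:QI (ashift:SI (zero_extend:SI (reg:QI x)) (const_int 3)) 0)
     becomes (ashift:QI (reg:QI x) (const_int 3)), since the shift
     count 3 fits in QImode.  */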

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.


   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which do not depend on pass-specific state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass-dependent state to be provided to these
           routines and add simplifications based on the pass-dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification it needs to be added to four places (three for RTL
    simplification and one for tree simplification).  */

rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;
}
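
/* A minimal usage sketch (not part of the original file):

       rtx x = gen_rtx_PLUS (SImode, GEN_INT (2), reg);
       rtx y = simplify_rtx (x);

   Here swap_commutative_operands_p moves the constant last, so Y is
   (plus:SI (reg:SI ...) (const_int 2)); simplify_rtx returns NULL
   when it has nothing to do.  */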