/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
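/* Illustrative note: HWI_SIGN_EXTEND (5) is 0 and HWI_SIGN_EXTEND (-5)
   is -1, so the pair (low, HWI_SIGN_EXTEND (low)) represents LOW as a
   signed double-word value.  */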
 
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
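/* For example, negating (const_int -128) in QImode would give +128,
   which does not fit in the mode; gen_int_mode truncates it back, so
   the result is (const_int -128) again.  */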
 
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
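/* For example, in SImode this accepts only the sign-bit constant
   0x80000000, i.e. (unsigned HOST_WIDE_INT) 1 << 31; every other
   value yields false.  */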
 
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
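/* Usage sketch: given a side-effect-free operand X,
   simplify_gen_binary (PLUS, SImode, X, const0_rtx) returns X itself
   and simplify_gen_binary (MINUS, SImode, X, X) folds to
   (const_int 0); a new rtx is allocated only when nothing folds.  */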
 
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
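/* For example, a (mem:DF ...) whose address is a SYMBOL_REF into the
   constant pool comes back as the pooled CONST_DOUBLE itself; a
   narrower or offset access is recovered through simplify_subreg when
   possible.  */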
 
/* Return true if X is a MEM referencing the constant pool.  */

bool
constant_pool_reference_p (rtx x)
{
  return avoid_constant_pool_reference (x) != x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
 
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
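/* For example, substituting (const_int 4) for (reg R) in
   (plus:SI (reg R) (const_int 1)) rebuilds the PLUS via
   simplify_gen_binary, which folds the result to (const_int 5).  */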
 
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
 
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);


      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == -1
          && COMPARISON_P (op)
          && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      break;
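      /* For instance, the NOT rules above rewrite
         (not (plus X (const_int -1))) as (neg X), and
         (not (xor X (const_int 12))) as (xor X (const_int -13)),
         since ~12 == -13.  */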
 
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      break;
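      /* Concrete instance of the two shift rules above: in SImode,
         (neg (ashiftrt X (const_int 31))) becomes
         (lshiftrt X (const_int 31)); both yield 1 when the sign bit
         of X is set and 0 otherwise.  */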
 
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
 
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
      {
        if (!VECTOR_MODE_P (GET_MODE (op)))
          gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
        else
          gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                (GET_MODE (op)));
      }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
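  /* Worked examples for the CONST_INT cases above, in SImode:
     CLZ of (const_int 1) folds to 31, CTZ of (const_int 8) to 3,
     POPCOUNT of (const_int 7) to 3, and FFS of (const_int 0) to 0.  */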
 
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return NULL_RTX;
}
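/* For instance, with the saturating bounds above, (fix:SI) of the
   DFmode constant 1e10 folds to (const_int 2147483647), while
   (unsigned_fix:SI) of any NaN or negative input folds to
   (const_int 0).  */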
 
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
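/* Example of the canonicalization above:
   (plus (plus X (const_int 3)) Y) is rewritten as
   (plus (plus X Y) (const_int 3)), moving the constant outermost
   where later folds can combine it.  */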
 

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
 
static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
          && GET_CODE (op1) == CONST_INT)
        return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
               && GET_CODE (op0) == CONST_INT)
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
          && flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
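      /* Example of the distribution above:
         (plus (mult X (const_int 2)) X) has matching operands with
         coefficients 2 and 1, so it folds to (mult X (const_int 3))
         provided the multiply is no costlier than the original sum.  */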
 
    case COMPARE:
#ifdef HAVE_cc0
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
         using cc0, in which case we want to leave it as a COMPARE
         so we can distinguish it from a register-register-copy.

         In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
           || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && trueop1 == CONST0_RTX (mode))
        return op0;
#endif

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
            if (REG_P (xop00) && REG_P (xop10)
                && GET_MODE (xop00) == GET_MODE (xop10)
                && REGNO (xop00) == REGNO (xop10)
                && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
              return xop00;
        }
      break;
 
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -funsafe-math-optimizations.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;
 
1446
      /* See if this is something like X * C - X or vice versa or
1447
         if the multiplication is written as a shift.  If so, we can
1448
         distribute and make a new multiply, shift, or maybe just
1449
         have X (if C is 2 in the example above).  But don't make
1450
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (GET_CODE (op1) == CONST_INT
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
        return tem;

      /* Don't let a relocatable value get a negative coeff.  */
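      /* i.e. rewrite (minus x (const_int 4)) as (plus x (const_int -4)),
         so a symbolic operand keeps the canonical PLUS form.  */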
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
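      /* e.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)).  */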
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      else if (GET_CODE (trueop1) == CONST_DOUBLE
               && (GET_MODE (trueop1) == VOIDmode
                   || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
               && GET_MODE (op0) == mode
               && CONST_DOUBLE_LOW (trueop1) == 0
               && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case IOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case XOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
         return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
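      /* e.g. in SImode, (xor x (const_int 0x80000000)) is equivalent to
         (plus x (const_int 0x80000000)): adding the sign bit cannot
         carry into a lower bit, so both just flip the top bit.  */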
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      /* If we are turning off bits already known off in OP0, we need
         not do an AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
        return op0;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & INTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.  */
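      /* e.g. with M == 0xff: ((a & 0x1ff) + b) & 0xff -> (a + b) & 0xff,
         since the AND cannot change any bit below the mask, and
         ((a | 0x100) + b) & 0xff -> (a + b) & 0xff, since the IOR only
         sets bits above the mask.  */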
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ~INTVAL (trueop1)
          && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
                      == INTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0.  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -funsafe-math-optimizations.  */
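              /* e.g. x / 2.0 becomes x * 0.5.  The reciprocal is not
                 exactly representable for constants such as 3.0, in
                 which case the result can differ in rounding, hence
                 the flag.  */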
              if (flag_unsafe_math_optimizations
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode))
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            return rtl_hooks.gen_lowpart_no_emit (mode, op0);
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
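      /* e.g. (umod x (const_int 16)) becomes (and x (const_int 15)).  */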
      if (GET_CODE (trueop1) == CONST_INT
          && exact_log2 (INTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;

    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      /* Rotating ~0 always results in ~0.  */
      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
          && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;

      /* Fall through....  */

    case ASHIFT:
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      break;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
              == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));
        }
      else
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            {
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
              unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
              for (i = 0; i < n_elts; i++)
                {
                  rtx x = XVECEXP (trueop1, 0, i);

                  gcc_assert (GET_CODE (x) == CONST_INT);
                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                       INTVAL (x));
                }

              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      return 0;
    case VEC_CONCAT:
      {
        enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                      ? GET_MODE (trueop0)
                                      : GET_MODE_INNER (mode));
        enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                      ? GET_MODE (trueop1)
                                      : GET_MODE_INNER (mode));

        gcc_assert (VECTOR_MODE_P (mode));
        gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    == GET_MODE_SIZE (mode));

        if (VECTOR_MODE_P (op0_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op0_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op0_mode);

        if (VECTOR_MODE_P (op1_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op1_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op1_mode);
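        /* e.g. both (vec_concat:V4SI (reg:V2SI) (reg:V2SI)) and
           (vec_concat:V2SI (reg:SI) (reg:SI)) pass these checks: each
           operand is either a vector with the same inner mode or a
           scalar of the inner mode itself.  */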

        if ((GET_CODE (trueop0) == CONST_VECTOR
             || GET_CODE (trueop0) == CONST_INT
             || GET_CODE (trueop0) == CONST_DOUBLE)
            && (GET_CODE (trueop1) == CONST_VECTOR
                || GET_CODE (trueop1) == CONST_INT
                || GET_CODE (trueop1) == CONST_DOUBLE))
          {
            int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
            unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
            rtvec v = rtvec_alloc (n_elts);
            unsigned int i;
            unsigned in_n_elts = 1;

            if (VECTOR_MODE_P (op0_mode))
              in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
            for (i = 0; i < n_elts; i++)
              {
                if (i < in_n_elts)
                  {
                    if (!VECTOR_MODE_P (op0_mode))
                      RTVEC_ELT (v, i) = trueop0;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                  }
                else
                  {
                    if (!VECTOR_MODE_P (op1_mode))
                      RTVEC_ELT (v, i) = trueop1;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                           i - in_n_elts);
                  }
              }

            return gen_rtx_CONST_VECTOR (mode, v);
          }
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}

rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
                                 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (op0, i),
                                             CONST_VECTOR_ELT (op1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
        {
          gcc_assert (GET_CODE (op0) != CONST_VECTOR);
          gcc_assert (GET_CODE (op1) != CONST_VECTOR);

          RTVEC_ELT (v, 0) = op0;
          RTVEC_ELT (v, 1) = op1;
        }
      else
        {
          unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
          unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
          unsigned i;

          gcc_assert (GET_CODE (op0) == CONST_VECTOR);
          gcc_assert (GET_CODE (op1) == CONST_VECTOR);
          gcc_assert (op0_n_elts + op1_n_elts == n_elts);

          for (i = 0; i < op0_n_elts; ++i)
            RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
          for (i = 0; i < op1_n_elts; ++i)
            RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (op0) == CONST_DOUBLE
      && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              switch (code)
              {
              case AND:
                tmp0[i] &= tmp1[i];
                break;
              case IOR:
                tmp0[i] |= tmp1[i];
                break;
              case XOR:
                tmp0[i] ^= tmp1[i];
                break;
              default:
                gcc_unreachable ();
              }
            }
           real_from_target (&r, tmp0, mode);
           return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value, result;
          bool inexact;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
          real_convert (&f0, mode, &f0);
          real_convert (&f1, mode, &f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          inexact = real_arithmetic (&value, rtx_to_tree_code (code),
                                     &f0, &f1);
          real_convert (&result, mode, &value);

          /* Don't constant fold this floating point operation if
             the result has overflowed and flag_trapping_math is set.  */

          if (flag_trapping_math
              && MODE_HAS_INFINITIES (mode)
              && REAL_VALUE_ISINF (result)
              && !REAL_VALUE_ISINF (f0)
              && !REAL_VALUE_ISINF (f1))
            /* Overflow plus exception.  */
            return 0;

          /* Don't constant fold this floating point operation if the
             result may depend upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */

          if ((flag_rounding_math
               || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
                   && !flag_unsafe_math_optimizations))
              && (inexact || !real_identical (&result, &value)))
            return NULL_RTX;

          return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
        }
    }

  /* We can fold some multi-word operations.  */
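  /* Each value is handled as a (low, high) pair of host words; e.g. on
     a host with 32-bit HOST_WIDE_INT, the DImode constant 0x123456789a
     has low == 0x3456789a and high == 0x12.  */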
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (op0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
      else
        l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (op1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
      else
        l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
         zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
        {
          arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
          arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

          arg0s = arg0;
          if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            arg0s |= ((HOST_WIDE_INT) (-1) << width);

          arg1s = arg1;
          if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            arg1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      else
        {
          arg0s = arg0;
          arg1s = arg1;
        }
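      /* e.g. for a QImode operand 0xff, arg0 is 255 while arg0s is -1.  */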

      /* Compute the value of the arithmetic.  */

      switch (code)
        {
        case PLUS:
          val = arg0s + arg1s;
          break;

        case MINUS:
          val = arg0s - arg1s;
          break;

        case MULT:
          val = arg0s * arg1s;
          break;
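        /* Signed division overflows for the most negative value
           divided by -1, so that case (like division by zero) is
           left unfolded below.  */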

        case DIV:
          if (arg1s == 0
              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = arg0s / arg1s;
          break;

        case MOD:
          if (arg1s == 0
              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = arg0s % arg1s;
          break;

        case UDIV:
          if (arg1 == 0
              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = (unsigned HOST_WIDE_INT) arg0 / arg1;
          break;

        case UMOD:
          if (arg1 == 0
              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = (unsigned HOST_WIDE_INT) arg0 % arg1;
          break;

        case AND:
          val = arg0 & arg1;
          break;

        case IOR:
          val = arg0 | arg1;
          break;

        case XOR:
          val = arg0 ^ arg1;
          break;

        case LSHIFTRT:
        case ASHIFT:
        case ASHIFTRT:
          /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
             the value is in range.  We can't return any old value for
             out-of-range arguments because either the middle-end (via
             shift_truncation_mask) or the back-end might be relying on
             target-specific knowledge.  Nor can we rely on
             shift_truncation_mask, since the shift might not be part of an
             ashlM3, lshrM3 or ashrM3 instruction.  */
          if (SHIFT_COUNT_TRUNCATED)
            arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
          else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
            return 0;

          val = (code == ASHIFT
                 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
                 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

          /* Sign-extend the result for arithmetic right shifts.  */
          if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
            val |= ((HOST_WIDE_INT) -1) << (width - arg1);
          break;
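        /* e.g. in QImode, rotating 0x96 right by 3 yields 0xd2, and
           rotating it left by 3 yields 0xb4.  */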

        case ROTATERT:
          if (arg1 < 0)
            return 0;

          arg1 %= width;
          val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
                 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
          break;

        case ROTATE:
          if (arg1 < 0)
            return 0;

          arg1 %= width;
          val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
                 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
          break;

        case COMPARE:
          /* Do nothing here.  */
          return 0;

        case SMIN:
          val = arg0s <= arg1s ? arg0s : arg1s;
          break;

        case UMIN:
          val = ((unsigned HOST_WIDE_INT) arg0
                 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
          break;

        case SMAX:
          val = arg0s > arg1s ? arg0s : arg1s;
          break;

        case UMAX:
          val = ((unsigned HOST_WIDE_INT) arg0
                 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}



/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */
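/* For example, (a - (b - c)) + (b - 2) flattens to the operand list
   a, -b, c, b, -2; the b terms cancel and the remaining operands are
   recombined into the equivalent of (a + c) - 2.  */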
2592
 
2593
struct simplify_plus_minus_op_data
2594
{
2595
  rtx op;
2596
  short neg;
2597
  short ix;
2598
};
2599
 
2600
static int
2601
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2602
{
2603
  const struct simplify_plus_minus_op_data *d1 = p1;
2604
  const struct simplify_plus_minus_op_data *d2 = p2;
2605
  int result;
2606
 
2607
  result = (commutative_operand_precedence (d2->op)
2608
            - commutative_operand_precedence (d1->op));
2609
  if (result)
2610
    return result;
2611
  return d1->ix - d2->ix;
2612
}
2613
 
2614
static rtx
2615
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2616
                     rtx op1, int force)
2617
{
2618
  struct simplify_plus_minus_op_data ops[8];
2619
  rtx result, tem;
2620
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2621
  int first, changed;
2622
  int i, j;
2623
 
2624
  memset (ops, 0, sizeof ops);
2625
 
2626
  /* Set up the two operands and then expand them until nothing has been
2627
     changed.  If we run out of room in our array, give up; this should
2628
     almost never happen.  */
2629
 
2630
  ops[0].op = op0;
2631
  ops[0].neg = 0;
2632
  ops[1].op = op1;
2633
  ops[1].neg = (code == MINUS);
2634
 
2635
  do
2636
    {
2637
      changed = 0;
2638
 
2639
      for (i = 0; i < n_ops; i++)
2640
        {
2641
          rtx this_op = ops[i].op;
2642
          int this_neg = ops[i].neg;
2643
          enum rtx_code this_code = GET_CODE (this_op);
2644
 
2645
          switch (this_code)
2646
            {
2647
            case PLUS:
2648
            case MINUS:
2649
              if (n_ops == 7)
2650
                return NULL_RTX;
2651
 
2652
              ops[n_ops].op = XEXP (this_op, 1);
2653
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2654
              n_ops++;
2655
 
2656
              ops[i].op = XEXP (this_op, 0);
2657
              input_ops++;
2658
              changed = 1;
2659
              break;
2660
 
2661
            case NEG:
2662
              ops[i].op = XEXP (this_op, 0);
2663
              ops[i].neg = ! this_neg;
2664
              changed = 1;
2665
              break;
2666
 
2667
            case CONST:
2668
              if (n_ops < 7
2669
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
2670
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2671
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2672
                {
2673
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
2674
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2675
                  ops[n_ops].neg = this_neg;
2676
                  n_ops++;
2677
                  input_consts++;
2678
                  changed = 1;
2679
                }
2680
              break;
2681
 
2682
            case NOT:
2683
              /* ~a -> (-a - 1) */
2684
              if (n_ops != 7)
2685
                {
2686
                  ops[n_ops].op = constm1_rtx;
2687
                  ops[n_ops++].neg = this_neg;
2688
                  ops[i].op = XEXP (this_op, 0);
2689
                  ops[i].neg = !this_neg;
2690
                  changed = 1;
2691
                }
2692
              break;
2693
 
2694
            case CONST_INT:
2695
              if (this_neg)
2696
                {
2697
                  ops[i].op = neg_const_int (mode, this_op);
2698
                  ops[i].neg = 0;
2699
                  changed = 1;
2700
                }
2701
              break;
2702
 
2703
            default:
2704
              break;
2705
            }
2706
        }
2707
    }
2708
  while (changed);
2709
 
2710
  /* If we only have two operands, we can't do anything.  */
2711
  if (n_ops <= 2 && !force)
2712
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
        for (j = i + 1; j < n_ops; j++)
          {
            rtx lhs = ops[i].op, rhs = ops[j].op;
            int lneg = ops[i].neg, rneg = ops[j].neg;

            if (lhs != 0 && rhs != 0
                && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs)
                    /* Don't allow -x + -1 -> ~x simplifications in the
                       first pass.  This allows us the chance to combine
                       the -1 with other constants.  */
                    && ! (first
                          && GET_CODE (tem) == NOT
                          && XEXP (tem, 0) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (GET_CODE (tem) == CONST_INT && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                  }
              }
          }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      {
        ops[i] = ops[j];
        /* Stabilize sort.  */
        ops[i].ix = i;
        i++;
      }
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && GET_CODE (ops[1].op) == CONST_INT
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
          || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return result;
}
2858
 
2859
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
2860
static bool
2861
plus_minus_operand_p (rtx x)
2862
{
2863
  return GET_CODE (x) == PLUS
2864
         || GET_CODE (x) == MINUS
2865
         || (GET_CODE (x) == CONST
2866
             && GET_CODE (XEXP (x, 0)) == PLUS
2867
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2868
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2869
}
2870
 
2871
/* Like simplify_binary_operation except used for relational operators.
2872
   MODE is the mode of the result. If MODE is VOIDmode, both operands must
2873
   not also be VOIDmode.
2874
 
2875
   CMP_MODE specifies in which mode the comparison is done in, so it is
2876
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
2877
   the operands or, if both are VOIDmode, the operands are compared in
2878
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
                               enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
          {
            REAL_VALUE_TYPE val;
            val = FLOAT_STORE_FLAG_VALUE (mode);
            return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
          }
#else
          return NULL_RTX;
#endif
        }
      if (VECTOR_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
          {
            int i, units;
            rtvec v;

            rtx val = VECTOR_STORE_FLAG_VALUE (mode);
            if (val == NULL_RTX)
              return NULL_RTX;
            if (val == const1_rtx)
              return CONST1_RTX (mode);

            units = GET_MODE_NUNITS (mode);
            v = rtvec_alloc (units);
            for (i = 0; i < units; i++)
              RTVEC_ELT (v, i) = val;
            return gen_rtx_raw_CONST_VECTOR (mode, v);
          }
#else
          return NULL_RTX;
#endif
        }

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_relational_operation (code, mode, VOIDmode,
                                          XEXP (op0, 0), XEXP (op0, 1));

  if (mode == VOIDmode
      || GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
                                          trueop0, trueop1);
}
2956
 
2957
/* This part of simplify_relational_operation is only used when CMP_MODE
2958
   is not in class MODE_CC (i.e. it is a real comparison).
2959
 
2960
   MODE is the mode of the result, while CMP_MODE specifies in which
2961
   mode the comparison is done in, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
                                 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (GET_CODE (op1) == CONST_INT)
    {
      if (INTVAL (op1) == 0 && COMPARISON_P (op0))
        {
          /* If op0 is a comparison, extract the comparison arguments from it.  */
2974
          if (code == NE)
2975
            {
2976
              if (GET_MODE (op0) == mode)
2977
                return simplify_rtx (op0);
2978
              else
2979
                return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2980
                                                XEXP (op0, 0), XEXP (op0, 1));
2981
            }
2982
          else if (code == EQ)
2983
            {
2984
              enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2985
              if (new_code != UNKNOWN)
2986
                return simplify_gen_relational (new_code, mode, VOIDmode,
2987
                                                XEXP (op0, 0), XEXP (op0, 1));
2988
            }
2989
        }
2990
    }
2991
 
2992
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
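  /* For example, (eq:SI (plus:SI x (const_int 4)) (const_int 10))
     folds to (eq:SI x (const_int 6)); when op0 is a MINUS, the
     constant from op0 is instead added to op1.  */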
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);

      c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
                               cmp_mode, op1, c);
      return simplify_gen_relational (code, mode, cmp_mode, x, c);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
           ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
           : lowpart_subreg (mode, op0, cmp_mode);

  return NULL_RTX;
}

/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
                                     enum machine_mode mode,
                                     rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
              || (GET_MODE (op0) == VOIDmode
                  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
        mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
        mode = GET_MODE (op1);
      else
        return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored; but we cannot do it even for
     signed comparisons for languages such as Java, so test flag_wrapv.  */
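  /* Concretely: with 32-bit operands, 1 LTU 0xffffffff is true, but
     1 - 0xffffffff wraps to 2, and the signed test 2 < 0 is false,
     which is why the unsigned codes are rejected below.  */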

  if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
            && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this for == or != if tem is a nonzero address.  */
      && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
           && GET_CODE (trueop1) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
           && (GET_CODE (trueop0) == CONST_DOUBLE
               || GET_CODE (trueop0) == CONST_INT)
           && (GET_CODE (trueop1) == CONST_DOUBLE
               || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
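      /* E.g. for QImode (width 8), the values -1 and 0xff both
         normalize to l0u == 0xff and l0s == -1, so the unsigned tests
         below use the masked value and the signed tests the
         sign-extended one.  */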
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((HOST_WIDE_INT) (-1) << width);

          if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      /* Optimize comparisons with upper and lower bounds.  */
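      /* E.g. (geu:SI x (const_int 0)) is always true because zero is
         the unsigned minimum, and (gtu:SI x (const_int -1)) is always
         false because all-ones is the unsigned maximum.  */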
      if (SCALAR_INT_MODE_P (mode)
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
        {
          rtx mmin, mmax;
          int sign;

          if (code == GEU
              || code == LEU
              || code == GTU
              || code == LTU)
            sign = 0;
          else
            sign = 1;

          get_mode_bounds (mode, sign, mode, &mmin, &mmax);

          tem = NULL_RTX;
          switch (code)
            {
            case GEU:
            case GE:
              /* x >= min is always true.  */
              if (rtx_equal_p (trueop1, mmin))
                tem = const_true_rtx;
              break;

            case LEU:
            case LE:
              /* x <= max is always true.  */
              if (rtx_equal_p (trueop1, mmax))
                tem = const_true_rtx;
              break;

            case GTU:
            case GT:
              /* x > max is always false.  */
              if (rtx_equal_p (trueop1, mmax))
                tem = const0_rtx;
              break;

            case LTU:
            case LT:
              /* x < min is always false.  */
              if (rtx_equal_p (trueop1, mmin))
                tem = const0_rtx;
              break;

            default:
              break;
            }
          if (tem == const0_rtx
              || tem == const_true_rtx)
            return tem;
        }

      switch (code)
        {
        case EQ:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const0_rtx;
          break;

        case NE:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const_true_rtx;
          break;

        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (trueop1 == CONST0_RTX (mode)
              && !HONOR_SNANS (mode)
              && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (trueop1 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const_true_rtx;
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          if (trueop1 == CONST0_RTX (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const_true_rtx;
            }
          break;

        default:
          break;
        }

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}

/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && GET_CODE (op2) == CONST_INT
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant.  */
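          /* E.g. when BITS_BIG_ENDIAN is false,
             (zero_extract:SI (const_int 0x123) (const_int 4) (const_int 0))
             folds to (const_int 3), and (sign_extract:SI (const_int 0xc)
             (const_int 4) (const_int 0)) folds to (const_int -4), the
             sign bit of the field being propagated.  */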
          HOST_WIDE_INT val = INTVAL (op0);

          if (BITS_BIG_ENDIAN)
            val >>= (GET_MODE_BITSIZE (op0_mode)
                     - INTVAL (op2) - INTVAL (op1));
          else
            val >>= INTVAL (op2);

          if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
            {
              /* First zero-extend.  */
              val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
                val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
            }

          /* Clear the bits that don't belong in our mode,
             unless they and our sign bit are all one.
             So we get either a reasonable negative value or a reasonable
             unsigned value for this mode.  */
          if (width < HOST_BITS_PER_WIDE_INT
              && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
                  != ((HOST_WIDE_INT) (-1) << (width - 1))))
            val &= ((HOST_WIDE_INT) 1 << width) - 1;

          return gen_int_mode (val, mode);
        }
      break;

    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          /* Look for happy constants in op1 and op2.  */
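          /* E.g. with STORE_FLAG_VALUE == 1, (if_then_else (lt a b)
             (const_int 1) (const_int 0)) becomes (lt a b), and with the
             arms swapped it becomes the reversed comparison (ge a b),
             provided the comparison is reversible.  */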
          if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return simplify_gen_relational (code, mode, cmp_mode,
                                              XEXP (op0, 0), XEXP (op0, 1));
            }

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
                                                cmp_mode, XEXP (op0, 0),
                                                XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp)
            {
              if (GET_CODE (temp) == CONST_INT)
                return temp == const0_rtx ? op2 : op1;
              else if (temp)
                return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
            }
        }
      break;

    case VEC_MERGE:
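      /* The mask in op2 selects, for each element, op0 if the
         corresponding bit is set and op1 otherwise; e.g. in V4SImode a
         mask of (const_int 5) takes elements 0 and 2 from op0 and
         elements 1 and 3 from op1.  */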
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          int mask = (1 << n_elts) - 1;

          if (!(INTVAL (op2) & mask))
            return op1;
          if ((INTVAL (op2) & mask) == mask)
            return op0;

          op0 = avoid_constant_pool_reference (op0);
          op1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (op0) == CONST_VECTOR
              && GET_CODE (op1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
                                    ? CONST_VECTOR_ELT (op0, i)
                                    : CONST_VECTOR_ELT (op1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}

/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
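
/* For example, simplify_immed_subreg (QImode, (const_int 0x12345678),
   SImode, 0) yields (const_int 0x78) on a little-endian target and
   (const_int 0x12) on a big-endian one, where byte 0 is the most
   significant byte.  */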

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                         + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;

        case CONST_DOUBLE:
          if (GET_MODE (el) == VOIDmode)
            {
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }
              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          else
            {
              long tmp[max_bitsize / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
              gcc_assert (bitsize <= elem_bitsize);
              gcc_assert (bitsize % value_bit == 0);

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                         + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
                     << (i - HOST_BITS_PER_WIDE_INT));

            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
              elems[elem] = immed_double_const (lo, hi, outer_submode);
            else
              return NULL_RTX;
          }
          break;

        case MODE_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}

/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
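  /* E.g. (subreg:SI (subreg:HI (reg:SI r) 0) 0) collapses back to
     (reg:SI r), while (subreg:QI (subreg:HI (reg:SI r) 0) 0) becomes
     a single (subreg:QI (reg:SI r) 0).  */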
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
         in memory.  The irritating exception is a paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big-endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case the resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
      return NULL_RTX;
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
             ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
                                        byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
        = regno + subreg_regno_offset (regno, innermode, byte, outermode);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (regno, innermode))
        {
          rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

          /* Propagate the original regno.  We don't have any way to
             specify the offset inside the original regno, so do so only
             for the lowpart.  The information is used only by alias
             analysis, which cannot grok partial registers anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int inner_size, final_offset;
      rtx part, res;

      inner_size = GET_MODE_UNIT_SIZE (innermode);
      part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
      final_offset = byte % inner_size;
      if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
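      /* E.g. on a little-endian target (so that BITPOS is 0 for byte 0),
         (subreg:QI (zero_extend:SI (reg:QI x)) 0) is just (reg:QI x),
         and (subreg:HI (zero_extend:SI (reg:QI x)) 0) becomes
         (zero_extend:HI (reg:QI x)).  */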
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }

  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
 
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.


   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which do not depend on pass-specific state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */

rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                             != VOIDmode)
                                            ? GET_MODE (XEXP (x, 0))
                                            : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;
}
