/* Functions to determine/estimate number of iterations of a loop.
2
   Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010
3
   Free Software Foundation, Inc.
4
 
5
This file is part of GCC.
6
 
7
GCC is free software; you can redistribute it and/or modify it
8
under the terms of the GNU General Public License as published by the
9
Free Software Foundation; either version 3, or (at your option) any
10
later version.
11
 
12
GCC is distributed in the hope that it will be useful, but WITHOUT
13
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15
for more details.
16
 
17
You should have received a copy of the GNU General Public License
18
along with GCC; see the file COPYING3.  If not see
19
<http://www.gnu.org/licenses/>.  */
20
 
21
#include "config.h"
22
#include "system.h"
23
#include "coretypes.h"
24
#include "tm.h"
25
#include "tree.h"
26
#include "tm_p.h"
27
#include "basic-block.h"
28
#include "output.h"
29
#include "tree-pretty-print.h"
30
#include "gimple-pretty-print.h"
31
#include "intl.h"
32
#include "tree-flow.h"
33
#include "tree-dump.h"
34
#include "cfgloop.h"
35
#include "tree-pass.h"
36
#include "ggc.h"
37
#include "tree-chrec.h"
38
#include "tree-scalar-evolution.h"
39
#include "tree-data-ref.h"
40
#include "params.h"
41
#include "flags.h"
42
#include "diagnostic-core.h"
43
#include "tree-inline.h"
44
#include "gmp.h"
45
 
46
#define SWAP(X, Y) do { affine_iv *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
47
 
48
/* The maximum number of dominator BBs we search for conditions
49
   of loop header copies we use for simplifying a conditional
50
   expression.  */
51
#define MAX_DOMINATORS_TO_WALK 8
52
 
53
/*
54
 
55
   Analysis of number of iterations of an affine exit test.
56
 
57
*/
58
 
59
/* Bounds on some value, BELOW <= X <= UP.  */
60
 
61
typedef struct
62
{
63
  mpz_t below, up;
64
} bounds;
65
 
66
 
67
/* Splits expression EXPR to a variable part VAR and constant OFFSET.  */
68
 
69
static void
70
split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
71
{
72
  tree type = TREE_TYPE (expr);
73
  tree op0, op1;
74
  double_int off;
75
  bool negate = false;
76
 
77
  *var = expr;
78
  mpz_set_ui (offset, 0);
79
 
80
  switch (TREE_CODE (expr))
81
    {
82
    case MINUS_EXPR:
83
      negate = true;
84
      /* Fallthru.  */
85
 
86
    case PLUS_EXPR:
87
    case POINTER_PLUS_EXPR:
88
      op0 = TREE_OPERAND (expr, 0);
89
      op1 = TREE_OPERAND (expr, 1);
90
 
91
      if (TREE_CODE (op1) != INTEGER_CST)
92
        break;
93
 
94
      *var = op0;
95
      /* Always sign extend the offset.  */
96
      off = tree_to_double_int (op1);
97
      off = double_int_sext (off, TYPE_PRECISION (type));
98
      mpz_set_double_int (offset, off, false);
99
      if (negate)
100
        mpz_neg (offset, offset);
101
      break;
102
 
103
    case INTEGER_CST:
104
      *var = build_int_cst_type (type, 0);
105
      off = tree_to_double_int (expr);
106
      mpz_set_double_int (offset, off, TYPE_UNSIGNED (type));
107
      break;
108
 
109
    default:
110
      break;
111
    }
112
}
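
/* A small illustrative sketch of the splitting above, assuming an SSA name
   i_3 of a 32-bit unsigned type:

     split_to_var_and_offset (i_3 + 7, &var, off)  -> var = i_3, off = 7
     split_to_var_and_offset (i_3 - 7, &var, off)  -> var = i_3, off = -7
     split_to_var_and_offset (42, &var, off)       -> var = 0,   off = 42

   The offset of a PLUS/MINUS is always sign extended, so "i_3 + 0xffffffff"
   yields off = -1 rather than 4294967295.  */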
113
 
114
/* Stores estimate on the minimum/maximum value of the expression VAR + OFF
115
   in TYPE to MIN and MAX.  */
116
 
117
static void
118
determine_value_range (tree type, tree var, mpz_t off,
119
                       mpz_t min, mpz_t max)
120
{
121
  /* If the expression is a constant, we know its value exactly.  */
122
  if (integer_zerop (var))
123
    {
124
      mpz_set (min, off);
125
      mpz_set (max, off);
126
      return;
127
    }
128
 
129
  /* If the computation may wrap, we know nothing about the value, except for
130
     the range of the type.  */
131
  get_type_static_bounds (type, min, max);
132
  if (!nowrap_type_p (type))
133
    return;
134
 
135
  /* Since the addition of OFF does not wrap, if OFF is positive, then we may
136
     add it to MIN, otherwise to MAX.  */
137
  if (mpz_sgn (off) < 0)
138
    mpz_add (max, max, off);
139
  else
140
    mpz_add (min, min, off);
141
}
142
 
143
/* Stores the bounds on the difference of the values of the expressions
144
   (var + X) and (var + Y), computed in TYPE, to BNDS.  */
145
 
146
static void
147
bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
148
                                    bounds *bnds)
149
{
150
  int rel = mpz_cmp (x, y);
151
  bool may_wrap = !nowrap_type_p (type);
152
  mpz_t m;
153
 
154
  /* If X == Y, then the expressions are always equal.
155
     If X > Y, there are the following possibilities:
156
       a) neither of var + X and var + Y overflow or underflow, or both of
157
          them do.  Then their difference is X - Y.
158
       b) var + X overflows, and var + Y does not.  Then the values of the
159
          expressions are var + X - M and var + Y, where M is the range of
160
          the type, and their difference is X - Y - M.
161
       c) var + Y underflows and var + X does not.  Their difference again
162
          is X - Y - M.
163
       Therefore, if the arithmetics in type does not overflow, then the
164
       bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y)
165
     Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
166
     (X - Y, X - Y + M).  */
167
 
168
  if (rel == 0)
169
    {
170
      mpz_set_ui (bnds->below, 0);
171
      mpz_set_ui (bnds->up, 0);
172
      return;
173
    }
174
 
175
  mpz_init (m);
176
  mpz_set_double_int (m, double_int_mask (TYPE_PRECISION (type)), true);
177
  mpz_add_ui (m, m, 1);
178
  mpz_sub (bnds->up, x, y);
179
  mpz_set (bnds->below, bnds->up);
180
 
181
  if (may_wrap)
182
    {
183
      if (rel > 0)
184
        mpz_sub (bnds->below, bnds->below, m);
185
      else
186
        mpz_add (bnds->up, bnds->up, m);
187
    }
188
 
189
  mpz_clear (m);
190
}
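
/* A worked example of the case analysis above, assuming an 8-bit type that
   may wrap (M = 256) and offsets X = 10, Y = 3: without wrapping the bounds
   are [7, 7]; since var + 10 may overflow while var + 3 does not, the
   difference can also be 7 - 256 = -249, giving [-249, 7].  With X = 3 and
   Y = 10 the roles are swapped and the bounds become [-7, 249].  */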
191
 
192
/* From condition C0 CMP C1 derives information regarding the
193
   difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
194
   and stores it to BNDS.  */
195
 
196
static void
197
refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
198
                           tree vary, mpz_t offy,
199
                           tree c0, enum tree_code cmp, tree c1,
200
                           bounds *bnds)
201
{
202
  tree varc0, varc1, tmp, ctype;
203
  mpz_t offc0, offc1, loffx, loffy, bnd;
204
  bool lbound = false;
205
  bool no_wrap = nowrap_type_p (type);
206
  bool x_ok, y_ok;
207
 
208
  switch (cmp)
209
    {
210
    case LT_EXPR:
211
    case LE_EXPR:
212
    case GT_EXPR:
213
    case GE_EXPR:
214
      STRIP_SIGN_NOPS (c0);
215
      STRIP_SIGN_NOPS (c1);
216
      ctype = TREE_TYPE (c0);
217
      if (!useless_type_conversion_p (ctype, type))
218
        return;
219
 
220
      break;
221
 
222
    case EQ_EXPR:
223
      /* We could derive quite precise information from EQ_EXPR, however, such
224
         a guard is unlikely to appear, so we do not bother with handling
225
         it.  */
226
      return;
227
 
228
    case NE_EXPR:
229
      /* NE_EXPR comparisons do not contain much of useful information, except for
230
         special case of comparing with the bounds of the type.  */
231
      if (TREE_CODE (c1) != INTEGER_CST
232
          || !INTEGRAL_TYPE_P (type))
233
        return;
234
 
235
      /* Ensure that the condition speaks about an expression in the same type
236
         as X and Y.  */
237
      ctype = TREE_TYPE (c0);
238
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
239
        return;
240
      c0 = fold_convert (type, c0);
241
      c1 = fold_convert (type, c1);
242
 
243
      if (TYPE_MIN_VALUE (type)
244
          && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
245
        {
246
          cmp = GT_EXPR;
247
          break;
248
        }
249
      if (TYPE_MAX_VALUE (type)
250
          && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
251
        {
252
          cmp = LT_EXPR;
253
          break;
254
        }
255
 
256
      return;
257
    default:
258
      return;
259
    }
260
 
261
  mpz_init (offc0);
262
  mpz_init (offc1);
263
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
264
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
265
 
266
  /* We are only interested in comparisons of expressions based on VARX and
267
     VARY.  TODO -- we might also be able to derive some bounds from
268
     expressions containing just one of the variables.  */
269
 
270
  if (operand_equal_p (varx, varc1, 0))
271
    {
272
      tmp = varc0; varc0 = varc1; varc1 = tmp;
273
      mpz_swap (offc0, offc1);
274
      cmp = swap_tree_comparison (cmp);
275
    }
276
 
277
  if (!operand_equal_p (varx, varc0, 0)
278
      || !operand_equal_p (vary, varc1, 0))
279
    goto end;
280
 
281
  mpz_init_set (loffx, offx);
282
  mpz_init_set (loffy, offy);
283
 
284
  if (cmp == GT_EXPR || cmp == GE_EXPR)
285
    {
286
      tmp = varx; varx = vary; vary = tmp;
287
      mpz_swap (offc0, offc1);
288
      mpz_swap (loffx, loffy);
289
      cmp = swap_tree_comparison (cmp);
290
      lbound = true;
291
    }
292
 
293
  /* If there is no overflow, the condition implies that
294
 
295
     (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).
296
 
297
     The overflows and underflows may complicate things a bit; each
298
     overflow decreases the appropriate offset by M, and underflow
299
     increases it by M.  The above inequality would not necessarily be
300
     true if
301
 
302
     -- VARX + OFFX underflows and VARX + OFFC0 does not, or
303
        VARX + OFFC0 overflows, but VARX + OFFX does not.
304
        This may only happen if OFFX < OFFC0.
305
     -- VARY + OFFY overflows and VARY + OFFC1 does not, or
306
        VARY + OFFC1 underflows and VARY + OFFY does not.
307
        This may only happen if OFFY > OFFC1.  */
308
 
309
  if (no_wrap)
310
    {
311
      x_ok = true;
312
      y_ok = true;
313
    }
314
  else
315
    {
316
      x_ok = (integer_zerop (varx)
317
              || mpz_cmp (loffx, offc0) >= 0);
318
      y_ok = (integer_zerop (vary)
319
              || mpz_cmp (loffy, offc1) <= 0);
320
    }
321
 
322
  if (x_ok && y_ok)
323
    {
324
      mpz_init (bnd);
325
      mpz_sub (bnd, loffx, loffy);
326
      mpz_add (bnd, bnd, offc1);
327
      mpz_sub (bnd, bnd, offc0);
328
 
329
      if (cmp == LT_EXPR)
330
        mpz_sub_ui (bnd, bnd, 1);
331
 
332
      if (lbound)
333
        {
334
          mpz_neg (bnd, bnd);
335
          if (mpz_cmp (bnds->below, bnd) < 0)
336
            mpz_set (bnds->below, bnd);
337
        }
338
      else
339
        {
340
          if (mpz_cmp (bnd, bnds->up) < 0)
341
            mpz_set (bnds->up, bnd);
342
        }
343
      mpz_clear (bnd);
344
    }
345
 
346
  mpz_clear (loffx);
347
  mpz_clear (loffy);
348
end:
349
  mpz_clear (offc0);
350
  mpz_clear (offc1);
351
}
352
 
353
/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
354
   The subtraction is considered to be performed in arbitrary precision,
355
   without overflows.
356
 
357
   We do not attempt to be too clever regarding the value ranges of X and
358
   Y; most of the time, they are just integers or ssa names offsetted by
359
   integer.  However, we try to use the information contained in the
360
   comparisons before the loop (usually created by loop header copying).  */
361
 
362
static void
363
bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
364
{
365
  tree type = TREE_TYPE (x);
366
  tree varx, vary;
367
  mpz_t offx, offy;
368
  mpz_t minx, maxx, miny, maxy;
369
  int cnt = 0;
370
  edge e;
371
  basic_block bb;
372
  tree c0, c1;
373
  gimple cond;
374
  enum tree_code cmp;
375
 
376
  /* Get rid of unnecessary casts, but preserve the value of
377
     the expressions.  */
378
  STRIP_SIGN_NOPS (x);
379
  STRIP_SIGN_NOPS (y);
380
 
381
  mpz_init (bnds->below);
382
  mpz_init (bnds->up);
383
  mpz_init (offx);
384
  mpz_init (offy);
385
  split_to_var_and_offset (x, &varx, offx);
386
  split_to_var_and_offset (y, &vary, offy);
387
 
388
  if (!integer_zerop (varx)
389
      && operand_equal_p (varx, vary, 0))
390
    {
391
      /* Special case VARX == VARY -- we just need to compare the
392
         offsets.  Matters are a bit more complicated when the
         addition of the offsets may wrap.  */
394
      bound_difference_of_offsetted_base (type, offx, offy, bnds);
395
    }
396
  else
397
    {
398
      /* Otherwise, use the value ranges to determine the initial
399
         estimates on below and up.  */
400
      mpz_init (minx);
401
      mpz_init (maxx);
402
      mpz_init (miny);
403
      mpz_init (maxy);
404
      determine_value_range (type, varx, offx, minx, maxx);
405
      determine_value_range (type, vary, offy, miny, maxy);
406
 
407
      mpz_sub (bnds->below, minx, maxy);
408
      mpz_sub (bnds->up, maxx, miny);
409
      mpz_clear (minx);
410
      mpz_clear (maxx);
411
      mpz_clear (miny);
412
      mpz_clear (maxy);
413
    }
414
 
415
  /* If both X and Y are constants, we cannot get any more precise.  */
416
  if (integer_zerop (varx) && integer_zerop (vary))
417
    goto end;
418
 
419
  /* Now walk the dominators of the loop header and use the entry
420
     guards to refine the estimates.  */
421
  for (bb = loop->header;
422
       bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
423
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
424
    {
425
      if (!single_pred_p (bb))
426
        continue;
427
      e = single_pred_edge (bb);
428
 
429
      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
430
        continue;
431
 
432
      cond = last_stmt (e->src);
433
      c0 = gimple_cond_lhs (cond);
434
      cmp = gimple_cond_code (cond);
435
      c1 = gimple_cond_rhs (cond);
436
 
437
      if (e->flags & EDGE_FALSE_VALUE)
438
        cmp = invert_tree_comparison (cmp, false);
439
 
440
      refine_bounds_using_guard (type, varx, offx, vary, offy,
441
                                 c0, cmp, c1, bnds);
442
      ++cnt;
443
    }
444
 
445
end:
446
  mpz_clear (offx);
447
  mpz_clear (offy);
448
}
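
/* As an illustration, assuming the loop header is dominated by a guard
   "if (a_1 < b_2)" created by loop header copying: when asked for the bounds
   of b_2 - a_1, the dominator walk above finds that guard and
   refine_bounds_using_guard raises bnds->below to 1, which is often enough
   to show that the loop rolls at least once.  */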
449
 
450
/* Update the bounds in BNDS that restrict the value of X to the bounds
451
   that restrict the value of X + DELTA.  X can be obtained as a
452
   difference of two values in TYPE.  */
453
 
454
static void
455
bounds_add (bounds *bnds, double_int delta, tree type)
456
{
457
  mpz_t mdelta, max;
458
 
459
  mpz_init (mdelta);
460
  mpz_set_double_int (mdelta, delta, false);
461
 
462
  mpz_init (max);
463
  mpz_set_double_int (max, double_int_mask (TYPE_PRECISION (type)), true);
464
 
465
  mpz_add (bnds->up, bnds->up, mdelta);
466
  mpz_add (bnds->below, bnds->below, mdelta);
467
 
468
  if (mpz_cmp (bnds->up, max) > 0)
469
    mpz_set (bnds->up, max);
470
 
471
  mpz_neg (max, max);
472
  if (mpz_cmp (bnds->below, max) < 0)
473
    mpz_set (bnds->below, max);
474
 
475
  mpz_clear (mdelta);
476
  mpz_clear (max);
477
}
478
 
479
/* Update the bounds in BNDS that restrict the value of X to the bounds
480
   that restrict the value of -X.  */
481
 
482
static void
483
bounds_negate (bounds *bnds)
484
{
485
  mpz_t tmp;
486
 
487
  mpz_init_set (tmp, bnds->up);
488
  mpz_neg (bnds->up, bnds->below);
489
  mpz_neg (bnds->below, tmp);
490
  mpz_clear (tmp);
491
}
492
 
493
/* Returns inverse of X modulo 2^s, where MASK = 2^s-1.  */
494
 
495
static tree
496
inverse (tree x, tree mask)
497
{
498
  tree type = TREE_TYPE (x);
499
  tree rslt;
500
  unsigned ctr = tree_floor_log2 (mask);
501
 
502
  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
503
    {
504
      unsigned HOST_WIDE_INT ix;
505
      unsigned HOST_WIDE_INT imask;
506
      unsigned HOST_WIDE_INT irslt = 1;
507
 
508
      gcc_assert (cst_and_fits_in_hwi (x));
509
      gcc_assert (cst_and_fits_in_hwi (mask));
510
 
511
      ix = int_cst_value (x);
512
      imask = int_cst_value (mask);
513
 
514
      for (; ctr; ctr--)
515
        {
516
          irslt *= ix;
517
          ix *= ix;
518
        }
519
      irslt &= imask;
520
 
521
      rslt = build_int_cst_type (type, irslt);
522
    }
523
  else
524
    {
525
      rslt = build_int_cst (type, 1);
526
      for (; ctr; ctr--)
527
        {
528
          rslt = int_const_binop (MULT_EXPR, rslt, x);
529
          x = int_const_binop (MULT_EXPR, x, x);
530
        }
531
      rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
532
    }
533
 
534
  return rslt;
535
}
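
/* A sanity-check sketch for the repeated-squaring trick above, assuming
   mask = 15 (so s = 4 and ctr = 3) and x = 3:

     irslt = 3^(2^3 - 1) = 3^7 = 2187, and 2187 & 15 = 11;
     indeed 3 * 11 = 33 == 1 (mod 16),

   so 11 is the inverse of 3 modulo 2^4.  X must be odd for the inverse to
   exist, which holds because the caller divides out all factors of two
   first.  */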
536
 
537
/* Derives the upper bound BND on the number of executions of loop with exit
538
   condition S * i <> C.  If NO_OVERFLOW is true, then the control variable of
539
   the loop does not overflow.  EXIT_MUST_BE_TAKEN is true if we are guaranteed
540
   that the loop ends through this exit, i.e., the induction variable ever
541
   reaches the value of C.
542
 
543
   The value C is equal to final - base, where final and base are the final and
544
   initial value of the actual induction variable in the analysed loop.  BNDS
545
   bounds the value of this difference when computed in signed type with
546
   unbounded range, while the computation of C is performed in an unsigned
547
   type with the range matching the range of the type of the induction variable.
548
   In particular, BNDS.up contains an upper bound on C in the following cases:
549
   -- if the iv must reach its final value without overflow, i.e., if
550
      NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
551
   -- if final >= base, which we know to hold when BNDS.below >= 0.  */
552
 
553
static void
554
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
555
                             bounds *bnds, bool exit_must_be_taken)
556
{
557
  double_int max;
558
  mpz_t d;
559
  bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
560
                       || mpz_sgn (bnds->below) >= 0);
561
 
562
  if (multiple_of_p (TREE_TYPE (c), c, s))
563
    {
564
      /* If C is an exact multiple of S, then its value will be reached before
565
         the induction variable overflows (unless the loop is exited in some
566
         other way before).  Note that the actual induction variable in the
567
         loop (which ranges from base to final instead of from 0 to C) may
568
         overflow, in which case BNDS.up will not be giving a correct upper
569
         bound on C; thus, BNDS_U_VALID had to be computed in advance.  */
570
      no_overflow = true;
571
      exit_must_be_taken = true;
572
    }
573
 
574
  /* If the induction variable can overflow, the number of iterations is at
575
     most the period of the control variable (or infinite, but in that case
576
     the whole # of iterations analysis will fail).  */
577
  if (!no_overflow)
578
    {
579
      max = double_int_mask (TYPE_PRECISION (TREE_TYPE (c))
580
                             - tree_low_cst (num_ending_zeros (s), 1));
581
      mpz_set_double_int (bnd, max, true);
582
      return;
583
    }
584
 
585
  /* Now we know that the induction variable does not overflow, so the loop
586
     iterates at most (range of type / S) times.  */
587
  mpz_set_double_int (bnd, double_int_mask (TYPE_PRECISION (TREE_TYPE (c))),
588
                      true);
589
 
590
  /* If the induction variable is guaranteed to reach the value of C before
591
     overflow, ... */
592
  if (exit_must_be_taken)
593
    {
594
       /* ... then we can strengthen this to C / S, and possibly we can use
595
         the upper bound on C given by BNDS.  */
596
      if (TREE_CODE (c) == INTEGER_CST)
597
        mpz_set_double_int (bnd, tree_to_double_int (c), true);
598
      else if (bnds_u_valid)
599
        mpz_set (bnd, bnds->up);
600
    }
601
 
602
  mpz_init (d);
603
  mpz_set_double_int (d, tree_to_double_int (s), true);
604
  mpz_fdiv_q (bnd, bnd, d);
605
  mpz_clear (d);
606
}
607
 
608
/* Determines number of iterations of loop whose ending condition
609
   is IV <> FINAL.  TYPE is the type of the iv.  The number of
610
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
611
   we know that the exit must be taken eventually, i.e., that the IV
612
   ever reaches the value FINAL (we derived this earlier, and possibly set
613
   NITER->assumptions to make sure this is the case).  BNDS contains the
614
   bounds on the difference FINAL - IV->base.  */
615
 
616
static bool
617
number_of_iterations_ne (tree type, affine_iv *iv, tree final,
618
                         struct tree_niter_desc *niter, bool exit_must_be_taken,
619
                         bounds *bnds)
620
{
621
  tree niter_type = unsigned_type_for (type);
622
  tree s, c, d, bits, assumption, tmp, bound;
623
  mpz_t max;
624
 
625
  niter->control = *iv;
626
  niter->bound = final;
627
  niter->cmp = NE_EXPR;
628
 
629
  /* Rearrange the terms so that we get inequality S * i <> C, with S
630
     positive.  Also cast everything to the unsigned type.  If IV does
631
     not overflow, BNDS bounds the value of C.  Also, this is the
632
     case if the computation |FINAL - IV->base| does not overflow, i.e.,
633
     if BNDS->below in the result is nonnegative.  */
634
  if (tree_int_cst_sign_bit (iv->step))
635
    {
636
      s = fold_convert (niter_type,
637
                        fold_build1 (NEGATE_EXPR, type, iv->step));
638
      c = fold_build2 (MINUS_EXPR, niter_type,
639
                       fold_convert (niter_type, iv->base),
640
                       fold_convert (niter_type, final));
641
      bounds_negate (bnds);
642
    }
643
  else
644
    {
645
      s = fold_convert (niter_type, iv->step);
646
      c = fold_build2 (MINUS_EXPR, niter_type,
647
                       fold_convert (niter_type, final),
648
                       fold_convert (niter_type, iv->base));
649
    }
650
 
651
  mpz_init (max);
652
  number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
653
                               exit_must_be_taken);
654
  niter->max = mpz_get_double_int (niter_type, max, false);
655
  mpz_clear (max);
656
 
657
  /* First the trivial cases -- when the step is 1.  */
658
  if (integer_onep (s))
659
    {
660
      niter->niter = c;
661
      return true;
662
    }
663
 
664
  /* Let nsd (step, size of mode) = d.  If d does not divide c, the loop
665
     is infinite.  Otherwise, the number of iterations is
666
     (inverse(s/d) * (c/d)) mod (size of mode/d).  */
667
  bits = num_ending_zeros (s);
668
  bound = build_low_bits_mask (niter_type,
669
                               (TYPE_PRECISION (niter_type)
670
                                - tree_low_cst (bits, 1)));
671
 
672
  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
673
                               build_int_cst (niter_type, 1), bits);
674
  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);
675
 
676
  if (!exit_must_be_taken)
677
    {
678
      /* If we cannot assume that the exit is taken eventually, record the
679
         assumptions for divisibility of c.  */
680
      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
681
      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
682
                                assumption, build_int_cst (niter_type, 0));
683
      if (!integer_nonzerop (assumption))
684
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
685
                                          niter->assumptions, assumption);
686
    }
687
 
688
  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
689
  tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
690
  niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
691
  return true;
692
}
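
/* A worked example of the computation above, assuming an 8-bit unsigned iv
   with step s = 6 and c = final - base = 18:

     bits = 1, d = 2, bound = 0x7f;
     c / d = 9, s / d = 3, inverse (3, 0x7f) = 43  (3 * 43 = 129 == 1 mod 128);
     niter = (9 * 43) & 0x7f = 387 & 127 = 3,

   and indeed the iv needs 3 steps of 6 to advance by 18.  If c were odd
   (say 17), the divisibility assumption recorded above fails, reflecting
   that 6 * i never equals 17 and the exit is never taken.  */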
693
 
694
/* Checks whether we can determine the final value of the control variable
695
   of the loop with ending condition IV0 < IV1 (computed in TYPE).
696
   DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
697
   of the step.  The assumptions necessary to ensure that the computation
698
   of the final value does not overflow are recorded in NITER.  If we
699
   find the final value, we adjust DELTA and return TRUE.  Otherwise
700
   we return false.  BNDS bounds the value of IV1->base - IV0->base,
701
   and will be updated by the same amount as DELTA.  EXIT_MUST_BE_TAKEN is
702
   true if we know that the exit must be taken eventually.  */
703
 
704
static bool
705
number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
706
                               struct tree_niter_desc *niter,
707
                               tree *delta, tree step,
708
                               bool exit_must_be_taken, bounds *bnds)
709
{
710
  tree niter_type = TREE_TYPE (step);
711
  tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
712
  tree tmod;
713
  mpz_t mmod;
714
  tree assumption = boolean_true_node, bound, noloop;
715
  bool ret = false, fv_comp_no_overflow;
716
  tree type1 = type;
717
  if (POINTER_TYPE_P (type))
718
    type1 = sizetype;
719
 
720
  if (TREE_CODE (mod) != INTEGER_CST)
721
    return false;
722
  if (integer_nonzerop (mod))
723
    mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
724
  tmod = fold_convert (type1, mod);
725
 
726
  mpz_init (mmod);
727
  mpz_set_double_int (mmod, tree_to_double_int (mod), true);
728
  mpz_neg (mmod, mmod);
729
 
730
  /* If the induction variable does not overflow and the exit is taken,
731
     then the computation of the final value does not overflow.  This is
732
     also obviously the case if the new final value is equal to the
733
     current one.  Finally, we postulate this for pointer type variables,
734
      as the code cannot rely on the object to which the pointer points being
735
     placed at the end of the address space (and more pragmatically,
736
     TYPE_{MIN,MAX}_VALUE is not defined for pointers).  */
737
  if (integer_zerop (mod) || POINTER_TYPE_P (type))
738
    fv_comp_no_overflow = true;
739
  else if (!exit_must_be_taken)
740
    fv_comp_no_overflow = false;
741
  else
742
    fv_comp_no_overflow =
743
            (iv0->no_overflow && integer_nonzerop (iv0->step))
744
            || (iv1->no_overflow && integer_nonzerop (iv1->step));
745
 
746
  if (integer_nonzerop (iv0->step))
747
    {
748
      /* The final value of the iv is iv1->base + MOD, assuming that this
749
         computation does not overflow, and that
750
         iv0->base <= iv1->base + MOD.  */
751
      if (!fv_comp_no_overflow)
752
        {
753
          bound = fold_build2 (MINUS_EXPR, type1,
754
                               TYPE_MAX_VALUE (type1), tmod);
755
          assumption = fold_build2 (LE_EXPR, boolean_type_node,
756
                                    iv1->base, bound);
757
          if (integer_zerop (assumption))
758
            goto end;
759
        }
760
      if (mpz_cmp (mmod, bnds->below) < 0)
761
        noloop = boolean_false_node;
762
      else if (POINTER_TYPE_P (type))
763
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
764
                              iv0->base,
765
                              fold_build_pointer_plus (iv1->base, tmod));
766
      else
767
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
768
                              iv0->base,
769
                              fold_build2 (PLUS_EXPR, type1,
770
                                           iv1->base, tmod));
771
    }
772
  else
773
    {
774
      /* The final value of the iv is iv0->base - MOD, assuming that this
775
         computation does not overflow, and that
776
         iv0->base - MOD <= iv1->base. */
777
      if (!fv_comp_no_overflow)
778
        {
779
          bound = fold_build2 (PLUS_EXPR, type1,
780
                               TYPE_MIN_VALUE (type1), tmod);
781
          assumption = fold_build2 (GE_EXPR, boolean_type_node,
782
                                    iv0->base, bound);
783
          if (integer_zerop (assumption))
784
            goto end;
785
        }
786
      if (mpz_cmp (mmod, bnds->below) < 0)
787
        noloop = boolean_false_node;
788
      else if (POINTER_TYPE_P (type))
789
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
790
                              fold_build_pointer_plus (iv0->base,
791
                                                       fold_build1 (NEGATE_EXPR,
792
                                                                    type1, tmod)),
793
                              iv1->base);
794
      else
795
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
796
                              fold_build2 (MINUS_EXPR, type1,
797
                                           iv0->base, tmod),
798
                              iv1->base);
799
    }
800
 
801
  if (!integer_nonzerop (assumption))
802
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
803
                                      niter->assumptions,
804
                                      assumption);
805
  if (!integer_zerop (noloop))
806
    niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
807
                                      niter->may_be_zero,
808
                                      noloop);
809
  bounds_add (bnds, tree_to_double_int (mod), type);
810
  *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
811
 
812
  ret = true;
813
end:
814
  mpz_clear (mmod);
815
  return ret;
816
}
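
/* A worked example of the adjustment above, assuming iv0 = {0, +, 4} and
   iv1 = {10, +, 0} in a 32-bit unsigned type:

     mod = 10 mod 4 = 2, so MOD = 4 - 2 = 2;
     *delta becomes 12 and the final value 10 + 2 = 12,

   turning the exit test into "4 * i != 12", which is taken after exactly
   3 iterations, i.e. ceil (10 / 4).  For non-constant bases the same
   transformation also records the assumption that the adjusted final value
   does not overflow and a may_be_zero condition for the case
   iv0->base > iv1->base + MOD.  */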
817
 
818
/* Add assertions to NITER that ensure that the control variable of the loop
819
   with ending condition IV0 < IV1 does not overflow.  Types of IV0 and IV1
820
   are TYPE.  Returns false if we can prove that there is an overflow, true
821
   otherwise.  STEP is the absolute value of the step.  */
822
 
823
static bool
824
assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
825
                       struct tree_niter_desc *niter, tree step)
826
{
827
  tree bound, d, assumption, diff;
828
  tree niter_type = TREE_TYPE (step);
829
 
830
  if (integer_nonzerop (iv0->step))
831
    {
832
      /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
833
      if (iv0->no_overflow)
834
        return true;
835
 
836
      /* If iv0->base is a constant, we can determine the last value before
837
         overflow precisely; otherwise we conservatively assume
838
         MAX - STEP + 1.  */
839
 
840
      if (TREE_CODE (iv0->base) == INTEGER_CST)
841
        {
842
          d = fold_build2 (MINUS_EXPR, niter_type,
843
                           fold_convert (niter_type, TYPE_MAX_VALUE (type)),
844
                           fold_convert (niter_type, iv0->base));
845
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
846
        }
847
      else
848
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
849
                            build_int_cst (niter_type, 1));
850
      bound = fold_build2 (MINUS_EXPR, type,
851
                           TYPE_MAX_VALUE (type), fold_convert (type, diff));
852
      assumption = fold_build2 (LE_EXPR, boolean_type_node,
853
                                iv1->base, bound);
854
    }
855
  else
856
    {
857
      /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
858
      if (iv1->no_overflow)
859
        return true;
860
 
861
      if (TREE_CODE (iv1->base) == INTEGER_CST)
862
        {
863
          d = fold_build2 (MINUS_EXPR, niter_type,
864
                           fold_convert (niter_type, iv1->base),
865
                           fold_convert (niter_type, TYPE_MIN_VALUE (type)));
866
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
867
        }
868
      else
869
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
870
                            build_int_cst (niter_type, 1));
871
      bound = fold_build2 (PLUS_EXPR, type,
872
                           TYPE_MIN_VALUE (type), fold_convert (type, diff));
873
      assumption = fold_build2 (GE_EXPR, boolean_type_node,
874
                                iv0->base, bound);
875
    }
876
 
877
  if (integer_zerop (assumption))
878
    return false;
879
  if (!integer_nonzerop (assumption))
880
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
881
                                      niter->assumptions, assumption);
882
 
883
  iv0->no_overflow = true;
884
  iv1->no_overflow = true;
885
  return true;
886
}
887
 
888
/* Add an assumption to NITER that a loop whose ending condition
889
   is IV0 < IV1 rolls.  TYPE is the type of the control iv.  BNDS
890
   bounds the value of IV1->base - IV0->base.  */
891
 
892
static void
893
assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
894
                      struct tree_niter_desc *niter, bounds *bnds)
895
{
896
  tree assumption = boolean_true_node, bound, diff;
897
  tree mbz, mbzl, mbzr, type1;
898
  bool rolls_p, no_overflow_p;
899
  double_int dstep;
900
  mpz_t mstep, max;
901
 
902
  /* We are going to compute the number of iterations as
903
     (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
904
     variant of TYPE.  This formula only works if
905
 
906
     -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1
907
 
908
     (where MAX is the maximum value of the unsigned variant of TYPE, and
909
     the computations in this formula are performed in full precision,
910
     i.e., without overflows).
911
 
912
     Usually, for loops with exit condition iv0->base + step * i < iv1->base,
913
     we have a condition of the form iv0->base - step < iv1->base before the loop,
914
     and for loops iv0->base < iv1->base - step * i the condition
915
     iv0->base < iv1->base + step, due to loop header copying, which enables us
916
     to prove the lower bound.
917
 
918
     The upper bound is more complicated.  Unless the expressions for initial
919
     and final value themselves contain enough information, we usually cannot
920
     derive it from the context.  */
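
  /* For instance, assuming an 8-bit unsigned type and step 10: a difference
     iv1->base - iv0->base of 247 makes 247 + 10 - 1 wrap to 0, so the
     formula would report 0 iterations instead of 25; hence the requirement
     that the difference be at most MAX - step + 1 = 246.  Similarly a
     difference of -10 wraps to 246 and the formula would report 25
     iterations for a loop that does not roll, hence the lower bound
     -step + 1 = -9.  */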
921
 
922
  /* First check whether the answer does not follow from the bounds we gathered
923
     before.  */
924
  if (integer_nonzerop (iv0->step))
925
    dstep = tree_to_double_int (iv0->step);
926
  else
927
    {
928
      dstep = double_int_sext (tree_to_double_int (iv1->step),
929
                               TYPE_PRECISION (type));
930
      dstep = double_int_neg (dstep);
931
    }
932
 
933
  mpz_init (mstep);
934
  mpz_set_double_int (mstep, dstep, true);
935
  mpz_neg (mstep, mstep);
936
  mpz_add_ui (mstep, mstep, 1);
937
 
938
  rolls_p = mpz_cmp (mstep, bnds->below) <= 0;
939
 
940
  mpz_init (max);
941
  mpz_set_double_int (max, double_int_mask (TYPE_PRECISION (type)), true);
942
  mpz_add (max, max, mstep);
943
  no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
944
                   /* For pointers, only values lying inside a single object
945
                      can be compared or manipulated by pointer arithmetics.
946
                      Gcc in general does not allow or handle objects larger
947
                      than half of the address space, hence the upper bound
948
                      is satisfied for pointers.  */
949
                   || POINTER_TYPE_P (type));
950
  mpz_clear (mstep);
951
  mpz_clear (max);
952
 
953
  if (rolls_p && no_overflow_p)
954
    return;
955
 
956
  type1 = type;
957
  if (POINTER_TYPE_P (type))
958
    type1 = sizetype;
959
 
960
  /* Now the hard part; we must formulate the assumption(s) as expressions, and
961
     we must be careful not to introduce overflow.  */
962
 
963
  if (integer_nonzerop (iv0->step))
964
    {
965
      diff = fold_build2 (MINUS_EXPR, type1,
966
                          iv0->step, build_int_cst (type1, 1));
967
 
968
      /* We need to know that iv0->base >= MIN + iv0->step - 1.  Since
         0 address never belongs to any object, we can assume this for
         pointers.  */
971
      if (!POINTER_TYPE_P (type))
972
        {
973
          bound = fold_build2 (PLUS_EXPR, type1,
974
                               TYPE_MIN_VALUE (type), diff);
975
          assumption = fold_build2 (GE_EXPR, boolean_type_node,
976
                                    iv0->base, bound);
977
        }
978
 
979
      /* And then we can compute iv0->base - diff, and compare it with
980
         iv1->base.  */
981
      mbzl = fold_build2 (MINUS_EXPR, type1,
982
                          fold_convert (type1, iv0->base), diff);
983
      mbzr = fold_convert (type1, iv1->base);
984
    }
985
  else
986
    {
987
      diff = fold_build2 (PLUS_EXPR, type1,
988
                          iv1->step, build_int_cst (type1, 1));
989
 
990
      if (!POINTER_TYPE_P (type))
991
        {
992
          bound = fold_build2 (PLUS_EXPR, type1,
993
                               TYPE_MAX_VALUE (type), diff);
994
          assumption = fold_build2 (LE_EXPR, boolean_type_node,
995
                                    iv1->base, bound);
996
        }
997
 
998
      mbzl = fold_convert (type1, iv0->base);
999
      mbzr = fold_build2 (MINUS_EXPR, type1,
1000
                          fold_convert (type1, iv1->base), diff);
1001
    }
1002
 
1003
  if (!integer_nonzerop (assumption))
1004
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1005
                                      niter->assumptions, assumption);
1006
  if (!rolls_p)
1007
    {
1008
      mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
1009
      niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
1010
                                        niter->may_be_zero, mbz);
1011
    }
1012
}
1013
 
1014
/* Determines number of iterations of loop whose ending condition
1015
   is IV0 < IV1.  TYPE is the type of the iv.  The number of
1016
   iterations is stored to NITER.  BNDS bounds the difference
1017
   IV1->base - IV0->base.  EXIT_MUST_BE_TAKEN is true if we know
1018
   that the exit must be taken eventually.  */
1019
 
1020
static bool
1021
number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
1022
                         struct tree_niter_desc *niter,
1023
                         bool exit_must_be_taken, bounds *bnds)
1024
{
1025
  tree niter_type = unsigned_type_for (type);
1026
  tree delta, step, s;
1027
  mpz_t mstep, tmp;
1028
 
1029
  if (integer_nonzerop (iv0->step))
1030
    {
1031
      niter->control = *iv0;
1032
      niter->cmp = LT_EXPR;
1033
      niter->bound = iv1->base;
1034
    }
1035
  else
1036
    {
1037
      niter->control = *iv1;
1038
      niter->cmp = GT_EXPR;
1039
      niter->bound = iv0->base;
1040
    }
1041
 
1042
  delta = fold_build2 (MINUS_EXPR, niter_type,
1043
                       fold_convert (niter_type, iv1->base),
1044
                       fold_convert (niter_type, iv0->base));
1045
 
1046
  /* First handle the special case that the step is +-1.  */
1047
  if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
1048
      || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
1049
    {
1050
      /* for (i = iv0->base; i < iv1->base; i++)
1051
 
1052
         or
1053
 
1054
         for (i = iv1->base; i > iv0->base; i--).
1055
 
1056
         In both cases # of iterations is iv1->base - iv0->base, assuming that
1057
         iv1->base >= iv0->base.
1058
 
1059
         First try to derive a lower bound on the value of
1060
         iv1->base - iv0->base, computed in full precision.  If the difference
1061
         is nonnegative, we are done, otherwise we must record the
1062
         condition.  */
1063
 
1064
      if (mpz_sgn (bnds->below) < 0)
1065
        niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
1066
                                          iv1->base, iv0->base);
1067
      niter->niter = delta;
1068
      niter->max = mpz_get_double_int (niter_type, bnds->up, false);
1069
      return true;
1070
    }
1071
 
1072
  if (integer_nonzerop (iv0->step))
1073
    step = fold_convert (niter_type, iv0->step);
1074
  else
1075
    step = fold_convert (niter_type,
1076
                         fold_build1 (NEGATE_EXPR, type, iv1->step));
1077
 
1078
  /* If we can determine the final value of the control iv exactly, we can
1079
     transform the condition to != comparison.  In particular, this will be
1080
     the case if DELTA is constant.  */
1081
  if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
1082
                                     exit_must_be_taken, bnds))
1083
    {
1084
      affine_iv zps;
1085
 
1086
      zps.base = build_int_cst (niter_type, 0);
1087
      zps.step = step;
1088
      /* number_of_iterations_lt_to_ne will add assumptions that ensure that
1089
         zps does not overflow.  */
1090
      zps.no_overflow = true;
1091
 
1092
      return number_of_iterations_ne (type, &zps, delta, niter, true, bnds);
1093
    }
1094
 
1095
  /* Make sure that the control iv does not overflow.  */
1096
  if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
1097
    return false;
1098
 
1099
  /* We determine the number of iterations as (delta + step - 1) / step.  For
1100
     this to work, we must know that iv1->base >= iv0->base - step + 1,
1101
     otherwise the loop does not roll.  */
1102
  assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);
1103
 
1104
  s = fold_build2 (MINUS_EXPR, niter_type,
1105
                   step, build_int_cst (niter_type, 1));
1106
  delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
1107
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);
1108
 
1109
  mpz_init (mstep);
1110
  mpz_init (tmp);
1111
  mpz_set_double_int (mstep, tree_to_double_int (step), true);
1112
  mpz_add (tmp, bnds->up, mstep);
1113
  mpz_sub_ui (tmp, tmp, 1);
1114
  mpz_fdiv_q (tmp, tmp, mstep);
1115
  niter->max = mpz_get_double_int (niter_type, tmp, false);
1116
  mpz_clear (mstep);
1117
  mpz_clear (tmp);
1118
 
1119
  return true;
1120
}
1121
 
1122
/* Determines number of iterations of loop whose ending condition
1123
   is IV0 <= IV1.  TYPE is the type of the iv.  The number of
1124
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
1125
   we know that this condition must eventually become false (we derived this
1126
   earlier, and possibly set NITER->assumptions to make sure this
1127
   is the case).  BNDS bounds the difference IV1->base - IV0->base.  */
1128
 
1129
static bool
1130
number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1,
1131
                         struct tree_niter_desc *niter, bool exit_must_be_taken,
1132
                         bounds *bnds)
1133
{
1134
  tree assumption;
1135
  tree type1 = type;
1136
  if (POINTER_TYPE_P (type))
1137
    type1 = sizetype;
1138
 
1139
  /* Say that IV0 is the control variable.  Then IV0 <= IV1 iff
1140
     IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
1141
     value of the type.  This we must know anyway, since if it is
1142
     equal to this value, the loop rolls forever.  We do not check
1143
     this condition for pointer type ivs, as the code cannot rely on
1144
     the object to which the pointer points being placed at the end of
1145
     the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
1146
     not defined for pointers).  */
1147
 
1148
  if (!exit_must_be_taken && !POINTER_TYPE_P (type))
1149
    {
1150
      if (integer_nonzerop (iv0->step))
1151
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
1152
                                  iv1->base, TYPE_MAX_VALUE (type));
1153
      else
1154
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
1155
                                  iv0->base, TYPE_MIN_VALUE (type));
1156
 
1157
      if (integer_zerop (assumption))
1158
        return false;
1159
      if (!integer_nonzerop (assumption))
1160
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1161
                                          niter->assumptions, assumption);
1162
    }
1163
 
1164
  if (integer_nonzerop (iv0->step))
1165
    {
1166
      if (POINTER_TYPE_P (type))
1167
        iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
1168
      else
1169
        iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
1170
                                 build_int_cst (type1, 1));
1171
    }
1172
  else if (POINTER_TYPE_P (type))
1173
    iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
1174
  else
1175
    iv0->base = fold_build2 (MINUS_EXPR, type1,
1176
                             iv0->base, build_int_cst (type1, 1));
1177
 
1178
  bounds_add (bnds, double_int_one, type1);
1179
 
1180
  return number_of_iterations_lt (type, iv0, iv1, niter, exit_must_be_taken,
1181
                                  bnds);
1182
}
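
/* For example, assuming iv0 = {0, +, 1} and iv1 = {n_3, +, 0}, the test
   "i <= n_3" is rewritten as "i < n_3 + 1" with the extra assumption
   n_3 != TYPE_MAX_VALUE recorded (unless the exit is already known to be
   taken), and the bounds on the difference of bases are shifted by one
   accordingly.  */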
1183
 
1184
/* Dumps description of affine induction variable IV to FILE.  */
1185
 
1186
static void
1187
dump_affine_iv (FILE *file, affine_iv *iv)
1188
{
1189
  if (!integer_zerop (iv->step))
1190
    fprintf (file, "[");
1191
 
1192
  print_generic_expr (dump_file, iv->base, TDF_SLIM);
1193
 
1194
  if (!integer_zerop (iv->step))
1195
    {
1196
      fprintf (file, ", + , ");
1197
      print_generic_expr (dump_file, iv->step, TDF_SLIM);
1198
      fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
1199
    }
1200
}
1201
 
1202
/* Determine the number of iterations according to condition (for staying
1203
   inside loop) which compares two induction variables using comparison
1204
   operator CODE.  The induction variable on left side of the comparison
1205
   is IV0, the right-hand side is IV1.  Both induction variables must have
1206
   type TYPE, which must be an integer or pointer type.  The steps of the
1207
   ivs must be constants (or NULL_TREE, which is interpreted as constant zero).
1208
 
1209
   LOOP is the loop whose number of iterations we are determining.
1210
 
1211
   ONLY_EXIT is true if we are sure this is the only way the loop could be
1212
   exited (including possibly non-returning function calls, exceptions, etc.)
1213
   -- in this case we can use the information whether the control induction
1214
   variables can overflow or not in a more efficient way.
1215
 
1216
   The results (number of iterations and assumptions as described in
1217
   comments at struct tree_niter_desc in tree-flow.h) are stored to NITER.
1218
   Returns false if it fails to determine number of iterations, true if it
1219
   was determined (possibly with some assumptions).  */
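
/* As a rough sketch of how a typical loop maps onto these arguments,
   assuming "for (i = 0; i < n; i++)" over a 32-bit int i:

     iv0 = { base 0, step 1 }, code = LT_EXPR, iv1 = { base n_5, step 0 },

   which ends up in number_of_iterations_lt and yields niter = (unsigned) n_5,
   with may_be_zero recording n_5 < 0 unless the bounds on the difference of
   bases already show it to be nonnegative.  */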
1220
 
1221
static bool
1222
number_of_iterations_cond (struct loop *loop,
1223
                           tree type, affine_iv *iv0, enum tree_code code,
1224
                           affine_iv *iv1, struct tree_niter_desc *niter,
1225
                           bool only_exit)
1226
{
1227
  bool exit_must_be_taken = false, ret;
1228
  bounds bnds;
1229
 
1230
  /* The meaning of these assumptions is this:
1231
     if !assumptions
1232
       then the rest of information does not have to be valid
1233
     if may_be_zero then the loop does not roll, even if
1234
       niter != 0.  */
1235
  niter->assumptions = boolean_true_node;
1236
  niter->may_be_zero = boolean_false_node;
1237
  niter->niter = NULL_TREE;
1238
  niter->max = double_int_zero;
1239
 
1240
  niter->bound = NULL_TREE;
1241
  niter->cmp = ERROR_MARK;
1242
 
1243
  /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
1244
     the control variable is on lhs.  */
1245
  if (code == GE_EXPR || code == GT_EXPR
1246
      || (code == NE_EXPR && integer_zerop (iv0->step)))
1247
    {
1248
      SWAP (iv0, iv1);
1249
      code = swap_tree_comparison (code);
1250
    }
1251
 
1252
  if (POINTER_TYPE_P (type))
1253
    {
1254
      /* Comparison of pointers is undefined unless both iv0 and iv1 point
1255
         to the same object.  If they do, the control variable cannot wrap
1256
         (as wrap around the bounds of memory will never return a pointer
1257
         that would be guaranteed to point to the same object, even if we
1258
         avoid undefined behavior by casting to size_t and back).  */
1259
      iv0->no_overflow = true;
1260
      iv1->no_overflow = true;
1261
    }
1262
 
1263
  /* If the control induction variable does not overflow and the only exit
1264
     from the loop is the one that we analyze, we know it must be taken
1265
     eventually.  */
1266
  if (only_exit)
1267
    {
1268
      if (!integer_zerop (iv0->step) && iv0->no_overflow)
1269
        exit_must_be_taken = true;
1270
      else if (!integer_zerop (iv1->step) && iv1->no_overflow)
1271
        exit_must_be_taken = true;
1272
    }
1273
 
1274
  /* We can handle the case when neither of the sides of the comparison is
1275
     invariant, provided that the test is NE_EXPR.  This rarely occurs in
1276
     practice, but it is simple enough to manage.  */
1277
  if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
1278
    {
1279
      if (code != NE_EXPR)
1280
        return false;
1281
 
1282
      iv0->step = fold_binary_to_constant (MINUS_EXPR, type,
1283
                                           iv0->step, iv1->step);
1284
      iv0->no_overflow = false;
1285
      iv1->step = build_int_cst (type, 0);
1286
      iv1->no_overflow = true;
1287
    }
1288
 
1289
  /* If the result of the comparison is a constant,  the loop is weird.  More
1290
     precise handling would be possible, but the situation is not common enough
1291
     to waste time on it.  */
1292
  if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
1293
    return false;
1294
 
1295
  /* Ignore loops of while (i-- < 10) type.  */
1296
  if (code != NE_EXPR)
1297
    {
1298
      if (iv0->step && tree_int_cst_sign_bit (iv0->step))
1299
        return false;
1300
 
1301
      if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
1302
        return false;
1303
    }
1304
 
1305
  /* If the loop exits immediately, there is nothing to do.  */
1306
  if (integer_zerop (fold_build2 (code, boolean_type_node, iv0->base, iv1->base)))
1307
    {
1308
      niter->niter = build_int_cst (unsigned_type_for (type), 0);
1309
      niter->max = double_int_zero;
1310
      return true;
1311
    }
1312
 
1313
  /* OK, now we know we have a sensible loop.  Handle several cases, depending
1314
     on what comparison operator is used.  */
1315
  bound_difference (loop, iv1->base, iv0->base, &bnds);
1316
 
1317
  if (dump_file && (dump_flags & TDF_DETAILS))
1318
    {
1319
      fprintf (dump_file,
1320
               "Analyzing # of iterations of loop %d\n", loop->num);
1321
 
1322
      fprintf (dump_file, "  exit condition ");
1323
      dump_affine_iv (dump_file, iv0);
1324
      fprintf (dump_file, " %s ",
1325
               code == NE_EXPR ? "!="
1326
               : code == LT_EXPR ? "<"
1327
               : "<=");
1328
      dump_affine_iv (dump_file, iv1);
1329
      fprintf (dump_file, "\n");
1330
 
1331
      fprintf (dump_file, "  bounds on difference of bases: ");
1332
      mpz_out_str (dump_file, 10, bnds.below);
1333
      fprintf (dump_file, " ... ");
1334
      mpz_out_str (dump_file, 10, bnds.up);
1335
      fprintf (dump_file, "\n");
1336
    }
1337
 
1338
  switch (code)
1339
    {
1340
    case NE_EXPR:
1341
      gcc_assert (integer_zerop (iv1->step));
1342
      ret = number_of_iterations_ne (type, iv0, iv1->base, niter,
1343
                                     exit_must_be_taken, &bnds);
1344
      break;
1345
 
1346
    case LT_EXPR:
1347
      ret = number_of_iterations_lt (type, iv0, iv1, niter, exit_must_be_taken,
1348
                                     &bnds);
1349
      break;
1350
 
1351
    case LE_EXPR:
1352
      ret = number_of_iterations_le (type, iv0, iv1, niter, exit_must_be_taken,
1353
                                     &bnds);
1354
      break;
1355
 
1356
    default:
1357
      gcc_unreachable ();
1358
    }
1359
 
1360
  mpz_clear (bnds.up);
1361
  mpz_clear (bnds.below);
1362
 
1363
  if (dump_file && (dump_flags & TDF_DETAILS))
1364
    {
1365
      if (ret)
1366
        {
1367
          fprintf (dump_file, "  result:\n");
1368
          if (!integer_nonzerop (niter->assumptions))
1369
            {
1370
              fprintf (dump_file, "    under assumptions ");
1371
              print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
1372
              fprintf (dump_file, "\n");
1373
            }
1374
 
1375
          if (!integer_zerop (niter->may_be_zero))
1376
            {
1377
              fprintf (dump_file, "    zero if ");
1378
              print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
1379
              fprintf (dump_file, "\n");
1380
            }
1381
 
1382
          fprintf (dump_file, "    # of iterations ");
1383
          print_generic_expr (dump_file, niter->niter, TDF_SLIM);
1384
          fprintf (dump_file, ", bounded by ");
1385
          dump_double_int (dump_file, niter->max, true);
1386
          fprintf (dump_file, "\n");
1387
        }
1388
      else
1389
        fprintf (dump_file, "  failed\n\n");
1390
    }
1391
  return ret;
1392
}
1393
 
1394
/* Substitute NEW for OLD in EXPR and fold the result.  */
1395
 
1396
static tree
1397
simplify_replace_tree (tree expr, tree old, tree new_tree)
1398
{
1399
  unsigned i, n;
1400
  tree ret = NULL_TREE, e, se;
1401
 
1402
  if (!expr)
1403
    return NULL_TREE;
1404
 
1405
  /* Do not bother to replace constants.  */
1406
  if (CONSTANT_CLASS_P (old))
1407
    return expr;
1408
 
1409
  if (expr == old
1410
      || operand_equal_p (expr, old, 0))
1411
    return unshare_expr (new_tree);
1412
 
1413
  if (!EXPR_P (expr))
1414
    return expr;
1415
 
1416
  n = TREE_OPERAND_LENGTH (expr);
1417
  for (i = 0; i < n; i++)
1418
    {
1419
      e = TREE_OPERAND (expr, i);
1420
      se = simplify_replace_tree (e, old, new_tree);
1421
      if (e == se)
1422
        continue;
1423
 
1424
      if (!ret)
1425
        ret = copy_node (expr);
1426
 
1427
      TREE_OPERAND (ret, i) = se;
1428
    }
1429
 
1430
  return (ret ? fold (ret) : expr);
1431
}
1432
 
1433
/* Expand definitions of ssa names in EXPR as long as they are simple
1434
   enough, and return the new expression.  */
1435
 
1436
tree
1437
expand_simple_operations (tree expr)
1438
{
1439
  unsigned i, n;
1440
  tree ret = NULL_TREE, e, ee, e1;
1441
  enum tree_code code;
1442
  gimple stmt;
1443
 
1444
  if (expr == NULL_TREE)
1445
    return expr;
1446
 
1447
  if (is_gimple_min_invariant (expr))
1448
    return expr;
1449
 
1450
  code = TREE_CODE (expr);
1451
  if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
1452
    {
1453
      n = TREE_OPERAND_LENGTH (expr);
1454
      for (i = 0; i < n; i++)
1455
        {
1456
          e = TREE_OPERAND (expr, i);
1457
          ee = expand_simple_operations (e);
1458
          if (e == ee)
1459
            continue;
1460
 
1461
          if (!ret)
1462
            ret = copy_node (expr);
1463
 
1464
          TREE_OPERAND (ret, i) = ee;
1465
        }
1466
 
1467
      if (!ret)
1468
        return expr;
1469
 
1470
      fold_defer_overflow_warnings ();
1471
      ret = fold (ret);
1472
      fold_undefer_and_ignore_overflow_warnings ();
1473
      return ret;
1474
    }
1475
 
1476
  if (TREE_CODE (expr) != SSA_NAME)
1477
    return expr;
1478
 
1479
  stmt = SSA_NAME_DEF_STMT (expr);
1480
  if (gimple_code (stmt) == GIMPLE_PHI)
1481
    {
1482
      basic_block src, dest;
1483
 
1484
      if (gimple_phi_num_args (stmt) != 1)
1485
        return expr;
1486
      e = PHI_ARG_DEF (stmt, 0);
1487
 
1488
      /* Avoid propagating through loop exit phi nodes, which
1489
         could break loop-closed SSA form restrictions.  */
1490
      dest = gimple_bb (stmt);
1491
      src = single_pred (dest);
1492
      if (TREE_CODE (e) == SSA_NAME
1493
          && src->loop_father != dest->loop_father)
1494
        return expr;
1495
 
1496
      return expand_simple_operations (e);
1497
    }
1498
  if (gimple_code (stmt) != GIMPLE_ASSIGN)
1499
    return expr;
1500
 
1501
  e = gimple_assign_rhs1 (stmt);
1502
  code = gimple_assign_rhs_code (stmt);
1503
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
1504
    {
1505
      if (is_gimple_min_invariant (e))
1506
        return e;
1507
 
1508
      if (code == SSA_NAME)
1509
        return expand_simple_operations (e);
1510
 
1511
      return expr;
1512
    }
1513
 
1514
  switch (code)
1515
    {
1516
    CASE_CONVERT:
1517
      /* Casts are simple.  */
1518
      ee = expand_simple_operations (e);
1519
      return fold_build1 (code, TREE_TYPE (expr), ee);
1520
 
1521
    case PLUS_EXPR:
1522
    case MINUS_EXPR:
1523
    case POINTER_PLUS_EXPR:
1524
      /* And increments and decrements by a constant are simple.  */
1525
      e1 = gimple_assign_rhs2 (stmt);
1526
      if (!is_gimple_min_invariant (e1))
1527
        return expr;
1528
 
1529
      ee = expand_simple_operations (e);
1530
      return fold_build2 (code, TREE_TYPE (expr), ee, e1);
1531
 
1532
    default:
1533
      return expr;
1534
    }
1535
}
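/* Illustrative sketch (an editorial example, not part of the original
   source; the SSA names are hypothetical): given the definitions

     i_2 = i_1 + 1;
     n_3 = (unsigned int) i_2;

   expand_simple_operations (n_3) follows the defining statements through
   the cast and the increment and returns an expression equivalent to
   (unsigned int) (i_1 + 1), which the callers below can then analyze as
   a whole.  */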
1536
 
1537
/* Tries to simplify EXPR using the condition COND.  Returns the simplified
1538
   expression (or EXPR unchanged, if no simplification was possible).  */
1539
 
1540
static tree
1541
tree_simplify_using_condition_1 (tree cond, tree expr)
1542
{
1543
  bool changed;
1544
  tree e, te, e0, e1, e2, notcond;
1545
  enum tree_code code = TREE_CODE (expr);
1546
 
1547
  if (code == INTEGER_CST)
1548
    return expr;
1549
 
1550
  if (code == TRUTH_OR_EXPR
1551
      || code == TRUTH_AND_EXPR
1552
      || code == COND_EXPR)
1553
    {
1554
      changed = false;
1555
 
1556
      e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
1557
      if (TREE_OPERAND (expr, 0) != e0)
1558
        changed = true;
1559
 
1560
      e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
1561
      if (TREE_OPERAND (expr, 1) != e1)
1562
        changed = true;
1563
 
1564
      if (code == COND_EXPR)
1565
        {
1566
          e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
1567
          if (TREE_OPERAND (expr, 2) != e2)
1568
            changed = true;
1569
        }
1570
      else
1571
        e2 = NULL_TREE;
1572
 
1573
      if (changed)
1574
        {
1575
          if (code == COND_EXPR)
1576
            expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
1577
          else
1578
            expr = fold_build2 (code, boolean_type_node, e0, e1);
1579
        }
1580
 
1581
      return expr;
1582
    }
1583
 
1584
  /* In case COND is equality, we may be able to simplify EXPR by copy/constant
1585
     propagation, and vice versa.  Fold does not handle this, since it is
1586
     considered too expensive.  */
1587
  if (TREE_CODE (cond) == EQ_EXPR)
1588
    {
1589
      e0 = TREE_OPERAND (cond, 0);
1590
      e1 = TREE_OPERAND (cond, 1);
1591
 
1592
      /* We know that e0 == e1.  Check whether we can simplify expr
1593
         using this fact.  */
1594
      e = simplify_replace_tree (expr, e0, e1);
1595
      if (integer_zerop (e) || integer_nonzerop (e))
1596
        return e;
1597
 
1598
      e = simplify_replace_tree (expr, e1, e0);
1599
      if (integer_zerop (e) || integer_nonzerop (e))
1600
        return e;
1601
    }
1602
  if (TREE_CODE (expr) == EQ_EXPR)
1603
    {
1604
      e0 = TREE_OPERAND (expr, 0);
1605
      e1 = TREE_OPERAND (expr, 1);
1606
 
1607
      /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true.  */
1608
      e = simplify_replace_tree (cond, e0, e1);
1609
      if (integer_zerop (e))
1610
        return e;
1611
      e = simplify_replace_tree (cond, e1, e0);
1612
      if (integer_zerop (e))
1613
        return e;
1614
    }
1615
  if (TREE_CODE (expr) == NE_EXPR)
1616
    {
1617
      e0 = TREE_OPERAND (expr, 0);
1618
      e1 = TREE_OPERAND (expr, 1);
1619
 
1620
      /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true.  */
1621
      e = simplify_replace_tree (cond, e0, e1);
1622
      if (integer_zerop (e))
1623
        return boolean_true_node;
1624
      e = simplify_replace_tree (cond, e1, e0);
1625
      if (integer_zerop (e))
1626
        return boolean_true_node;
1627
    }
1628
 
1629
  te = expand_simple_operations (expr);
1630
 
1631
  /* Check whether COND ==> EXPR.  */
1632
  notcond = invert_truthvalue (cond);
1633
  e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, te);
1634
  if (e && integer_nonzerop (e))
1635
    return e;
1636
 
1637
  /* Check whether COND ==> not EXPR.  */
1638
  e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, te);
1639
  if (e && integer_zerop (e))
1640
    return e;
1641
 
1642
  return expr;
1643
}
1644
 
1645
/* Tries to simplify EXPR using the condition COND.  Returns the simplified
1646
   expression (or EXPR unchanged, if no simplification was possible).
1647
   Wrapper around tree_simplify_using_condition_1 that ensures that chains
1648
   of simple operations in definitions of ssa names in COND are expanded,
1649
   so that things like casts or incrementing the value of the bound before
1650
   the loop do not cause us to fail.  */
1651
 
1652
static tree
1653
tree_simplify_using_condition (tree cond, tree expr)
1654
{
1655
  cond = expand_simple_operations (cond);
1656
 
1657
  return tree_simplify_using_condition_1 (cond, expr);
1658
}
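/* Illustrative sketch (an editorial example, not part of the original
   source; the SSA name n_1 is hypothetical): with COND being n_1 == 100
   and EXPR being n_1 > 0, the equality branch above substitutes 100 for
   n_1 in EXPR, the result 100 > 0 folds to a nonzero constant, and that
   constant is returned, i.e. EXPR simplifies to true under COND.  */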
1659
 
1660
/* Tries to simplify EXPR using the conditions on entry to LOOP.
1661
   Returns the simplified expression (or EXPR unchanged, if no
1662
   simplification was possible).  */
1663
 
1664
static tree
1665
simplify_using_initial_conditions (struct loop *loop, tree expr)
1666
{
1667
  edge e;
1668
  basic_block bb;
1669
  gimple stmt;
1670
  tree cond;
1671
  int cnt = 0;
1672
 
1673
  if (TREE_CODE (expr) == INTEGER_CST)
1674
    return expr;
1675
 
1676
  /* Limit walking the dominators to avoid quadratic behavior in
1677
     the number of BBs times the number of loops in degenerate
1678
     cases.  */
1679
  for (bb = loop->header;
1680
       bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
1681
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
1682
    {
1683
      if (!single_pred_p (bb))
1684
        continue;
1685
      e = single_pred_edge (bb);
1686
 
1687
      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
1688
        continue;
1689
 
1690
      stmt = last_stmt (e->src);
1691
      cond = fold_build2 (gimple_cond_code (stmt),
1692
                          boolean_type_node,
1693
                          gimple_cond_lhs (stmt),
1694
                          gimple_cond_rhs (stmt));
1695
      if (e->flags & EDGE_FALSE_VALUE)
1696
        cond = invert_truthvalue (cond);
1697
      expr = tree_simplify_using_condition (cond, expr);
1698
      ++cnt;
1699
    }
1700
 
1701
  return expr;
1702
}
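/* Illustrative sketch (an editorial example, not part of the original
   source; the SSA name n_1 is hypothetical): if the loop header is only
   reached through a guard such as

     if (n_1 > 0)
       ... the loop ...

   then the guard's condition is picked up while walking the dominators
   above, and an assumption like n_1 >= 1 produced by the niter analysis
   can be simplified to true.  */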
1703
 
1704
/* Tries to simplify EXPR using the evolutions of the loop invariants
1705
   in the superloops of LOOP.  Returns the simplified expression
1706
   (or EXPR unchanged, if no simplification was possible).  */
1707
 
1708
static tree
1709
simplify_using_outer_evolutions (struct loop *loop, tree expr)
1710
{
1711
  enum tree_code code = TREE_CODE (expr);
1712
  bool changed;
1713
  tree e, e0, e1, e2;
1714
 
1715
  if (is_gimple_min_invariant (expr))
1716
    return expr;
1717
 
1718
  if (code == TRUTH_OR_EXPR
1719
      || code == TRUTH_AND_EXPR
1720
      || code == COND_EXPR)
1721
    {
1722
      changed = false;
1723
 
1724
      e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
1725
      if (TREE_OPERAND (expr, 0) != e0)
1726
        changed = true;
1727
 
1728
      e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
1729
      if (TREE_OPERAND (expr, 1) != e1)
1730
        changed = true;
1731
 
1732
      if (code == COND_EXPR)
1733
        {
1734
          e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
1735
          if (TREE_OPERAND (expr, 2) != e2)
1736
            changed = true;
1737
        }
1738
      else
1739
        e2 = NULL_TREE;
1740
 
1741
      if (changed)
1742
        {
1743
          if (code == COND_EXPR)
1744
            expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
1745
          else
1746
            expr = fold_build2 (code, boolean_type_node, e0, e1);
1747
        }
1748
 
1749
      return expr;
1750
    }
1751
 
1752
  e = instantiate_parameters (loop, expr);
1753
  if (is_gimple_min_invariant (e))
1754
    return e;
1755
 
1756
  return expr;
1757
}
1758
 
1759
/* Returns true if EXIT is the only possible exit from LOOP.  */
1760
 
1761
bool
1762
loop_only_exit_p (const struct loop *loop, const_edge exit)
1763
{
1764
  basic_block *body;
1765
  gimple_stmt_iterator bsi;
1766
  unsigned i;
1767
  gimple call;
1768
 
1769
  if (exit != single_exit (loop))
1770
    return false;
1771
 
1772
  body = get_loop_body (loop);
1773
  for (i = 0; i < loop->num_nodes; i++)
1774
    {
1775
      for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
1776
        {
1777
          call = gsi_stmt (bsi);
1778
          if (gimple_code (call) != GIMPLE_CALL)
1779
            continue;
1780
 
1781
          if (gimple_has_side_effects (call))
1782
            {
1783
              free (body);
1784
              return false;
1785
            }
1786
        }
1787
    }
1788
 
1789
  free (body);
1790
  return true;
1791
}
1792
 
1793
/* Stores description of number of iterations of LOOP derived from
1794
   EXIT (an exit edge of the LOOP) in NITER.  Returns true if some
1795
   useful information could be derived (and the fields of NITER have the
1796
   meaning described in comments at struct tree_niter_desc
1797
   declaration), false otherwise.  If WARN is true and
1798
   -Wunsafe-loop-optimizations was given, warn if the optimizer is going to use
1799
   potentially unsafe assumptions.  */
1800
 
1801
bool
1802
number_of_iterations_exit (struct loop *loop, edge exit,
1803
                           struct tree_niter_desc *niter,
1804
                           bool warn)
1805
{
1806
  gimple stmt;
1807
  tree type;
1808
  tree op0, op1;
1809
  enum tree_code code;
1810
  affine_iv iv0, iv1;
1811
 
1812
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src))
1813
    return false;
1814
 
1815
  niter->assumptions = boolean_false_node;
1816
  stmt = last_stmt (exit->src);
1817
  if (!stmt || gimple_code (stmt) != GIMPLE_COND)
1818
    return false;
1819
 
1820
  /* We want the condition for staying inside loop.  */
1821
  code = gimple_cond_code (stmt);
1822
  if (exit->flags & EDGE_TRUE_VALUE)
1823
    code = invert_tree_comparison (code, false);
1824
 
1825
  switch (code)
1826
    {
1827
    case GT_EXPR:
1828
    case GE_EXPR:
1829
    case NE_EXPR:
1830
    case LT_EXPR:
1831
    case LE_EXPR:
1832
      break;
1833
 
1834
    default:
1835
      return false;
1836
    }
1837
 
1838
  op0 = gimple_cond_lhs (stmt);
1839
  op1 = gimple_cond_rhs (stmt);
1840
  type = TREE_TYPE (op0);
1841
 
1842
  if (TREE_CODE (type) != INTEGER_TYPE
1843
      && !POINTER_TYPE_P (type))
1844
    return false;
1845
 
1846
  if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, false))
1847
    return false;
1848
  if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, false))
1849
    return false;
1850
 
1851
  /* We don't want to see undefined signed overflow warnings while
1852
     computing the number of iterations.  */
1853
  fold_defer_overflow_warnings ();
1854
 
1855
  iv0.base = expand_simple_operations (iv0.base);
1856
  iv1.base = expand_simple_operations (iv1.base);
1857
  if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
1858
                                  loop_only_exit_p (loop, exit)))
1859
    {
1860
      fold_undefer_and_ignore_overflow_warnings ();
1861
      return false;
1862
    }
1863
 
1864
  if (optimize >= 3)
1865
    {
1866
      niter->assumptions = simplify_using_outer_evolutions (loop,
1867
                                                            niter->assumptions);
1868
      niter->may_be_zero = simplify_using_outer_evolutions (loop,
1869
                                                            niter->may_be_zero);
1870
      niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
1871
    }
1872
 
1873
  niter->assumptions
1874
          = simplify_using_initial_conditions (loop,
1875
                                               niter->assumptions);
1876
  niter->may_be_zero
1877
          = simplify_using_initial_conditions (loop,
1878
                                               niter->may_be_zero);
1879
 
1880
  fold_undefer_and_ignore_overflow_warnings ();
1881
 
1882
  if (integer_onep (niter->assumptions))
1883
    return true;
1884
 
1885
  /* With -funsafe-loop-optimizations we assume that nothing bad can happen.
1886
     But if we can prove that there is overflow or some other source of weird
1887
     behavior, ignore the loop even with -funsafe-loop-optimizations.  */
1888
  if (integer_zerop (niter->assumptions) || !single_exit (loop))
1889
    return false;
1890
 
1891
  if (flag_unsafe_loop_optimizations)
1892
    niter->assumptions = boolean_true_node;
1893
 
1894
  if (warn)
1895
    {
1896
      const char *wording;
1897
      location_t loc = gimple_location (stmt);
1898
 
1899
      /* We can provide a more specific warning if one of the operands is
1900
         constant and the other advances by +1 or -1.  */
1901
      if (!integer_zerop (iv1.step)
1902
          ? (integer_zerop (iv0.step)
1903
             && (integer_onep (iv1.step) || integer_all_onesp (iv1.step)))
1904
          : (integer_onep (iv0.step) || integer_all_onesp (iv0.step)))
1905
        wording =
1906
          flag_unsafe_loop_optimizations
1907
          ? N_("assuming that the loop is not infinite")
1908
          : N_("cannot optimize possibly infinite loops");
1909
      else
1910
        wording =
1911
          flag_unsafe_loop_optimizations
1912
          ? N_("assuming that the loop counter does not overflow")
1913
          : N_("cannot optimize loop, the loop counter may overflow");
1914
 
1915
      warning_at ((LOCATION_LINE (loc) > 0) ? loc : input_location,
1916
                  OPT_Wunsafe_loop_optimizations, "%s", gettext (wording));
1917
    }
1918
 
1919
  return flag_unsafe_loop_optimizations;
1920
}
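/* Illustrative sketch (an editorial example, not part of the original
   source; the variable names are hypothetical): for a loop such as

     void
     clear (unsigned char *p, unsigned int n)
     {
       unsigned int i;
       for (i = 0; i < n; i++)
         p[i] = 0;
     }

   the exit condition compares two simple affine induction variables
   (i with step 1 and the invariant n with step 0), so this function can
   fill in the descriptor: NITER->niter is essentially n (the number of
   latch executions), while NITER->may_be_zero and NITER->assumptions
   cover the n == 0 and overflow corner cases.  */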
1921
 
1922
/* Try to determine the number of iterations of LOOP.  If we succeed,
1923
   an expression giving the number of iterations is returned and *EXIT is
1924
   set to the edge from which the information is obtained.  Otherwise
1925
   chrec_dont_know is returned.  */
1926
 
1927
tree
1928
find_loop_niter (struct loop *loop, edge *exit)
1929
{
1930
  unsigned i;
1931
  VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1932
  edge ex;
1933
  tree niter = NULL_TREE, aniter;
1934
  struct tree_niter_desc desc;
1935
 
1936
  *exit = NULL;
1937
  FOR_EACH_VEC_ELT (edge, exits, i, ex)
1938
    {
1939
      if (!just_once_each_iteration_p (loop, ex->src))
1940
        continue;
1941
 
1942
      if (!number_of_iterations_exit (loop, ex, &desc, false))
1943
        continue;
1944
 
1945
      if (integer_nonzerop (desc.may_be_zero))
1946
        {
1947
          /* We exit in the first iteration through this exit.
1948
             We won't find anything better.  */
1949
          niter = build_int_cst (unsigned_type_node, 0);
1950
          *exit = ex;
1951
          break;
1952
        }
1953
 
1954
      if (!integer_zerop (desc.may_be_zero))
1955
        continue;
1956
 
1957
      aniter = desc.niter;
1958
 
1959
      if (!niter)
1960
        {
1961
          /* Nothing recorded yet.  */
1962
          niter = aniter;
1963
          *exit = ex;
1964
          continue;
1965
        }
1966
 
1967
      /* Prefer constants, the lower the better.  */
1968
      if (TREE_CODE (aniter) != INTEGER_CST)
1969
        continue;
1970
 
1971
      if (TREE_CODE (niter) != INTEGER_CST)
1972
        {
1973
          niter = aniter;
1974
          *exit = ex;
1975
          continue;
1976
        }
1977
 
1978
      if (tree_int_cst_lt (aniter, niter))
1979
        {
1980
          niter = aniter;
1981
          *exit = ex;
1982
          continue;
1983
        }
1984
    }
1985
  VEC_free (edge, heap, exits);
1986
 
1987
  return niter ? niter : chrec_dont_know;
1988
}
1989
 
1990
/* Return true if LOOP is known to have a bounded number of iterations.  */
1991
 
1992
bool
1993
finite_loop_p (struct loop *loop)
1994
{
1995
  unsigned i;
1996
  VEC (edge, heap) *exits;
1997
  edge ex;
1998
  struct tree_niter_desc desc;
1999
  bool finite = false;
2000
  int flags;
2001
 
2002
  if (flag_unsafe_loop_optimizations)
2003
    return true;
2004
  flags = flags_from_decl_or_type (current_function_decl);
2005
  if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
2006
    {
2007
      if (dump_file && (dump_flags & TDF_DETAILS))
2008
        fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
2009
                 loop->num);
2010
      return true;
2011
    }
2012
 
2013
  exits = get_loop_exit_edges (loop);
2014
  FOR_EACH_VEC_ELT (edge, exits, i, ex)
2015
    {
2016
      if (!just_once_each_iteration_p (loop, ex->src))
2017
        continue;
2018
 
2019
      if (number_of_iterations_exit (loop, ex, &desc, false))
2020
        {
2021
          if (dump_file && (dump_flags & TDF_DETAILS))
2022
            {
2023
              fprintf (dump_file, "Found loop %i to be finite: iterating ", loop->num);
2024
              print_generic_expr (dump_file, desc.niter, TDF_SLIM);
2025
              fprintf (dump_file, " times\n");
2026
            }
2027
          finite = true;
2028
          break;
2029
        }
2030
    }
2031
  VEC_free (edge, heap, exits);
2032
  return finite;
2033
}
2034
 
2035
/*
2036
 
2037
   Analysis of the number of iterations of a loop by brute-force evaluation.
2038
 
2039
*/
2040
 
2041
/* Bound on the number of iterations we try to evaluate.  */
2042
 
2043
#define MAX_ITERATIONS_TO_TRACK \
2044
  ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
2045
 
2046
/* Returns the loop phi node of LOOP such that ssa name X is derived from its
2047
   result by a chain of operations such that all but exactly one of their
2048
   operands are constants.  */
2049
 
2050
static gimple
2051
chain_of_csts_start (struct loop *loop, tree x)
2052
{
2053
  gimple stmt = SSA_NAME_DEF_STMT (x);
2054
  tree use;
2055
  basic_block bb = gimple_bb (stmt);
2056
  enum tree_code code;
2057
 
2058
  if (!bb
2059
      || !flow_bb_inside_loop_p (loop, bb))
2060
    return NULL;
2061
 
2062
  if (gimple_code (stmt) == GIMPLE_PHI)
2063
    {
2064
      if (bb == loop->header)
2065
        return stmt;
2066
 
2067
      return NULL;
2068
    }
2069
 
2070
  if (gimple_code (stmt) != GIMPLE_ASSIGN)
2071
    return NULL;
2072
 
2073
  code = gimple_assign_rhs_code (stmt);
2074
  if (gimple_references_memory_p (stmt)
2075
      || TREE_CODE_CLASS (code) == tcc_reference
2076
      || (code == ADDR_EXPR
2077
          && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
2078
    return NULL;
2079
 
2080
  use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
2081
  if (use == NULL_TREE)
2082
    return NULL;
2083
 
2084
  return chain_of_csts_start (loop, use);
2085
}
2086
 
2087
/* Determines whether the expression X is derived from a result of a phi node
2088
   in header of LOOP such that
2089
 
2090
   * the derivation of X consists only of operations with constants
2091
   * the initial value of the phi node is constant
2092
   * the value of the phi node in the next iteration can be derived from the
2093
     value in the current iteration by a chain of operations with constants.
2094
 
2095
   If such a phi node exists, it is returned, otherwise NULL is returned.  */
2096
 
2097
static gimple
2098
get_base_for (struct loop *loop, tree x)
2099
{
2100
  gimple phi;
2101
  tree init, next;
2102
 
2103
  if (is_gimple_min_invariant (x))
2104
    return NULL;
2105
 
2106
  phi = chain_of_csts_start (loop, x);
2107
  if (!phi)
2108
    return NULL;
2109
 
2110
  init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2111
  next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2112
 
2113
  if (TREE_CODE (next) != SSA_NAME)
2114
    return NULL;
2115
 
2116
  if (!is_gimple_min_invariant (init))
2117
    return NULL;
2118
 
2119
  if (chain_of_csts_start (loop, next) != phi)
2120
    return NULL;
2121
 
2122
  return phi;
2123
}
2124
 
2125
/* Given an expression X, then
2126
 
2127
   * if X is NULL_TREE, we return the constant BASE.
2128
   * otherwise X is an SSA name, whose value in the considered loop is derived
2129
     by a chain of operations with constant from a result of a phi node in
2130
     the header of the loop.  Then we return value of X when the value of the
2131
     result of this phi node is given by the constant BASE.  */
2132
 
2133
static tree
2134
get_val_for (tree x, tree base)
2135
{
2136
  gimple stmt;
2137
 
2138
  gcc_assert (is_gimple_min_invariant (base));
2139
 
2140
  if (!x)
2141
    return base;
2142
 
2143
  stmt = SSA_NAME_DEF_STMT (x);
2144
  if (gimple_code (stmt) == GIMPLE_PHI)
2145
    return base;
2146
 
2147
  gcc_assert (is_gimple_assign (stmt));
2148
 
2149
  /* STMT must be either an assignment of a single SSA name or an
2150
     expression involving an SSA name and a constant.  Try to fold that
2151
     expression using the value for the SSA name.  */
2152
  if (gimple_assign_ssa_name_copy_p (stmt))
2153
    return get_val_for (gimple_assign_rhs1 (stmt), base);
2154
  else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
2155
           && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
2156
    {
2157
      return fold_build1 (gimple_assign_rhs_code (stmt),
2158
                          gimple_expr_type (stmt),
2159
                          get_val_for (gimple_assign_rhs1 (stmt), base));
2160
    }
2161
  else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
2162
    {
2163
      tree rhs1 = gimple_assign_rhs1 (stmt);
2164
      tree rhs2 = gimple_assign_rhs2 (stmt);
2165
      if (TREE_CODE (rhs1) == SSA_NAME)
2166
        rhs1 = get_val_for (rhs1, base);
2167
      else if (TREE_CODE (rhs2) == SSA_NAME)
2168
        rhs2 = get_val_for (rhs2, base);
2169
      else
2170
        gcc_unreachable ();
2171
      return fold_build2 (gimple_assign_rhs_code (stmt),
2172
                          gimple_expr_type (stmt), rhs1, rhs2);
2173
    }
2174
  else
2175
    gcc_unreachable ();
2176
}
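/* Illustrative sketch (an editorial example, not part of the original
   source; the SSA names are hypothetical): if i_3 is defined as

     i_3 = i_2 * 2;

   where i_2 is the result of the loop header phi, then
   get_val_for (i_3, build_int_cst (type, 4)) substitutes 4 for the phi
   result and folds the multiplication, returning the constant 8.  */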
2177
 
2178
 
2179
/* Tries to count the number of iterations of LOOP until it exits through EXIT
2180
   by brute force -- i.e. by determining the value of the operands of the
2181
   condition at EXIT in the first few iterations of the loop (assuming that
2182
   these values are constant) and determining the first iteration in which the
2183
   condition is not satisfied.  Returns the constant giving the number
2184
   of the iterations of LOOP if successful, chrec_dont_know otherwise.  */
2185
 
2186
tree
2187
loop_niter_by_eval (struct loop *loop, edge exit)
2188
{
2189
  tree acnd;
2190
  tree op[2], val[2], next[2], aval[2];
2191
  gimple phi, cond;
2192
  unsigned i, j;
2193
  enum tree_code cmp;
2194
 
2195
  cond = last_stmt (exit->src);
2196
  if (!cond || gimple_code (cond) != GIMPLE_COND)
2197
    return chrec_dont_know;
2198
 
2199
  cmp = gimple_cond_code (cond);
2200
  if (exit->flags & EDGE_TRUE_VALUE)
2201
    cmp = invert_tree_comparison (cmp, false);
2202
 
2203
  switch (cmp)
2204
    {
2205
    case EQ_EXPR:
2206
    case NE_EXPR:
2207
    case GT_EXPR:
2208
    case GE_EXPR:
2209
    case LT_EXPR:
2210
    case LE_EXPR:
2211
      op[0] = gimple_cond_lhs (cond);
2212
      op[1] = gimple_cond_rhs (cond);
2213
      break;
2214
 
2215
    default:
2216
      return chrec_dont_know;
2217
    }
2218
 
2219
  for (j = 0; j < 2; j++)
2220
    {
2221
      if (is_gimple_min_invariant (op[j]))
2222
        {
2223
          val[j] = op[j];
2224
          next[j] = NULL_TREE;
2225
          op[j] = NULL_TREE;
2226
        }
2227
      else
2228
        {
2229
          phi = get_base_for (loop, op[j]);
2230
          if (!phi)
2231
            return chrec_dont_know;
2232
          val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2233
          next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2234
        }
2235
    }
2236
 
2237
  /* Don't issue signed overflow warnings.  */
2238
  fold_defer_overflow_warnings ();
2239
 
2240
  for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
2241
    {
2242
      for (j = 0; j < 2; j++)
2243
        aval[j] = get_val_for (op[j], val[j]);
2244
 
2245
      acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
2246
      if (acnd && integer_zerop (acnd))
2247
        {
2248
          fold_undefer_and_ignore_overflow_warnings ();
2249
          if (dump_file && (dump_flags & TDF_DETAILS))
2250
            fprintf (dump_file,
2251
                     "Proved that loop %d iterates %d times using brute force.\n",
2252
                     loop->num, i);
2253
          return build_int_cst (unsigned_type_node, i);
2254
        }
2255
 
2256
      for (j = 0; j < 2; j++)
2257
        {
2258
          val[j] = get_val_for (next[j], val[j]);
2259
          if (!is_gimple_min_invariant (val[j]))
2260
            {
2261
              fold_undefer_and_ignore_overflow_warnings ();
2262
              return chrec_dont_know;
2263
            }
2264
        }
2265
    }
2266
 
2267
  fold_undefer_and_ignore_overflow_warnings ();
2268
 
2269
  return chrec_dont_know;
2270
}
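/* Illustrative sketch (an editorial example, not part of the original
   source): the brute-force evaluation handles loops like

     unsigned int i = 1;
     while (i != 16)
       i *= 2;

   whose evolution is not affine and therefore out of reach of the
   analysis above.  Both operands of the exit test satisfy get_base_for,
   so the successive values 1, 2, 4, 8, 16 of i are computed directly and
   the iteration in which the exit condition first fails yields the
   constant number of iterations.  */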
2271
 
2272
/* Finds the exit of the LOOP through which the loop exits after a constant
2273
   number of iterations and stores the exit edge in *EXIT.  The constant
2274
   giving the number of iterations of LOOP is returned.  The number of
2275
   iterations is determined using loop_niter_by_eval (i.e. by brute force
2276
   evaluation).  If we are unable to find an exit for which loop_niter_by_eval
2277
   determines the number of iterations, chrec_dont_know is returned.  */
2278
 
2279
tree
2280
find_loop_niter_by_eval (struct loop *loop, edge *exit)
2281
{
2282
  unsigned i;
2283
  VEC (edge, heap) *exits = get_loop_exit_edges (loop);
2284
  edge ex;
2285
  tree niter = NULL_TREE, aniter;
2286
 
2287
  *exit = NULL;
2288
 
2289
  /* Loops with multiple exits are expensive to handle and less important.  */
2290
  if (!flag_expensive_optimizations
2291
      && VEC_length (edge, exits) > 1)
2292
    return chrec_dont_know;
2293
 
2294
  FOR_EACH_VEC_ELT (edge, exits, i, ex)
2295
    {
2296
      if (!just_once_each_iteration_p (loop, ex->src))
2297
        continue;
2298
 
2299
      aniter = loop_niter_by_eval (loop, ex);
2300
      if (chrec_contains_undetermined (aniter))
2301
        continue;
2302
 
2303
      if (niter
2304
          && !tree_int_cst_lt (aniter, niter))
2305
        continue;
2306
 
2307
      niter = aniter;
2308
      *exit = ex;
2309
    }
2310
  VEC_free (edge, heap, exits);
2311
 
2312
  return niter ? niter : chrec_dont_know;
2313
}
2314
 
2315
/*
2316
 
2317
   Analysis of upper bounds on the number of iterations of a loop.
2318
 
2319
*/
2320
 
2321
static double_int derive_constant_upper_bound_ops (tree, tree,
2322
                                                   enum tree_code, tree);
2323
 
2324
/* Returns a constant upper bound on the value of the right-hand side of
2325
   an assignment statement STMT.  */
2326
 
2327
static double_int
2328
derive_constant_upper_bound_assign (gimple stmt)
2329
{
2330
  enum tree_code code = gimple_assign_rhs_code (stmt);
2331
  tree op0 = gimple_assign_rhs1 (stmt);
2332
  tree op1 = gimple_assign_rhs2 (stmt);
2333
 
2334
  return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
2335
                                          op0, code, op1);
2336
}
2337
 
2338
/* Returns a constant upper bound on the value of expression VAL.  VAL
2339
   is considered to be unsigned.  If its type is signed, its value must
2340
   be nonnegative.  */
2341
 
2342
static double_int
2343
derive_constant_upper_bound (tree val)
2344
{
2345
  enum tree_code code;
2346
  tree op0, op1;
2347
 
2348
  extract_ops_from_tree (val, &code, &op0, &op1);
2349
  return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
2350
}
2351
 
2352
/* Returns a constant upper bound on the value of expression OP0 CODE OP1,
2353
   whose type is TYPE.  The expression is considered to be unsigned.  If
2354
   its type is signed, its value must be nonnegative.  */
2355
 
2356
static double_int
2357
derive_constant_upper_bound_ops (tree type, tree op0,
2358
                                 enum tree_code code, tree op1)
2359
{
2360
  tree subtype, maxt;
2361
  double_int bnd, max, mmax, cst;
2362
  gimple stmt;
2363
 
2364
  if (INTEGRAL_TYPE_P (type))
2365
    maxt = TYPE_MAX_VALUE (type);
2366
  else
2367
    maxt = upper_bound_in_type (type, type);
2368
 
2369
  max = tree_to_double_int (maxt);
2370
 
2371
  switch (code)
2372
    {
2373
    case INTEGER_CST:
2374
      return tree_to_double_int (op0);
2375
 
2376
    CASE_CONVERT:
2377
      subtype = TREE_TYPE (op0);
2378
      if (!TYPE_UNSIGNED (subtype)
2379
          /* If TYPE is also signed, the fact that VAL is nonnegative implies
2380
             that OP0 is nonnegative.  */
2381
          && TYPE_UNSIGNED (type)
2382
          && !tree_expr_nonnegative_p (op0))
2383
        {
2384
          /* If we cannot prove that the casted expression is nonnegative,
2385
             we cannot establish more useful upper bound than the precision
2386
             of the type gives us.  */
2387
          return max;
2388
        }
2389
 
2390
      /* We now know that op0 is a nonnegative value.  Try deriving an upper
2391
         bound for it.  */
2392
      bnd = derive_constant_upper_bound (op0);
2393
 
2394
      /* If the bound does not fit in TYPE, the maximum value of TYPE could be
2395
         attained.  */
2396
      if (double_int_ucmp (max, bnd) < 0)
2397
        return max;
2398
 
2399
      return bnd;
2400
 
2401
    case PLUS_EXPR:
2402
    case POINTER_PLUS_EXPR:
2403
    case MINUS_EXPR:
2404
      if (TREE_CODE (op1) != INTEGER_CST
2405
          || !tree_expr_nonnegative_p (op0))
2406
        return max;
2407
 
2408
      /* Canonicalize to OP0 - CST.  Consider CST to be signed, in order to
2409
         choose the most logical way to treat this constant regardless
2410
         of the signedness of the type.  */
2411
      cst = tree_to_double_int (op1);
2412
      cst = double_int_sext (cst, TYPE_PRECISION (type));
2413
      if (code != MINUS_EXPR)
2414
        cst = double_int_neg (cst);
2415
 
2416
      bnd = derive_constant_upper_bound (op0);
2417
 
2418
      if (double_int_negative_p (cst))
2419
        {
2420
          cst = double_int_neg (cst);
2421
          /* Avoid CST == 0x80000...  */
2422
          if (double_int_negative_p (cst))
2423
            return max;
2424
 
2425
          /* OP0 + CST.  We need to check that
2426
             BND <= MAX (type) - CST.  */
2427
 
2428
          mmax = double_int_sub (max, cst);
2429
          if (double_int_ucmp (bnd, mmax) > 0)
2430
            return max;
2431
 
2432
          return double_int_add (bnd, cst);
2433
        }
2434
      else
2435
        {
2436
          /* OP0 - CST, where CST >= 0.
2437
 
2438
             If TYPE is signed, we have already verified that OP0 >= 0, and we
2439
             know that the result is nonnegative.  This implies that
2440
             VAL <= BND - CST.
2441
 
2442
             If TYPE is unsigned, we must additionally know that OP0 >= CST,
2443
             otherwise the operation underflows.
2444
           */
2445
 
2446
          /* This should only happen if the type is unsigned; however, for
2447
             buggy programs that use overflowing signed arithmetic even with
2448
             -fno-wrapv, this condition may also be true for signed values.  */
2449
          if (double_int_ucmp (bnd, cst) < 0)
2450
            return max;
2451
 
2452
          if (TYPE_UNSIGNED (type))
2453
            {
2454
              tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
2455
                                      double_int_to_tree (type, cst));
2456
              if (!tem || integer_nonzerop (tem))
2457
                return max;
2458
            }
2459
 
2460
          bnd = double_int_sub (bnd, cst);
2461
        }
2462
 
2463
      return bnd;
2464
 
2465
    case FLOOR_DIV_EXPR:
2466
    case EXACT_DIV_EXPR:
2467
      if (TREE_CODE (op1) != INTEGER_CST
2468
          || tree_int_cst_sign_bit (op1))
2469
        return max;
2470
 
2471
      bnd = derive_constant_upper_bound (op0);
2472
      return double_int_udiv (bnd, tree_to_double_int (op1), FLOOR_DIV_EXPR);
2473
 
2474
    case BIT_AND_EXPR:
2475
      if (TREE_CODE (op1) != INTEGER_CST
2476
          || tree_int_cst_sign_bit (op1))
2477
        return max;
2478
      return tree_to_double_int (op1);
2479
 
2480
    case SSA_NAME:
2481
      stmt = SSA_NAME_DEF_STMT (op0);
2482
      if (gimple_code (stmt) != GIMPLE_ASSIGN
2483
          || gimple_assign_lhs (stmt) != op0)
2484
        return max;
2485
      return derive_constant_upper_bound_assign (stmt);
2486
 
2487
    default:
2488
      return max;
2489
    }
2490
}
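/* Illustrative sketch (an editorial example, not part of the original
   source; the SSA names are hypothetical): suppose VAL is w_2 + 5, where
   w_2 is defined as (unsigned int) c_1 and c_1 has type unsigned char.
   The SSA_NAME case looks through the definition of w_2, the CASE_CONVERT
   case bounds the converted value by 255, and the PLUS_EXPR case then
   yields the constant upper bound 255 + 5 = 260, even though nothing else
   is known about c_1.  */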
2491
 
2492
/* Records that every statement in LOOP is executed I_BOUND times.
2493
   REALISTIC is true if I_BOUND is expected to be close to the real number
2494
   of iterations.  UPPER is true if we are sure the loop iterates at most
2495
   I_BOUND times.  */
2496
 
2497
static void
2498
record_niter_bound (struct loop *loop, double_int i_bound, bool realistic,
2499
                    bool upper)
2500
{
2501
  /* Update the bounds only when there is no previous estimate, or when the current
2502
     estimate is smaller.  */
2503
  if (upper
2504
      && (!loop->any_upper_bound
2505
          || double_int_ucmp (i_bound, loop->nb_iterations_upper_bound) < 0))
2506
    {
2507
      loop->any_upper_bound = true;
2508
      loop->nb_iterations_upper_bound = i_bound;
2509
    }
2510
  if (realistic
2511
      && (!loop->any_estimate
2512
          || double_int_ucmp (i_bound, loop->nb_iterations_estimate) < 0))
2513
    {
2514
      loop->any_estimate = true;
2515
      loop->nb_iterations_estimate = i_bound;
2516
    }
2517
}
2518
 
2519
/* Records that AT_STMT is executed at most BOUND + 1 times in LOOP.  IS_EXIT
2520
   is true if the loop is exited immediately after STMT, and this exit
2521
   is taken at the latest when STMT has been executed BOUND + 1 times.
2522
   REALISTIC is true if BOUND is expected to be close to the real number
2523
   of iterations.  UPPER is true if we are sure the loop iterates at most
2524
   BOUND times.  I_BOUND is an unsigned double_int upper estimate on BOUND.  */
2525
 
2526
static void
2527
record_estimate (struct loop *loop, tree bound, double_int i_bound,
2528
                 gimple at_stmt, bool is_exit, bool realistic, bool upper)
2529
{
2530
  double_int delta;
2531
  edge exit;
2532
 
2533
  if (dump_file && (dump_flags & TDF_DETAILS))
2534
    {
2535
      fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
2536
      print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
2537
      fprintf (dump_file, " is %sexecuted at most ",
2538
               upper ? "" : "probably ");
2539
      print_generic_expr (dump_file, bound, TDF_SLIM);
2540
      fprintf (dump_file, " (bounded by ");
2541
      dump_double_int (dump_file, i_bound, true);
2542
      fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
2543
    }
2544
 
2545
  /* If I_BOUND is just an estimate of BOUND, it is rarely close to the
2546
     real number of iterations.  */
2547
  if (TREE_CODE (bound) != INTEGER_CST)
2548
    realistic = false;
2549
  if (!upper && !realistic)
2550
    return;
2551
 
2552
  /* If we have a guaranteed upper bound, record it in the appropriate
2553
     list.  */
2554
  if (upper)
2555
    {
2556
      struct nb_iter_bound *elt = ggc_alloc_nb_iter_bound ();
2557
 
2558
      elt->bound = i_bound;
2559
      elt->stmt = at_stmt;
2560
      elt->is_exit = is_exit;
2561
      elt->next = loop->bounds;
2562
      loop->bounds = elt;
2563
    }
2564
 
2565
  /* Update the estimates on the number of iterations according to the bound.
2566
     If at_stmt is an exit or dominates the single exit from the loop,
2567
     then the loop latch is executed at most BOUND times, otherwise
2568
     it can be executed BOUND + 1 times.  */
2569
  exit = single_exit (loop);
2570
  if (is_exit
2571
      || (exit != NULL
2572
          && dominated_by_p (CDI_DOMINATORS,
2573
                             exit->src, gimple_bb (at_stmt))))
2574
    delta = double_int_zero;
2575
  else
2576
    delta = double_int_one;
2577
  i_bound = double_int_add (i_bound, delta);
2578
 
2579
  /* If an overflow occurred, ignore the result.  */
2580
  if (double_int_ucmp (i_bound, delta) < 0)
2581
    return;
2582
 
2583
  record_niter_bound (loop, i_bound, realistic, upper);
2584
}
2585
 
2586
/* Record the estimate on number of iterations of LOOP based on the fact that
2587
   the induction variable BASE + STEP * i evaluated in STMT does not wrap and
2588
   its values belong to the range <LOW, HIGH>.  REALISTIC is true if the
2589
   estimated number of iterations is expected to be close to the real one.
2590
   UPPER is true if we are sure the induction variable does not wrap.  */
2591
 
2592
static void
2593
record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt,
2594
                       tree low, tree high, bool realistic, bool upper)
2595
{
2596
  tree niter_bound, extreme, delta;
2597
  tree type = TREE_TYPE (base), unsigned_type;
2598
  double_int max;
2599
 
2600
  if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
2601
    return;
2602
 
2603
  if (dump_file && (dump_flags & TDF_DETAILS))
2604
    {
2605
      fprintf (dump_file, "Induction variable (");
2606
      print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
2607
      fprintf (dump_file, ") ");
2608
      print_generic_expr (dump_file, base, TDF_SLIM);
2609
      fprintf (dump_file, " + ");
2610
      print_generic_expr (dump_file, step, TDF_SLIM);
2611
      fprintf (dump_file, " * iteration does not wrap in statement ");
2612
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2613
      fprintf (dump_file, " in loop %d.\n", loop->num);
2614
    }
2615
 
2616
  unsigned_type = unsigned_type_for (type);
2617
  base = fold_convert (unsigned_type, base);
2618
  step = fold_convert (unsigned_type, step);
2619
 
2620
  if (tree_int_cst_sign_bit (step))
2621
    {
2622
      extreme = fold_convert (unsigned_type, low);
2623
      if (TREE_CODE (base) != INTEGER_CST)
2624
        base = fold_convert (unsigned_type, high);
2625
      delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
2626
      step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
2627
    }
2628
  else
2629
    {
2630
      extreme = fold_convert (unsigned_type, high);
2631
      if (TREE_CODE (base) != INTEGER_CST)
2632
        base = fold_convert (unsigned_type, low);
2633
      delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
2634
    }
2635
 
2636
  /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
2637
     would get out of the range.  */
2638
  niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
2639
  max = derive_constant_upper_bound (niter_bound);
2640
  record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
2641
}
2642
 
2643
/* Returns true if REF is a reference to an array at the end of a dynamically
2644
   allocated structure.  If this is the case, the array may be allocated larger
2645
   than its upper bound implies.  */
2646
 
2647
bool
2648
array_at_struct_end_p (tree ref)
2649
{
2650
  tree base = get_base_address (ref);
2651
  tree parent, field;
2652
 
2653
  /* Unless the reference is through a pointer, the size of the array matches
2654
     its declaration.  */
2655
  if (!base || (!INDIRECT_REF_P (base) && TREE_CODE (base) != MEM_REF))
2656
    return false;
2657
 
2658
  for (;handled_component_p (ref); ref = parent)
2659
    {
2660
      parent = TREE_OPERAND (ref, 0);
2661
 
2662
      if (TREE_CODE (ref) == COMPONENT_REF)
2663
        {
2664
          /* All fields of a union are at its end.  */
2665
          if (TREE_CODE (TREE_TYPE (parent)) == UNION_TYPE)
2666
            continue;
2667
 
2668
          /* Unless the field is at the end of the struct, we are done.  */
2669
          field = TREE_OPERAND (ref, 1);
2670
          if (DECL_CHAIN (field))
2671
            return false;
2672
        }
2673
 
2674
      /* The other options are ARRAY_REF, ARRAY_RANGE_REF, VIEW_CONVERT_EXPR.
2675
         In all these cases, we might be accessing the last element, and
2676
         although in practice this will probably never happen, it is legal for
2677
         the indices of this last element to exceed the bounds of the array.
2678
         Therefore, continue checking.  */
2679
    }
2680
 
2681
  return true;
2682
}
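/* Illustrative sketch (an editorial example, not part of the original
   source; the type and names are hypothetical): for

     struct s { int n; int tail[1]; };

   a reference like ((struct s *) p_1)->tail[i_2] is accessed through a
   pointer and TAIL is the last field of the structure, so this predicate
   returns true: the object may have been allocated with room for more
   than one element, and the declared upper bound of TAIL cannot be
   trusted when inferring loop bounds below.  */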
2683
 
2684
/* Determine information about the number of iterations of LOOP from the index
2685
   IDX of a data reference accessed in STMT.  RELIABLE is true if STMT is
2686
   guaranteed to be executed in every iteration of LOOP.  Callback for
2687
   for_each_index.  */
2688
 
2689
struct ilb_data
2690
{
2691
  struct loop *loop;
2692
  gimple stmt;
2693
  bool reliable;
2694
};
2695
 
2696
static bool
2697
idx_infer_loop_bounds (tree base, tree *idx, void *dta)
2698
{
2699
  struct ilb_data *data = (struct ilb_data *) dta;
2700
  tree ev, init, step;
2701
  tree low, high, type, next;
2702
  bool sign, upper = data->reliable, at_end = false;
2703
  struct loop *loop = data->loop;
2704
 
2705
  if (TREE_CODE (base) != ARRAY_REF)
2706
    return true;
2707
 
2708
  /* For arrays at the end of the structure, we are not guaranteed that they
2709
      do not really extend beyond their declared size.  However, for arrays of
2710
     size greater than one, this is unlikely to be intended.  */
2711
  if (array_at_struct_end_p (base))
2712
    {
2713
      at_end = true;
2714
      upper = false;
2715
    }
2716
 
2717
  ev = instantiate_parameters (loop, analyze_scalar_evolution (loop, *idx));
2718
  init = initial_condition (ev);
2719
  step = evolution_part_in_loop_num (ev, loop->num);
2720
 
2721
  if (!init
2722
      || !step
2723
      || TREE_CODE (step) != INTEGER_CST
2724
      || integer_zerop (step)
2725
      || tree_contains_chrecs (init, NULL)
2726
      || chrec_contains_symbols_defined_in_loop (init, loop->num))
2727
    return true;
2728
 
2729
  low = array_ref_low_bound (base);
2730
  high = array_ref_up_bound (base);
2731
 
2732
  /* The case of nonconstant bounds could be handled, but it would be
2733
     complicated.  */
2734
  if (TREE_CODE (low) != INTEGER_CST
2735
      || !high
2736
      || TREE_CODE (high) != INTEGER_CST)
2737
    return true;
2738
  sign = tree_int_cst_sign_bit (step);
2739
  type = TREE_TYPE (step);
2740
 
2741
  /* An array of length 1 at the end of a structure most likely extends
2742
     beyond its bounds.  */
2743
  if (at_end
2744
      && operand_equal_p (low, high, 0))
2745
    return true;
2746
 
2747
  /* In case the relevant bound of the array does not fit in type, or
2748
      it does, but bound + step (in type) still belongs to the range of the
2749
     array, the index may wrap and still stay within the range of the array
2750
     (consider e.g. if the array is indexed by the full range of
2751
     unsigned char).
2752
 
2753
     To make things simpler, we require both bounds to fit into type, although
2754
     there are cases where this would not be strictly necessary.  */
2755
  if (!int_fits_type_p (high, type)
2756
      || !int_fits_type_p (low, type))
2757
    return true;
2758
  low = fold_convert (type, low);
2759
  high = fold_convert (type, high);
2760
 
2761
  if (sign)
2762
    next = fold_binary (PLUS_EXPR, type, low, step);
2763
  else
2764
    next = fold_binary (PLUS_EXPR, type, high, step);
2765
 
2766
  if (tree_int_cst_compare (low, next) <= 0
2767
      && tree_int_cst_compare (next, high) <= 0)
2768
    return true;
2769
 
2770
  record_nonwrapping_iv (loop, init, step, data->stmt, low, high, true, upper);
2771
  return true;
2772
}
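/* Illustrative sketch (an editorial example, not part of the original
   source; the names are hypothetical): in

     int a[100];
     ...
     for (i = 0; i != n; i++)
       a[i] = 0;

   the access a[i] is undefined unless i stays within [0, 99], so the
   store can execute at most 100 times; record_nonwrapping_iv turns this
   into an upper bound on the number of iterations even though n itself
   is unknown.  */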
2773
 
2774
/* Determine information about the number of iterations of LOOP from the bounds
2775
   of arrays in the data reference REF accessed in STMT.  RELIABLE is true if
2776
   STMT is guaranteed to be executed in every iteration of LOOP.  */
2777
 
2778
static void
2779
infer_loop_bounds_from_ref (struct loop *loop, gimple stmt, tree ref,
2780
                            bool reliable)
2781
{
2782
  struct ilb_data data;
2783
 
2784
  data.loop = loop;
2785
  data.stmt = stmt;
2786
  data.reliable = reliable;
2787
  for_each_index (&ref, idx_infer_loop_bounds, &data);
2788
}
2789
 
2790
/* Determine information about number of iterations of a LOOP from the way
2791
   arrays are used in STMT.  RELIABLE is true if STMT is guaranteed to be
2792
   executed in every iteration of LOOP.  */
2793
 
2794
static void
2795
infer_loop_bounds_from_array (struct loop *loop, gimple stmt, bool reliable)
2796
{
2797
  if (is_gimple_assign (stmt))
2798
    {
2799
      tree op0 = gimple_assign_lhs (stmt);
2800
      tree op1 = gimple_assign_rhs1 (stmt);
2801
 
2802
      /* For each memory access, analyze its access function
2803
         and record a bound on the loop iteration domain.  */
2804
      if (REFERENCE_CLASS_P (op0))
2805
        infer_loop_bounds_from_ref (loop, stmt, op0, reliable);
2806
 
2807
      if (REFERENCE_CLASS_P (op1))
2808
        infer_loop_bounds_from_ref (loop, stmt, op1, reliable);
2809
    }
2810
  else if (is_gimple_call (stmt))
2811
    {
2812
      tree arg, lhs;
2813
      unsigned i, n = gimple_call_num_args (stmt);
2814
 
2815
      lhs = gimple_call_lhs (stmt);
2816
      if (lhs && REFERENCE_CLASS_P (lhs))
2817
        infer_loop_bounds_from_ref (loop, stmt, lhs, reliable);
2818
 
2819
      for (i = 0; i < n; i++)
2820
        {
2821
          arg = gimple_call_arg (stmt, i);
2822
          if (REFERENCE_CLASS_P (arg))
2823
            infer_loop_bounds_from_ref (loop, stmt, arg, reliable);
2824
        }
2825
    }
2826
}
2827
 
2828
/* Determine information about number of iterations of a LOOP from the fact
2829
   that pointer arithmetic in STMT does not overflow.  */
2830
 
2831
static void
2832
infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple stmt)
2833
{
2834
  tree def, base, step, scev, type, low, high;
2835
  tree var, ptr;
2836
 
2837
  if (!is_gimple_assign (stmt)
2838
      || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
2839
    return;
2840
 
2841
  def = gimple_assign_lhs (stmt);
2842
  if (TREE_CODE (def) != SSA_NAME)
2843
    return;
2844
 
2845
  type = TREE_TYPE (def);
2846
  if (!nowrap_type_p (type))
2847
    return;
2848
 
2849
  ptr = gimple_assign_rhs1 (stmt);
2850
  if (!expr_invariant_in_loop_p (loop, ptr))
2851
    return;
2852
 
2853
  var = gimple_assign_rhs2 (stmt);
2854
  if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
2855
    return;
2856
 
2857
  scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
2858
  if (chrec_contains_undetermined (scev))
2859
    return;
2860
 
2861
  base = initial_condition_in_loop_num (scev, loop->num);
2862
  step = evolution_part_in_loop_num (scev, loop->num);
2863
 
2864
  if (!base || !step
2865
      || TREE_CODE (step) != INTEGER_CST
2866
      || tree_contains_chrecs (base, NULL)
2867
      || chrec_contains_symbols_defined_in_loop (base, loop->num))
2868
    return;
2869
 
2870
  low = lower_bound_in_type (type, type);
2871
  high = upper_bound_in_type (type, type);
2872
 
2873
  /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
2874
     produce a NULL pointer.  The contrary would mean NULL points to an object,
2875
     while NULL is supposed to compare unequal with the address of all objects.
2876
     Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
2877
     NULL pointer since that would mean wrapping, which we assume here not to
2878
     happen.  So, we can exclude NULL from the valid range of pointer
2879
     arithmetic.  */
2880
  if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
2881
    low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));
2882
 
2883
  record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
2884
}
2885
 
2886
/* Determine information about number of iterations of a LOOP from the fact
2887
   that signed arithmetic in STMT does not overflow.  */
2888
 
2889
static void
2890
infer_loop_bounds_from_signedness (struct loop *loop, gimple stmt)
2891
{
2892
  tree def, base, step, scev, type, low, high;
2893
 
2894
  if (gimple_code (stmt) != GIMPLE_ASSIGN)
2895
    return;
2896
 
2897
  def = gimple_assign_lhs (stmt);
2898
 
2899
  if (TREE_CODE (def) != SSA_NAME)
2900
    return;
2901
 
2902
  type = TREE_TYPE (def);
2903
  if (!INTEGRAL_TYPE_P (type)
2904
      || !TYPE_OVERFLOW_UNDEFINED (type))
2905
    return;
2906
 
2907
  scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
2908
  if (chrec_contains_undetermined (scev))
2909
    return;
2910
 
2911
  base = initial_condition_in_loop_num (scev, loop->num);
2912
  step = evolution_part_in_loop_num (scev, loop->num);
2913
 
2914
  if (!base || !step
2915
      || TREE_CODE (step) != INTEGER_CST
2916
      || tree_contains_chrecs (base, NULL)
2917
      || chrec_contains_symbols_defined_in_loop (base, loop->num))
2918
    return;
2919
 
2920
  low = lower_bound_in_type (type, type);
2921
  high = upper_bound_in_type (type, type);
2922
 
2923
  record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
2924
}
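/* Illustrative sketch (an editorial example, not part of the original
   source; the SSA names are hypothetical): for a statement such as

     j_2 = j_1 + 1;

   where J has a signed integer type and -fwrapv is not in effect, signed
   overflow is undefined, so the evolution of J cannot wrap.  The call to
   record_nonwrapping_iv above then bounds the number of executions of the
   statement by roughly INT_MAX minus the initial value of J.  */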
2925
 
2926
/* The following analyzers extract information on the bounds
2927
   of LOOP from the following undefined behaviors:
2928
 
2929
   - data references should not access elements over the statically
2930
     allocated size,
2931
 
2932
   - signed variables should not overflow when flag_wrapv is not set.
2933
*/
2934
 
2935
static void
2936
infer_loop_bounds_from_undefined (struct loop *loop)
2937
{
2938
  unsigned i;
2939
  basic_block *bbs;
2940
  gimple_stmt_iterator bsi;
2941
  basic_block bb;
2942
  bool reliable;
2943
 
2944
  bbs = get_loop_body (loop);
2945
 
2946
  for (i = 0; i < loop->num_nodes; i++)
2947
    {
2948
      bb = bbs[i];
2949
 
2950
      /* If BB is not executed in each iteration of the loop, we cannot
2951
         use the operations in it to infer a reliable upper bound on the
2952
         # of iterations of the loop.  However, we can use it as a guess.  */
2953
      reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
2954
 
2955
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2956
        {
2957
          gimple stmt = gsi_stmt (bsi);
2958
 
2959
          infer_loop_bounds_from_array (loop, stmt, reliable);
2960
 
2961
          if (reliable)
2962
            {
2963
              infer_loop_bounds_from_signedness (loop, stmt);
2964
              infer_loop_bounds_from_pointer_arith (loop, stmt);
2965
            }
2966
        }
2967
 
2968
    }
2969
 
2970
  free (bbs);
2971
}
2972
 
2973
/* Converts VAL to double_int.  */
2974
 
2975
static double_int
2976
gcov_type_to_double_int (gcov_type val)
2977
{
2978
  double_int ret;
2979
 
2980
  ret.low = (unsigned HOST_WIDE_INT) val;
2981
  /* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by
2982
     the size of type.  */
2983
  val >>= HOST_BITS_PER_WIDE_INT - 1;
2984
  val >>= 1;
2985
  ret.high = (unsigned HOST_WIDE_INT) val;
2986
 
2987
  return ret;
2988
}
2989
 
2990
/* Records estimates on numbers of iterations of LOOP.  If USE_UNDEFINED_P
2991
   is true also use estimates derived from undefined behavior.  */
2992
 
2993
void
2994
estimate_numbers_of_iterations_loop (struct loop *loop, bool use_undefined_p)
2995
{
2996
  VEC (edge, heap) *exits;
2997
  tree niter, type;
2998
  unsigned i;
2999
  struct tree_niter_desc niter_desc;
3000
  edge ex;
3001
  double_int bound;
3002
 
3003
  /* Give up if we have already tried to compute an estimate.  */
3004
  if (loop->estimate_state != EST_NOT_COMPUTED)
3005
    return;
3006
  loop->estimate_state = EST_AVAILABLE;
3007
  loop->any_upper_bound = false;
3008
  loop->any_estimate = false;
3009
 
3010
  exits = get_loop_exit_edges (loop);
3011
  FOR_EACH_VEC_ELT (edge, exits, i, ex)
3012
    {
3013
      if (!number_of_iterations_exit (loop, ex, &niter_desc, false))
3014
        continue;
3015
 
3016
      niter = niter_desc.niter;
3017
      type = TREE_TYPE (niter);
3018
      if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
3019
        niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
3020
                        build_int_cst (type, 0),
3021
                        niter);
3022
      record_estimate (loop, niter, niter_desc.max,
3023
                       last_stmt (ex->src),
3024
                       true, true, true);
3025
    }
3026
  VEC_free (edge, heap, exits);
3027
 
3028
  if (use_undefined_p)
3029
    infer_loop_bounds_from_undefined (loop);
3030
 
3031
  /* If we have a measured profile, use it to estimate the number of
3032
     iterations.  */
3033
  if (loop->header->count != 0)
3034
    {
3035
      gcov_type nit = expected_loop_iterations_unbounded (loop) + 1;
3036
      bound = gcov_type_to_double_int (nit);
3037
      record_niter_bound (loop, bound, true, false);
3038
    }
3039
 
3040
  /* If an upper bound is smaller than the realistic estimate of the
3041
     number of iterations, use the upper bound instead.  */
3042
  if (loop->any_upper_bound
3043
      && loop->any_estimate
3044
      && double_int_ucmp (loop->nb_iterations_upper_bound,
3045
                          loop->nb_iterations_estimate) < 0)
3046
    loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
3047
}
3048
 
3049
/* Sets NIT to the estimated number of executions of the latch of the
3050
   LOOP.  If CONSERVATIVE is true, we must be sure that NIT is at least as
3051
   large as the number of iterations.  If we have no reliable estimate,
3052
   the function returns false, otherwise returns true.  */
3053
 
3054
bool
3055
estimated_loop_iterations (struct loop *loop, bool conservative,
3056
                           double_int *nit)
3057
{
3058
  estimate_numbers_of_iterations_loop (loop, true);
3059
  if (conservative)
3060
    {
3061
      if (!loop->any_upper_bound)
3062
        return false;
3063
 
3064
      *nit = loop->nb_iterations_upper_bound;
3065
    }
3066
  else
3067
    {
3068
      if (!loop->any_estimate)
3069
        return false;
3070
 
3071
      *nit = loop->nb_iterations_estimate;
3072
    }
3073
 
3074
  return true;
3075
}
3076
 
3077
/* Similar to estimated_loop_iterations, but returns the estimate only
3078
   if it fits in a HOST_WIDE_INT.  If this is not the case, or the estimate
3079
   on the number of iterations of LOOP could not be derived, returns -1.  */
3080
 
3081
HOST_WIDE_INT
3082
estimated_loop_iterations_int (struct loop *loop, bool conservative)
3083
{
3084
  double_int nit;
3085
  HOST_WIDE_INT hwi_nit;
3086
 
3087
  if (!estimated_loop_iterations (loop, conservative, &nit))
3088
    return -1;
3089
 
3090
  if (!double_int_fits_in_shwi_p (nit))
3091
    return -1;
3092
  hwi_nit = double_int_to_shwi (nit);
3093
 
3094
  return hwi_nit < 0 ? -1 : hwi_nit;
3095
}
3096
 
3097
/* Returns an upper bound on the number of executions of statements
3098
   in the LOOP.  For statements before the loop exit, this exceeds
3099
   the number of executions of the latch by one.  */
3100
 
3101
HOST_WIDE_INT
3102
max_stmt_executions_int (struct loop *loop, bool conservative)
3103
{
3104
  HOST_WIDE_INT nit = estimated_loop_iterations_int (loop, conservative);
3105
  HOST_WIDE_INT snit;
3106
 
3107
  if (nit == -1)
3108
    return -1;
3109
 
3110
  snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
3111
 
3112
  /* If the computation overflows, return -1.  */
3113
  return snit < 0 ? -1 : snit;
3114
}
3115
 
3116
/* Sets NIT to the estimated number of executions of the latch of the
3117
   LOOP, plus one.  If CONSERVATIVE is true, we must be sure that NIT is at
3118
   least as large as the number of iterations.  If we have no reliable
3119
   estimate, the function returns false, otherwise returns true.  */
3120
 
3121
bool
3122
max_stmt_executions (struct loop *loop, bool conservative, double_int *nit)
3123
{
3124
  double_int nit_minus_one;
3125
 
3126
  if (!estimated_loop_iterations (loop, conservative, nit))
3127
    return false;
3128
 
3129
  nit_minus_one = *nit;
3130
 
3131
  *nit = double_int_add (*nit, double_int_one);
3132
 
3133
  return double_int_ucmp (*nit, nit_minus_one) > 0;
3134
}

/* Records estimates on numbers of iterations of loops.  */

void
estimate_numbers_of_iterations (bool use_undefined_p)
{
  loop_iterator li;
  struct loop *loop;

  /* We don't want to issue signed overflow warnings while getting
     loop iteration estimates.  */
  fold_defer_overflow_warnings ();

  FOR_EACH_LOOP (li, loop, 0)
    {
      estimate_numbers_of_iterations_loop (loop, use_undefined_p);
    }

  fold_undefer_and_ignore_overflow_warnings ();
}
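
/* Editorial note (illustrative, not part of the original source): the
   warnings are deferred because the folding done while deriving estimates
   may rely on signed overflow being undefined; diagnosing that here would
   warn about simplifications the compiler performs internally rather than
   about anything written by the user.  */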

/* Returns true if statement S1 dominates statement S2.  */

bool
stmt_dominates_stmt_p (gimple s1, gimple s2)
{
  basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);

  if (!bb1
      || s1 == s2)
    return true;

  if (bb1 == bb2)
    {
      gimple_stmt_iterator bsi;

      if (gimple_code (s2) == GIMPLE_PHI)
        return false;

      if (gimple_code (s1) == GIMPLE_PHI)
        return true;

      for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
        if (gsi_stmt (bsi) == s1)
          return true;

      return false;
    }

  return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
}
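
/* Editorial note (illustrative example, not part of the original source):
   within one basic block the walk above recovers statement order, with PHI
   nodes treated as preceding every ordinary statement; across blocks the
   question reduces to block dominance.  For

       bb: x_1 = PHI <...>;  a_2 = b_3 + c_4;  d_5 = a_2 * 2;

   the PHI dominates both assignments, and a_2 = b_3 + c_4 dominates
   d_5 = a_2 * 2.  */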

/* Returns true when we can prove that the number of executions of
   STMT in the loop is at most NITER, according to the bound on
   the number of executions of the statement NITER_BOUND->stmt recorded in
   NITER_BOUND.  If STMT is NULL, we must prove this bound for all
   statements in the loop.  */

static bool
n_of_executions_at_most (gimple stmt,
                         struct nb_iter_bound *niter_bound,
                         tree niter)
{
  double_int bound = niter_bound->bound;
  tree nit_type = TREE_TYPE (niter), e;
  enum tree_code cmp;

  gcc_assert (TYPE_UNSIGNED (nit_type));

  /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
     the number of iterations is small.  */
  if (!double_int_fits_to_tree_p (nit_type, bound))
    return false;

  /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
     times.  This means that:

     -- if NITER_BOUND->is_exit is true, then everything before
        NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
        times, and everything after it at most NITER_BOUND->bound times.

     -- if NITER_BOUND->is_exit is false, then if we can prove that when STMT
        is executed, then NITER_BOUND->stmt is executed as well in the same
        iteration (we conclude this if both statements belong to the same
        basic block, or if STMT comes after NITER_BOUND->stmt), then STMT
        is executed at most NITER_BOUND->bound + 1 times.  Otherwise STMT is
        executed at most NITER_BOUND->bound + 2 times.  */

  if (niter_bound->is_exit)
    {
      if (stmt
          && stmt != niter_bound->stmt
          && stmt_dominates_stmt_p (niter_bound->stmt, stmt))
        cmp = GE_EXPR;
      else
        cmp = GT_EXPR;
    }
  else
    {
      if (!stmt
          || (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
              && !stmt_dominates_stmt_p (niter_bound->stmt, stmt)))
        {
          bound = double_int_add (bound, double_int_one);
          if (double_int_zero_p (bound)
              || !double_int_fits_to_tree_p (nit_type, bound))
            return false;
        }
      cmp = GT_EXPR;
    }

  e = fold_binary (cmp, boolean_type_node,
                   niter, double_int_to_tree (nit_type, bound));
  return e && integer_nonzerop (e);
}
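
/* Editorial note (illustrative, not part of the original source): the
   comparison built above is between NITER and the recorded bound.  When the
   bound comes from the exit test itself and STMT is dominated by that test,
   STMT runs at most BOUND times and NITER >= BOUND suffices (GE_EXPR); in
   the remaining cases STMT may run one or two more times than BOUND, so the
   strict NITER > (possibly incremented) BOUND is required (GT_EXPR).  */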

/* Returns true if the arithmetic in TYPE can be assumed not to wrap.  */

bool
nowrap_type_p (tree type)
{
  if (INTEGRAL_TYPE_P (type)
      && TYPE_OVERFLOW_UNDEFINED (type))
    return true;

  if (POINTER_TYPE_P (type))
    return true;

  return false;
}
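
/* Editorial note (illustrative, not part of the original source): unsigned
   integer types have well-defined modulo wrap-around, so they fall through
   to the false result here; only types whose overflow is undefined (and
   pointer types) may be assumed not to wrap.  */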

/* Return false only when the induction variable BASE + STEP * I is
   known to not overflow: i.e. when the number of iterations is small
   enough with respect to the step and initial condition in order to
   keep the evolution confined within TYPE's bounds.  Return true when
   the iv is known to overflow or when the property is not computable.

   USE_OVERFLOW_SEMANTICS is true if this function should assume that
   the rules for overflow of the given language apply (e.g., that signed
   arithmetic in C does not overflow).  */

bool
scev_probably_wraps_p (tree base, tree step,
                       gimple at_stmt, struct loop *loop,
                       bool use_overflow_semantics)
{
  struct nb_iter_bound *bound;
  tree delta, step_abs;
  tree unsigned_type, valid_niter;
  tree type = TREE_TYPE (step);

  /* FIXME: We really need something like
     http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.

     We used to test for the following situation that frequently appears
     during address arithmetic:

       D.1621_13 = (long unsigned intD.4) D.1620_12;
       D.1622_14 = D.1621_13 * 8;
       D.1623_15 = (doubleD.29 *) D.1622_14;

     And derived that the sequence corresponding to D_14
     can be proved to not wrap because it is used for computing a
     memory access; however, this is not really the case -- for example,
     if D_12 = (unsigned char) [254,+,1], then D_14 has values
     2032, 2040, 0, 8, ..., but the code is still legal.  */

  if (chrec_contains_undetermined (base)
      || chrec_contains_undetermined (step))
    return true;

  if (integer_zerop (step))
    return false;

  /* If we can use the fact that signed and pointer arithmetic does not
     wrap, we are done.  */
  if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
    return false;

  /* To be able to use estimates on number of iterations of the loop,
     we must have an upper bound on the absolute value of the step.  */
  if (TREE_CODE (step) != INTEGER_CST)
    return true;

  /* Don't issue signed overflow warnings.  */
  fold_defer_overflow_warnings ();

  /* Otherwise, compute the number of iterations before we reach the
     bound of the type, and verify that the loop is exited before this
     occurs.  */
  unsigned_type = unsigned_type_for (type);
  base = fold_convert (unsigned_type, base);

  if (tree_int_cst_sign_bit (step))
    {
      tree extreme = fold_convert (unsigned_type,
                                   lower_bound_in_type (type, type));
      delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
      step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
                              fold_convert (unsigned_type, step));
    }
  else
    {
      tree extreme = fold_convert (unsigned_type,
                                   upper_bound_in_type (type, type));
      delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
      step_abs = fold_convert (unsigned_type, step);
    }

  valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);

  estimate_numbers_of_iterations_loop (loop, true);
  for (bound = loop->bounds; bound; bound = bound->next)
    {
      if (n_of_executions_at_most (at_stmt, bound, valid_niter))
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }
    }

  fold_undefer_and_ignore_overflow_warnings ();

  /* At this point we still don't have a proof that the iv does not
     overflow: give up.  */
  return true;
}
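
/* Editorial note (worked example, not part of the original source): for an
   unsigned char iv with BASE = 250 and STEP = 2, DELTA = 255 - 250 = 5 and
   STEP_ABS = 2, so VALID_NITER = 5 / 2 = 2.  The iv is reported as
   non-wrapping only if some recorded bound proves AT_STMT executes at most
   2 times; otherwise the conservative answer true (may wrap) is kept.  */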

/* Frees the information on upper bounds on numbers of iterations of LOOP.  */

void
free_numbers_of_iterations_estimates_loop (struct loop *loop)
{
  struct nb_iter_bound *bound, *next;

  loop->nb_iterations = NULL;
  loop->estimate_state = EST_NOT_COMPUTED;
  for (bound = loop->bounds; bound; bound = next)
    {
      next = bound->next;
      ggc_free (bound);
    }

  loop->bounds = NULL;
}

/* Frees the information on upper bounds on numbers of iterations of loops.  */

void
free_numbers_of_iterations_estimates (void)
{
  loop_iterator li;
  struct loop *loop;

  FOR_EACH_LOOP (li, loop, 0)
    {
      free_numbers_of_iterations_estimates_loop (loop);
    }
}

/* Substitute value VAL for ssa name NAME inside expressions held
   at LOOP.  */

void
substitute_in_loop_info (struct loop *loop, tree name, tree val)
{
  loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
}
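
/* Editorial note (illustrative, not part of the original source): this hook
   lets transformations that replace an SSA name by a known value (e.g. after
   proving it constant) keep the recorded nb_iterations expression in sync
   instead of having to invalidate it.  */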
