OpenCores Subversion repository: openrisc
URL: https://opencores.org/ocsvn/openrisc/openrisc/trunk
File: openrisc/trunk/gnu-dev/or1k-gcc/gcc/tree-ssa-dom.c (blame for rev 801; checked in at rev 684 by jeremybenn)
/* SSA Dominator optimizations for trees
   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "output.h"
#include "function.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-flow.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "langhooks.h"
#include "params.h"

/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
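
/* For example, an assignment x_3 = a_1 + b_2 is represented with
   KIND == EXPR_BINARY, TYPE == TREE_TYPE (x_3), ops.binary.op == PLUS_EXPR
   and ops.binary.opnd0/opnd1 pointing to a_1 and b_2.  The LHS itself is
   kept separately, in the expr_hash_elt wrapper below.  */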

/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;

DEF_VEC_O(cond_equivalence);
DEF_VEC_ALLOC_O(cond_equivalence,heap);

/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  VEC(cond_equivalence, heap) *cond_equivalences;
};

/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly, as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static htab_t avail_exprs;

/* Stack of available expressions in AVAIL_EXPRS.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;
DEF_VEC_P(expr_hash_elt_t);
DEF_VEC_ALLOC_P(expr_hash_elt_t,heap);

static VEC(expr_hash_elt_t,heap) *avail_exprs_stack;

/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};

/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static VEC(tree,heap) *const_and_copies_stack;

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static hashval_t real_avail_expr_hash (const void *);
static int avail_expr_eq (const void *, const void *);
static void htab_statistics (FILE *, htab_t);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void dom_thread_across_edge (struct dom_walk_data *, edge);
static void dom_opt_leave_block (struct dom_walk_data *, basic_block);
static void dom_opt_enter_block (struct dom_walk_data *, basic_block);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);


/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = stmt;

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}

/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Compare two hashable_expr structures for equivalence.
   They are considered equivalent when the expressions
   they denote must necessarily be equal.  The logic is intended
   to follow that of operand_equal_p in fold-const.c.  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                        const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If the types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}
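
/* So, for instance, a_1 + b_2 and b_2 + a_1 are considered equal because
   PLUS_EXPR is commutative, while a_1 - b_2 and b_2 - a_1 are not.  */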

/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow iterative_hash_expr in tree.c.  */

static hashval_t
iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      val = iterative_hash_expr (expr->ops.single.rhs, val);
      break;

    case EXPR_UNARY:
      val = iterative_hash_object (expr->ops.unary.op, val);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        val += TYPE_UNSIGNED (expr->type);

      val = iterative_hash_expr (expr->ops.unary.opnd, val);
      break;

    case EXPR_BINARY:
      val = iterative_hash_object (expr->ops.binary.op, val);
      if (commutative_tree_code (expr->ops.binary.op))
        val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
                                                expr->ops.binary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.binary.opnd0, val);
          val = iterative_hash_expr (expr->ops.binary.opnd1, val);
        }
      break;

    case EXPR_TERNARY:
      val = iterative_hash_object (expr->ops.ternary.op, val);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
                                                expr->ops.ternary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
          val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
        }
      val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gimple fn_from;

        val = iterative_hash_object (code, val);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          val = iterative_hash_hashval_t
            ((hashval_t) gimple_call_internal_fn (fn_from), val);
        else
          val = iterative_hash_expr (gimple_call_fn (fn_from), val);
        for (i = 0; i < expr->ops.call.nargs; i++)
          val = iterative_hash_expr (expr->ops.call.args[i], val);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          val = iterative_hash_expr (expr->ops.phi.args[i], val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  return val;
}
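
/* Note that the commutative cases above hash the operand pair in an
   order-insensitive way, so that a_1 + b_2 and b_2 + a_1, which compare
   equal in hashable_expr_equal_p, also hash to the same value.  */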

/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
      case EXPR_SINGLE:
        print_generic_expr (stream, element->expr.ops.single.rhs, 0);
        break;

      case EXPR_UNARY:
        fprintf (stream, "%s ", tree_code_name[element->expr.ops.unary.op]);
        print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
        break;

      case EXPR_BINARY:
        print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
        fprintf (stream, " %s ", tree_code_name[element->expr.ops.binary.op]);
        print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
        break;

      case EXPR_TERNARY:
        fprintf (stream, " %s <", tree_code_name[element->expr.ops.ternary.op]);
        print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
        fputs (", ", stream);
        print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
        fputs (", ", stream);
        print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
        fputs (">", stream);
        break;

      case EXPR_CALL:
        {
          size_t i;
          size_t nargs = element->expr.ops.call.nargs;
          gimple fn_from;

          fn_from = element->expr.ops.call.fn_from;
          if (gimple_call_internal_p (fn_from))
            fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                   stream);
          else
            print_generic_expr (stream, gimple_call_fn (fn_from), 0);
          fprintf (stream, " (");
          for (i = 0; i < nargs; i++)
            {
              print_generic_expr (stream, element->expr.ops.call.args[i], 0);
              if (i + 1 < nargs)
                fprintf (stream, ", ");
            }
          fprintf (stream, ")");
        }
        break;

      case EXPR_PHI:
        {
          size_t i;
          size_t nargs = element->expr.ops.phi.nargs;

          fprintf (stream, "PHI <");
          for (i = 0; i < nargs; i++)
            {
              print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
              if (i + 1 < nargs)
                fprintf (stream, ", ");
            }
          fprintf (stream, ">");
        }
        break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, "          ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);

  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);

  if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);

  free (element);
}

/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB (bb)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              if (edge_info->cond_equivalences)
                VEC_free (cond_equivalence, heap, edge_info->cond_equivalences);
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

static unsigned int
tree_ssa_dominator_optimize (void)
{
  struct dom_walk_data walk_data;

  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = htab_create (1024, real_avail_expr_hash, avail_expr_eq, free_expr_hash_elt);
  avail_exprs_stack = VEC_alloc (expr_hash_elt_t, heap, 20);
  const_and_copies_stack = VEC_alloc (tree, heap, 20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  /* Set up callbacks for the generic dominator tree walker.  */
  walk_data.dom_direction = CDI_DOMINATORS;
  walk_data.initialize_block_local_data = NULL;
  walk_data.before_dom_children = dom_opt_enter_block;
  walk_data.after_dom_children = dom_opt_leave_block;
  /* Right now we only attach a dummy COND_EXPR to the global data pointer.
     When we attach more stuff we'll need to fill this out with a real
     structure.  */
  walk_data.global_data = NULL;
  walk_data.block_local_data_size = 0;

  /* Now initialize the dominator walker.  */
  init_walk_dominator_tree (&walk_data);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.  */
  loop_optimizer_init (LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB (bb)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK (i);
          if (bb
              && single_succ_p (bb)
              && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            {
              bitmap_clear_bit (need_eh_cleanup, i);
              bitmap_set_bit (need_eh_cleanup, single_succ (bb)->index);
            }
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_zero (need_eh_cleanup);
    }

  statistics_counter_event (cfun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (cfun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (cfun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  htab_delete (avail_exprs);

  /* And finalize the dominator walker.  */
  fini_walk_dominator_tree (&walk_data);

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  VEC_free (expr_hash_elt_t, heap, avail_exprs_stack);
  VEC_free (tree, heap, const_and_copies_stack);

  /* Free the value-handle array.  */
  threadedge_finalize_values ();
  ssa_name_values = NULL;

  return 0;
}

static bool
gate_dominator (void)
{
  return flag_tree_dom != 0;
}

struct gimple_opt_pass pass_dominator =
{
 {
  GIMPLE_PASS,
  "dom",                                /* name */
  gate_dominator,                       /* gate */
  tree_ssa_dominator_optimize,          /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TREE_SSA_DOMINATOR_OPTS,           /* tv_id */
  PROP_cfg | PROP_ssa,                  /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_cleanup_cfg
    | TODO_update_ssa
    | TODO_verify_ssa
    | TODO_verify_flow                  /* todo_flags_finish */
 }
};


/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}

/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */

/* Pop entries off AVAIL_EXPRS_STACK and remove each one from the
   AVAIL_EXPRS hash table, stopping at the NULL marker that was pushed
   when the current block was entered.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (VEC_length (expr_hash_elt_t, avail_exprs_stack) > 0)
    {
      expr_hash_elt_t victim = VEC_pop (expr_hash_elt_t, avail_exprs_stack);
      void **slot;

      if (victim == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = htab_find_slot_with_hash (avail_exprs,
                                       victim, victim->hash, NO_INSERT);
      gcc_assert (slot && *slot == (void *) victim);
      htab_clear_slot (avail_exprs, slot);
    }
}
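
/* For example, if processing a block made x_3 = a_1 + b_2 available, its
   element sits above the NULL marker pushed when the block was entered;
   popping removes the entry from AVAIL_EXPRS and stops at the marker.  */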

/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (VEC_length (tree, const_and_copies_stack) > 0)
    {
      tree prev_value, dest;

      dest = VEC_pop (tree, const_and_copies_stack);

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = VEC_pop (tree, const_and_copies_stack);
      set_ssa_name_value (dest, prev_value);
    }
}

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}

/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

static void
dom_thread_across_edge (struct dom_walk_data *walk_data, edge e)
{
  if (! walk_data->global_data)
  {
    gimple dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);
    walk_data->global_data = dummy_cond;
  }

  thread_across_edge ((gimple) walk_data->global_data, e, false,
                      &const_and_copies_stack,
                      simplify_stmt_for_jump_threading);
}

/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be an SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}
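
/* For example, given x_3 = PHI <y_2, y_2, x_3>, every alternative other
   than the result itself is y_2, so x_3 is recorded as having the
   value y_2.  */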

/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}
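
/* For a typical loop header, the latch's back edge (whose source is
   dominated by the header itself) is skipped, and the preheader edge is
   the one returned.  */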

/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          for (i = 0; VEC_iterate (cond_equivalence,
                                   edge_info->cond_equivalences, i, eq); ++i)
            record_cond (eq);
        }
    }
}

/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, avail_exprs);
}


/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}


/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, htab_t htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab_size (htab),
           (long) htab_elements (htab),
           htab_collisions (htab));
}
1181
 
1182
 
1183
/* Enter condition equivalence into the expression hash table.
1184
   This indicates that a conditional expression has a known
1185
   boolean value.  */
1186
 
1187
static void
1188
record_cond (cond_equivalence *p)
1189
{
1190
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
1191
  void **slot;
1192
 
1193
  initialize_hash_element_from_expr (&p->cond, p->value, element);
1194
 
1195
  slot = htab_find_slot_with_hash (avail_exprs, (void *)element,
1196
                                   element->hash, INSERT);
1197
  if (*slot == NULL)
1198
    {
1199
      *slot = (void *) element;
1200
 
1201
      if (dump_file && (dump_flags & TDF_DETAILS))
1202
        {
1203
          fprintf (dump_file, "1>>> ");
1204
          print_expr_hash_elt (dump_file, element);
1205
        }
1206
 
1207
      VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element);
1208
    }
1209
  else
1210
    free (element);
1211
}
1212
 
1213
/* Build a cond_equivalence record indicating that the comparison
1214
   CODE holds between operands OP0 and OP1 and push it to **P.  */
1215
 
1216
static void
1217
build_and_record_new_cond (enum tree_code code,
1218
                           tree op0, tree op1,
1219
                           VEC(cond_equivalence, heap) **p)
1220
{
1221
  cond_equivalence c;
1222
  struct hashable_expr *cond = &c.cond;
1223
 
1224
  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);
1225
 
1226
  cond->type = boolean_type_node;
1227
  cond->kind = EXPR_BINARY;
1228
  cond->ops.binary.op = code;
1229
  cond->ops.binary.opnd0 = op0;
1230
  cond->ops.binary.opnd1 = op1;
1231
 
1232
  c.value = boolean_true_node;
1233
  VEC_safe_push (cond_equivalence, heap, *p, &c);
1234
}

/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  VEC_safe_push (cond_equivalence, heap, edge_info->cond_equivalences, &c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  VEC_safe_push (cond_equivalence, heap, edge_info->cond_equivalences, &c);
}
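
/* So on the edge where a_1 < b_2 is true we also record a_1 <= b_2 and
   a_1 != b_2 as true, plus ORDERED_EXPR and LTGT_EXPR when the operands
   are floating point.  */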

/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  VEC_reserve (tree, heap, const_and_copies_stack, 2);
  VEC_quick_push (tree, const_and_copies_stack, prev_x);
  VEC_quick_push (tree, const_and_copies_stack, x);
}

/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return defbb->loop_depth;
}

/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}

/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}
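
/* E.g. from the test x_1 == 0.0 we refuse to record x_1 = 0.0 when signed
   zeros are honored, since x_1 might be -0.0; an equality against a
   nonzero REAL_CST is still recorded.  */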

/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
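
/* E.g. given i_1 = PHI <0, i_2> and i_2 = i_1 + 1, the increment is
   recognized because i_2 feeds back into the PHI defining i_1.  */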

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gimple phi = gsi_stmt (gsi);

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }
    }
}
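
/* For example, if x_1 currently has the known value 5, a PHI argument x_1
   in a successor block is replaced by 5 on the corresponding edge,
   provided the copy is safe to propagate.  */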
1559
 
1560
/* We have finished optimizing BB, record any information implied by
1561
   taking a specific outgoing edge from BB.  */
1562
 
1563
static void
1564
record_edge_info (basic_block bb)
1565
{
1566
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
1567
  struct edge_info *edge_info;
1568
 
1569
  if (! gsi_end_p (gsi))
1570
    {
1571
      gimple stmt = gsi_stmt (gsi);
1572
      location_t loc = gimple_location (stmt);
1573
 
1574
      if (gimple_code (stmt) == GIMPLE_SWITCH)
1575
        {
1576
          tree index = gimple_switch_index (stmt);
1577
 
1578
          if (TREE_CODE (index) == SSA_NAME)
1579
            {
1580
              int i;
1581
              int n_labels = gimple_switch_num_labels (stmt);
1582
              tree *info = XCNEWVEC (tree, last_basic_block);
1583
              edge e;
1584
              edge_iterator ei;
1585
 
1586
              for (i = 0; i < n_labels; i++)
1587
                {
1588
                  tree label = gimple_switch_label (stmt, i);
1589
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
1590
                  if (CASE_HIGH (label)
1591
                      || !CASE_LOW (label)
1592
                      || info[target_bb->index])
1593
                    info[target_bb->index] = error_mark_node;
1594
                  else
1595
                    info[target_bb->index] = label;
1596
                }
1597
 
1598
              FOR_EACH_EDGE (e, ei, bb->succs)
1599
                {
1600
                  basic_block target_bb = e->dest;
1601
                  tree label = info[target_bb->index];
1602
 
1603
                  if (label != NULL && label != error_mark_node)
1604
                    {
1605
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
1606
                                                 CASE_LOW (label));
1607
                      edge_info = allocate_edge_info (e);
1608
                      edge_info->lhs = index;
1609
                      edge_info->rhs = x;
1610
                    }
1611
                }
1612
              free (info);
1613
            }
1614
        }
1615
 
1616
      /* A COND_EXPR may create equivalences too.  */
1617
      if (gimple_code (stmt) == GIMPLE_COND)
1618
        {
1619
          edge true_edge;
1620
          edge false_edge;
1621
 
1622
          tree op0 = gimple_cond_lhs (stmt);
1623
          tree op1 = gimple_cond_rhs (stmt);
1624
          enum tree_code code = gimple_cond_code (stmt);
1625
 
1626
          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1627
 
1628
          /* Special case comparing booleans against a constant, as we
1629
             know the value of OP0 on both arms of the branch.  i.e., we
1630
             can record an equivalence for OP0 rather than COND.  */
1631
          if ((code == EQ_EXPR || code == NE_EXPR)
1632
              && TREE_CODE (op0) == SSA_NAME
1633
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
1634
              && is_gimple_min_invariant (op1))
1635
            {
1636
              if (code == EQ_EXPR)
1637
                {
1638
                  edge_info = allocate_edge_info (true_edge);
1639
                  edge_info->lhs = op0;
1640
                  edge_info->rhs = (integer_zerop (op1)
1641
                                    ? boolean_false_node
1642
                                    : boolean_true_node);
1643
 
1644
                  edge_info = allocate_edge_info (false_edge);
1645
                  edge_info->lhs = op0;
1646
                  edge_info->rhs = (integer_zerop (op1)
1647
                                    ? boolean_true_node
1648
                                    : boolean_false_node);
1649
                }
1650
              else
1651
                {
1652
                  edge_info = allocate_edge_info (true_edge);
1653
                  edge_info->lhs = op0;
1654
                  edge_info->rhs = (integer_zerop (op1)
1655
                                    ? boolean_true_node
1656
                                    : boolean_false_node);
1657
 
1658
                  edge_info = allocate_edge_info (false_edge);
1659
                  edge_info->lhs = op0;
1660
                  edge_info->rhs = (integer_zerop (op1)
1661
                                    ? boolean_false_node
1662
                                    : boolean_true_node);
1663
                }
1664
            }
1665
          else if (is_gimple_min_invariant (op0)
1666
                   && (TREE_CODE (op1) == SSA_NAME
1667
                       || is_gimple_min_invariant (op1)))
1668
            {
1669
              tree cond = build2 (code, boolean_type_node, op0, op1);
1670
              tree inverted = invert_truthvalue_loc (loc, cond);
1671
              bool can_infer_simple_equiv
1672
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
1673
                    && real_zerop (op0));
1674
              struct edge_info *edge_info;
1675
 
1676
              edge_info = allocate_edge_info (true_edge);
1677
              record_conditions (edge_info, cond, inverted);
1678
 
1679
              if (can_infer_simple_equiv && code == EQ_EXPR)
1680
                {
1681
                  edge_info->lhs = op1;
1682
                  edge_info->rhs = op0;
1683
                }
1684
 
1685
              edge_info = allocate_edge_info (false_edge);
1686
              record_conditions (edge_info, inverted, cond);
1687
 
1688
              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1689
                {
1690
                  edge_info->lhs = op1;
1691
                  edge_info->rhs = op0;
1692
                }
1693
            }
1694
 
1695
          else if (TREE_CODE (op0) == SSA_NAME
1696
                   && (TREE_CODE (op1) == SSA_NAME
1697
                       || is_gimple_min_invariant (op1)))
1698
            {
1699
              tree cond = build2 (code, boolean_type_node, op0, op1);
1700
              tree inverted = invert_truthvalue_loc (loc, cond);
1701
              bool can_infer_simple_equiv
1702
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
1703
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
1704
              struct edge_info *edge_info;
1705
 
1706
              edge_info = allocate_edge_info (true_edge);
1707
              record_conditions (edge_info, cond, inverted);
1708
 
1709
              if (can_infer_simple_equiv && code == EQ_EXPR)
1710
                {
1711
                  edge_info->lhs = op0;
1712
                  edge_info->rhs = op1;
1713
                }
1714
 
1715
              edge_info = allocate_edge_info (false_edge);
1716
              record_conditions (edge_info, inverted, cond);
1717
 
1718
              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1719
                {
1720
                  edge_info->lhs = op0;
1721
                  edge_info->rhs = op1;
1722
                }
1723
            }
1724
        }
1725
 
1726
      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
1727
    }
1728
}
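/* A sketch of the equivalences recorded above (illustrative IL, not
   compiler output): for a block ending in

       if (x_1 == 42) goto <true>; else goto <false>;

   the true edge gets the simple equivalence x_1 = 42, and both edges
   get recorded conditions (x_1 == 42 known true/false, x_1 != 42
   known false/true).  For a switch whose case labels each reach a
   distinct block, the outgoing edge records index = CASE_LOW.  */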
1729
 
1730
static void
1731
dom_opt_enter_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
1732
                     basic_block bb)
1733
{
1734
  gimple_stmt_iterator gsi;
1735
 
1736
  if (dump_file && (dump_flags & TDF_DETAILS))
1737
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1738
 
1739
  /* Push a marker on the stacks of local information so that we know how
1740
     far to unwind when we finalize this block.  */
1741
  VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, NULL);
1742
  VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
1743
 
1744
  record_equivalences_from_incoming_edge (bb);
1745
 
1746
  /* PHI nodes can create equivalences too.  */
1747
  record_equivalences_from_phis (bb);
1748
 
1749
  /* Create equivalences from redundant PHIs.  PHIs are only truly
1750
     redundant when they exist in the same block, so push another
1751
     marker and unwind right afterwards.  */
1752
  VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, NULL);
1753
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1754
    eliminate_redundant_computations (&gsi);
1755
  remove_local_expressions_from_table ();
1756
 
1757
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1758
    optimize_stmt (bb, gsi);
1759
 
1760
  /* Now prepare to process dominated blocks.  */
1761
  record_edge_info (bb);
1762
  cprop_into_successor_phis (bb);
1763
}
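/* The marker protocol used above, sketched (illustrative): pushing a
   NULL/NULL_TREE sentinel before processing a block means the unwind
   routines simply pop and undo entries until they hit the sentinel,

       push NULL            <-- marker for BB
       push x_1 -> 5        <-- equivalence recorded inside BB
       push a_2 -> b_7
       ...pop and undo until NULL...

   which restores the tables to their state at block entry.  */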
1764
 
1765
/* We have finished processing the dominator children of BB; perform
1766
   any finalization actions in preparation for leaving this node in
1767
   the dominator tree.  */
1768
 
1769
static void
1770
dom_opt_leave_block (struct dom_walk_data *walk_data, basic_block bb)
1771
{
1772
  gimple last;
1773
 
1774
  /* If we have an outgoing edge to a block with multiple incoming and
1775
     outgoing edges, then we may be able to thread the edge, i.e., we
1776
     may be able to statically determine which of the outgoing edges
1777
     will be traversed when the incoming edge from BB is traversed.  */
1778
  if (single_succ_p (bb)
1779
      && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1780
      && potentially_threadable_block (single_succ (bb)))
1781
    {
1782
      /* Push a marker on the stack, which thread_across_edge expects
1783
         and will remove.  */
1784
      VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
1785
      dom_thread_across_edge (walk_data, single_succ_edge (bb));
1786
    }
1787
  else if ((last = last_stmt (bb))
1788
           && gimple_code (last) == GIMPLE_COND
1789
           && EDGE_COUNT (bb->succs) == 2
1790
           && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
1791
           && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
1792
    {
1793
      edge true_edge, false_edge;
1794
 
1795
      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1796
 
1797
      /* Only try to thread the edge if it reaches a target block with
1798
         more than one predecessor and more than one successor.  */
1799
      if (potentially_threadable_block (true_edge->dest))
1800
        {
1801
          struct edge_info *edge_info;
1802
          unsigned int i;
1803
 
1804
          /* Push a marker onto the available expression stack so that we
1805
             unwind any expressions related to the TRUE arm before processing
1806
             the false arm below.  */
1807
          VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, NULL);
1808
          VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
1809
 
1810
          edge_info = (struct edge_info *) true_edge->aux;
1811
 
1812
          /* If we have info associated with this edge, record it into
1813
             our equivalence tables.  */
1814
          if (edge_info)
1815
            {
1816
              cond_equivalence *eq;
1817
              tree lhs = edge_info->lhs;
1818
              tree rhs = edge_info->rhs;
1819
 
1820
              /* If we have a simple NAME = VALUE equivalence, record it.  */
1821
              if (lhs && TREE_CODE (lhs) == SSA_NAME)
1822
                record_const_or_copy (lhs, rhs);
1823
 
1824
              /* If we have 0 = COND or 1 = COND equivalences, record them
1825
                 into our expression hash tables.  */
1826
              for (i = 0; VEC_iterate (cond_equivalence,
1827
                                       edge_info->cond_equivalences, i, eq); ++i)
1828
                record_cond (eq);
1829
            }
1830
 
1831
          dom_thread_across_edge (walk_data, true_edge);
1832
 
1833
          /* And restore the various tables to their state before
1834
             we threaded this edge.  */
1835
          remove_local_expressions_from_table ();
1836
        }
1837
 
1838
      /* Similarly for the ELSE arm.  */
1839
      if (potentially_threadable_block (false_edge->dest))
1840
        {
1841
          struct edge_info *edge_info;
1842
          unsigned int i;
1843
 
1844
          VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
1845
          edge_info = (struct edge_info *) false_edge->aux;
1846
 
1847
          /* If we have info associated with this edge, record it into
1848
             our equivalence tables.  */
1849
          if (edge_info)
1850
            {
1851
              cond_equivalence *eq;
1852
              tree lhs = edge_info->lhs;
1853
              tree rhs = edge_info->rhs;
1854
 
1855
              /* If we have a simple NAME = VALUE equivalence, record it.  */
1856
              if (lhs && TREE_CODE (lhs) == SSA_NAME)
1857
                record_const_or_copy (lhs, rhs);
1858
 
1859
              /* If we have 0 = COND or 1 = COND equivalences, record them
1860
                 into our expression hash tables.  */
1861
              for (i = 0; VEC_iterate (cond_equivalence,
1862
                                       edge_info->cond_equivalences, i, eq); ++i)
1863
                record_cond (eq);
1864
            }
1865
 
1866
          /* Now thread the edge.  */
1867
          dom_thread_across_edge (walk_data, false_edge);
1868
 
1869
          /* No need to remove local expressions from our tables
1870
             or restore vars to their original value as that will
1871
             be done immediately below.  */
1872
        }
1873
    }
1874
 
1875
  remove_local_expressions_from_table ();
1876
  restore_vars_to_original_value ();
1877
}
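/* An illustrative threading example (hypothetical IL): given

       bb1:  if (x_1 == 0) goto bb2; else goto bb3;
       bb2:  y_2 = f ();
             if (x_1 == 0) goto bb4; else goto bb5;

   recording x_1 = 0 on the bb1->bb2 edge lets dom_thread_across_edge
   statically resolve bb2's conditional, so the jump through bb2 can
   be redirected to reach bb4 directly.  */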
1878
 
1879
/* Search for redundant computations in STMT.  If any are found, then
1880
   replace them with the variable holding the result of the computation.
1881
 
1882
   If safe, record this expression into the available expression hash
1883
   table.  */
1884
 
1885
static void
1886
eliminate_redundant_computations (gimple_stmt_iterator* gsi)
1887
{
1888
  tree expr_type;
1889
  tree cached_lhs;
1890
  tree def;
1891
  bool insert = true;
1892
  bool assigns_var_p = false;
1893
 
1894
  gimple stmt = gsi_stmt (*gsi);
1895
 
1896
  if (gimple_code (stmt) == GIMPLE_PHI)
1897
    def = gimple_phi_result (stmt);
1898
  else
1899
    def = gimple_get_lhs (stmt);
1900
 
1901
  /* Certain expressions on the RHS can be optimized away, but cannot
1902
     themselves be entered into the hash tables.  */
1903
  if (! def
1904
      || TREE_CODE (def) != SSA_NAME
1905
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
1906
      || gimple_vdef (stmt)
1907
      /* Do not record equivalences for increments of ivs.  This would create
1908
         overlapping live ranges for a very questionable gain.  */
1909
      || simple_iv_increment_p (stmt))
1910
    insert = false;
1911
 
1912
  /* Check if the expression has been computed before.  */
1913
  cached_lhs = lookup_avail_expr (stmt, insert);
1914
 
1915
  opt_stats.num_exprs_considered++;
1916
 
1917
  /* Get the type of the expression we are trying to optimize.  */
1918
  if (is_gimple_assign (stmt))
1919
    {
1920
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
1921
      assigns_var_p = true;
1922
    }
1923
  else if (gimple_code (stmt) == GIMPLE_COND)
1924
    expr_type = boolean_type_node;
1925
  else if (is_gimple_call (stmt))
1926
    {
1927
      gcc_assert (gimple_call_lhs (stmt));
1928
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
1929
      assigns_var_p = true;
1930
    }
1931
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
1932
    expr_type = TREE_TYPE (gimple_switch_index (stmt));
1933
  else if (gimple_code (stmt) == GIMPLE_PHI)
1934
    /* We can't propagate into a phi, so the logic below doesn't apply.
1935
       Instead record an equivalence between the cached LHS and the
1936
       PHI result of this statement, provided they are in the same block.
1937
       This should be sufficient to kill the redundant phi.  */
1938
    {
1939
      if (def && cached_lhs)
1940
        record_const_or_copy (def, cached_lhs);
1941
      return;
1942
    }
1943
  else
1944
    gcc_unreachable ();
1945
 
1946
  if (!cached_lhs)
1947
    return;
1948
 
1949
  /* It is safe to ignore types here since we have already done
1950
     type checking in the hashing and equality routines.  In fact
1951
     type checking here merely gets in the way of constant
1952
     propagation.  Also, make sure that it is safe to propagate
1953
     CACHED_LHS into the expression in STMT.  */
1954
  if ((TREE_CODE (cached_lhs) != SSA_NAME
1955
       && (assigns_var_p
1956
           || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
1957
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
1958
  {
1959
      gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
1960
                           || is_gimple_min_invariant (cached_lhs));
1961
 
1962
      if (dump_file && (dump_flags & TDF_DETAILS))
1963
        {
1964
          fprintf (dump_file, "  Replaced redundant expr '");
1965
          print_gimple_expr (dump_file, stmt, 0, dump_flags);
1966
          fprintf (dump_file, "' with '");
1967
          print_generic_expr (dump_file, cached_lhs, dump_flags);
1968
          fprintf (dump_file, "'\n");
1969
        }
1970
 
1971
      opt_stats.num_re++;
1972
 
1973
      if (assigns_var_p
1974
          && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
1975
        cached_lhs = fold_convert (expr_type, cached_lhs);
1976
 
1977
      propagate_tree_value_into_stmt (gsi, cached_lhs);
1978
 
1979
      /* Since it is always necessary to mark the result as modified,
1980
         perhaps we should move this into propagate_tree_value_into_stmt
1981
         itself.  */
1982
      gimple_set_modified (gsi_stmt (*gsi), true);
1983
  }
1984
}
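/* For instance (hypothetical fragment): given

       a_3 = b_1 + c_2;
       ...
       d_4 = b_1 + c_2;

   the second computation hashes to the same AVAIL_EXPRS entry as the
   first, lookup_avail_expr returns a_3, and the second statement is
   rewritten to d_4 = a_3.  */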
1985
 
1986
/* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
1987
   the available expressions table or the const_and_copies table.
1988
   Detect and record those equivalences.  */
1989
/* We handle only very simple copy equivalences here.  The heavy
1990
   lifting is done by eliminate_redundant_computations.  */
1991
 
1992
static void
1993
record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
1994
{
1995
  tree lhs;
1996
  enum tree_code lhs_code;
1997
 
1998
  gcc_assert (is_gimple_assign (stmt));
1999
 
2000
  lhs = gimple_assign_lhs (stmt);
2001
  lhs_code = TREE_CODE (lhs);
2002
 
2003
  if (lhs_code == SSA_NAME
2004
      && gimple_assign_single_p (stmt))
2005
    {
2006
      tree rhs = gimple_assign_rhs1 (stmt);
2007
 
2008
      /* If the RHS of the assignment is a constant or another variable that
2009
         may be propagated, register it in the CONST_AND_COPIES table.  We
2010
         do not need to record unwind data for this, since this is a true
2011
         assignment and not an equivalence inferred from a comparison.  All
2012
         uses of this ssa name are dominated by this assignment, so unwinding
2013
         just costs time and space.  */
2014
      if (may_optimize_p
2015
          && (TREE_CODE (rhs) == SSA_NAME
2016
              || is_gimple_min_invariant (rhs)))
2017
      {
2018
        if (dump_file && (dump_flags & TDF_DETAILS))
2019
          {
2020
            fprintf (dump_file, "==== ASGN ");
2021
            print_generic_expr (dump_file, lhs, 0);
2022
            fprintf (dump_file, " = ");
2023
            print_generic_expr (dump_file, rhs, 0);
2024
            fprintf (dump_file, "\n");
2025
          }
2026
 
2027
        set_ssa_name_value (lhs, rhs);
2028
      }
2029
    }
2030
 
2031
  /* A memory store, even an aliased store, creates a useful
2032
     equivalence.  By exchanging the LHS and RHS, creating suitable
2033
     vops and recording the result in the available expression table,
2034
     we may be able to expose more redundant loads.  */
2035
  if (!gimple_has_volatile_ops (stmt)
2036
      && gimple_references_memory_p (stmt)
2037
      && gimple_assign_single_p (stmt)
2038
      && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2039
          || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2040
      && !is_gimple_reg (lhs))
2041
    {
2042
      tree rhs = gimple_assign_rhs1 (stmt);
2043
      gimple new_stmt;
2044
 
2045
      /* Build a new statement with the RHS and LHS exchanged.  */
2046
      if (TREE_CODE (rhs) == SSA_NAME)
2047
        {
2048
          /* NOTE tuples.  The call to gimple_build_assign below replaced
2049
             a call to build_gimple_modify_stmt, which did not set the
2050
             SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
2051
             may cause an SSA validation failure, as the LHS may be a
2052
             default-initialized name and should have no definition.  I'm
2053
             a bit dubious of this, as the artificial statement that we
2054
             generate here may in fact be ill-formed, but it is simply
2055
             used as an internal device in this pass, and never becomes
2056
             part of the CFG.  */
2057
          gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2058
          new_stmt = gimple_build_assign (rhs, lhs);
2059
          SSA_NAME_DEF_STMT (rhs) = defstmt;
2060
        }
2061
      else
2062
        new_stmt = gimple_build_assign (rhs, lhs);
2063
 
2064
      gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2065
 
2066
      /* Finally enter the statement into the available expression
2067
         table.  */
2068
      lookup_avail_expr (new_stmt, true);
2069
    }
2070
}
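/* As an illustration of the store equivalence (hypothetical IL):

       *p_1 = x_2;    <-- also enters "x_2 = *p_1" into AVAIL_EXPRS
       ...
       y_3 = *p_1;    <-- found in the table; becomes y_3 = x_2

   so long as no intervening store has changed the VUSE of the load.  */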
2071
 
2072
/* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2073
   CONST_AND_COPIES.  */
2074
 
2075
static void
2076
cprop_operand (gimple stmt, use_operand_p op_p)
2077
{
2078
  tree val;
2079
  tree op = USE_FROM_PTR (op_p);
2080
 
2081
  /* If the operand has a known constant value or it is known to be a
2082
     copy of some other variable, use the value or copy stored in
2083
     CONST_AND_COPIES.  */
2084
  val = SSA_NAME_VALUE (op);
2085
  if (val && val != op)
2086
    {
2087
      /* Do not replace hard register operands in asm statements.  */
2088
      if (gimple_code (stmt) == GIMPLE_ASM
2089
          && !may_propagate_copy_into_asm (op))
2090
        return;
2091
 
2092
      /* Certain operands are not allowed to be copy propagated due
2093
         to their interaction with exception handling and some GCC
2094
         extensions.  */
2095
      if (!may_propagate_copy (op, val))
2096
        return;
2097
 
2098
      /* Do not propagate addresses that point to volatiles into memory
2099
         stmts without volatile operands.  */
2100
      if (POINTER_TYPE_P (TREE_TYPE (val))
2101
          && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
2102
          && gimple_has_mem_ops (stmt)
2103
          && !gimple_has_volatile_ops (stmt))
2104
        return;
2105
 
2106
      /* Do not propagate copies if the propagated value is at a deeper loop
2107
         depth than the propagatee.  Otherwise, this may move loop variant
2108
         variables outside of their loops and prevent coalescing
2109
         opportunities.  If the value was loop invariant, it will be hoisted
2110
         by LICM and exposed for copy propagation.  */
2111
      if (loop_depth_of_name (val) > loop_depth_of_name (op))
2112
        return;
2113
 
2114
      /* Do not propagate copies into simple IV increment statements.
2115
         See PR23821 for how this can disturb IV analysis.  */
2116
      if (TREE_CODE (val) != INTEGER_CST
2117
          && simple_iv_increment_p (stmt))
2118
        return;
2119
 
2120
      /* Dump details.  */
2121
      if (dump_file && (dump_flags & TDF_DETAILS))
2122
        {
2123
          fprintf (dump_file, "  Replaced '");
2124
          print_generic_expr (dump_file, op, dump_flags);
2125
          fprintf (dump_file, "' with %s '",
2126
                   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2127
          print_generic_expr (dump_file, val, dump_flags);
2128
          fprintf (dump_file, "'\n");
2129
        }
2130
 
2131
      if (TREE_CODE (val) != SSA_NAME)
2132
        opt_stats.num_const_prop++;
2133
      else
2134
        opt_stats.num_copy_prop++;
2135
 
2136
      propagate_value (op_p, val);
2137
 
2138
      /* And note that we modified this statement.  This is now
2139
         safe, even if we changed virtual operands since we will
2140
         rescan the statement and rewrite its operands again.  */
2141
      gimple_set_modified (stmt, true);
2142
    }
2143
}
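/* An example of the IV restriction above (illustrative): given

       # i_1 = PHI <j_5(2), i_2(3)>
       i_2 = i_1 + 1;

   even if CONST_AND_COPIES records i_1 = j_5, the increment is left
   alone; rewriting it to i_2 = j_5 + 1 would obscure the induction
   variable from later IV analysis (see PR23821).  */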
2144
 
2145
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2146
   known value for that SSA_NAME (or NULL if no value is known).
2147
 
2148
   Propagate values from CONST_AND_COPIES into the uses, vuses and
2149
   vdef_ops of STMT.  */
2150
 
2151
static void
2152
cprop_into_stmt (gimple stmt)
2153
{
2154
  use_operand_p op_p;
2155
  ssa_op_iter iter;
2156
 
2157
  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2158
    cprop_operand (stmt, op_p);
2159
}
2160
 
2161
/* Optimize the statement pointed to by iterator SI.
2162
 
2163
   We try to perform some simplistic global redundancy elimination and
2164
   constant propagation:
2165
 
2166
   1- To detect global redundancy, we keep track of expressions that have
2167
      been computed in this block and its dominators.  If we find that the
2168
      same expression is computed more than once, we eliminate repeated
2169
      computations by using the target of the first one.
2170
 
2171
   2- Constant values and copy assignments.  This is used to do very
2172
      simplistic constant and copy propagation.  When a constant or copy
2173
      assignment is found, we map the value on the RHS of the assignment to
2174
      the variable in the LHS in the CONST_AND_COPIES table.  */
2175
 
2176
static void
2177
optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2178
{
2179
  gimple stmt, old_stmt;
2180
  bool may_optimize_p;
2181
  bool modified_p = false;
2182
 
2183
  old_stmt = stmt = gsi_stmt (si);
2184
 
2185
  if (dump_file && (dump_flags & TDF_DETAILS))
2186
    {
2187
      fprintf (dump_file, "Optimizing statement ");
2188
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2189
    }
2190
 
2191
  if (gimple_code (stmt) == GIMPLE_COND)
2192
    canonicalize_comparison (stmt);
2193
 
2194
  update_stmt_if_modified (stmt);
2195
  opt_stats.num_stmts++;
2196
 
2197
  /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
2198
  cprop_into_stmt (stmt);
2199
 
2200
  /* If the statement has been modified with constant replacements,
2201
     fold its RHS before checking for redundant computations.  */
2202
  if (gimple_modified_p (stmt))
2203
    {
2204
      tree rhs = NULL;
2205
 
2206
      /* Try to fold the statement making sure that STMT is kept
2207
         up to date.  */
2208
      if (fold_stmt (&si))
2209
        {
2210
          stmt = gsi_stmt (si);
2211
          gimple_set_modified (stmt, true);
2212
 
2213
          if (dump_file && (dump_flags & TDF_DETAILS))
2214
            {
2215
              fprintf (dump_file, "  Folded to: ");
2216
              print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2217
            }
2218
        }
2219
 
2220
      /* We only need to consider cases that can yield a gimple operand.  */
2221
      if (gimple_assign_single_p (stmt))
2222
        rhs = gimple_assign_rhs1 (stmt);
2223
      else if (gimple_code (stmt) == GIMPLE_GOTO)
2224
        rhs = gimple_goto_dest (stmt);
2225
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
2226
        /* This should never be an ADDR_EXPR.  */
2227
        rhs = gimple_switch_index (stmt);
2228
 
2229
      if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2230
        recompute_tree_invariant_for_addr_expr (rhs);
2231
 
2232
      /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2233
         even if fold_stmt updated the stmt already and thus cleared
2234
         gimple_modified_p flag on it.  */
2235
      modified_p = true;
2236
    }
2237
 
2238
  /* Check for redundant computations.  Do this optimization only
2239
     for side-effect-free assignments, calls that have an LHS,
     conditionals and switches.  */
2240
  may_optimize_p = (!gimple_has_side_effects (stmt)
2241
                    && (is_gimple_assign (stmt)
2242
                        || (is_gimple_call (stmt)
2243
                            && gimple_call_lhs (stmt) != NULL_TREE)
2244
                        || gimple_code (stmt) == GIMPLE_COND
2245
                        || gimple_code (stmt) == GIMPLE_SWITCH));
2246
 
2247
  if (may_optimize_p)
2248
    {
2249
      if (gimple_code (stmt) == GIMPLE_CALL)
2250
        {
2251
          /* Resolve __builtin_constant_p.  If it hasn't been
2252
             folded to integer_one_node by now, it's fairly
2253
             certain that the value simply isn't constant.  */
2254
          tree callee = gimple_call_fndecl (stmt);
2255
          if (callee
2256
              && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2257
              && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2258
            {
2259
              propagate_tree_value_into_stmt (&si, integer_zero_node);
2260
              stmt = gsi_stmt (si);
2261
            }
2262
        }
2263
 
2264
      update_stmt_if_modified (stmt);
2265
      eliminate_redundant_computations (&si);
2266
      stmt = gsi_stmt (si);
2267
 
2268
      /* Perform simple redundant store elimination.  */
2269
      if (gimple_assign_single_p (stmt)
2270
          && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2271
        {
2272
          tree lhs = gimple_assign_lhs (stmt);
2273
          tree rhs = gimple_assign_rhs1 (stmt);
2274
          tree cached_lhs;
2275
          gimple new_stmt;
2276
          if (TREE_CODE (rhs) == SSA_NAME)
2277
            {
2278
              tree tem = SSA_NAME_VALUE (rhs);
2279
              if (tem)
2280
                rhs = tem;
2281
            }
2282
          /* Build a new statement with the RHS and LHS exchanged.  */
2283
          if (TREE_CODE (rhs) == SSA_NAME)
2284
            {
2285
              gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2286
              new_stmt = gimple_build_assign (rhs, lhs);
2287
              SSA_NAME_DEF_STMT (rhs) = defstmt;
2288
            }
2289
          else
2290
            new_stmt = gimple_build_assign (rhs, lhs);
2291
          gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2292
          cached_lhs = lookup_avail_expr (new_stmt, false);
2293
          if (cached_lhs
2294
              && rhs == cached_lhs)
2295
            {
2296
              basic_block bb = gimple_bb (stmt);
2297
              int lp_nr = lookup_stmt_eh_lp (stmt);
2298
              unlink_stmt_vdef (stmt);
2299
              gsi_remove (&si, true);
2300
              if (lp_nr != 0)
2301
                {
2302
                  bitmap_set_bit (need_eh_cleanup, bb->index);
2303
                  if (dump_file && (dump_flags & TDF_DETAILS))
2304
                    fprintf (dump_file, "  Flagged to clear EH edges.\n");
2305
                }
2306
              return;
2307
            }
2308
        }
2309
    }
2310
 
2311
  /* Record any additional equivalences created by this statement.  */
2312
  if (is_gimple_assign (stmt))
2313
    record_equivalences_from_stmt (stmt, may_optimize_p);
2314
 
2315
  /* If STMT is a COND_EXPR and it was modified, then we may know
2316
     where it goes.  If that is the case, then mark the CFG as altered.
2317
 
2318
     This will cause us to later call remove_unreachable_blocks and
2319
     cleanup_tree_cfg when it is safe to do so.  It is not safe to
2320
     clean things up here since removal of edges and such can trigger
2321
     the removal of PHI nodes, which in turn can release SSA_NAMEs to
2322
     the manager.
2323
 
2324
     That's all fine and good, except that once SSA_NAMEs are released
2325
     to the manager, we must not call create_ssa_name until all references
2326
     to released SSA_NAMEs have been eliminated.
2327
 
2328
     All references to the deleted SSA_NAMEs cannot be eliminated until
2329
     we remove unreachable blocks.
2330
 
2331
     We cannot remove unreachable blocks until after we have completed
2332
     any queued jump threading.
2333
 
2334
     We cannot complete any queued jump threads until we have taken
2335
     appropriate variables out of SSA form.  Taking variables out of
2336
     SSA form can call create_ssa_name and thus we lose.
2337
 
2338
     Ultimately I suspect we're going to need to change the interface
2339
     into the SSA_NAME manager.  */
2340
  if (gimple_modified_p (stmt) || modified_p)
2341
    {
2342
      tree val = NULL;
2343
 
2344
      update_stmt_if_modified (stmt);
2345
 
2346
      if (gimple_code (stmt) == GIMPLE_COND)
2347
        val = fold_binary_loc (gimple_location (stmt),
2348
                           gimple_cond_code (stmt), boolean_type_node,
2349
                           gimple_cond_lhs (stmt),  gimple_cond_rhs (stmt));
2350
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
2351
        val = gimple_switch_index (stmt);
2352
 
2353
      if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2354
        cfg_altered = true;
2355
 
2356
      /* If we simplified a statement in such a way as to be shown that it
2357
         cannot trap, update the eh information and the cfg to match.  */
2358
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2359
        {
2360
          bitmap_set_bit (need_eh_cleanup, bb->index);
2361
          if (dump_file && (dump_flags & TDF_DETAILS))
2362
            fprintf (dump_file, "  Flagged to clear EH edges.\n");
2363
        }
2364
    }
2365
}
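/* The redundant-store elimination above catches fragments like this
   (hypothetical IL):

       x_1 = *p_2;
       ...
       *p_2 = x_1;    <-- stores the value the location already holds

   Looking up the artificial statement "x_1 = *p_2" with the store's
   VUSE finds the earlier load, so the store is dead and is removed.  */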
2366
 
2367
/* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2368
   If found, return its LHS. Otherwise insert STMT in the table and
2369
   return NULL_TREE.
2370
 
2371
   Also, when an expression is first inserted in the table, it is
2372
   also added to AVAIL_EXPRS_STACK, so that it can be removed when
2373
   we finish processing this block and its children.  */
2374
 
2375
static tree
2376
lookup_avail_expr (gimple stmt, bool insert)
2377
{
2378
  void **slot;
2379
  tree lhs;
2380
  tree temp;
2381
  struct expr_hash_elt element;
2382
 
2383
  /* Get LHS of phi, assignment, or call; else NULL_TREE.  */
2384
  if (gimple_code (stmt) == GIMPLE_PHI)
2385
    lhs = gimple_phi_result (stmt);
2386
  else
2387
    lhs = gimple_get_lhs (stmt);
2388
 
2389
  initialize_hash_element (stmt, lhs, &element);
2390
 
2391
  if (dump_file && (dump_flags & TDF_DETAILS))
2392
    {
2393
      fprintf (dump_file, "LKUP ");
2394
      print_expr_hash_elt (dump_file, &element);
2395
    }
2396
 
2397
  /* Don't bother remembering constant assignments and copy operations.
2398
     Constants and copy operations are handled by the constant/copy propagator
2399
     in optimize_stmt.  */
2400
  if (element.expr.kind == EXPR_SINGLE
2401
      && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2402
          || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2403
    return NULL_TREE;
2404
 
2405
  /* Finally try to find the expression in the main expression hash table.  */
2406
  slot = htab_find_slot_with_hash (avail_exprs, &element, element.hash,
2407
                                   (insert ? INSERT : NO_INSERT));
2408
  if (slot == NULL)
2409
    return NULL_TREE;
2410
 
2411
  if (*slot == NULL)
2412
    {
2413
      struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2414
      *element2 = element;
2415
      element2->stamp = element2;
2416
      *slot = (void *) element2;
2417
 
2418
      if (dump_file && (dump_flags & TDF_DETAILS))
2419
        {
2420
          fprintf (dump_file, "2>>> ");
2421
          print_expr_hash_elt (dump_file, element2);
2422
        }
2423
 
2424
      VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element2);
2425
      return NULL_TREE;
2426
    }
2427
 
2428
  /* Extract the LHS of the assignment so that it can be used as the current
2429
     definition of another variable.  */
2430
  lhs = ((struct expr_hash_elt *)*slot)->lhs;
2431
 
2432
  /* See if the LHS appears in the CONST_AND_COPIES table.  If it does, then
2433
     use the value from the const_and_copies table.  */
2434
  if (TREE_CODE (lhs) == SSA_NAME)
2435
    {
2436
      temp = SSA_NAME_VALUE (lhs);
2437
      if (temp)
2438
        lhs = temp;
2439
    }
2440
 
2441
  if (dump_file && (dump_flags & TDF_DETAILS))
2442
    {
2443
      fprintf (dump_file, "FIND: ");
2444
      print_generic_expr (dump_file, lhs, 0);
2445
      fprintf (dump_file, "\n");
2446
    }
2447
 
2448
  return lhs;
2449
}
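/* Typical usage (a sketch based on the callers in this file): passes
   that may legally reuse a prior value call

       cached_lhs = lookup_avail_expr (stmt, true);

   while purely speculative queries -- such as the redundant-store
   check in optimize_stmt -- pass false so that a miss leaves the
   hash table untouched.  */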
2450
 
2451
/* Hashing and equality functions for AVAIL_EXPRS.  We compute a value number
2452
   for expressions using the code of the expression and the SSA numbers of
2453
   its operands.  */
2454
 
2455
static hashval_t
2456
avail_expr_hash (const void *p)
2457
{
2458
  gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
2459
  const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2460
  tree vuse;
2461
  hashval_t val = 0;
2462
 
2463
  val = iterative_hash_hashable_expr (expr, val);
2464
 
2465
  /* If the hash table entry is not associated with a statement, then we
2466
     can just hash the expression and not worry about virtual operands
2467
     and such.  */
2468
  if (!stmt)
2469
    return val;
2470
 
2471
  /* Add the SSA version number of the vuse operand.  This is important
2472
     because compound variables like arrays are not renamed in the
2473
     operands.  Rather, the rename is done on the virtual variable
2474
     representing all the elements of the array.  */
2475
  if ((vuse = gimple_vuse (stmt)))
2476
    val = iterative_hash_expr (vuse, val);
2477
 
2478
  return val;
2479
}
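/* Illustrative consequence: the loads "x_1 = *p_2" and "y_3 = *p_2"
   hash identically only while they share the same VUSE; a store in
   between creates a new virtual definition, changes the hash, and so
   prevents the stale entry from satisfying the second lookup.  */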
2480
 
2481
static hashval_t
2482
real_avail_expr_hash (const void *p)
2483
{
2484
  return ((const struct expr_hash_elt *)p)->hash;
2485
}
2486
 
2487
static int
2488
avail_expr_eq (const void *p1, const void *p2)
2489
{
2490
  gimple stmt1 = ((const struct expr_hash_elt *)p1)->stmt;
2491
  const struct hashable_expr *expr1 = &((const struct expr_hash_elt *)p1)->expr;
2492
  const struct expr_hash_elt *stamp1 = ((const struct expr_hash_elt *)p1)->stamp;
2493
  gimple stmt2 = ((const struct expr_hash_elt *)p2)->stmt;
2494
  const struct hashable_expr *expr2 = &((const struct expr_hash_elt *)p2)->expr;
2495
  const struct expr_hash_elt *stamp2 = ((const struct expr_hash_elt *)p2)->stamp;
2496
 
2497
  /* This case should apply only when removing entries from the table.  */
2498
  if (stamp1 == stamp2)
2499
    return true;
2500
 
2501
  /* FIXME tuples:
2502
     We add stmts to a hash table and then modify them.  To detect the case
2503
     that we modify a stmt and then search for it, we assume that the hash
2504
     is always modified by that change.
2505
     We have to fully check why this doesn't happen on trunk or rewrite
2506
     this in a more reliable (and easier to understand) way.  */
2507
  if (((const struct expr_hash_elt *)p1)->hash
2508
      != ((const struct expr_hash_elt *)p2)->hash)
2509
    return false;
2510
 
2511
  /* In case of a collision, both RHSes have to be identical and have the
2512
     same VUSE operands.  */
2513
  if (hashable_expr_equal_p (expr1, expr2)
2514
      && types_compatible_p (expr1->type, expr2->type))
2515
    {
2516
      /* Note that STMT1 and/or STMT2 may be NULL.  */
2517
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
2518
              == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
2519
    }
2520
 
2521
  return false;
2522
}
2523
 
2524
/* PHI-ONLY copy and constant propagation.  This pass is meant to clean
2525
   up degenerate PHIs created by or exposed by jump threading.  */
2526
 
2527
/* Given PHI, return its RHS if the PHI is a degenerate, otherwise return
2528
   NULL.  */
2529
 
2530
tree
2531
degenerate_phi_result (gimple phi)
2532
{
2533
  tree lhs = gimple_phi_result (phi);
2534
  tree val = NULL;
2535
  size_t i;
2536
 
2537
  /* Ignoring arguments which are the same as LHS, if all the remaining
2538
     arguments are the same, then the PHI is a degenerate and has the
2539
     value of that common argument.  */
2540
  for (i = 0; i < gimple_phi_num_args (phi); i++)
2541
    {
2542
      tree arg = gimple_phi_arg_def (phi, i);
2543
 
2544
      if (arg == lhs)
2545
        continue;
2546
      else if (!arg)
2547
        break;
2548
      else if (!val)
2549
        val = arg;
2550
      else if (arg == val)
2551
        continue;
2552
      /* We bring in some of operand_equal_p not only to speed things
2553
         up, but also to avoid crashing when dereferencing the type of
2554
         a released SSA name.  */
2555
      else if (TREE_CODE (val) != TREE_CODE (arg)
2556
               || TREE_CODE (val) == SSA_NAME
2557
               || !operand_equal_p (arg, val, 0))
2558
        break;
2559
    }
2560
  return (i == gimple_phi_num_args (phi) ? val : NULL);
2561
}
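/* Examples (illustrative): the PHI

       x_2 = PHI <y_1(3), y_1(4), x_2(5)>

   is degenerate with value y_1 -- the self-referencing argument is
   ignored -- whereas

       x_2 = PHI <y_1(3), z_4(4)>

   is not, so NULL is returned for it.  */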
2562
 
2563
/* Given a statement STMT, which is either a PHI node or an assignment,
2564
   remove it from the IL.  */
2565
 
2566
static void
2567
remove_stmt_or_phi (gimple stmt)
2568
{
2569
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2570
 
2571
  if (gimple_code (stmt) == GIMPLE_PHI)
2572
    remove_phi_node (&gsi, true);
2573
  else
2574
    {
2575
      gsi_remove (&gsi, true);
2576
      release_defs (stmt);
2577
    }
2578
}
2579
 
2580
/* Given a statement STMT, which is either a PHI node or an assignment,
2581
   return the "rhs" of the node.  In the case of a non-degenerate
2582
   phi, NULL is returned.  */
2583
 
2584
static tree
2585
get_rhs_or_phi_arg (gimple stmt)
2586
{
2587
  if (gimple_code (stmt) == GIMPLE_PHI)
2588
    return degenerate_phi_result (stmt);
2589
  else if (gimple_assign_single_p (stmt))
2590
    return gimple_assign_rhs1 (stmt);
2591
  else
2592
    gcc_unreachable ();
2593
}
2594
 
2595
 
2596
/* Given a statement STMT, which is either a PHI node or an assignment,
2597
   return the "lhs" of the node.  */
2598
 
2599
static tree
2600
get_lhs_or_phi_result (gimple stmt)
2601
{
2602
  if (gimple_code (stmt) == GIMPLE_PHI)
2603
    return gimple_phi_result (stmt);
2604
  else if (is_gimple_assign (stmt))
2605
    return gimple_assign_lhs (stmt);
2606
  else
2607
    gcc_unreachable ();
2608
}
2609
 
2610
/* Propagate RHS into all uses of LHS (when possible).
2611
 
2612
   RHS and LHS are derived from STMT, which is passed in solely so
2613
   that we can remove it if propagation is successful.
2614
 
2615
   When propagating into a PHI node or into a statement which turns
2616
   into a trivial copy or constant initialization, set the
2617
   appropriate bit in INTERESTING_NAMEs so that we will visit those
2618
   nodes as well in an effort to pick up secondary optimization
2619
   opportunities.  */
2620
 
2621
static void
2622
propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2623
{
2624
  /* First verify that propagation is valid and isn't going to move a
2625
     loop variant variable outside its loop.  */
2626
  if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2627
      && (TREE_CODE (rhs) != SSA_NAME
2628
          || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
2629
      && may_propagate_copy (lhs, rhs)
2630
      && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
2631
    {
2632
      use_operand_p use_p;
2633
      imm_use_iterator iter;
2634
      gimple use_stmt;
2635
      bool all = true;
2636
 
2637
      /* Dump details.  */
2638
      if (dump_file && (dump_flags & TDF_DETAILS))
2639
        {
2640
          fprintf (dump_file, "  Replacing '");
2641
          print_generic_expr (dump_file, lhs, dump_flags);
2642
          fprintf (dump_file, "' with %s '",
2643
                   (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2644
          print_generic_expr (dump_file, rhs, dump_flags);
2645
          fprintf (dump_file, "'\n");
2646
        }
2647
 
2648
      /* Walk over every use of LHS and try to replace the use with RHS.
2649
         At this point the only reason why such a propagation would not
2650
         be successful would be if the use occurs in an ASM_EXPR.  */
2651
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2652
        {
2653
          /* Leave debug stmts alone.  If we succeed in propagating
2654
             all non-debug uses, we'll drop the DEF, and propagation
2655
             into debug stmts will occur then.  */
2656
          if (gimple_debug_bind_p (use_stmt))
2657
            continue;
2658
 
2659
          /* It's not always safe to propagate into an ASM_EXPR.  */
2660
          if (gimple_code (use_stmt) == GIMPLE_ASM
2661
              && ! may_propagate_copy_into_asm (lhs))
2662
            {
2663
              all = false;
2664
              continue;
2665
            }
2666
 
2667
          /* It's not ok to propagate into the definition stmt of RHS.
2668
                <bb 9>:
2669
                  # prephitmp.12_36 = PHI <g_67.1_6(9)>
2670
                  g_67.1_6 = prephitmp.12_36;
2671
                  goto <bb 9>;
2672
             While this is strictly all dead code we do not want to
2673
             deal with this here.  */
2674
          if (TREE_CODE (rhs) == SSA_NAME
2675
              && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2676
            {
2677
              all = false;
2678
              continue;
2679
            }
2680
 
2681
          /* Dump details.  */
2682
          if (dump_file && (dump_flags & TDF_DETAILS))
2683
            {
2684
              fprintf (dump_file, "    Original statement:");
2685
              print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2686
            }
2687
 
2688
          /* Propagate the RHS into this use of the LHS.  */
2689
          FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2690
            propagate_value (use_p, rhs);
2691
 
2692
          /* Special cases to avoid useless calls into the folding
2693
             routines, operand scanning, etc.
2694
 
2695
             First, propagation into a PHI may cause the PHI to become
2696
             a degenerate, so mark the PHI as interesting.  No other
2697
             actions are necessary.
2698
 
2699
             Second, if we're propagating a virtual operand and the
2700
             propagation does not change the underlying _DECL node for
2701
             the virtual operand, then no further actions are necessary.  */
2702
          if (gimple_code (use_stmt) == GIMPLE_PHI
2703
              || (! is_gimple_reg (lhs)
2704
                  && TREE_CODE (rhs) == SSA_NAME
2705
                  && SSA_NAME_VAR (lhs) == SSA_NAME_VAR (rhs)))
2706
            {
2707
              /* Dump details.  */
2708
              if (dump_file && (dump_flags & TDF_DETAILS))
2709
                {
2710
                  fprintf (dump_file, "    Updated statement:");
2711
                  print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2712
                }
2713
 
2714
              /* Propagation into a PHI may expose new degenerate PHIs,
2715
                 so mark the result of the PHI as interesting.  */
2716
              if (gimple_code (use_stmt) == GIMPLE_PHI)
2717
                {
2718
                  tree result = get_lhs_or_phi_result (use_stmt);
2719
                  bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2720
                }
2721
 
2722
              continue;
2723
            }
2724
 
2725
          /* From this point onward we are propagating into a
2726
             real statement.  Folding may (or may not) be possible;
2727
             we may expose new operands, expose dead EH edges,
2728
             etc.  */
2729
          /* NOTE tuples. In the tuples world, fold_stmt_inplace
2730
             cannot fold a call that simplifies to a constant,
2731
             because the GIMPLE_CALL must be replaced by a
2732
             GIMPLE_ASSIGN, and there is no way to effect such a
2733
             transformation in-place.  We might want to consider
2734
             using the more general fold_stmt here.  */
2735
            {
2736
              gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2737
              fold_stmt_inplace (&gsi);
2738
            }
2739
 
2740
          /* Sometimes propagation can expose new operands to the
2741
             renamer.  */
2742
          update_stmt (use_stmt);
2743
 
2744
          /* Dump details.  */
2745
          if (dump_file && (dump_flags & TDF_DETAILS))
2746
            {
2747
              fprintf (dump_file, "    Updated statement:");
2748
              print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2749
            }
2750
 
2751
          /* If we replaced a variable index with a constant, then
2752
             we would need to update the invariant flag for ADDR_EXPRs.  */
2753
          if (gimple_assign_single_p (use_stmt)
2754
              && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2755
            recompute_tree_invariant_for_addr_expr
2756
                (gimple_assign_rhs1 (use_stmt));
2757
 
2758
          /* If we cleaned up EH information from the statement,
2759
             mark its containing block as needing EH cleanups.  */
2760
          if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2761
            {
2762
              bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2763
              if (dump_file && (dump_flags & TDF_DETAILS))
2764
                fprintf (dump_file, "  Flagged to clear EH edges.\n");
2765
            }
2766
 
2767
          /* Propagation may expose new trivial copy/constant propagation
2768
             opportunities.  */
2769
          if (gimple_assign_single_p (use_stmt)
2770
              && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2771
              && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2772
                  || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2773
            {
2774
              tree result = get_lhs_or_phi_result (use_stmt);
2775
              bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2776
            }
2777
 
2778
          /* Propagation into these nodes may make certain edges in
2779
             the CFG unexecutable.  We want to identify them, as PHI nodes
2780
             at the destination of those unexecutable edges may become
2781
             degenerates.  */
2782
          else if (gimple_code (use_stmt) == GIMPLE_COND
2783
                   || gimple_code (use_stmt) == GIMPLE_SWITCH
2784
                   || gimple_code (use_stmt) == GIMPLE_GOTO)
2785
            {
2786
              tree val;
2787
 
2788
              if (gimple_code (use_stmt) == GIMPLE_COND)
2789
                val = fold_binary_loc (gimple_location (use_stmt),
2790
                                   gimple_cond_code (use_stmt),
2791
                                   boolean_type_node,
2792
                                   gimple_cond_lhs (use_stmt),
2793
                                   gimple_cond_rhs (use_stmt));
2794
              else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2795
                val = gimple_switch_index (use_stmt);
2796
              else
2797
                val = gimple_goto_dest  (use_stmt);
2798
 
2799
              if (val && is_gimple_min_invariant (val))
2800
                {
2801
                  basic_block bb = gimple_bb (use_stmt);
2802
                  edge te = find_taken_edge (bb, val);
2803
                  edge_iterator ei;
2804
                  edge e;
2805
                  gimple_stmt_iterator gsi, psi;
2806
 
2807
                  /* Remove all outgoing edges except TE.  */
2808
                  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2809
                    {
2810
                      if (e != te)
2811
                        {
2812
                          /* Mark all the PHI nodes at the destination of
2813
                             the unexecutable edge as interesting.  */
2814
                          for (psi = gsi_start_phis (e->dest);
2815
                               !gsi_end_p (psi);
2816
                               gsi_next (&psi))
2817
                            {
2818
                              gimple phi = gsi_stmt (psi);
2819
 
2820
                              tree result = gimple_phi_result (phi);
2821
                              int version = SSA_NAME_VERSION (result);
2822
 
2823
                              bitmap_set_bit (interesting_names, version);
2824
                            }
2825
 
2826
                          te->probability += e->probability;
2827
 
2828
                          te->count += e->count;
2829
                          remove_edge (e);
2830
                          cfg_altered = true;
2831
                        }
2832
                      else
2833
                        ei_next (&ei);
2834
                    }
2835
 
2836
                  gsi = gsi_last_bb (gimple_bb (use_stmt));
2837
                  gsi_remove (&gsi, true);
2838
 
2839
                  /* And fixup the flags on the single remaining edge.  */
2840
                  te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2841
                  te->flags &= ~EDGE_ABNORMAL;
2842
                  te->flags |= EDGE_FALLTHRU;
2843
                  if (te->probability > REG_BR_PROB_BASE)
2844
                    te->probability = REG_BR_PROB_BASE;
2845
                }
2846
            }
2847
        }
2848
 
2849
      /* Ensure there is nothing else to do.  */
2850
      gcc_assert (!all || has_zero_uses (lhs));
2851
 
2852
      /* If we were able to propagate away all uses of LHS, then
2853
         we can remove STMT.  */
2854
      if (all)
2855
        remove_stmt_or_phi (stmt);
2856
    }
2857
}
2858
 
2859
/* STMT is either a PHI node (potentially a degenerate PHI node) or
2860
   a statement that is a trivial copy or constant initialization.
2861
 
2862
   Attempt to eliminate STMT by propagating its RHS into all uses of
2863
   its LHS.  This may in turn set new bits in INTERESTING_NAMES
2864
   for nodes we want to revisit later.
2865
 
2866
   All exit paths should clear INTERESTING_NAMES for the result
2867
   of STMT.  */
2868
 
2869
static void
2870
eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2871
{
2872
  tree lhs = get_lhs_or_phi_result (stmt);
2873
  tree rhs;
2874
  int version = SSA_NAME_VERSION (lhs);
2875
 
2876
  /* If the LHS of this statement or PHI has no uses, then we can
2877
     just eliminate it.  This can occur if, for example, the PHI
2878
     was created by block duplication due to threading and its only
2879
     use was in the conditional at the end of the block which was
2880
     deleted.  */
2881
  if (has_zero_uses (lhs))
2882
    {
2883
      bitmap_clear_bit (interesting_names, version);
2884
      remove_stmt_or_phi (stmt);
2885
      return;
2886
    }
2887
 
2888
  /* Get the RHS of the assignment or PHI node if the PHI is a
2889
     degenerate.  */
2890
  rhs = get_rhs_or_phi_arg (stmt);
2891
  if (!rhs)
2892
    {
2893
      bitmap_clear_bit (interesting_names, version);
2894
      return;
2895
    }
2896
 
2897
  propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2898
 
2899
  /* Note that STMT may well have been deleted by now, so do
2900
     not access it; instead use the saved version number to clear
2901
     STMT's entry in the worklist.  */
2902
  bitmap_clear_bit (interesting_names, version);
2903
}
2904
 
2905
/* The first phase in degenerate PHI elimination.
2906
 
2907
   Eliminate the degenerate PHIs in BB, then recurse on the
2908
   dominator children of BB.  */
2909
 
2910
static void
2911
eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
2912
{
2913
  gimple_stmt_iterator gsi;
2914
  basic_block son;
2915
 
2916
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2917
    {
2918
      gimple phi = gsi_stmt (gsi);
2919
 
2920
      eliminate_const_or_copy (phi, interesting_names);
2921
    }
2922
 
2923
  /* Recurse into the dominator children of BB.  */
2924
  for (son = first_dom_son (CDI_DOMINATORS, bb);
2925
       son;
2926
       son = next_dom_son (CDI_DOMINATORS, son))
2927
    eliminate_degenerate_phis_1 (son, interesting_names);
2928
}
2929
 
2930
 
2931
/* A very simple pass to eliminate degenerate PHI nodes from the
2932
   IL.  This is meant to be fast enough to be run several
2933
   times in the optimization pipeline.
2934
 
2935
   Certain optimizations, particularly those which duplicate blocks
2936
   or remove edges from the CFG can create or expose PHIs which are
2937
   trivial copies or constant initializations.
2938
 
2939
   While we could pick up these optimizations in DOM or with the
2940
   combination of copy-prop and CCP, those solutions are far too
2941
   heavy-weight for our needs.
2942
 
2943
   This implementation has two phases so that we can efficiently
2944
   eliminate the first order degenerate PHIs and second order
2945
   degenerate PHIs.
2946
 
2947
   The first phase performs a dominator walk to identify and eliminate
2948
   the vast majority of the degenerate PHIs.  When a degenerate PHI
2949
   is identified and eliminated any affected statements or PHIs
2950
   are put on a worklist.
2951
 
2952
   The second phase eliminates degenerate PHIs and trivial copies
2953
   or constant initializations using the worklist.  This is how we
2954
   pick up the secondary optimization opportunities with minimal
2955
   cost.  */
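
/* An illustrative sketch of the two phases, again with hypothetical
   GIMPLE (the SSA names are invented for the example).  The first,
   dominator-walk phase eliminates a first-order degenerate PHI
   directly:

       x_1 = PHI <y_0(2), y_0(3)>
       z_2 = PHI <x_1(4), y_0(5)>

   Replacing x_1 with y_0 turns the second PHI into
   z_2 = PHI <y_0(4), y_0(5)>, a second-order degenerate PHI.  The
   worklist ensures it is revisited and eliminated even if the
   dominator walk has already passed over its block.  */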

static unsigned int
eliminate_degenerate_phis (void)
{
  bitmap interesting_names;
  bitmap interesting_names1;

  /* Bitmap of blocks which need EH information updated.  We cannot
     update it on the fly as doing so invalidates the dominator tree.  */
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  /* INTERESTING_NAMES is effectively our worklist, indexed by
     SSA_NAME_VERSION.

     A set bit indicates that the statement or PHI node which
     defines the SSA_NAME should be (re)examined to determine if
     it has become a degenerate PHI or a trivial const/copy propagation
     opportunity.

     Experiments have shown that we generally get better compilation
     time behavior with bitmaps rather than sbitmaps.  */
  interesting_names = BITMAP_ALLOC (NULL);
  interesting_names1 = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* First phase.  Eliminate degenerate PHIs via a dominator
     walk of the CFG.

     Experiments have indicated that we generally get better
     compile-time behavior by visiting blocks in the first
     phase in dominator order.  Presumably this is because walking
     in dominator order leaves fewer PHIs for later examination
     by the worklist phase.  */
  eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR, interesting_names);

  /* Second phase.  Eliminate second-order degenerate PHIs as well
     as trivial copies or constant initializations identified by
     the first phase or by this phase.  Basically we keep iterating
     until our set of INTERESTING_NAMES is empty.  */
  while (!bitmap_empty_p (interesting_names))
    {
      unsigned int i;
      bitmap_iterator bi;

      /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
         changed during the loop.  Copy it to another bitmap and
         use that.  */
      bitmap_copy (interesting_names1, interesting_names);

      EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
        {
          tree name = ssa_name (i);

          /* Ignore SSA_NAMEs that have been released because
             their defining statement was deleted (unreachable).  */
          if (name)
            eliminate_const_or_copy (SSA_NAME_DEF_STMT (name),
                                     interesting_names);
        }
    }

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Propagation of constants and copies may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      BITMAP_FREE (need_eh_cleanup);
    }

  BITMAP_FREE (interesting_names);
  BITMAP_FREE (interesting_names1);
  return 0;
}

struct gimple_opt_pass pass_phi_only_cprop =
{
 {
  GIMPLE_PASS,
  "phicprop",                           /* name */
  gate_dominator,                       /* gate */
  eliminate_degenerate_phis,            /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TREE_PHI_CPROP,                    /* tv_id */
  PROP_cfg | PROP_ssa,                  /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_cleanup_cfg
    | TODO_ggc_collect
    | TODO_verify_ssa
    | TODO_verify_stmts
    | TODO_update_ssa                   /* todo_flags_finish */
 }
};
