tree-ssa-dom.c: GCC 4.5.1 source (gcc/tree-ssa-dom.c), from the OpenCores openrisc repository (trunk, rev 859)
https://opencores.org/ocsvn/openrisc/openrisc/trunk
/* SSA Dominator optimizations for trees
   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "rtl.h"
#include "tm_p.h"
#include "ggc.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "output.h"
#include "expr.h"
#include "function.h"
#include "diagnostic.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-flow.h"
#include "domwalk.h"
#include "real.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "langhooks.h"
#include "params.h"

/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_CALL
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0; tree opnd1; } binary;
    struct { tree fn; bool pure; size_t nargs; tree *args; } call;
  } ops;
};
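
/* For example, a GIMPLE assignment such as

     x_2 = y_1 + z_3;

   would be recorded (a sketch; see initialize_hash_element below) as

     expr.type             = TREE_TYPE (x_2)
     expr.kind             = EXPR_BINARY
     expr.ops.binary.op    = PLUS_EXPR
     expr.ops.binary.opnd0 = y_1
     expr.ops.binary.opnd1 = z_3

   so that a later occurrence of y_1 + z_3 hashes to the same entry
   and can be replaced by x_2.  */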

/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

struct cond_equivalence
{
  struct hashable_expr cond;
  tree value;
};

/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  The number of recorded conditions can vary, but
     can be determined by the condition's code.  So we have an array
     and its maximum index rather than use a varray.  */
  struct cond_equivalence *cond_equivalences;
  unsigned int max_cond_equivalences;
};
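
/* For instance, if a block ends in

     if (a_1 == 5)

   then traversing the true edge lets us record the simple equivalence
   a_1 = 5 (the LHS/RHS fields above), while the false edge records the
   condition a_1 == 5 as having the value false (a cond_equivalence).
   This is an illustrative sketch; the exact records are built by
   record_edge_info and record_conditions below.  */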

/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static htab_t avail_exprs;

/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;
DEF_VEC_P(expr_hash_elt_t);
DEF_VEC_ALLOC_P(expr_hash_elt_t,heap);

static VEC(expr_hash_elt_t,heap) *avail_exprs_stack;
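
/* As a sketch of the unwinding scheme: walking a dominator-tree path
   B1 -> B2, the stack evolves as

     enter B1:  push NULL marker; push expressions made available in B1
     enter B2:  push NULL marker; push expressions made available in B2
     leave B2:  pop and remove entries from AVAIL_EXPRS until the marker
     leave B1:  likewise

   so an expression is only visible while the block that made it
   available dominates the block being processed.  */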

/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};

/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static VEC(tree,heap) *const_and_copies_stack;

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static hashval_t real_avail_expr_hash (const void *);
static int avail_expr_eq (const void *, const void *);
static void htab_statistics (FILE *, htab_t);
static void record_cond (struct cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void dom_thread_across_edge (struct dom_walk_data *, edge);
static void dom_opt_leave_block (struct dom_walk_data *, basic_block);
static void dom_opt_enter_block (struct dom_walk_data *, basic_block);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);


/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      expr->type = NULL_TREE;

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn = gimple_call_fn (stmt);

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = (tree *) xcalloc (nargs, sizeof (tree));
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}

/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Compare two hashable_expr structures for equivalence.
   They are considered equivalent when the expressions
   they denote must necessarily be equal.  The logic is intended
   to follow that of operand_equal_p in fold-const.c.  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If both types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      {
        if (expr0->ops.binary.op != expr1->ops.binary.op)
          return false;

        if (operand_equal_p (expr0->ops.binary.opnd0,
                             expr1->ops.binary.opnd0, 0)
            && operand_equal_p (expr0->ops.binary.opnd1,
                                expr1->ops.binary.opnd1, 0))
          return true;

        /* For commutative ops, allow the other order.  */
        return (commutative_tree_code (expr0->ops.binary.op)
                && operand_equal_p (expr0->ops.binary.opnd0,
                                    expr1->ops.binary.opnd1, 0)
                && operand_equal_p (expr0->ops.binary.opnd1,
                                    expr1->ops.binary.opnd0, 0));
      }

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (! operand_equal_p (expr0->ops.call.fn,
                               expr1->ops.call.fn, 0))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}
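
/* As an example of the commutative case above: a_1 + b_2 and
   b_2 + a_1 compare equal (PLUS_EXPR is commutative), whereas
   a_1 - b_2 and b_2 - a_1 do not.  Impure calls never compare equal,
   even to identical copies of themselves, since their results may
   differ between invocations.  */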

/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow iterative_hash_expr in tree.c.  */

static hashval_t
iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      val = iterative_hash_expr (expr->ops.single.rhs, val);
      break;

    case EXPR_UNARY:
      val = iterative_hash_object (expr->ops.unary.op, val);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        val += TYPE_UNSIGNED (expr->type);

      val = iterative_hash_expr (expr->ops.unary.opnd, val);
      break;

    case EXPR_BINARY:
      val = iterative_hash_object (expr->ops.binary.op, val);
      if (commutative_tree_code (expr->ops.binary.op))
        val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
                                                expr->ops.binary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.binary.opnd0, val);
          val = iterative_hash_expr (expr->ops.binary.opnd1, val);
        }
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;

        val = iterative_hash_object (code, val);
        val = iterative_hash_expr (expr->ops.call.fn, val);
        for (i = 0; i < expr->ops.call.nargs; i++)
          val = iterative_hash_expr (expr->ops.call.args[i], val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  return val;
}
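
/* Note the pairing with hashable_expr_equal_p: because a_1 + b_2 and
   b_2 + a_1 compare equal, commutative operands must also be hashed
   order-independently (iterative_hash_exprs_commutative); otherwise
   equal expressions could land in different hash buckets and the
   redundancy would be missed.  */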

/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
      case EXPR_SINGLE:
        print_generic_expr (stream, element->expr.ops.single.rhs, 0);
        break;

      case EXPR_UNARY:
        fprintf (stream, "%s ", tree_code_name[element->expr.ops.unary.op]);
        print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
        break;

      case EXPR_BINARY:
        print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
        fprintf (stream, " %s ", tree_code_name[element->expr.ops.binary.op]);
        print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
        break;

      case EXPR_CALL:
        {
          size_t i;
          size_t nargs = element->expr.ops.call.nargs;

          print_generic_expr (stream, element->expr.ops.call.fn, 0);
          fprintf (stream, " (");
          for (i = 0; i < nargs; i++)
            {
              print_generic_expr (stream, element->expr.ops.call.args[i], 0);
              if (i + 1 < nargs)
                fprintf (stream, ", ");
            }
          fprintf (stream, ")");
        }
        break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, "          ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);

  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);

  free (element);
}

/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB (bb)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              if (edge_info->cond_equivalences)
                free (edge_info->cond_equivalences);
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

static unsigned int
tree_ssa_dominator_optimize (void)
{
  struct dom_walk_data walk_data;

  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = htab_create (1024, real_avail_expr_hash, avail_expr_eq,
                             free_expr_hash_elt);
  avail_exprs_stack = VEC_alloc (expr_hash_elt_t, heap, 20);
  const_and_copies_stack = VEC_alloc (tree, heap, 20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  /* Set up callbacks for the generic dominator tree walker.  */
  walk_data.dom_direction = CDI_DOMINATORS;
  walk_data.initialize_block_local_data = NULL;
  walk_data.before_dom_children = dom_opt_enter_block;
  walk_data.after_dom_children = dom_opt_leave_block;
  /* Right now we only attach a dummy COND_EXPR to the global data pointer.
     When we attach more stuff we'll need to fill this out with a real
     structure.  */
  walk_data.global_data = NULL;
  walk_data.block_local_data_size = 0;

  /* Now initialize the dominator walker.  */
  init_walk_dominator_tree (&walk_data);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through a loop header to the loop body,
     assuming that we update the loop info.  */
  loop_optimizer_init (LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB (bb)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK (i);
          if (single_succ_p (bb)
              && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            {
              bitmap_clear_bit (need_eh_cleanup, i);
              bitmap_set_bit (need_eh_cleanup, single_succ (bb)->index);
            }
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_zero (need_eh_cleanup);
    }

  statistics_counter_event (cfun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (cfun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (cfun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  htab_delete (avail_exprs);

  /* And finalize the dominator walker.  */
  fini_walk_dominator_tree (&walk_data);

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  VEC_free (expr_hash_elt_t, heap, avail_exprs_stack);
  VEC_free (tree, heap, const_and_copies_stack);

  /* Free the value-handle array.  */
  threadedge_finalize_values ();
  ssa_name_values = NULL;

  return 0;
}
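
/* As a source-level sketch of what this pass does (illustrative only;
   the pass actually works on GIMPLE):

     x = a + b;
     if (a + b > 10)    becomes    if (x > 10)
       y = a + b;       becomes      y = x;

   and a later test of the same condition dominated by this one is
   known true or false on each arm, which is what feeds the jump
   threading step above.  */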

static bool
gate_dominator (void)
{
  return flag_tree_dom != 0;
}

struct gimple_opt_pass pass_dominator =
{
 {
  GIMPLE_PASS,
  "dom",                                /* name */
  gate_dominator,                       /* gate */
  tree_ssa_dominator_optimize,          /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TREE_SSA_DOMINATOR_OPTS,           /* tv_id */
  PROP_cfg | PROP_ssa,                  /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func
    | TODO_update_ssa
    | TODO_cleanup_cfg
    | TODO_verify_ssa                   /* todo_flags_finish */
 }
};
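
/* flag_tree_dom above is the variable behind -ftree-dominator-opts
   (enabled by default at -O and above), and TODO_dump_func means the
   transformed function can be inspected with -fdump-tree-dom.  */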


/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}
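
/* For example (a sketch): if 5 > a_1 reaches this point, the operands
   are swapped and the comparison code flipped to give a_1 < 5, so
   that equivalent conditions written in either order hash and compare
   the same way in the available-expression table.  */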

/* Pop entries off AVAIL_EXPRS_STACK, removing each one from the
   AVAIL_EXPRS hash table, until the NULL marker for the current
   block is reached.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (VEC_length (expr_hash_elt_t, avail_exprs_stack) > 0)
    {
      expr_hash_elt_t victim = VEC_pop (expr_hash_elt_t, avail_exprs_stack);
      void **slot;

      if (victim == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as VICTIM and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = htab_find_slot_with_hash (avail_exprs,
                                       victim, victim->hash, NO_INSERT);
      gcc_assert (slot && *slot == (void *) victim);
      htab_clear_slot (avail_exprs, slot);
    }
}

/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (VEC_length (tree, const_and_copies_stack) > 0)
    {
      tree prev_value, dest;

      dest = VEC_pop (tree, const_and_copies_stack);

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = VEC_pop (tree, const_and_copies_stack);
      set_ssa_name_value (dest, prev_value);
    }
}

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}

/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

static void
dom_thread_across_edge (struct dom_walk_data *walk_data, edge e)
{
  if (! walk_data->global_data)
    {
      gimple dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);
      walk_data->global_data = dummy_cond;
    }

  thread_across_edge ((gimple) walk_data->global_data, e, false,
                      &const_and_copies_stack,
                      simplify_stmt_for_jump_threading);
}
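
/* Jump threading in a nutshell (an illustrative sketch): given

     bb1:  if (a_1 > 5) goto bb2; else goto bb3;
     ...
     bb4:  if (a_1 > 5) goto bb5; else goto bb6;

   if bb4 is reached along a path where a_1 > 5 is already known true,
   the second test is decidable at compile time, so the incoming edge
   can be redirected (through a duplicate of bb4) straight to bb5.
   lookup_avail_expr supplies the "already known" answers here.  */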

/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be an SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}
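
/* For example, the PHI

     x_3 = PHI <y_7(bb2), y_7(bb4), x_3(bb5)>

   merges only y_7 and x_3 itself, so x_3 must equal y_7 and the copy
   x_3 = y_7 can be recorded (a sketch; the SSA names are made up).  */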

/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}

/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          struct cond_equivalence *cond_equivalences
            = edge_info->cond_equivalences;

          if (lhs)
            record_equality (lhs, rhs);

          if (cond_equivalences)
            for (i = 0; i < edge_info->max_cond_equivalences; i++)
              record_cond (&cond_equivalences[i]);
        }
    }
}

/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, avail_exprs);
}


/* Dump SSA statistics on stderr.  */

void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}


/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, htab_t htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab_size (htab),
           (long) htab_elements (htab),
           htab_collisions (htab));
}


/* Enter a condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (struct cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  void **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = htab_find_slot_with_hash (avail_exprs, (void *)element,
                                   element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = (void *) element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element);
    }
  else
    free (element);
}

/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           struct cond_equivalence *p)
{
  struct hashable_expr *cond = &p->cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  p->value = boolean_true_node;
}

/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          edge_info->max_cond_equivalences = 6;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 6);
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences[4]);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences[5]);
        }
      else
        {
          edge_info->max_cond_equivalences = 4;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          edge_info->max_cond_equivalences = 3;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 3);
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences[2]);
        }
      else
        {
          edge_info->max_cond_equivalences = 2;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 2);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          edge_info->max_cond_equivalences = 5;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 5);
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences[4]);
        }
      else
        {
          edge_info->max_cond_equivalences = 4;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      break;

    case UNORDERED_EXPR:
      edge_info->max_cond_equivalences = 8;
      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 8);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[4]);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[5]);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[6]);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[7]);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      edge_info->max_cond_equivalences = 4;
      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      break;

    case UNEQ_EXPR:
      edge_info->max_cond_equivalences = 4;
      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      break;

    case LTGT_EXPR:
      edge_info->max_cond_equivalences = 4;
      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      break;

    default:
      edge_info->max_cond_equivalences = 2;
      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 2);
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &edge_info->cond_equivalences[0].cond);
  edge_info->cond_equivalences[0].value = boolean_true_node;

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &edge_info->cond_equivalences[1].cond);
  edge_info->cond_equivalences[1].value = boolean_false_node;
}
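
/* Concretely, for an integral comparison a_1 < b_2 taken true, the
   four slots end up holding (a sketch of the layout built above):

     [0]  a_1 <  b_2  == true    (the original condition)
     [1]  a_1 >= b_2  == false   (its inversion)
     [2]  a_1 <= b_2  == true    (implied)
     [3]  a_1 != b_2  == true    (implied)

   For floating-point operands two more facts are recorded: a true
   a_1 < b_2 also implies ORDERED (neither operand is a NaN) and
   a_1 <> b_2 (LTGT).  */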

/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  VEC_reserve (tree, heap, const_and_copies_stack, 2);
  VEC_quick_push (tree, const_and_copies_stack, prev_x);
  VEC_quick_push (tree, const_and_copies_stack, x);
}

/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return defbb->loop_depth;
}

/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}

/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as an assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}
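
/* Example of the signed-zero restriction above: given

     if (x_1 == 0.0) { ... uses of x_1 ... }

   we may not rewrite x_1 to 0.0 inside the branch, because under IEEE
   semantics x_1 could be -0.0, and the two are distinguishable (for
   instance by 1.0 / x_1).  A comparison against a nonzero constant
   such as x_1 == 2.5 is safe to record.  */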

/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

static bool
simple_iv_increment_p (gimple stmt)
{
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  if (gimple_assign_rhs_code (stmt) != PLUS_EXPR
      && gimple_assign_rhs_code (stmt) != MINUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);

  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gimple phi = gsi_stmt (gsi);

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }
    }
}
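
/* For instance (a sketch): if, while processing BB, the table records
   a_1 = 3, then a successor PHI  x_4 = PHI <a_1(e), b_2(e')>  has its
   a_1 argument rewritten to 3 along the edge e from BB.  */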

/* We have finished optimizing BB; record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          tree index = gimple_switch_index (stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (stmt);
              tree *info = XCNEWVEC (tree, last_basic_block);
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch, i.e. we
             can record an equivalence for OP0 rather than COND.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }

          else if (TREE_CODE (op0) == SSA_NAME
                   && (is_gimple_min_invariant (op1)
                       || TREE_CODE (op1) == SSA_NAME))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }
        }

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}
1629
 
1630
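/* The ENTER callback for the dominator walk.  Push unwind markers on
   the local state stacks, record equivalences available on entry to
   BB from its incoming edge and its PHI nodes, optimize each
   statement in BB, then record outgoing edge equivalences and
   propagate constants and copies into the PHI nodes of BB's
   successors.  */
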
static void
dom_opt_enter_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
                     basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, NULL);
  VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);

  record_equivalences_from_incoming_edge (bb);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    optimize_stmt (bb, gsi);

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb);
}

/* We have finished processing the dominator children of BB, perform
   any finalization actions in preparation for leaving this node in
   the dominator tree.  */

static void
dom_opt_leave_block (struct dom_walk_data *walk_data, basic_block bb)
{
  gimple last;

  /* If we have an outgoing edge to a block with multiple incoming and
     outgoing edges, then we may be able to thread the edge, i.e., we
     may be able to statically determine which of the outgoing edges
     will be traversed when the incoming edge from BB is traversed.  */
  if (single_succ_p (bb)
      && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
      && potentially_threadable_block (single_succ (bb)))
    {
      dom_thread_across_edge (walk_data, single_succ_edge (bb));
    }
  else if ((last = last_stmt (bb))
           && gimple_code (last) == GIMPLE_COND
           && EDGE_COUNT (bb->succs) == 2
           && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
           && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
    {
      edge true_edge, false_edge;

      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Only try to thread the edge if it reaches a target block with
         more than one predecessor and more than one successor.  */
      if (potentially_threadable_block (true_edge->dest))
        {
          struct edge_info *edge_info;
          unsigned int i;

          /* Push a marker onto the available expression stack so that we
             unwind any expressions related to the TRUE arm before processing
             the false arm below.  */
          VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, NULL);
          VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);

          edge_info = (struct edge_info *) true_edge->aux;

          /* If we have info associated with this edge, record it into
             our equivalence tables.  */
          if (edge_info)
            {
              struct cond_equivalence *cond_equivalences = edge_info->cond_equivalences;
              tree lhs = edge_info->lhs;
              tree rhs = edge_info->rhs;

              /* If we have a simple NAME = VALUE equivalence, record it.  */
              if (lhs && TREE_CODE (lhs) == SSA_NAME)
                record_const_or_copy (lhs, rhs);

              /* If we have 0 = COND or 1 = COND equivalences, record them
                 into our expression hash tables.  */
              if (cond_equivalences)
                for (i = 0; i < edge_info->max_cond_equivalences; i++)
                  record_cond (&cond_equivalences[i]);
            }

          dom_thread_across_edge (walk_data, true_edge);

          /* And restore the various tables to their state before
             we threaded this edge.  */
          remove_local_expressions_from_table ();
        }

      /* Similarly for the ELSE arm.  */
      if (potentially_threadable_block (false_edge->dest))
        {
          struct edge_info *edge_info;
          unsigned int i;

          VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
          edge_info = (struct edge_info *) false_edge->aux;

          /* If we have info associated with this edge, record it into
             our equivalence tables.  */
          if (edge_info)
            {
              struct cond_equivalence *cond_equivalences = edge_info->cond_equivalences;
              tree lhs = edge_info->lhs;
              tree rhs = edge_info->rhs;

              /* If we have a simple NAME = VALUE equivalence, record it.  */
              if (lhs && TREE_CODE (lhs) == SSA_NAME)
                record_const_or_copy (lhs, rhs);

              /* If we have 0 = COND or 1 = COND equivalences, record them
                 into our expression hash tables.  */
              if (cond_equivalences)
                for (i = 0; i < edge_info->max_cond_equivalences; i++)
                  record_cond (&cond_equivalences[i]);
            }

          /* Now thread the edge.  */
          dom_thread_across_edge (walk_data, false_edge);

          /* No need to remove local expressions from our tables
             or restore vars to their original value as that will
             be done immediately below.  */
        }
    }

  remove_local_expressions_from_table ();
  restore_vars_to_original_value ();
}

/* Search for redundant computations in STMT.  If any are found, then
   replace them with the variable holding the result of the computation.

   If safe, record this expression into the available expression hash
   table.  */

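/* For example, given

       a_2 = b_1 + c_3;
       ...
       d_4 = b_1 + c_3;

   the expression b_1 + c_3 is already available when the second
   statement is reached, so it is replaced with the cached result,
   yielding d_4 = a_2;  */
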
static void
eliminate_redundant_computations (gimple_stmt_iterator* gsi)
{
  tree expr_type;
  tree cached_lhs;
  bool insert = true;
  bool assigns_var_p = false;

  gimple stmt = gsi_stmt (*gsi);

  tree def = gimple_get_lhs (stmt);

  /* Certain expressions on the RHS can be optimized away, but can not
     themselves be entered into the hash tables.  */
  if (! def
      || TREE_CODE (def) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
      || gimple_vdef (stmt)
      /* Do not record equivalences for increments of ivs.  This would create
         overlapping live ranges for a very questionable gain.  */
      || simple_iv_increment_p (stmt))
    insert = false;

  /* Check if the expression has been computed before.  */
  cached_lhs = lookup_avail_expr (stmt, insert);

  opt_stats.num_exprs_considered++;

  /* Get the type of the expression we are trying to optimize.  */
  if (is_gimple_assign (stmt))
    {
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    expr_type = boolean_type_node;
  else if (is_gimple_call (stmt))
    {
      gcc_assert (gimple_call_lhs (stmt));
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    expr_type = TREE_TYPE (gimple_switch_index (stmt));
  else
    gcc_unreachable ();

  if (!cached_lhs)
    return;

  /* It is safe to ignore types here since we have already done
     type checking in the hashing and equality routines.  In fact
     type checking here merely gets in the way of constant
     propagation.  Also, make sure that it is safe to propagate
     CACHED_LHS into the expression in STMT.  */
  if ((TREE_CODE (cached_lhs) != SSA_NAME
       && (assigns_var_p
           || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
  {
#if defined ENABLE_CHECKING
      gcc_assert (TREE_CODE (cached_lhs) == SSA_NAME
                  || is_gimple_min_invariant (cached_lhs));
#endif

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced redundant expr '");
          print_gimple_expr (dump_file, stmt, 0, dump_flags);
          fprintf (dump_file, "' with '");
          print_generic_expr (dump_file, cached_lhs, dump_flags);
          fprintf (dump_file, "'\n");
        }

      opt_stats.num_re++;

      if (assigns_var_p
          && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
        cached_lhs = fold_convert (expr_type, cached_lhs);

      propagate_tree_value_into_stmt (gsi, cached_lhs);

      /* Since it is always necessary to mark the result as modified,
         perhaps we should move this into propagate_tree_value_into_stmt
         itself.  */
      gimple_set_modified (gsi_stmt (*gsi), true);
  }
}

/* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
   the available expressions table or the const_and_copies table.
   Detect and record those equivalences.  */
/* We handle only very simple copy equivalences here.  The heavy
   lifting is done by eliminate_redundant_computations.  */

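/* For example, a simple copy "x_3 = y_2;" registers y_2 as the value
   of x_3 in CONST_AND_COPIES, while a store "*p_1 = x_2;" is entered
   into the available expression table with LHS and RHS exchanged, so
   that a later load "y_4 = *p_1;" can be rewritten as y_4 = x_2.  */
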
static void
record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
{
  tree lhs;
  enum tree_code lhs_code;

  gcc_assert (is_gimple_assign (stmt));

  lhs = gimple_assign_lhs (stmt);
  lhs_code = TREE_CODE (lhs);

  if (lhs_code == SSA_NAME
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);

      /* If the RHS of the assignment is a constant or another variable that
         may be propagated, register it in the CONST_AND_COPIES table.  We
         do not need to record unwind data for this, since this is a true
         assignment and not an equivalence inferred from a comparison.  All
         uses of this ssa name are dominated by this assignment, so unwinding
         just costs time and space.  */
      if (may_optimize_p
          && (TREE_CODE (rhs) == SSA_NAME
              || is_gimple_min_invariant (rhs)))
      {
        if (dump_file && (dump_flags & TDF_DETAILS))
          {
            fprintf (dump_file, "==== ASGN ");
            print_generic_expr (dump_file, lhs, 0);
            fprintf (dump_file, " = ");
            print_generic_expr (dump_file, rhs, 0);
            fprintf (dump_file, "\n");
          }

        set_ssa_name_value (lhs, rhs);
      }
    }

  /* A memory store, even an aliased store, creates a useful
     equivalence.  By exchanging the LHS and RHS, creating suitable
     vops and recording the result in the available expression table,
     we may be able to expose more redundant loads.  */
  if (!gimple_has_volatile_ops (stmt)
      && gimple_references_memory_p (stmt)
      && gimple_assign_single_p (stmt)
      && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
          || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
      && !is_gimple_reg (lhs))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      gimple new_stmt;

      /* Build a new statement with the RHS and LHS exchanged.  */
      if (TREE_CODE (rhs) == SSA_NAME)
        {
          /* NOTE tuples.  The call to gimple_build_assign below replaced
             a call to build_gimple_modify_stmt, which did not set the
             SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
             may cause an SSA validation failure, as the LHS may be a
             default-initialized name and should have no definition.  I'm
             a bit dubious of this, as the artificial statement that we
             generate here may in fact be ill-formed, but it is simply
             used as an internal device in this pass, and never becomes
             part of the CFG.  */
          gimple defstmt = SSA_NAME_DEF_STMT (rhs);
          new_stmt = gimple_build_assign (rhs, lhs);
          SSA_NAME_DEF_STMT (rhs) = defstmt;
        }
      else
        new_stmt = gimple_build_assign (rhs, lhs);

      gimple_set_vuse (new_stmt, gimple_vdef (stmt));

      /* Finally enter the statement into the available expression
         table.  */
      lookup_avail_expr (new_stmt, true);
    }
}

/* Replace *OP_P in STMT with any known equivalent value for *OP_P from
   CONST_AND_COPIES.  */

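/* For example, if CONST_AND_COPIES records that x_3 currently has the
   value 42, then a use of x_3 in STMT is replaced with 42, provided
   all of the safety checks below succeed.  */
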
static void
cprop_operand (gimple stmt, use_operand_p op_p)
{
  tree val;
  tree op = USE_FROM_PTR (op_p);

  /* If the operand has a known constant value or it is known to be a
     copy of some other variable, use the value or copy stored in
     CONST_AND_COPIES.  */
  val = SSA_NAME_VALUE (op);
  if (val && val != op)
    {
      /* Do not change the base variable in the virtual operand
         tables.  That would make it impossible to reconstruct
         the renamed virtual operand if we later modify this
         statement.  Also only allow the new value to be an SSA_NAME
         for propagation into virtual operands.  */
      if (!is_gimple_reg (op)
          && (TREE_CODE (val) != SSA_NAME
              || is_gimple_reg (val)
              || get_virtual_var (val) != get_virtual_var (op)))
        return;

      /* Do not replace hard register operands in asm statements.  */
      if (gimple_code (stmt) == GIMPLE_ASM
          && !may_propagate_copy_into_asm (op))
        return;

      /* Certain operands are not allowed to be copy propagated due
         to their interaction with exception handling and some GCC
         extensions.  */
      if (!may_propagate_copy (op, val))
        return;

      /* Do not propagate addresses that point to volatiles into memory
         stmts without volatile operands.  */
      if (POINTER_TYPE_P (TREE_TYPE (val))
          && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
          && gimple_has_mem_ops (stmt)
          && !gimple_has_volatile_ops (stmt))
        return;

      /* Do not propagate copies if the propagated value is at a deeper loop
         depth than the propagatee.  Otherwise, this may move loop variant
         variables outside of their loops and prevent coalescing
         opportunities.  If the value was loop invariant, it will be hoisted
         by LICM and exposed for copy propagation.  */
      if (loop_depth_of_name (val) > loop_depth_of_name (op))
        return;

      /* Do not propagate copies into simple IV increment statements.
         See PR23821 for how this can disturb IV analysis.  */
      if (TREE_CODE (val) != INTEGER_CST
          && simple_iv_increment_p (stmt))
        return;

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced '");
          print_generic_expr (dump_file, op, dump_flags);
          fprintf (dump_file, "' with %s '",
                   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
          print_generic_expr (dump_file, val, dump_flags);
          fprintf (dump_file, "'\n");
        }

      if (TREE_CODE (val) != SSA_NAME)
        opt_stats.num_const_prop++;
      else
        opt_stats.num_copy_prop++;

      propagate_value (op_p, val);

      /* And note that we modified this statement.  This is now
         safe, even if we changed virtual operands since we will
         rescan the statement and rewrite its operands again.  */
      gimple_set_modified (stmt, true);
    }
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the uses, vuses and
   vdef_ops of STMT.  */

static void
cprop_into_stmt (gimple stmt)
{
  use_operand_p op_p;
  ssa_op_iter iter;

  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_ALL_USES)
    {
      if (TREE_CODE (USE_FROM_PTR (op_p)) == SSA_NAME)
        cprop_operand (stmt, op_p);
    }
}

/* Optimize the statement pointed to by iterator SI.

   We try to perform some simplistic global redundancy elimination and
   constant propagation:

   1- To detect global redundancy, we keep track of expressions that have
      been computed in this block and its dominators.  If we find that the
      same expression is computed more than once, we eliminate repeated
      computations by using the target of the first one.

   2- Constant values and copy assignments.  This is used to do very
      simplistic constant and copy propagation.  When a constant or copy
      assignment is found, we map the value on the RHS of the assignment to
      the variable in the LHS in the CONST_AND_COPIES table.  */

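/* For example, once "x_5 = 7;" has been processed, a dominated
   "if (x_5 > 3)" first has 7 propagated into it, may then fold to a
   constant condition, and if the taken edge becomes known the CFG is
   flagged for cleanup.  */
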
static void
optimize_stmt (basic_block bb, gimple_stmt_iterator si)
{
  gimple stmt, old_stmt;
  bool may_optimize_p;
  bool modified_p = false;

  old_stmt = stmt = gsi_stmt (si);

  if (gimple_code (stmt) == GIMPLE_COND)
    canonicalize_comparison (stmt);

  update_stmt_if_modified (stmt);
  opt_stats.num_stmts++;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Optimizing statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
  cprop_into_stmt (stmt);

  /* If the statement has been modified with constant replacements,
     fold its RHS before checking for redundant computations.  */
  if (gimple_modified_p (stmt))
    {
      tree rhs = NULL;

      /* Try to fold the statement making sure that STMT is kept
         up to date.  */
      if (fold_stmt (&si))
        {
          stmt = gsi_stmt (si);
          gimple_set_modified (stmt, true);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "  Folded to: ");
              print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
            }
        }

      /* We only need to consider cases that can yield a gimple operand.  */
      if (gimple_assign_single_p (stmt))
        rhs = gimple_assign_rhs1 (stmt);
      else if (gimple_code (stmt) == GIMPLE_GOTO)
        rhs = gimple_goto_dest (stmt);
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
        /* This should never be an ADDR_EXPR.  */
        rhs = gimple_switch_index (stmt);

      if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
        recompute_tree_invariant_for_addr_expr (rhs);

      /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
         even if fold_stmt updated the stmt already and thus cleared
         gimple_modified_p flag on it.  */
      modified_p = true;
    }

  /* Check for redundant computations.  Do this optimization only
     for assignments that have no volatile ops and conditionals.  */
  may_optimize_p = (!gimple_has_volatile_ops (stmt)
                    && ((is_gimple_assign (stmt)
                         && !gimple_rhs_has_side_effects (stmt))
                        || (is_gimple_call (stmt)
                            && gimple_call_lhs (stmt) != NULL_TREE
                            && !gimple_rhs_has_side_effects (stmt))
                        || gimple_code (stmt) == GIMPLE_COND
                        || gimple_code (stmt) == GIMPLE_SWITCH));

  if (may_optimize_p)
    {
      if (gimple_code (stmt) == GIMPLE_CALL)
        {
          /* Resolve __builtin_constant_p.  If it hasn't been
             folded to integer_one_node by now, it's fairly
             certain that the value simply isn't constant.  */
          tree callee = gimple_call_fndecl (stmt);
          if (callee
              && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
              && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
            {
              propagate_tree_value_into_stmt (&si, integer_zero_node);
              stmt = gsi_stmt (si);
            }
        }

      update_stmt_if_modified (stmt);
      eliminate_redundant_computations (&si);
      stmt = gsi_stmt (si);
    }

  /* Record any additional equivalences created by this statement.  */
  if (is_gimple_assign (stmt))
    record_equivalences_from_stmt (stmt, may_optimize_p);

  /* If STMT is a COND_EXPR and it was modified, then we may know
     where it goes.  If that is the case, then mark the CFG as altered.

     This will cause us to later call remove_unreachable_blocks and
     cleanup_tree_cfg when it is safe to do so.  It is not safe to
     clean things up here since removal of edges and such can trigger
     the removal of PHI nodes, which in turn can release SSA_NAMEs to
     the manager.

     That's all fine and good, except that once SSA_NAMEs are released
     to the manager, we must not call create_ssa_name until all references
     to released SSA_NAMEs have been eliminated.

     All references to the deleted SSA_NAMEs can not be eliminated until
     we remove unreachable blocks.

     We can not remove unreachable blocks until after we have completed
     any queued jump threading.

     We can not complete any queued jump threads until we have taken
     appropriate variables out of SSA form.  Taking variables out of
     SSA form can call create_ssa_name and thus we lose.

     Ultimately I suspect we're going to need to change the interface
     into the SSA_NAME manager.  */
  if (gimple_modified_p (stmt) || modified_p)
    {
      tree val = NULL;

      update_stmt_if_modified (stmt);

      if (gimple_code (stmt) == GIMPLE_COND)
        val = fold_binary_loc (gimple_location (stmt),
                           gimple_cond_code (stmt), boolean_type_node,
                           gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
        val = gimple_switch_index (stmt);

      if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
        cfg_altered = true;

      /* If we simplified a statement in such a way as to be shown that it
         cannot trap, update the eh information and the cfg to match.  */
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
        {
          bitmap_set_bit (need_eh_cleanup, bb->index);
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Flagged to clear EH edges.\n");
        }
    }
}

/* Search for an existing instance of STMT in the AVAIL_EXPRS table.
   If found, return its LHS.  Otherwise insert STMT in the table and
   return NULL_TREE.

   Also, when an expression is first inserted in the table, it is
   also added to AVAIL_EXPRS_STACK, so that it can be removed when
   we finish processing this block and its children.  */

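/* For example, looking up "d_4 = b_1 + c_3;" after "a_2 = b_1 + c_3;"
   has been entered returns a_2.  Looking up an expression that has no
   entry yet, with INSERT true, records it and returns NULL_TREE.  */
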
static tree
lookup_avail_expr (gimple stmt, bool insert)
{
  void **slot;
  tree lhs;
  tree temp;
  struct expr_hash_elt element;

  /* Get LHS of assignment or call, else NULL_TREE.  */
  lhs = gimple_get_lhs (stmt);

  initialize_hash_element (stmt, lhs, &element);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "LKUP ");
      print_expr_hash_elt (dump_file, &element);
    }

  /* Don't bother remembering constant assignments and copy operations.
     Constants and copy operations are handled by the constant/copy propagator
     in optimize_stmt.  */
  if (element.expr.kind == EXPR_SINGLE
      && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
          || is_gimple_min_invariant (element.expr.ops.single.rhs)))
    return NULL_TREE;

  /* Finally try to find the expression in the main expression hash table.  */
  slot = htab_find_slot_with_hash (avail_exprs, &element, element.hash,
                                   (insert ? INSERT : NO_INSERT));
  if (slot == NULL)
    return NULL_TREE;

  if (*slot == NULL)
    {
      struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
      *element2 = element;
      element2->stamp = element2;
      *slot = (void *) element2;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "2>>> ");
          print_expr_hash_elt (dump_file, element2);
        }

      VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element2);
      return NULL_TREE;
    }

  /* Extract the LHS of the assignment so that it can be used as the current
     definition of another variable.  */
  lhs = ((struct expr_hash_elt *)*slot)->lhs;

  /* See if the LHS appears in the CONST_AND_COPIES table.  If it does, then
     use the value from the const_and_copies table.  */
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      temp = SSA_NAME_VALUE (lhs);
      if (temp)
        lhs = temp;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "FIND: ");
      print_generic_expr (dump_file, lhs, 0);
      fprintf (dump_file, "\n");
    }

  return lhs;
}

/* Hashing and equality functions for AVAIL_EXPRS.  We compute a value number
   for expressions using the code of the expression and the SSA numbers of
   its operands.  */

static hashval_t
avail_expr_hash (const void *p)
{
  gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
  const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
  tree vuse;
  hashval_t val = 0;

  val = iterative_hash_hashable_expr (expr, val);

  /* If the hash table entry is not associated with a statement, then we
     can just hash the expression and not worry about virtual operands
     and such.  */
  if (!stmt)
    return val;

  /* Add the SSA version numbers of the vuse operand.  This is important
     because compound variables like arrays are not renamed in the
     operands.  Rather, the rename is done on the virtual variable
     representing all the elements of the array.  */
  if ((vuse = gimple_vuse (stmt)))
    val = iterative_hash_expr (vuse, val);

  return val;
}

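/* Return the hash value previously computed and cached in the
   expr_hash_elt.  */
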
static hashval_t
real_avail_expr_hash (const void *p)
{
  return ((const struct expr_hash_elt *)p)->hash;
}

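/* Return nonzero if two entries in the available expression hash
   table refer to equivalent expressions: matching hashes, compatible
   operands and types, and identical VUSE operands.  */
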
static int
avail_expr_eq (const void *p1, const void *p2)
{
  gimple stmt1 = ((const struct expr_hash_elt *)p1)->stmt;
  const struct hashable_expr *expr1 = &((const struct expr_hash_elt *)p1)->expr;
  const struct expr_hash_elt *stamp1 = ((const struct expr_hash_elt *)p1)->stamp;
  gimple stmt2 = ((const struct expr_hash_elt *)p2)->stmt;
  const struct hashable_expr *expr2 = &((const struct expr_hash_elt *)p2)->expr;
  const struct expr_hash_elt *stamp2 = ((const struct expr_hash_elt *)p2)->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  /* FIXME tuples:
     We add stmts to a hash table and then modify them.  To detect the case
     that we modify a stmt and then search for it, we assume that the hash
     is always modified by that change.
     We have to fully check why this doesn't happen on trunk or rewrite
     this in a more reliable (and easier to understand) way.  */
  if (((const struct expr_hash_elt *)p1)->hash
      != ((const struct expr_hash_elt *)p2)->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    {
      /* Note that STMT1 and/or STMT2 may be NULL.  */
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
              == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
    }

  return false;
}

/* PHI-ONLY copy and constant propagation.  This pass is meant to clean
   up degenerate PHIs created by or exposed by jump threading.  */

/* Given PHI, return its RHS if the PHI is a degenerate, otherwise return
   NULL.  */

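/* For example, x_2 = PHI <y_3 (5), y_3 (6), x_2 (7)> is a degenerate:
   ignoring the argument equal to the result itself, every remaining
   argument is y_3, so the PHI is equivalent to y_3.  */
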
tree
degenerate_phi_result (gimple phi)
{
  tree lhs = gimple_phi_result (phi);
  tree val = NULL;
  size_t i;

  /* Ignoring arguments which are the same as LHS, if all the remaining
     arguments are the same, then the PHI is a degenerate and has the
     value of that common argument.  */
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);

      if (arg == lhs)
        continue;
      else if (!arg)
        break;
      else if (!val)
        val = arg;
      else if (arg == val)
        continue;
      /* We bring in some of operand_equal_p not only to speed things
         up, but also to avoid crashing when dereferencing the type of
         a released SSA name.  */
      else if (TREE_CODE (val) != TREE_CODE (arg)
               || TREE_CODE (val) == SSA_NAME
               || !operand_equal_p (arg, val, 0))
        break;
    }
  return (i == gimple_phi_num_args (phi) ? val : NULL);
}

/* Given a statement STMT, which is either a PHI node or an assignment,
   remove it from the IL.  */

static void
remove_stmt_or_phi (gimple stmt)
{
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);

  if (gimple_code (stmt) == GIMPLE_PHI)
    remove_phi_node (&gsi, true);
  else
    {
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}

/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "rhs" of the node; in the case of a non-degenerate
   PHI, NULL is returned.  */

static tree
get_rhs_or_phi_arg (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return degenerate_phi_result (stmt);
  else if (gimple_assign_single_p (stmt))
    return gimple_assign_rhs1 (stmt);
  else
    gcc_unreachable ();
}


/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "lhs" of the node.  */

static tree
get_lhs_or_phi_result (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return gimple_phi_result (stmt);
  else if (is_gimple_assign (stmt))
    return gimple_assign_lhs (stmt);
  else
    gcc_unreachable ();
}

/* Propagate RHS into all uses of LHS (when possible).

   RHS and LHS are derived from STMT, which is passed in solely so
   that we can remove it if propagation is successful.

   When propagating into a PHI node or into a statement which turns
   into a trivial copy or constant initialization, set the
   appropriate bit in INTERESTING_NAMEs so that we will visit those
   nodes as well in an effort to pick up secondary optimization
   opportunities.  */

static void
propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
{
  /* First verify that propagation is valid and isn't going to move a
     loop variant variable outside its loop.  */
  if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
      && (TREE_CODE (rhs) != SSA_NAME
          || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
      && may_propagate_copy (lhs, rhs)
      && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
    {
      use_operand_p use_p;
      imm_use_iterator iter;
      gimple use_stmt;
      bool all = true;

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replacing '");
          print_generic_expr (dump_file, lhs, dump_flags);
          fprintf (dump_file, "' with %s '",
                   (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
          print_generic_expr (dump_file, rhs, dump_flags);
          fprintf (dump_file, "'\n");
        }

      /* Walk over every use of LHS and try to replace the use with RHS.
         At this point the only reason why such a propagation would not
         be successful would be if the use occurs in an ASM_EXPR.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
        {
          /* Leave debug stmts alone.  If we succeed in propagating
             all non-debug uses, we'll drop the DEF, and propagation
             into debug stmts will occur then.  */
          if (gimple_debug_bind_p (use_stmt))
            continue;

          /* It's not always safe to propagate into an ASM_EXPR.  */
          if (gimple_code (use_stmt) == GIMPLE_ASM
              && ! may_propagate_copy_into_asm (lhs))
            {
              all = false;
              continue;
            }

          /* It's not ok to propagate into the definition stmt of RHS.
                <bb 9>:
                  # prephitmp.12_36 = PHI <g_67.1_6(9)>
                  g_67.1_6 = prephitmp.12_36;
                  goto <bb 9>;
             While this is strictly all dead code we do not want to
             deal with this here.  */
          if (TREE_CODE (rhs) == SSA_NAME
              && SSA_NAME_DEF_STMT (rhs) == use_stmt)
            {
              all = false;
              continue;
            }

          /* Dump details.  */
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "    Original statement:");
              print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
            }

          /* Propagate the RHS into this use of the LHS.  */
          FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
            propagate_value (use_p, rhs);

          /* Special cases to avoid useless calls into the folding
             routines, operand scanning, etc.

             First, propagation into a PHI may cause the PHI to become
             a degenerate, so mark the PHI as interesting.  No other
             actions are necessary.

             Second, if we're propagating a virtual operand and the
             propagation does not change the underlying _DECL node for
             the virtual operand, then no further actions are necessary.  */
          if (gimple_code (use_stmt) == GIMPLE_PHI
              || (! is_gimple_reg (lhs)
                  && TREE_CODE (rhs) == SSA_NAME
                  && SSA_NAME_VAR (lhs) == SSA_NAME_VAR (rhs)))
            {
              /* Dump details.  */
              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  fprintf (dump_file, "    Updated statement:");
                  print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
                }

              /* Propagation into a PHI may expose new degenerate PHIs,
                 so mark the result of the PHI as interesting.  */
              if (gimple_code (use_stmt) == GIMPLE_PHI)
                {
                  tree result = get_lhs_or_phi_result (use_stmt);
                  bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
                }

              continue;
            }

          /* From this point onward we are propagating into a
             real statement.  Folding may (or may not) be possible,
             we may expose new operands, expose dead EH edges,
             etc.  */
          /* NOTE tuples.  In the tuples world, fold_stmt_inplace
             cannot fold a call that simplifies to a constant,
             because the GIMPLE_CALL must be replaced by a
             GIMPLE_ASSIGN, and there is no way to effect such a
             transformation in-place.  We might want to consider
             using the more general fold_stmt here.  */
          fold_stmt_inplace (use_stmt);

          /* Sometimes propagation can expose new operands to the
             renamer.  */
          update_stmt (use_stmt);

          /* Dump details.  */
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "    Updated statement:");
              print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
            }

          /* If we replaced a variable index with a constant, then
             we would need to update the invariant flag for ADDR_EXPRs.  */
          if (gimple_assign_single_p (use_stmt)
              && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
            recompute_tree_invariant_for_addr_expr
                (gimple_assign_rhs1 (use_stmt));

          /* If we cleaned up EH information from the statement,
             mark its containing block as needing EH cleanups.  */
          if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
            {
              bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "  Flagged to clear EH edges.\n");
            }

          /* Propagation may expose new trivial copy/constant propagation
             opportunities.  */
          if (gimple_assign_single_p (use_stmt)
              && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
              && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
                  || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
            {
              tree result = get_lhs_or_phi_result (use_stmt);
              bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
            }

          /* Propagation into these nodes may make certain edges in
             the CFG unexecutable.  We want to identify them, as PHI
             nodes at the destination of those unexecutable edges may
             become degenerates.  */
          else if (gimple_code (use_stmt) == GIMPLE_COND
                   || gimple_code (use_stmt) == GIMPLE_SWITCH
                   || gimple_code (use_stmt) == GIMPLE_GOTO)
            {
              tree val;

              if (gimple_code (use_stmt) == GIMPLE_COND)
                val = fold_binary_loc (gimple_location (use_stmt),
                                   gimple_cond_code (use_stmt),
                                   boolean_type_node,
                                   gimple_cond_lhs (use_stmt),
                                   gimple_cond_rhs (use_stmt));
              else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
                val = gimple_switch_index (use_stmt);
              else
                val = gimple_goto_dest (use_stmt);

              if (val && is_gimple_min_invariant (val))
                {
                  basic_block bb = gimple_bb (use_stmt);
                  edge te = find_taken_edge (bb, val);
                  edge_iterator ei;
                  edge e;
                  gimple_stmt_iterator gsi, psi;

                  /* Remove all outgoing edges except TE.  */
                  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
                    {
                      if (e != te)
                        {
                          /* Mark all the PHI nodes at the destination of
                             the unexecutable edge as interesting.  */
                          for (psi = gsi_start_phis (e->dest);
                               !gsi_end_p (psi);
                               gsi_next (&psi))
                            {
                              gimple phi = gsi_stmt (psi);

                              tree result = gimple_phi_result (phi);
                              int version = SSA_NAME_VERSION (result);

                              bitmap_set_bit (interesting_names, version);
                            }

                          te->probability += e->probability;

                          te->count += e->count;
                          remove_edge (e);
                          cfg_altered = true;
                        }
                      else
                        ei_next (&ei);
                    }

                  gsi = gsi_last_bb (gimple_bb (use_stmt));
                  gsi_remove (&gsi, true);

                  /* And fixup the flags on the single remaining edge.  */
                  te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
                  te->flags &= ~EDGE_ABNORMAL;
                  te->flags |= EDGE_FALLTHRU;
                  if (te->probability > REG_BR_PROB_BASE)
                    te->probability = REG_BR_PROB_BASE;
                }
            }
        }

      /* Ensure there is nothing else to do.  */
      gcc_assert (!all || has_zero_uses (lhs));

      /* If we were able to propagate away all uses of LHS, then
         we can remove STMT.  */
      if (all)
        remove_stmt_or_phi (stmt);
    }
}

/* STMT is either a PHI node (potentially a degenerate PHI node) or
   a statement that is a trivial copy or constant initialization.

   Attempt to eliminate STMT by propagating its RHS into all uses of
   its LHS.  This may in turn set new bits in INTERESTING_NAMES
   for nodes we want to revisit later.

   All exit paths should clear INTERESTING_NAMES for the result
   of STMT.  */

static void
eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
{
  tree lhs = get_lhs_or_phi_result (stmt);
  tree rhs;
  int version = SSA_NAME_VERSION (lhs);

  /* If the LHS of this statement or PHI has no uses, then we can
     just eliminate it.  This can occur if, for example, the PHI
     was created by block duplication due to threading and its only
     use was in the conditional at the end of the block which was
     deleted.  */
  if (has_zero_uses (lhs))
    {
      bitmap_clear_bit (interesting_names, version);
      remove_stmt_or_phi (stmt);
      return;
    }

  /* Get the RHS of the assignment or PHI node if the PHI is a
     degenerate.  */
  rhs = get_rhs_or_phi_arg (stmt);
  if (!rhs)
    {
      bitmap_clear_bit (interesting_names, version);
      return;
    }

  propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);

  /* Note that STMT may well have been deleted by now, so do
     not access it; instead use the saved version # to clear
     STMT's entry in the worklist.  */
  bitmap_clear_bit (interesting_names, version);
}

/* The first phase in degenerate PHI elimination.

   Eliminate the degenerate PHIs in BB, then recurse on the
   dominator children of BB.  */

static void
eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
{
  gimple_stmt_iterator gsi;
  basic_block son;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      eliminate_const_or_copy (phi, interesting_names);
    }

  /* Recurse into the dominator children of BB.  */
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    eliminate_degenerate_phis_1 (son, interesting_names);
}


/* A very simple pass to eliminate degenerate PHI nodes from the
   IL.  This is meant to be fast enough to be able to be run several
   times in the optimization pipeline.

   Certain optimizations, particularly those which duplicate blocks
   or remove edges from the CFG, can create or expose PHIs which are
   trivial copies or constant initializations.

   While we could pick up these optimizations in DOM or with the
   combination of copy-prop and CCP, those solutions are far too
   heavy-weight for our needs.

   This implementation has two phases so that we can efficiently
   eliminate the first order degenerate PHIs and second order
   degenerate PHIs.

   The first phase performs a dominator walk to identify and eliminate
   the vast majority of the degenerate PHIs.  When a degenerate PHI
   is identified and eliminated, any affected statements or PHIs
   are put on a worklist.

   The second phase eliminates degenerate PHIs and trivial copies
   or constant initializations using the worklist.  This is how we
   pick up the secondary optimization opportunities with minimal
   cost.  */

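/* For example, eliminating the degenerate x_2 = PHI <y_3, y_3> may
   turn a later z_4 = PHI <x_2, y_3> into z_4 = PHI <y_3, y_3>, a
   second order degenerate which the worklist phase then picks up.  */
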
static unsigned int
eliminate_degenerate_phis (void)
{
  bitmap interesting_names;
  bitmap interesting_names1;

  /* Bitmap of blocks which need EH information updated.  We can not
     update it on-the-fly as doing so invalidates the dominator tree.  */
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  /* INTERESTING_NAMES is effectively our worklist, indexed by
     SSA_NAME_VERSION.

     A set bit indicates that the statement or PHI node which
     defines the SSA_NAME should be (re)examined to determine if
     it has become a degenerate PHI or trivial const/copy propagation
     opportunity.

     Experiments have shown we generally get better compilation
     time behavior with bitmaps rather than sbitmaps.  */
  interesting_names = BITMAP_ALLOC (NULL);
  interesting_names1 = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* First phase.  Eliminate degenerate PHIs via a dominator
     walk of the CFG.

     Experiments have indicated that we generally get better
     compile-time behavior by visiting blocks in the first
     phase in dominator order.  Presumably this is because walking
     in dominator order leaves fewer PHIs for later examination
     by the worklist phase.  */
  eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR, interesting_names);

  /* Second phase.  Eliminate second order degenerate PHIs as well
     as trivial copies or constant initializations identified by
     the first phase or this phase.  Basically we keep iterating
     until our set of INTERESTING_NAMEs is empty.  */
  while (!bitmap_empty_p (interesting_names))
    {
      unsigned int i;
      bitmap_iterator bi;

      /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
         changed during the loop.  Copy it to another bitmap and
         use that.  */
      bitmap_copy (interesting_names1, interesting_names);

      EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
        {
          tree name = ssa_name (i);

          /* Ignore SSA_NAMEs that have been released because
             their defining statement was deleted (unreachable).  */
          if (name)
            eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
                                     interesting_names);
        }
    }

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Propagation of const and copies may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      BITMAP_FREE (need_eh_cleanup);
    }

  BITMAP_FREE (interesting_names);
  BITMAP_FREE (interesting_names1);
  return 0;
}

struct gimple_opt_pass pass_phi_only_cprop =
{
 {
  GIMPLE_PASS,
  "phicprop",                           /* name */
  gate_dominator,                       /* gate */
  eliminate_degenerate_phis,            /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TREE_PHI_CPROP,                    /* tv_id */
  PROP_cfg | PROP_ssa,                  /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_cleanup_cfg
    | TODO_dump_func
    | TODO_ggc_collect
    | TODO_verify_ssa
    | TODO_verify_stmts
    | TODO_update_ssa                   /* todo_flags_finish */
 }
};
