/* Optimization of PHI nodes by converting them into straightline code.
2
   Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010
3
   Free Software Foundation, Inc.
4
 
5
This file is part of GCC.
6
 
7
GCC is free software; you can redistribute it and/or modify it
8
under the terms of the GNU General Public License as published by the
9
Free Software Foundation; either version 3, or (at your option) any
10
later version.
11
 
12
GCC is distributed in the hope that it will be useful, but WITHOUT
13
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15
for more details.
16
 
17
You should have received a copy of the GNU General Public License
18
along with GCC; see the file COPYING3.  If not see
19
<http://www.gnu.org/licenses/>.  */
20
 
21
#include "config.h"
22
#include "system.h"
23
#include "coretypes.h"
24
#include "tm.h"
25
#include "ggc.h"
26
#include "tree.h"
27
#include "flags.h"
28
#include "tm_p.h"
29
#include "basic-block.h"
30
#include "timevar.h"
31
#include "tree-flow.h"
32
#include "tree-pass.h"
33
#include "tree-dump.h"
34
#include "langhooks.h"
35
#include "pointer-set.h"
36
#include "domwalk.h"
37
#include "cfgloop.h"
38
#include "tree-data-ref.h"
39
 
40
static unsigned int tree_ssa_phiopt (void);
41
static unsigned int tree_ssa_phiopt_worker (bool);
42
static bool conditional_replacement (basic_block, basic_block,
43
                                     edge, edge, gimple, tree, tree);
44
static bool value_replacement (basic_block, basic_block,
45
                               edge, edge, gimple, tree, tree);
46
static bool minmax_replacement (basic_block, basic_block,
47
                                edge, edge, gimple, tree, tree);
48
static bool abs_replacement (basic_block, basic_block,
49
                             edge, edge, gimple, tree, tree);
50
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
51
                                    struct pointer_set_t *);
52
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
53
static struct pointer_set_t * get_non_trapping (void);
54
static void replace_phi_edge_with_variable (basic_block, edge, gimple, tree);
55
 
56
/* This pass tries to replace an if-then-else block with an
57
   assignment.  We have four kinds of transformations.  Some of these
58
   transformations are also performed by the ifcvt RTL optimizer.
59
 
60
   Conditional Replacement
61
   -----------------------
62
 
63
   This transformation, implemented in conditional_replacement,
64
   replaces
65
 
66
     bb0:
67
      if (cond) goto bb2; else goto bb1;
68
     bb1:
69
     bb2:
70
      x = PHI <0 (bb1), 1 (bb0), ...>;
71
 
72
   with
73
 
74
     bb0:
75
      x' = cond;
76
      goto bb2;
77
     bb2:
78
      x = PHI <x' (bb0), ...>;
79
 
80
   We remove bb1 as it becomes unreachable.  This occurs often due to
81
   gimplification of conditionals.
82
 
83
   Value Replacement
84
   -----------------
85
 
86
   This transformation, implemented in value_replacement, replaces
87
 
88
     bb0:
89
       if (a != b) goto bb2; else goto bb1;
90
     bb1:
91
     bb2:
92
       x = PHI <a (bb1), b (bb0), ...>;
93
 
94
   with
95
 
96
     bb0:
97
     bb2:
98
       x = PHI <b (bb0), ...>;
99
 
100
   This opportunity can sometimes occur as a result of other
101
   optimizations.
102
 
103
   ABS Replacement
104
   ---------------
105
 
106
   This transformation, implemented in abs_replacement, replaces
107
 
108
     bb0:
109
       if (a >= 0) goto bb2; else goto bb1;
110
     bb1:
111
       x = -a;
112
     bb2:
113
       x = PHI <x (bb1), a (bb0), ...>;
114
 
115
   with
116
 
117
     bb0:
118
       x' = ABS_EXPR< a >;
119
     bb2:
120
       x = PHI <x' (bb0), ...>;
121
 
122
   MIN/MAX Replacement
123
   -------------------
124
 
125
   This transformation, implemented in minmax_replacement, replaces
126
 
127
     bb0:
128
       if (a <= b) goto bb2; else goto bb1;
129
     bb1:
130
     bb2:
131
       x = PHI <b (bb1), a (bb0), ...>;
132
 
133
   with
134
 
135
     bb0:
136
       x' = MIN_EXPR (a, b)
137
     bb2:
138
       x = PHI <x' (bb0), ...>;
139
 
140
   A similar transformation is done for MAX_EXPR.  */
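/* Illustrative source-level intuition (not part of the original file) for
   the four transformations above:

     x = cond ? 1 : 0;         simplifies to  x = cond;
     x = (a != b) ? b : a;     simplifies to  x = b;
     x = (a >= 0) ? a : -a;    simplifies to  x = ABS_EXPR <a>;
     x = (a <= b) ? a : b;     simplifies to  x = MIN_EXPR <a, b>;

   assuming the checks in the individual replacement functions (for example
   no NaNs or signed zeros for the MIN/MAX and ABS cases) succeed.  */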
141
 
142
static unsigned int
143
tree_ssa_phiopt (void)
144
{
145
  return tree_ssa_phiopt_worker (false);
146
}
147
 
148
/* This pass tries to transform conditional stores into unconditional
149
   ones, enabling further simplifications with the simpler then and else
150
   blocks.  In particular it replaces this:
151
 
152
     bb0:
153
       if (cond) goto bb2; else goto bb1;
154
     bb1:
155
       *p = RHS;
156
     bb2:
157
 
158
   with
159
 
160
     bb0:
161
       if (cond) goto bb1; else goto bb2;
162
     bb1:
163
       condtmp' = *p;
164
     bb2:
165
       condtmp = PHI <RHS, condtmp'>
166
       *p = condtmp;
167
 
168
   This transformation can only be done under several constraints,
169
   documented below.  It also replaces:
170
 
171
     bb0:
172
       if (cond) goto bb2; else goto bb1;
173
     bb1:
174
       *p = RHS1;
175
       goto bb3;
176
     bb2:
177
       *p = RHS2;
178
     bb3:
179
 
180
   with
181
 
182
     bb0:
183
       if (cond) goto bb3; else goto bb1;
184
     bb1:
185
     bb3:
186
       condtmp = PHI <RHS1, RHS2>
187
       *p = condtmp;  */
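/* Illustrative C-level sketch (not part of the original file): code such as

     if (cond)
       *p = x;

   is rewritten into an unconditional store of either X or the previously
   loaded old value of *p, as shown above.  This transformation is gated on
   flag_tree_cselim (see gate_cselim below); its safety constraints are
   checked in cond_store_replacement and get_non_trapping.  */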
188
 
189
static unsigned int
190
tree_ssa_cs_elim (void)
191
{
192
  return tree_ssa_phiopt_worker (true);
193
}
194
 
195
/* For conditional store replacement we need a temporary to
196
   put the old contents of the memory in.  */
197
static tree condstoretemp;
198
 
199
/* The core routine of conditional store replacement and normal
200
   phi optimizations.  Both share much of the infrastructure in how
201
   to match applicable basic block patterns.  DO_STORE_ELIM is true
202
   when we want to do conditional store replacement, false otherwise.  */
203
static unsigned int
204
tree_ssa_phiopt_worker (bool do_store_elim)
205
{
206
  basic_block bb;
207
  basic_block *bb_order;
208
  unsigned n, i;
209
  bool cfgchanged = false;
210
  struct pointer_set_t *nontrap = 0;
211
 
212
  if (do_store_elim)
213
    {
214
      condstoretemp = NULL_TREE;
215
      /* Calculate the set of non-trapping memory accesses.  */
216
      nontrap = get_non_trapping ();
217
    }
218
 
219
  /* Search every basic block for COND_EXPRs we may be able to optimize.
220
 
221
     We walk the blocks in an order that guarantees that a block with
222
     a single predecessor is processed before its predecessor.
223
     This ensures that we collapse inner ifs before visiting the
224
     outer ones, and also that we do not try to visit a removed
225
     block.  */
226
  bb_order = blocks_in_phiopt_order ();
227
  n = n_basic_blocks - NUM_FIXED_BLOCKS;
228
 
229
  for (i = 0; i < n; i++)
230
    {
231
      gimple cond_stmt, phi;
232
      basic_block bb1, bb2;
233
      edge e1, e2;
234
      tree arg0, arg1;
235
 
236
      bb = bb_order[i];
237
 
238
      cond_stmt = last_stmt (bb);
239
      /* Check to see if the last statement is a GIMPLE_COND.  */
240
      if (!cond_stmt
241
          || gimple_code (cond_stmt) != GIMPLE_COND)
242
        continue;
243
 
244
      e1 = EDGE_SUCC (bb, 0);
245
      bb1 = e1->dest;
246
      e2 = EDGE_SUCC (bb, 1);
247
      bb2 = e2->dest;
248
 
249
      /* We cannot do the optimization on abnormal edges.  */
250
      if ((e1->flags & EDGE_ABNORMAL) != 0
251
          || (e2->flags & EDGE_ABNORMAL) != 0)
252
       continue;
253
 
254
      /* Skip if bb1 has no successors, bb2 is NULL, or bb2 has no successors.  */
255
      if (EDGE_COUNT (bb1->succs) == 0
256
          || bb2 == NULL
257
          || EDGE_COUNT (bb2->succs) == 0)
258
        continue;
259
 
260
      /* Find which of bb1 and bb2 falls through to the other.  */
261
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
262
        ;
263
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
264
        {
265
          basic_block bb_tmp = bb1;
266
          edge e_tmp = e1;
267
          bb1 = bb2;
268
          bb2 = bb_tmp;
269
          e1 = e2;
270
          e2 = e_tmp;
271
        }
272
      else if (do_store_elim
273
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
274
        {
275
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
276
 
277
          if (!single_succ_p (bb1)
278
              || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
279
              || !single_succ_p (bb2)
280
              || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
281
              || EDGE_COUNT (bb3->preds) != 2)
282
            continue;
283
          if (cond_if_else_store_replacement (bb1, bb2, bb3))
284
            cfgchanged = true;
285
          continue;
286
        }
287
      else
288
        continue;
289
 
290
      e1 = EDGE_SUCC (bb1, 0);
291
 
292
      /* Make sure that bb1 is just a fall through.  */
293
      if (!single_succ_p (bb1)
294
          || (e1->flags & EDGE_FALLTHRU) == 0)
295
        continue;
296
 
297
      /* Also make sure that bb1 has only one predecessor and that it
298
         is bb.  */
299
      if (!single_pred_p (bb1)
300
          || single_pred (bb1) != bb)
301
        continue;
302
 
303
      if (do_store_elim)
304
        {
305
          /* bb1 is the middle block, bb2 the join block, bb the split block,
306
             e1 the fallthrough edge from bb1 to bb2.  We can't do the
307
             optimization if the join block has more than two predecessors.  */
308
          if (EDGE_COUNT (bb2->preds) > 2)
309
            continue;
310
          if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
311
            cfgchanged = true;
312
        }
313
      else
314
        {
315
          gimple_seq phis = phi_nodes (bb2);
316
          gimple_stmt_iterator gsi;
317
 
318
          /* Check to make sure that there is only one non-virtual PHI node.
319
             TODO: we could do it with more than one iff the other PHI nodes
320
             have the same elements for these two edges.  */
321
          phi = NULL;
322
          for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
323
            {
324
              if (!is_gimple_reg (gimple_phi_result (gsi_stmt (gsi))))
325
                continue;
326
              if (phi)
327
                {
328
                  phi = NULL;
329
                  break;
330
                }
331
              phi = gsi_stmt (gsi);
332
            }
333
          if (!phi)
334
            continue;
335
 
336
          arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
337
          arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
338
 
339
          /* Something is wrong if we cannot find the arguments in the PHI
340
             node.  */
341
          gcc_assert (arg0 != NULL && arg1 != NULL);
342
 
343
          /* Do the replacement of conditional if it can be done.  */
344
          if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
345
            cfgchanged = true;
346
          else if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
347
            cfgchanged = true;
348
          else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
349
            cfgchanged = true;
350
          else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
351
            cfgchanged = true;
352
        }
353
    }
354
 
355
  free (bb_order);
356
 
357
  if (do_store_elim)
358
    pointer_set_destroy (nontrap);
359
  /* If the CFG has changed, we should cleanup the CFG.  */
360
  if (cfgchanged && do_store_elim)
361
    {
362
      /* In cond-store replacement we have added some loads on edges
363
         and new VOPS (as we moved the store, and created a load).  */
364
      gsi_commit_edge_inserts ();
365
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
366
    }
367
  else if (cfgchanged)
368
    return TODO_cleanup_cfg;
369
  return 0;
370
}
371
 
372
/* Returns the list of basic blocks in the function in an order that guarantees
373
   that if a block X has just a single predecessor Y, then Y is after X in the
374
   ordering.  */
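/* Illustrative example (not part of the original file): for a chain
   bb2 -> bb3 -> bb4 in which each block is the single predecessor of the
   next, the returned order lists bb4 before bb3 before bb2, so a block is
   always visited before its single predecessor, matching the requirement
   in tree_ssa_phiopt_worker.  */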
375
 
376
basic_block *
377
blocks_in_phiopt_order (void)
378
{
379
  basic_block x, y;
380
  basic_block *order = XNEWVEC (basic_block, n_basic_blocks);
381
  unsigned n = n_basic_blocks - NUM_FIXED_BLOCKS;
382
  unsigned np, i;
383
  sbitmap visited = sbitmap_alloc (last_basic_block);
384
 
385
#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index))
386
#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index))
387
 
388
  sbitmap_zero (visited);
389
 
390
  MARK_VISITED (ENTRY_BLOCK_PTR);
391
  FOR_EACH_BB (x)
392
    {
393
      if (VISITED_P (x))
394
        continue;
395
 
396
      /* Walk the predecessors of x as long as they have precisely one
397
         predecessor and add them to the list, so that they get stored
398
         after x.  */
399
      for (y = x, np = 1;
400
           single_pred_p (y) && !VISITED_P (single_pred (y));
401
           y = single_pred (y))
402
        np++;
403
      for (y = x, i = n - np;
404
           single_pred_p (y) && !VISITED_P (single_pred (y));
405
           y = single_pred (y), i++)
406
        {
407
          order[i] = y;
408
          MARK_VISITED (y);
409
        }
410
      order[i] = y;
411
      MARK_VISITED (y);
412
 
413
      gcc_assert (i == n - 1);
414
      n -= np;
415
    }
416
 
417
  sbitmap_free (visited);
418
  gcc_assert (n == 0);
419
  return order;
420
 
421
#undef MARK_VISITED
422
#undef VISITED_P
423
}
424
 
425
 
426
/* Return TRUE if block BB has no executable statements, otherwise return
427
   FALSE.  */
428
 
429
bool
430
empty_block_p (basic_block bb)
431
{
432
  /* BB must have no executable statements.  */
433
  gimple_stmt_iterator gsi = gsi_after_labels (bb);
434
  if (gsi_end_p (gsi))
435
    return true;
436
  if (is_gimple_debug (gsi_stmt (gsi)))
437
    gsi_next_nondebug (&gsi);
438
  return gsi_end_p (gsi);
439
}
440
 
441
/* Replace PHI node element whose edge is E in block BB with variable NEW_TREE.
442
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
443
   is known to have two edges, one of which must reach BB).  */
444
 
445
static void
446
replace_phi_edge_with_variable (basic_block cond_block,
447
                                edge e, gimple phi, tree new_tree)
448
{
449
  basic_block bb = gimple_bb (phi);
450
  basic_block block_to_remove;
451
  gimple_stmt_iterator gsi;
452
 
453
  /* Change the PHI argument to new.  */
454
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);
455
 
456
  /* Remove the empty basic block.  */
457
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
458
    {
459
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
460
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
461
      EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
462
      EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;
463
 
464
      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
465
    }
466
  else
467
    {
468
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
469
      EDGE_SUCC (cond_block, 1)->flags
470
        &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
471
      EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
472
      EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;
473
 
474
      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
475
    }
476
  delete_basic_block (block_to_remove);
477
 
478
  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
479
  gsi = gsi_last_bb (cond_block);
480
  gsi_remove (&gsi, true);
481
 
482
  if (dump_file && (dump_flags & TDF_DETAILS))
483
    fprintf (dump_file,
484
              "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
485
              cond_block->index,
486
              bb->index);
487
}
488
 
489
/*  The function conditional_replacement does the main work of doing the
490
    conditional replacement.  Return true if the replacement is done.
491
    Otherwise return false.
492
    BB is the basic block where the replacement is going to be done.  ARG0
493
    is argument 0 from PHI.  Likewise for ARG1.  */
494
 
495
static bool
496
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
497
                         edge e0, edge e1, gimple phi,
498
                         tree arg0, tree arg1)
499
{
500
  tree result;
501
  gimple stmt, new_stmt;
502
  tree cond;
503
  gimple_stmt_iterator gsi;
504
  edge true_edge, false_edge;
505
  tree new_var, new_var2;
506
 
507
  /* FIXME: Gimplification of complex type is too hard for now.  */
508
  if (TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
509
      || TREE_CODE (TREE_TYPE (arg1)) == COMPLEX_TYPE)
510
    return false;
511
 
512
  /* If the PHI arguments have the constants 0 and 1, then convert
513
     it to the conditional.  */
514
  if ((integer_zerop (arg0) && integer_onep (arg1))
515
      || (integer_zerop (arg1) && integer_onep (arg0)))
516
    ;
517
  else
518
    return false;
519
 
520
  if (!empty_block_p (middle_bb))
521
    return false;
522
 
523
  /* At this point we know we have a GIMPLE_COND with two successors.
524
     One successor is BB, the other successor is an empty block which
525
     falls through into BB.
526
 
527
     There is a single PHI node at the join point (BB) and its arguments
528
     are constants (0, 1).
529
 
530
     So, given the condition COND, and the two PHI arguments, we can
531
     rewrite this PHI into non-branching code:
532
 
533
       dest = (COND) or dest = COND'
534
 
535
     We use the condition as-is if the argument associated with the
536
     true edge has the value one or the argument associated with the
537
     false edge has the value zero.  Note that those conditions are not
538
     the same since only one of the outgoing edges from the GIMPLE_COND
539
     will directly reach BB and thus be associated with an argument.  */
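  /* Illustrative example (not part of the original file): for

       bb0: if (a == b) goto bb2; else goto bb1;
       bb2: x = PHI <0 (bb0), 1 (bb1), ...>

     the argument associated with the true edge (bb0 -> bb2) is 0, so the
     condition is inverted and x' = (a != b) is emitted.  */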
540
 
541
  stmt = last_stmt (cond_bb);
542
  result = PHI_RESULT (phi);
543
 
544
  /* To handle special cases like floating point comparison, it is easier and
545
     less error-prone to build a tree and gimplify it on the fly, though it is
546
     less efficient.  */
547
  cond = fold_build2_loc (gimple_location (stmt),
548
                          gimple_cond_code (stmt), boolean_type_node,
549
                          gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
550
 
551
  /* We need to know which is the true edge and which is the false
552
     edge so that we know when to invert the condition below.  */
553
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
554
  if ((e0 == true_edge && integer_zerop (arg0))
555
      || (e0 == false_edge && integer_onep (arg0))
556
      || (e1 == true_edge && integer_zerop (arg1))
557
      || (e1 == false_edge && integer_onep (arg1)))
558
    cond = fold_build1_loc (gimple_location (stmt),
559
                            TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);
560
 
561
  /* Insert our new statements at the end of conditional block before the
562
     COND_STMT.  */
563
  gsi = gsi_for_stmt (stmt);
564
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
565
                                      GSI_SAME_STMT);
566
 
567
  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
568
    {
569
      source_location locus_0, locus_1;
570
 
571
      new_var2 = create_tmp_var (TREE_TYPE (result), NULL);
572
      add_referenced_var (new_var2);
573
      new_stmt = gimple_build_assign_with_ops (CONVERT_EXPR, new_var2,
574
                                               new_var, NULL);
575
      new_var2 = make_ssa_name (new_var2, new_stmt);
576
      gimple_assign_set_lhs (new_stmt, new_var2);
577
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
578
      new_var = new_var2;
579
 
580
      /* Set the locus to the first argument, unless it doesn't have one.  */
581
      locus_0 = gimple_phi_arg_location (phi, 0);
582
      locus_1 = gimple_phi_arg_location (phi, 1);
583
      if (locus_0 == UNKNOWN_LOCATION)
584
        locus_0 = locus_1;
585
      gimple_set_location (new_stmt, locus_0);
586
    }
587
 
588
  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);
589
 
590
  /* Note that we optimized this PHI.  */
591
  return true;
592
}
593
 
594
/* Update *ARG which is defined in STMT so that it contains the
595
   computed value if that seems profitable.  Return true if the
596
   statement is made dead by that rewriting.  */
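/* Illustrative example (not part of the original file, with made-up SSA
   names): for

     arg_1 = &p_2->f;

   where field F sits at offset zero, the address folds back to p_2, so
   *ARG becomes p_2 and the defining statement is dead.  */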
597
 
598
static bool
599
jump_function_from_stmt (tree *arg, gimple stmt)
600
{
601
  enum tree_code code = gimple_assign_rhs_code (stmt);
602
  if (code == ADDR_EXPR)
603
    {
604
      /* For arg = &p->i transform it to p, if possible.  */
605
      tree rhs1 = gimple_assign_rhs1 (stmt);
606
      HOST_WIDE_INT offset;
607
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
608
                                                &offset);
609
      if (tem
610
          && TREE_CODE (tem) == MEM_REF
611
          && double_int_zero_p
612
               (double_int_add (mem_ref_offset (tem),
613
                                shwi_to_double_int (offset))))
614
        {
615
          *arg = TREE_OPERAND (tem, 0);
616
          return true;
617
        }
618
    }
619
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
620
     additions symbolically here, and we'd need to update the comparison
621
     code that compares the arg + cst tuples in our caller.  For now the
622
     code above exactly handles the VEC_BASE pattern from vec.h.  */
623
  return false;
624
}
625
 
626
/*  The function value_replacement does the main work of doing the value
627
    replacement.  Return true if the replacement is done.  Otherwise return
628
    false.
629
    BB is the basic block where the replacement is going to be done.  ARG0
630
    is argument 0 from the PHI.  Likewise for ARG1.  */
631
 
632
static bool
633
value_replacement (basic_block cond_bb, basic_block middle_bb,
634
                   edge e0, edge e1, gimple phi,
635
                   tree arg0, tree arg1)
636
{
637
  gimple_stmt_iterator gsi;
638
  gimple cond;
639
  edge true_edge, false_edge;
640
  enum tree_code code;
641
 
642
  /* If the type says honor signed zeros we cannot do this
643
     optimization.  */
644
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
645
    return false;
646
 
647
  /* Allow a single statement in MIDDLE_BB that defines one of the PHI
648
     arguments.  */
649
  gsi = gsi_after_labels (middle_bb);
650
  if (!gsi_end_p (gsi))
651
    {
652
      if (is_gimple_debug (gsi_stmt (gsi)))
653
        gsi_next_nondebug (&gsi);
654
      if (!gsi_end_p (gsi))
655
        {
656
          gimple stmt = gsi_stmt (gsi);
657
          tree lhs;
658
          gsi_next_nondebug (&gsi);
659
          if (!gsi_end_p (gsi))
660
            return false;
661
          if (!is_gimple_assign (stmt))
662
            return false;
663
          /* Now try to adjust arg0 or arg1 according to the computation
664
             in the single statement.  */
665
          lhs = gimple_assign_lhs (stmt);
666
          if (!((lhs == arg0
667
                 && jump_function_from_stmt (&arg0, stmt))
668
                || (lhs == arg1
669
                    && jump_function_from_stmt (&arg1, stmt))))
670
            return false;
671
        }
672
    }
673
 
674
  cond = last_stmt (cond_bb);
675
  code = gimple_cond_code (cond);
676
 
677
  /* This transformation is only valid for equality comparisons.  */
678
  if (code != NE_EXPR && code != EQ_EXPR)
679
    return false;
680
 
681
  /* We need to know which is the true edge and which is the false
682
      edge so that we know which PHI argument to select below.  */
683
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
684
 
685
  /* At this point we know we have a COND_EXPR with two successors.
686
     One successor is BB, the other successor is an empty block which
687
     falls through into BB.
688
 
689
     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.
690
 
691
     There is a single PHI node at the join point (BB) with two arguments.
692
 
693
     We now need to verify that the two arguments in the PHI node match
694
     the two arguments to the equality comparison.  */
695
 
696
  if ((operand_equal_for_phi_arg_p (arg0, gimple_cond_lhs (cond))
697
       && operand_equal_for_phi_arg_p (arg1, gimple_cond_rhs (cond)))
698
      || (operand_equal_for_phi_arg_p (arg1, gimple_cond_lhs (cond))
699
          && operand_equal_for_phi_arg_p (arg0, gimple_cond_rhs (cond))))
700
    {
701
      edge e;
702
      tree arg;
703
 
704
      /* For NE_EXPR, we want to build an assignment result = arg where
705
         arg is the PHI argument associated with the true edge.  For
706
         EQ_EXPR we want the PHI argument associated with the false edge.  */
707
      e = (code == NE_EXPR ? true_edge : false_edge);
708
 
709
      /* Unfortunately, E may not reach BB (it may instead have gone to
710
         OTHER_BLOCK).  If that is the case, then we want the single outgoing
711
         edge from OTHER_BLOCK which reaches BB and represents the desired
712
         path from COND_BLOCK.  */
713
      if (e->dest == middle_bb)
714
        e = single_succ_edge (e->dest);
715
 
716
      /* Now we know the incoming edge to BB that has the argument for the
717
         RHS of our new assignment statement.  */
718
      if (e0 == e)
719
        arg = arg0;
720
      else
721
        arg = arg1;
722
 
723
      replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
724
 
725
      /* Note that we optimized this PHI.  */
726
      return true;
727
    }
728
  return false;
729
}
730
 
731
/*  The function minmax_replacement does the main work of doing the minmax
732
    replacement.  Return true if the replacement is done.  Otherwise return
733
    false.
734
    BB is the basic block where the replacement is going to be done.  ARG0
735
    is argument 0 from the PHI.  Likewise for ARG1.  */
736
 
737
static bool
738
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
739
                    edge e0, edge e1, gimple phi,
740
                    tree arg0, tree arg1)
741
{
742
  tree result, type;
743
  gimple cond, new_stmt;
744
  edge true_edge, false_edge;
745
  enum tree_code cmp, minmax, ass_code;
746
  tree smaller, larger, arg_true, arg_false;
747
  gimple_stmt_iterator gsi, gsi_from;
748
 
749
  type = TREE_TYPE (PHI_RESULT (phi));
750
 
751
  /* The optimization may be unsafe due to NaNs.  */
752
  if (HONOR_NANS (TYPE_MODE (type)))
753
    return false;
754
 
755
  cond = last_stmt (cond_bb);
756
  cmp = gimple_cond_code (cond);
757
 
758
  /* This transformation is only valid for order comparisons.  Record which
759
     operand is smaller/larger if the result of the comparison is true.  */
760
  if (cmp == LT_EXPR || cmp == LE_EXPR)
761
    {
762
      smaller = gimple_cond_lhs (cond);
763
      larger = gimple_cond_rhs (cond);
764
    }
765
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
766
    {
767
      smaller = gimple_cond_rhs (cond);
768
      larger = gimple_cond_lhs (cond);
769
    }
770
  else
771
    return false;
772
 
773
  /* We need to know which is the true edge and which is the false
774
      edge so that we know which PHI argument goes with which edge.  */
775
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
776
 
777
  /* Forward the edges over the middle basic block.  */
778
  if (true_edge->dest == middle_bb)
779
    true_edge = EDGE_SUCC (true_edge->dest, 0);
780
  if (false_edge->dest == middle_bb)
781
    false_edge = EDGE_SUCC (false_edge->dest, 0);
782
 
783
  if (true_edge == e0)
784
    {
785
      gcc_assert (false_edge == e1);
786
      arg_true = arg0;
787
      arg_false = arg1;
788
    }
789
  else
790
    {
791
      gcc_assert (false_edge == e0);
792
      gcc_assert (true_edge == e1);
793
      arg_true = arg1;
794
      arg_false = arg0;
795
    }
796
 
797
  if (empty_block_p (middle_bb))
798
    {
799
      if (operand_equal_for_phi_arg_p (arg_true, smaller)
800
          && operand_equal_for_phi_arg_p (arg_false, larger))
801
        {
802
          /* Case
803
 
804
             if (smaller < larger)
805
             rslt = smaller;
806
             else
807
             rslt = larger;  */
808
          minmax = MIN_EXPR;
809
        }
810
      else if (operand_equal_for_phi_arg_p (arg_false, smaller)
811
               && operand_equal_for_phi_arg_p (arg_true, larger))
812
        minmax = MAX_EXPR;
813
      else
814
        return false;
815
    }
816
  else
817
    {
818
      /* Recognize the following case, assuming d <= u:
819
 
820
         if (a <= u)
821
           b = MAX (a, d);
822
         x = PHI <b, u>
823
 
824
         This is equivalent to
825
 
826
         b = MAX (a, d);
827
         x = MIN (b, u);  */
828
 
829
      gimple assign = last_and_only_stmt (middle_bb);
830
      tree lhs, op0, op1, bound;
831
 
832
      if (!assign
833
          || gimple_code (assign) != GIMPLE_ASSIGN)
834
        return false;
835
 
836
      lhs = gimple_assign_lhs (assign);
837
      ass_code = gimple_assign_rhs_code (assign);
838
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
839
        return false;
840
      op0 = gimple_assign_rhs1 (assign);
841
      op1 = gimple_assign_rhs2 (assign);
842
 
843
      if (true_edge->src == middle_bb)
844
        {
845
          /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
846
          if (!operand_equal_for_phi_arg_p (lhs, arg_true))
847
            return false;
848
 
849
          if (operand_equal_for_phi_arg_p (arg_false, larger))
850
            {
851
              /* Case
852
 
853
                 if (smaller < larger)
854
                   {
855
                     r' = MAX_EXPR (smaller, bound)
856
                   }
857
                 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
858
              if (ass_code != MAX_EXPR)
859
                return false;
860
 
861
              minmax = MIN_EXPR;
862
              if (operand_equal_for_phi_arg_p (op0, smaller))
863
                bound = op1;
864
              else if (operand_equal_for_phi_arg_p (op1, smaller))
865
                bound = op0;
866
              else
867
                return false;
868
 
869
              /* We need BOUND <= LARGER.  */
870
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
871
                                                  bound, larger)))
872
                return false;
873
            }
874
          else if (operand_equal_for_phi_arg_p (arg_false, smaller))
875
            {
876
              /* Case
877
 
878
                 if (smaller < larger)
879
                   {
880
                     r' = MIN_EXPR (larger, bound)
881
                   }
882
                 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
883
              if (ass_code != MIN_EXPR)
884
                return false;
885
 
886
              minmax = MAX_EXPR;
887
              if (operand_equal_for_phi_arg_p (op0, larger))
888
                bound = op1;
889
              else if (operand_equal_for_phi_arg_p (op1, larger))
890
                bound = op0;
891
              else
892
                return false;
893
 
894
              /* We need BOUND >= SMALLER.  */
895
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
896
                                                  bound, smaller)))
897
                return false;
898
            }
899
          else
900
            return false;
901
        }
902
      else
903
        {
904
          /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
905
          if (!operand_equal_for_phi_arg_p (lhs, arg_false))
906
            return false;
907
 
908
          if (operand_equal_for_phi_arg_p (arg_true, larger))
909
            {
910
              /* Case
911
 
912
                 if (smaller > larger)
913
                   {
914
                     r' = MIN_EXPR (smaller, bound)
915
                   }
916
                 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
917
              if (ass_code != MIN_EXPR)
918
                return false;
919
 
920
              minmax = MAX_EXPR;
921
              if (operand_equal_for_phi_arg_p (op0, smaller))
922
                bound = op1;
923
              else if (operand_equal_for_phi_arg_p (op1, smaller))
924
                bound = op0;
925
              else
926
                return false;
927
 
928
              /* We need BOUND >= LARGER.  */
929
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
930
                                                  bound, larger)))
931
                return false;
932
            }
933
          else if (operand_equal_for_phi_arg_p (arg_true, smaller))
934
            {
935
              /* Case
936
 
937
                 if (smaller > larger)
938
                   {
939
                     r' = MAX_EXPR (larger, bound)
940
                   }
941
                 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
942
              if (ass_code != MAX_EXPR)
943
                return false;
944
 
945
              minmax = MIN_EXPR;
946
              if (operand_equal_for_phi_arg_p (op0, larger))
947
                bound = op1;
948
              else if (operand_equal_for_phi_arg_p (op1, larger))
949
                bound = op0;
950
              else
951
                return false;
952
 
953
              /* We need BOUND <= SMALLER.  */
954
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
955
                                                  bound, smaller)))
956
                return false;
957
            }
958
          else
959
            return false;
960
        }
961
 
962
      /* Move the statement from the middle block.  */
963
      gsi = gsi_last_bb (cond_bb);
964
      gsi_from = gsi_last_nondebug_bb (middle_bb);
965
      gsi_move_before (&gsi_from, &gsi);
966
    }
967
 
968
  /* Emit the statement to compute min/max.  */
969
  result = duplicate_ssa_name (PHI_RESULT (phi), NULL);
970
  new_stmt = gimple_build_assign_with_ops (minmax, result, arg0, arg1);
971
  gsi = gsi_last_bb (cond_bb);
972
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
973
 
974
  replace_phi_edge_with_variable (cond_bb, e1, phi, result);
975
  return true;
976
}
977
 
978
/*  The function abs_replacement does the main work of doing the absolute
979
    replacement.  Return true if the replacement is done.  Otherwise return
980
    false.
981
    bb is the basic block where the replacement is going to be done.  arg0
982
    is argument 0 from the phi.  Likewise for arg1.  */
983
 
984
static bool
985
abs_replacement (basic_block cond_bb, basic_block middle_bb,
986
                 edge e0 ATTRIBUTE_UNUSED, edge e1,
987
                 gimple phi, tree arg0, tree arg1)
988
{
989
  tree result;
990
  gimple new_stmt, cond;
991
  gimple_stmt_iterator gsi;
992
  edge true_edge, false_edge;
993
  gimple assign;
994
  edge e;
995
  tree rhs, lhs;
996
  bool negate;
997
  enum tree_code cond_code;
998
 
999
  /* If the type says honor signed zeros we cannot do this
1000
     optimization.  */
1001
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
1002
    return false;
1003
 
1004
  /* OTHER_BLOCK must have only one executable statement which must have the
1005
     form arg0 = -arg1 or arg1 = -arg0.  */
1006
 
1007
  assign = last_and_only_stmt (middle_bb);
1008
  /* If we did not find the proper negation assignment, then we cannot
1009
     optimize.  */
1010
  if (assign == NULL)
1011
    return false;
1012
 
1013
  /* If we got here, then we have found the only executable statement
1014
     in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
1015
     arg1 = -arg0, then we cannot optimize.  */
1016
  if (gimple_code (assign) != GIMPLE_ASSIGN)
1017
    return false;
1018
 
1019
  lhs = gimple_assign_lhs (assign);
1020
 
1021
  if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
1022
    return false;
1023
 
1024
  rhs = gimple_assign_rhs1 (assign);
1025
 
1026
  /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
1027
  if (!(lhs == arg0 && rhs == arg1)
1028
      && !(lhs == arg1 && rhs == arg0))
1029
    return false;
1030
 
1031
  cond = last_stmt (cond_bb);
1032
  result = PHI_RESULT (phi);
1033
 
1034
  /* Only relationals comparing arg[01] against zero are interesting.  */
1035
  cond_code = gimple_cond_code (cond);
1036
  if (cond_code != GT_EXPR && cond_code != GE_EXPR
1037
      && cond_code != LT_EXPR && cond_code != LE_EXPR)
1038
    return false;
1039
 
1040
  /* Make sure the conditional is arg[01] OP y.  */
1041
  if (gimple_cond_lhs (cond) != rhs)
1042
    return false;
1043
 
1044
  if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
1045
               ? real_zerop (gimple_cond_rhs (cond))
1046
               : integer_zerop (gimple_cond_rhs (cond)))
1047
    ;
1048
  else
1049
    return false;
1050
 
1051
  /* We need to know which is the true edge and which is the false
1052
     edge so that we know if we have abs or negative abs.  */
1053
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1054
 
1055
  /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
1056
     will need to negate the result.  Similarly for LT_EXPR/LE_EXPR if
1057
     the false edge goes to OTHER_BLOCK.  */
1058
  if (cond_code == GT_EXPR || cond_code == GE_EXPR)
1059
    e = true_edge;
1060
  else
1061
    e = false_edge;
1062
 
1063
  if (e->dest == middle_bb)
1064
    negate = true;
1065
  else
1066
    negate = false;
1067
 
1068
  result = duplicate_ssa_name (result, NULL);
1069
 
1070
  if (negate)
1071
    {
1072
      tree tmp = create_tmp_var (TREE_TYPE (result), NULL);
1073
      add_referenced_var (tmp);
1074
      lhs = make_ssa_name (tmp, NULL);
1075
    }
1076
  else
1077
    lhs = result;
1078
 
1079
  /* Build the modify expression with abs expression.  */
1080
  new_stmt = gimple_build_assign_with_ops (ABS_EXPR, lhs, rhs, NULL);
1081
 
1082
  gsi = gsi_last_bb (cond_bb);
1083
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1084
 
1085
  if (negate)
1086
    {
1087
      /* Get the right GSI.  We want to insert after the recently
1088
         added ABS_EXPR statement (which we know is the first statement
1089
         in the block).  */
1090
      new_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, result, lhs, NULL);
1091
 
1092
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1093
    }
1094
 
1095
  replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1096
 
1097
  /* Note that we optimized this PHI.  */
1098
  return true;
1099
}
1100
 
1101
/* Auxiliary functions to determine the set of memory accesses which
1102
   can't trap because they are preceded by accesses to the same memory
1103
   portion.  We do that for MEM_REFs, so we only need to track
1104
   the SSA_NAME of the pointer indirectly referenced.  The algorithm
1105
   is simply a walk over all instructions in dominator order.  When
1106
   we see a MEM_REF we determine if we've already seen the same
1107
   ref anywhere up to the root of the dominator tree.  If we do, the
1108
   current access can't trap.  If we don't see any dominating access
1109
   the current access might trap, but might also make later accesses
1110
   non-trapping, so we remember it.  We need to be careful with loads
1111
   or stores, for instance a load might not trap, while a store would,
1112
   so if we see a dominating read access this doesn't mean that a later
1113
   write access would not trap.  Hence we also need to differentiate the
1114
   type of access(es) seen.
1115
 
1116
   ??? We currently are very conservative and assume that a load might
1117
   trap even if a store doesn't (write-only memory).  This probably is
1118
   overly conservative.  */
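/* Illustrative example (not part of the original file, with made-up SSA
   names):

     *p_1 = 0;         <- recorded as a store through p_1
     if (cond_2)
       *p_1 = x_3;     <- dominated by the store above, so it cannot trap

   The second store is entered into the non-trap set, which later lets
   cond_store_replacement sink it out of the conditional.  */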
1119
 
1120
/* A hash-table of SSA_NAMEs, and in which basic block a MEM_REF
1121
   through it was seen, which would constitute a no-trap region for
1122
   same accesses.  */
1123
struct name_to_bb
1124
{
1125
  tree ssa_name;
1126
  basic_block bb;
1127
  unsigned store : 1;
1128
};
1129
 
1130
/* The hash table for remembering what we've seen.  */
1131
static htab_t seen_ssa_names;
1132
 
1133
/* The set of MEM_REFs which can't trap.  */
1134
static struct pointer_set_t *nontrap_set;
1135
 
1136
/* The hash function, based on the pointer to the pointer SSA_NAME.  */
1137
static hashval_t
1138
name_to_bb_hash (const void *p)
1139
{
1140
  const_tree n = ((const struct name_to_bb *)p)->ssa_name;
1141
  return htab_hash_pointer (n) ^ ((const struct name_to_bb *)p)->store;
1142
}
1143
 
1144
/* The equality function of *P1 and *P2.  SSA_NAMEs are shared, so
1145
   it's enough to simply compare them for equality.  */
1146
static int
1147
name_to_bb_eq (const void *p1, const void *p2)
1148
{
1149
  const struct name_to_bb *n1 = (const struct name_to_bb *)p1;
1150
  const struct name_to_bb *n2 = (const struct name_to_bb *)p2;
1151
 
1152
  return n1->ssa_name == n2->ssa_name && n1->store == n2->store;
1153
}
1154
 
1155
/* We see the expression EXP in basic block BB.  If it's an interesting
1156
   expression (a MEM_REF through an SSA_NAME), possibly insert the
1157
   expression into the set NONTRAP or the hash table of seen expressions.
1158
   STORE is true if this expression is on the LHS, otherwise it's on
1159
   the RHS.  */
1160
static void
1161
add_or_mark_expr (basic_block bb, tree exp,
1162
                  struct pointer_set_t *nontrap, bool store)
1163
{
1164
  if (TREE_CODE (exp) == MEM_REF
1165
      && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME)
1166
    {
1167
      tree name = TREE_OPERAND (exp, 0);
1168
      struct name_to_bb map;
1169
      void **slot;
1170
      struct name_to_bb *n2bb;
1171
      basic_block found_bb = 0;
1172
 
1173
      /* Try to find the last seen MEM_REF through the same
1174
         SSA_NAME, which can trap.  */
1175
      map.ssa_name = name;
1176
      map.bb = 0;
1177
      map.store = store;
1178
      slot = htab_find_slot (seen_ssa_names, &map, INSERT);
1179
      n2bb = (struct name_to_bb *) *slot;
1180
      if (n2bb)
1181
        found_bb = n2bb->bb;
1182
 
1183
      /* If we've found a trapping MEM_REF, _and_ it dominates EXP
1184
         (it's in a basic block on the path from us to the dominator root)
1185
         then we can't trap.  */
1186
      if (found_bb && found_bb->aux == (void *)1)
1187
        {
1188
          pointer_set_insert (nontrap, exp);
1189
        }
1190
      else
1191
        {
1192
          /* EXP might trap, so insert it into the hash table.  */
1193
          if (n2bb)
1194
            {
1195
              n2bb->bb = bb;
1196
            }
1197
          else
1198
            {
1199
              n2bb = XNEW (struct name_to_bb);
1200
              n2bb->ssa_name = name;
1201
              n2bb->bb = bb;
1202
              n2bb->store = store;
1203
              *slot = n2bb;
1204
            }
1205
        }
1206
    }
1207
}
1208
 
1209
/* Called by walk_dominator_tree, when entering the block BB.  */
1210
static void
1211
nt_init_block (struct dom_walk_data *data ATTRIBUTE_UNUSED, basic_block bb)
1212
{
1213
  gimple_stmt_iterator gsi;
1214
  /* Mark this BB as being on the path to dominator root.  */
1215
  bb->aux = (void*)1;
1216
 
1217
  /* And walk the statements in order.  */
1218
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1219
    {
1220
      gimple stmt = gsi_stmt (gsi);
1221
 
1222
      if (is_gimple_assign (stmt))
1223
        {
1224
          add_or_mark_expr (bb, gimple_assign_lhs (stmt), nontrap_set, true);
1225
          add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), nontrap_set, false);
1226
          if (get_gimple_rhs_num_ops (gimple_assign_rhs_code (stmt)) > 1)
1227
            add_or_mark_expr (bb, gimple_assign_rhs2 (stmt), nontrap_set,
1228
                              false);
1229
        }
1230
    }
1231
}
1232
 
1233
/* Called by walk_dominator_tree, when basic block BB is exited.  */
1234
static void
1235
nt_fini_block (struct dom_walk_data *data ATTRIBUTE_UNUSED, basic_block bb)
1236
{
1237
  /* This BB isn't on the path to dominator root anymore.  */
1238
  bb->aux = NULL;
1239
}
1240
 
1241
/* This is the entry point for gathering non-trapping memory accesses.
1242
   It will do a dominator walk over the whole function, and it will
1243
   make use of the bb->aux pointers.  It returns a set of trees
1244
   (the MEM_REFs themselves) which can't trap.  */
1245
static struct pointer_set_t *
1246
get_non_trapping (void)
1247
{
1248
  struct pointer_set_t *nontrap;
1249
  struct dom_walk_data walk_data;
1250
 
1251
  nontrap = pointer_set_create ();
1252
  seen_ssa_names = htab_create (128, name_to_bb_hash, name_to_bb_eq,
1253
                                free);
1254
  /* We're going to do a dominator walk, so ensure that we have
1255
     dominance information.  */
1256
  calculate_dominance_info (CDI_DOMINATORS);
1257
 
1258
  /* Setup callbacks for the generic dominator tree walker.  */
1259
  nontrap_set = nontrap;
1260
  walk_data.dom_direction = CDI_DOMINATORS;
1261
  walk_data.initialize_block_local_data = NULL;
1262
  walk_data.before_dom_children = nt_init_block;
1263
  walk_data.after_dom_children = nt_fini_block;
1264
  walk_data.global_data = NULL;
1265
  walk_data.block_local_data_size = 0;
1266
 
1267
  init_walk_dominator_tree (&walk_data);
1268
  walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
1269
  fini_walk_dominator_tree (&walk_data);
1270
  htab_delete (seen_ssa_names);
1271
 
1272
  return nontrap;
1273
}
1274
 
1275
/* Do the main work of conditional store replacement.  We already know
1276
   that the recognized pattern looks like so:
1277
 
1278
   split:
1279
     if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
1280
   MIDDLE_BB:
1281
     something
1282
     fallthrough (edge E0)
1283
   JOIN_BB:
1284
     some more
1285
 
1286
   We check that MIDDLE_BB contains only one store, that that store
1287
   doesn't trap (not via NOTRAP, but via checking if an access to the same
1288
   memory location dominates us) and that the store has a "simple" RHS.  */
1289
 
1290
static bool
1291
cond_store_replacement (basic_block middle_bb, basic_block join_bb,
1292
                        edge e0, edge e1, struct pointer_set_t *nontrap)
1293
{
1294
  gimple assign = last_and_only_stmt (middle_bb);
1295
  tree lhs, rhs, name;
1296
  gimple newphi, new_stmt;
1297
  gimple_stmt_iterator gsi;
1298
  source_location locus;
1299
 
1300
  /* Check if middle_bb contains only one store.  */
1301
  if (!assign
1302
      || !gimple_assign_single_p (assign))
1303
    return false;
1304
 
1305
  locus = gimple_location (assign);
1306
  lhs = gimple_assign_lhs (assign);
1307
  rhs = gimple_assign_rhs1 (assign);
1308
  if (TREE_CODE (lhs) != MEM_REF
1309
      || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
1310
      || !is_gimple_reg_type (TREE_TYPE (lhs)))
1311
    return false;
1312
 
1313
  /* Prove that we can move the store down.  We could also check
1314
     TREE_THIS_NOTRAP here, but in that case we could also move stores
1315
     whose value is not readily available, which we want to avoid.  */
1316
  if (!pointer_set_contains (nontrap, lhs))
1317
    return false;
1318
 
1319
  /* Now we've checked the constraints, so do the transformation:
1320
     1) Remove the single store.  */
1321
  gsi = gsi_for_stmt (assign);
1322
  unlink_stmt_vdef (assign);
1323
  gsi_remove (&gsi, true);
1324
  release_defs (assign);
1325
 
1326
  /* 2) Create a temporary where we can store the old content
1327
        of the memory touched by the store, if we need to.  */
1328
  if (!condstoretemp || TREE_TYPE (lhs) != TREE_TYPE (condstoretemp))
1329
    condstoretemp = create_tmp_reg (TREE_TYPE (lhs), "cstore");
1330
  add_referenced_var (condstoretemp);
1331
 
1332
  /* 3) Insert a load from the memory of the store to the temporary
1333
        on the edge which did not contain the store.  */
1334
  lhs = unshare_expr (lhs);
1335
  new_stmt = gimple_build_assign (condstoretemp, lhs);
1336
  name = make_ssa_name (condstoretemp, new_stmt);
1337
  gimple_assign_set_lhs (new_stmt, name);
1338
  gimple_set_location (new_stmt, locus);
1339
  gsi_insert_on_edge (e1, new_stmt);
1340
 
1341
  /* 4) Create a PHI node at the join block, with one argument
1342
        holding the old RHS, and the other holding the temporary
1343
        where we stored the old memory contents.  */
1344
  newphi = create_phi_node (condstoretemp, join_bb);
1345
  add_phi_arg (newphi, rhs, e0, locus);
1346
  add_phi_arg (newphi, name, e1, locus);
1347
 
1348
  lhs = unshare_expr (lhs);
1349
  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1350
 
1351
  /* 5) Insert the new store of the PHI result.  */
1352
  gsi = gsi_after_labels (join_bb);
1353
  if (gsi_end_p (gsi))
1354
    {
1355
      gsi = gsi_last_bb (join_bb);
1356
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1357
    }
1358
  else
1359
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1360
 
1361
  return true;
1362
}
1363
 
1364
/* Do the main work of conditional store replacement for one pair of stores,
   THEN_ASSIGN in THEN_BB and ELSE_ASSIGN in ELSE_BB, writing the same
   location.  */
1365
 
1366
static bool
1367
cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
1368
                                  basic_block join_bb, gimple then_assign,
1369
                                  gimple else_assign)
1370
{
1371
  tree lhs_base, lhs, then_rhs, else_rhs;
1372
  source_location then_locus, else_locus;
1373
  gimple_stmt_iterator gsi;
1374
  gimple newphi, new_stmt;
1375
 
1376
  if (then_assign == NULL
1377
      || !gimple_assign_single_p (then_assign)
1378
      || gimple_clobber_p (then_assign)
1379
      || else_assign == NULL
1380
      || !gimple_assign_single_p (else_assign)
1381
      || gimple_clobber_p (else_assign))
1382
    return false;
1383
 
1384
  lhs = gimple_assign_lhs (then_assign);
1385
  if (!is_gimple_reg_type (TREE_TYPE (lhs))
1386
      || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
1387
    return false;
1388
 
1389
  lhs_base = get_base_address (lhs);
1390
  if (lhs_base == NULL_TREE
1391
      || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
1392
    return false;
1393
 
1394
  then_rhs = gimple_assign_rhs1 (then_assign);
1395
  else_rhs = gimple_assign_rhs1 (else_assign);
1396
  then_locus = gimple_location (then_assign);
1397
  else_locus = gimple_location (else_assign);
1398
 
1399
  /* Now we've checked the constraints, so do the transformation:
1400
     1) Remove the stores.  */
1401
  gsi = gsi_for_stmt (then_assign);
1402
  unlink_stmt_vdef (then_assign);
1403
  gsi_remove (&gsi, true);
1404
  release_defs (then_assign);
1405
 
1406
  gsi = gsi_for_stmt (else_assign);
1407
  unlink_stmt_vdef (else_assign);
1408
  gsi_remove (&gsi, true);
1409
  release_defs (else_assign);
1410
 
1411
  /* 2) Create a temporary where we can store the old content
1412
        of the memory touched by the store, if we need to.  */
1413
  if (!condstoretemp || TREE_TYPE (lhs) != TREE_TYPE (condstoretemp))
1414
    condstoretemp = create_tmp_reg (TREE_TYPE (lhs), "cstore");
1415
  add_referenced_var (condstoretemp);
1416
 
1417
  /* 3) Create a PHI node at the join block, with one argument
1418
        holding the RHS from the THEN block, and the other holding
1419
        the RHS from the ELSE block.  */
1420
  newphi = create_phi_node (condstoretemp, join_bb);
1421
  add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
1422
  add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
1423
 
1424
  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1425
 
1426
  /* 4) Insert the new store of the PHI result.  */
1427
  gsi = gsi_after_labels (join_bb);
1428
  if (gsi_end_p (gsi))
1429
    {
1430
      gsi = gsi_last_bb (join_bb);
1431
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1432
    }
1433
  else
1434
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1435
 
1436
  return true;
1437
}
1438
 
1439
/* Conditional store replacement.  We already know
1440
   that the recognized pattern looks like so:
1441
 
1442
   split:
1443
     if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
1444
   THEN_BB:
1445
     ...
1446
     X = Y;
1447
     ...
1448
     goto JOIN_BB;
1449
   ELSE_BB:
1450
     ...
1451
     X = Z;
1452
     ...
1453
     fallthrough (edge E0)
1454
   JOIN_BB:
1455
     some more
1456
 
1457
   We check that it is safe to sink the store to JOIN_BB by verifying that
1458
   there are no read-after-write or write-after-write dependencies in
1459
   THEN_BB and ELSE_BB.  */
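/* Illustrative C-level sketch (not part of the original file): code such as

     if (cond)
       *p = a;
     else
       *p = b;

   is rewritten so that JOIN_BB contains a single unconditional store of a
   PHI temporary, roughly *p = cond ? a : b.  */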
1460
 
1461
static bool
1462
cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
1463
                                basic_block join_bb)
1464
{
1465
  gimple then_assign = last_and_only_stmt (then_bb);
1466
  gimple else_assign = last_and_only_stmt (else_bb);
1467
  VEC (data_reference_p, heap) *then_datarefs, *else_datarefs;
1468
  VEC (ddr_p, heap) *then_ddrs, *else_ddrs;
1469
  gimple then_store, else_store;
1470
  bool found, ok = false, res;
1471
  struct data_dependence_relation *ddr;
1472
  data_reference_p then_dr, else_dr;
1473
  int i, j;
1474
  tree then_lhs, else_lhs;
1475
  VEC (gimple, heap) *then_stores, *else_stores;
1476
  basic_block blocks[3];
1477
 
1478
  if (MAX_STORES_TO_SINK == 0)
1479
    return false;
1480
 
1481
  /* Handle the case with a single statement in THEN_BB and ELSE_BB.  */
1482
  if (then_assign && else_assign)
1483
    return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
1484
                                             then_assign, else_assign);
1485
 
1486
  /* Find data references.  */
1487
  then_datarefs = VEC_alloc (data_reference_p, heap, 1);
1488
  else_datarefs = VEC_alloc (data_reference_p, heap, 1);
1489
  if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
1490
        == chrec_dont_know)
1491
      || !VEC_length (data_reference_p, then_datarefs)
1492
      || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
1493
        == chrec_dont_know)
1494
      || !VEC_length (data_reference_p, else_datarefs))
1495
    {
1496
      free_data_refs (then_datarefs);
1497
      free_data_refs (else_datarefs);
1498
      return false;
1499
    }
1500
 
1501
  /* Find pairs of stores with equal LHS.  */
1502
  then_stores = VEC_alloc (gimple, heap, 1);
1503
  else_stores = VEC_alloc (gimple, heap, 1);
1504
  FOR_EACH_VEC_ELT (data_reference_p, then_datarefs, i, then_dr)
1505
    {
1506
      if (DR_IS_READ (then_dr))
1507
        continue;
1508
 
1509
      then_store = DR_STMT (then_dr);
1510
      then_lhs = gimple_get_lhs (then_store);
1511
      found = false;
1512
 
1513
      FOR_EACH_VEC_ELT (data_reference_p, else_datarefs, j, else_dr)
1514
        {
1515
          if (DR_IS_READ (else_dr))
1516
            continue;
1517
 
1518
          else_store = DR_STMT (else_dr);
1519
          else_lhs = gimple_get_lhs (else_store);
1520
 
1521
          if (operand_equal_p (then_lhs, else_lhs, 0))
1522
            {
1523
              found = true;
1524
              break;
1525
            }
1526
        }
1527
 
1528
      if (!found)
1529
        continue;
1530
 
1531
      VEC_safe_push (gimple, heap, then_stores, then_store);
1532
      VEC_safe_push (gimple, heap, else_stores, else_store);
1533
    }
1534
 
1535
  /* No pairs of stores found, or too many to sink.  */
1536
  if (!VEC_length (gimple, then_stores)
1537
      || VEC_length (gimple, then_stores) > (unsigned) MAX_STORES_TO_SINK)
1538
    {
1539
      free_data_refs (then_datarefs);
1540
      free_data_refs (else_datarefs);
1541
      VEC_free (gimple, heap, then_stores);
1542
      VEC_free (gimple, heap, else_stores);
1543
      return false;
1544
    }
1545
 
1546
  /* Compute and check data dependencies in both basic blocks.  */
1547
  then_ddrs = VEC_alloc (ddr_p, heap, 1);
1548
  else_ddrs = VEC_alloc (ddr_p, heap, 1);
1549
  compute_all_dependences (then_datarefs, &then_ddrs, NULL, false);
1550
  compute_all_dependences (else_datarefs, &else_ddrs, NULL, false);
1551
  blocks[0] = then_bb;
1552
  blocks[1] = else_bb;
1553
  blocks[2] = join_bb;
1554
  renumber_gimple_stmt_uids_in_blocks (blocks, 3);
1555
 
1556
  /* Check that there are no read-after-write or write-after-write dependencies
1557
     in THEN_BB.  */
1558
  FOR_EACH_VEC_ELT (ddr_p, then_ddrs, i, ddr)
1559
    {
1560
      struct data_reference *dra = DDR_A (ddr);
1561
      struct data_reference *drb = DDR_B (ddr);
1562
 
1563
      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
1564
          && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
1565
               && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
1566
              || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
1567
                  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
1568
              || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
1569
        {
1570
          free_dependence_relations (then_ddrs);
1571
          free_dependence_relations (else_ddrs);
1572
          free_data_refs (then_datarefs);
1573
          free_data_refs (else_datarefs);
1574
          VEC_free (gimple, heap, then_stores);
1575
          VEC_free (gimple, heap, else_stores);
1576
          return false;
1577
        }
1578
    }
1579
 
1580
  /* Check that there are no read-after-write or write-after-write dependencies
1581
     in ELSE_BB.  */
1582
  FOR_EACH_VEC_ELT (ddr_p, else_ddrs, i, ddr)
1583
    {
1584
      struct data_reference *dra = DDR_A (ddr);
1585
      struct data_reference *drb = DDR_B (ddr);
1586
 
1587
      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
1588
          && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
1589
               && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
1590
              || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
1591
                  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
1592
              || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
1593
        {
1594
          free_dependence_relations (then_ddrs);
1595
          free_dependence_relations (else_ddrs);
1596
          free_data_refs (then_datarefs);
1597
          free_data_refs (else_datarefs);
1598
          VEC_free (gimple, heap, then_stores);
1599
          VEC_free (gimple, heap, else_stores);
1600
          return false;
1601
        }
1602
    }
1603
 
1604
  /* Sink stores with same LHS.  */
1605
  FOR_EACH_VEC_ELT (gimple, then_stores, i, then_store)
1606
    {
1607
      else_store = VEC_index (gimple, else_stores, i);
1608
      res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
1609
                                              then_store, else_store);
1610
      ok = ok || res;
1611
    }
1612
 
1613
  free_dependence_relations (then_ddrs);
1614
  free_dependence_relations (else_ddrs);
1615
  free_data_refs (then_datarefs);
1616
  free_data_refs (else_datarefs);
1617
  VEC_free (gimple, heap, then_stores);
1618
  VEC_free (gimple, heap, else_stores);
1619
 
1620
  return ok;
1621
}
1622
 
1623
/* Always do these optimizations if we have SSA
1624
   trees to work on.  */
1625
static bool
1626
gate_phiopt (void)
1627
{
1628
  return 1;
1629
}
1630
 
1631
struct gimple_opt_pass pass_phiopt =
1632
{
1633
 {
1634
  GIMPLE_PASS,
1635
  "phiopt",                             /* name */
1636
  gate_phiopt,                          /* gate */
1637
  tree_ssa_phiopt,                      /* execute */
1638
  NULL,                                 /* sub */
1639
  NULL,                                 /* next */
1640
  0,                                     /* static_pass_number */
1641
  TV_TREE_PHIOPT,                       /* tv_id */
1642
  PROP_cfg | PROP_ssa,                  /* properties_required */
1643
  0,                                     /* properties_provided */
1644
  0,                                     /* properties_destroyed */
1645
  0,                                     /* todo_flags_start */
1646
  TODO_ggc_collect
1647
    | TODO_verify_ssa
1648
    | TODO_verify_flow
1649
    | TODO_verify_stmts                 /* todo_flags_finish */
1650
 }
1651
};
1652
 
1653
static bool
1654
gate_cselim (void)
1655
{
1656
  return flag_tree_cselim;
1657
}
1658
 
1659
struct gimple_opt_pass pass_cselim =
1660
{
1661
 {
1662
  GIMPLE_PASS,
1663
  "cselim",                             /* name */
1664
  gate_cselim,                          /* gate */
1665
  tree_ssa_cs_elim,                     /* execute */
1666
  NULL,                                 /* sub */
1667
  NULL,                                 /* next */
1668
  0,                                     /* static_pass_number */
1669
  TV_TREE_PHIOPT,                       /* tv_id */
1670
  PROP_cfg | PROP_ssa,                  /* properties_required */
1671
  0,                                     /* properties_provided */
1672
  0,                                     /* properties_destroyed */
1673
  0,                                     /* todo_flags_start */
1674
  TODO_ggc_collect
1675
    | TODO_verify_ssa
1676
    | TODO_verify_flow
1677
    | TODO_verify_stmts                 /* todo_flags_finish */
1678
 }
1679
};
