/* SSA Jump Threading
   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "output.h"
#include "function.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "langhooks.h"
#include "params.h"

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
VEC(tree,heap) *ssa_name_values;

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= VEC_length (tree, ssa_name_values))
    VEC_safe_grow_cleared (tree, heap, ssa_name_values,
                           SSA_NAME_VERSION (name) + 1);
  VEC_replace (tree, ssa_name_values, SSA_NAME_VERSION (name), value);
}

/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (ssa_name_values == NULL);
  ssa_name_values = VEC_alloc(tree, heap, num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  VEC_free(tree, heap, ssa_name_values);
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
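
/* As an illustration, a hypothetical block BB such as the one below may
   be threadable: it has multiple predecessors and ends in a conditional
   whose outcome can become known when BB is entered along a particular
   edge:

       BB:  x_1 = PHI <0 (pred1), c_2 (pred2)>
            if (x_1 == 0)
              goto then_bb;
            else
              goto else_bb;

   When BB is entered via pred1, x_1 is known to be zero, so the edge
   from pred1 could be threaded directly to then_bb.  */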

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
          && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
        {
          return gimple_assign_lhs (use_stmt);
        }
    }
  return op;
}

/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */

static void
remove_temporary_equivalences (VEC(tree, heap) **stack)
{
  while (VEC_length (tree, *stack) > 0)
    {
      tree prev_value, dest;

      dest = VEC_pop (tree, *stack);

      /* A NULL value indicates we should stop unwinding, otherwise
         pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
        break;

      prev_value = VEC_pop (tree, *stack);
      set_ssa_name_value (dest, prev_value);
    }
}

/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, VEC(tree, heap) **stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      y = tmp ? tmp : y;
    }

  set_ssa_name_value (x, y);
  VEC_reserve (tree, heap, *stack, 2);
  VEC_quick_push (tree, *stack, prev_x);
  VEC_quick_push (tree, *stack, x);
}
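
/* For instance, suppose the caller has pushed a NULL_TREE marker onto
   the stack and we then record the equivalences x_1 = 5 followed by
   y_2 = z_3.  The unwind stack (bottom to top) then looks like

       ... | NULL_TREE | old value of x_1 | x_1 | old value of y_2 | y_2

   and remove_temporary_equivalences pops <name, previous value> pairs,
   restoring each previous value, until it reaches the NULL_TREE
   marker.  */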

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, VEC(tree, heap) **stack)
{
  gimple_stmt_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
         and it is set by a PHI in E->dest, then we can not thread
         through E->dest.  */
      if (src != dst
          && TREE_CODE (src) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
        return false;

      /* We consider any non-virtual PHI as a statement since it
         could result in a constant assignment or copy operation.  */
      if (is_gimple_reg (dst))
        stmt_count++;

      record_temporary_equivalence (dst, src, stack);
    }
  return true;
}
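
/* For instance, when traversing the edge from block 2 into a block
   containing

       x_5 = PHI <7 (2), y_8 (3)>

   the walk above records the temporary equivalence x_5 = 7, which may
   later allow a condition such as "if (x_5 > 3)" at the end of the
   block to be resolved.  */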

/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        tree op2 = gimple_assign_rhs3 (stmt);

        /* Sadly, we have to handle conditional assignments specially
           here, because fold expects all the operands of an expression
           to be folded before the expression itself is folded, but we
           can't just substitute the folded condition here.  */
        if (gimple_assign_rhs_code (stmt) == COND_EXPR)
          op0 = fold (op0);

        return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}
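
/* For instance, a GIMPLE_BINARY_RHS assignment whose operands have been
   temporarily replaced by constants, such as

       c_3 = 5 + 1;

   is handed to fold_binary and comes back as the constant 6, while a
   statement whose operands remain symbolic simply yields NULL_TREE.  */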

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
                                                  VEC(tree, heap) **stack,
                                                  tree (*simplify) (gimple,
                                                                    gimple))
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
          || gimple_code (stmt) == GIMPLE_LABEL
          || is_gimple_debug (stmt))
        continue;

      /* If the statement has volatile operands, then we assume we
         can not thread through this block.  This is overly
         conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
        return NULL;

      /* If duplicating this block is going to cause too much code
         expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
        return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
         value, then do not try to simplify this statement as it will
         not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
        continue;

      /* The result of __builtin_object_size depends on all the arguments
         of a phi node. Temporarily using only one edge produces invalid
         results. For example

         if (x < 6)
           goto l;
         else
           goto l;

         l:
         r = PHI <&w[2].a[1](2), &a.a[6](3)>
         __builtin_object_size (r, 0)

         The result of __builtin_object_size is defined to be the maximum of
         remaining bytes. If we use only one edge on the phi, the result will
         change to be the remaining bytes for the corresponding phi argument.

         Similarly for __builtin_constant_p:

         r = PHI <1(2), 2(3)>
         __builtin_constant_p (r)

         Both PHI arguments are constant, but x ? 1 : 2 is still not
         constant.  */

      if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl
              && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
                  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
            continue;
        }

      /* At this point we have a statement which assigns an RHS to an
         SSA_VAR on the LHS.  We want to try and simplify this statement
         to expose more context sensitive equivalences which in turn may
         allow us to simplify the condition at the end of the loop.

         Handle simple copy operations as well as implied copies from
         ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
        cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
        cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
        {
          /* A statement that is not a trivial copy or ASSERT_EXPR.
             We're going to temporarily copy propagate the operands
             and see if that allows us to simplify this statement.  */
          tree *copy;
          ssa_op_iter iter;
          use_operand_p use_p;
          unsigned int num, i = 0;

          num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
          copy = XCNEWVEC (tree, num);

          /* Make a copy of the uses & vuses into USES_COPY, then cprop into
             the operands.  */
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            {
              tree tmp = NULL;
              tree use = USE_FROM_PTR (use_p);

              copy[i++] = use;
              if (TREE_CODE (use) == SSA_NAME)
                tmp = SSA_NAME_VALUE (use);
              if (tmp)
                SET_USE (use_p, tmp);
            }

          /* Try to fold/lookup the new expression.  Inserting the
             expression into the hash table is unlikely to help.  */
          if (is_gimple_call (stmt))
            cached_lhs = fold_call_stmt (stmt, false);
          else
            cached_lhs = fold_assignment_stmt (stmt);

          if (!cached_lhs
              || (TREE_CODE (cached_lhs) != SSA_NAME
                  && !is_gimple_min_invariant (cached_lhs)))
            cached_lhs = (*simplify) (stmt, stmt);

          /* Restore the statement's original uses/defs.  */
          i = 0;
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            SET_USE (use_p, copy[i++]);

          free (copy);
        }

      /* Record the context sensitive equivalence if we were able
         to simplify this statement.  */
      if (cached_lhs
          && (TREE_CODE (cached_lhs) == SSA_NAME
              || is_gimple_min_invariant (cached_lhs)))
        record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
    }
  return stmt;
}
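
/* As an example of the process above, suppose the temporary equivalence
   a_1 = 4 was recorded from a PHI in E->dest.  For the statement

       b_2 = a_1 + 1;

   the use of a_1 is temporarily replaced with 4, the resulting
   expression folds to 5, the context sensitive equivalence b_2 = 5 is
   recorded, and the original operands are then restored.  */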

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
                                 gimple stmt,
                                 gimple dummy_cond,
                                 tree (*simplify) (gimple, gimple),
                                 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op0);
          if (tmp)
            op0 = tmp;
        }

      if (TREE_CODE (op1) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op1);
          if (tmp)
            op1 = tmp;
        }

      if (handle_dominating_asserts)
        {
          /* Now see if the operand was consumed by an ASSERT_EXPR
             which dominates E->src.  If so, we want to replace the
             operand with the LHS of the ASSERT_EXPR.  */
          if (TREE_CODE (op0) == SSA_NAME)
            op0 = lhs_of_dominating_assert (op0, e->src, stmt);

          if (TREE_CODE (op1) == SSA_NAME)
            op1 = lhs_of_dominating_assert (op1, e->src, stmt);
        }

      /* We may need to canonicalize the comparison.  For
         example, op0 might be a constant while op1 is an
         SSA_NAME.  Failure to canonicalize will cause us to
         miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
        {
          tree tmp;
          cond_code = swap_tree_comparison (cond_code);
          tmp = op0;
          op0 = op1;
          op1 = tmp;
        }

      /* Stuff the operator and operands into our dummy conditional
         expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions;
         we only care about a zero/nonzero value.  */
518
      fold_defer_overflow_warnings ();
519
 
520
      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
521
      if (cached_lhs)
522
        while (CONVERT_EXPR_P (cached_lhs))
523
          cached_lhs = TREE_OPERAND (cached_lhs, 0);
524
 
525
      fold_undefer_overflow_warnings ((cached_lhs
526
                                       && is_gimple_min_invariant (cached_lhs)),
527
                                      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
528
 
529
      /* If we have not simplified the condition down to an invariant,
530
         then use the pass specific callback to simplify the condition.  */
531
      if (!cached_lhs
532
          || !is_gimple_min_invariant (cached_lhs))
533
        cached_lhs = (*simplify) (dummy_cond, stmt);
534
 
535
      return cached_lhs;
536
    }
537
 
538
  if (code == GIMPLE_SWITCH)
539
    cond = gimple_switch_index (stmt);
540
  else if (code == GIMPLE_GOTO)
541
    cond = gimple_goto_dest (stmt);
542
  else
543
    gcc_unreachable ();
544
 
545
  /* We can have conditionals which just test the state of a variable
546
     rather than use a relational operator.  These are simpler to handle.  */
547
  if (TREE_CODE (cond) == SSA_NAME)
548
    {
549
      cached_lhs = cond;
550
 
551
      /* Get the variable's current value from the equivalence chains.
552
 
553
         It is possible to get loops in the SSA_NAME_VALUE chains
         (consider threading the backedge of a loop where we have
         a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs
          && TREE_CODE (cached_lhs) == SSA_NAME
          && SSA_NAME_VALUE (cached_lhs))
        cached_lhs = SSA_NAME_VALUE (cached_lhs);

      /* If we're dominated by a suitable ASSERT_EXPR, then
         update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
        cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
         pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (stmt, stmt);
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
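
/* Continuing the example above, if the temporary equivalence b_2 = 5
   has been recorded, a control statement such as

       if (b_2 != 0)

   has its operands replaced by their current values, yielding the
   invariant "5 != 0", from which the caller can determine the outgoing
   edge that will be taken.  */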
/* TAKEN_EDGE represents an edge taken as a result of jump threading.
578
   See if we can thread around TAKEN_EDGE->dest as well.  If so, return
579
   the edge out of TAKEN_EDGE->dest that we can statically compute will be
580
   traversed.
581
 
582
   We are much more restrictive as to the contents of TAKEN_EDGE->dest
583
   as the path isolation code in tree-ssa-threadupdate.c isn't prepared
584
   to handle copying intermediate blocks on a threaded path.
585
 
586
   Long term a more consistent and structured approach to path isolation
587
   would be a huge help.   */
588
static edge
589
thread_around_empty_block (edge taken_edge,
590
                           gimple dummy_cond,
591
                           bool handle_dominating_asserts,
592
                           tree (*simplify) (gimple, gimple),
593
                           bitmap visited)
594
{
595
  basic_block bb = taken_edge->dest;
596
  gimple_stmt_iterator gsi;
597
  gimple stmt;
598
  tree cond;
599
 
600
  /* This block must have a single predecessor (E->dest).  */
601
  if (!single_pred_p (bb))
602
    return NULL;
603
 
604
  /* This block must have more than one successor.  */
605
  if (single_succ_p (bb))
606
    return NULL;
607
 
608
  /* This block can have no PHI nodes.  This is overly conservative.  */
609
  if (!gsi_end_p (gsi_start_phis (bb)))
610
    return NULL;
611
 
612
  /* Skip over DEBUG statements at the start of the block.  */
613
  gsi = gsi_start_nondebug_bb (bb);
614
 
615
  if (gsi_end_p (gsi))
616
    return NULL;
617
 
618
  /* This block can have no statements other than its control altering
619
     statement.  This is overly conservative.  */
620
  stmt = gsi_stmt (gsi);
621
  if (gimple_code (stmt) != GIMPLE_COND
622
      && gimple_code (stmt) != GIMPLE_GOTO
623
      && gimple_code (stmt) != GIMPLE_SWITCH)
624
    return NULL;
625
 
626
  /* Extract and simplify the condition.  */
627
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
628
                                          simplify, handle_dominating_asserts);
629
 
630
  /* If the condition can be statically computed and we have not already
631
     visited the destination edge, then add the taken edge to our thread
632
     path.  */
633
  if (cond && is_gimple_min_invariant (cond))
634
    {
635
      edge taken_edge = find_taken_edge (bb, cond);
636
 
637
      if (bitmap_bit_p (visited, taken_edge->dest->index))
638
        return NULL;
639
      bitmap_set_bit (visited, taken_edge->dest->index);
640
      return taken_edge;
641
    }
642
 
643
  return NULL;
644
}
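
/* A block this routine can thread through typically looks like

       bb:
         if (x_1 > 10)
           goto bb_a;
         else
           goto bb_b;

   i.e. a single-predecessor block containing no PHIs and nothing but
   its control statement, whose condition becomes invariant under the
   temporary equivalences recorded along the path being threaded.  */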

/* E1 and E2 are edges into the same basic block.  Return TRUE if the
   PHI arguments associated with those edges are equal or there are no
   PHI arguments, otherwise return FALSE.  */

static bool
phi_args_equal_on_edges (edge e1, edge e2)
{
  gimple_stmt_iterator gsi;
  int indx1 = e1->dest_idx;
  int indx2 = e2->dest_idx;

  for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
                            gimple_phi_arg_def (phi, indx2), 0))
        return false;
    }
  return true;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gimple dummy_cond,
                    edge e,
                    bool handle_dominating_asserts,
                    VEC(tree, heap) **stack,
                    tree (*simplify) (gimple, gimple))
{
  gimple stmt;

  /* If E is a backedge, then we want to verify that the COND_EXPR,
     SWITCH_EXPR or GOTO_EXPR at the end of e->dest is not affected
     by any statements in e->dest.  If it is affected, then it is not
     safe to thread this edge.  */
  if (e->flags & EDGE_DFS_BACK)
    {
      ssa_op_iter iter;
      use_operand_p use_p;
      gimple last = gsi_stmt (gsi_last_bb (e->dest));

      FOR_EACH_SSA_USE_OPERAND (use_p, last, iter, SSA_OP_USE | SSA_OP_VUSE)
        {
          tree use = USE_FROM_PTR (use_p);

          if (TREE_CODE (use) == SSA_NAME
              && gimple_code (SSA_NAME_DEF_STMT (use)) != GIMPLE_PHI
              && gimple_bb (SSA_NAME_DEF_STMT (use)) == e->dest)
            goto fail;
        }
    }

  stmt_count = 0;

  /* PHIs create temporary equivalences.  */
  if (!record_temporary_equivalences_from_phis (e, stack))
    goto fail;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  stmt = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify);
  if (!stmt)
    goto fail;

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
                                              handle_dominating_asserts);

      if (cond && is_gimple_min_invariant (cond))
        {
          edge taken_edge = find_taken_edge (e->dest, cond);
          basic_block dest = (taken_edge ? taken_edge->dest : NULL);
          bitmap visited;
          edge e2;

          if (dest == e->dest)
            goto fail;

          /* DEST could be null for a computed jump to an absolute
             address.  If DEST is not null, then see if we can thread
             through it as well, this helps capture secondary effects
             of threading without having to re-run DOM or VRP.  */
          if (dest)
            {
              /* We don't want to thread back to a block we have already
                 visited.  This may be overly conservative.  */
              visited = BITMAP_ALLOC (NULL);
              bitmap_set_bit (visited, dest->index);
              bitmap_set_bit (visited, e->dest->index);
              do
                {
                  e2 = thread_around_empty_block (taken_edge,
                                                  dummy_cond,
                                                  handle_dominating_asserts,
                                                  simplify,
                                                  visited);
                  if (e2)
                    taken_edge = e2;
                }
              while (e2);
              BITMAP_FREE (visited);
            }

          remove_temporary_equivalences (stack);
          register_jump_thread (e, taken_edge, NULL);
          return;
        }
    }

 /* We were unable to determine which outgoing edge from E->dest is taken.  However,
    we might still be able to thread through successors of E->dest.  This
    often occurs when E->dest is a joiner block which then fans back out
    based on redundant tests.

    If so, we'll copy E->dest and redirect the appropriate predecessor to
    the copy.  Within the copy of E->dest, we'll thread one or more edges
    to points deeper in the CFG.

    This is a stopgap until we have a more structured approach to path
    isolation.  */
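
  /* For example, E->dest's own condition may depend on a value computed
     inside E->dest and thus remain unknown, while each successor of
     E->dest immediately re-tests something (say "if (b_2 != 0)") that
     the equivalences recorded along E do resolve.  Copying E->dest then
     lets those successor tests be threaded.  */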
  {
    edge e2, e3, taken_edge;
    edge_iterator ei;
    bool found = false;
    bitmap visited = BITMAP_ALLOC (NULL);

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
        /* Avoid threading to any block we have already visited.  */
        bitmap_clear (visited);
        bitmap_set_bit (visited, taken_edge->dest->index);
        bitmap_set_bit (visited, e->dest->index);

        /* Record whether or not we were able to thread through a successor
           of E->dest.  */
        found = false;
        e3 = taken_edge;
        do
          {
            e2 = thread_around_empty_block (e3,
                                            dummy_cond,
                                            handle_dominating_asserts,
                                            simplify,
                                            visited);
            if (e2)
              {
                e3 = e2;
                found = true;
              }
          }
        while (e2);

        /* If we were able to thread through a successor of E->dest, then
           record the jump threading opportunity.  */
        if (found)
          {
            edge tmp;
            /* If there is already an edge from the block to be duplicated
               (E2->src) to the final target (E3->dest), then make sure that
               the PHI args associated with the edges E2 and E3 are the
               same.  */
            tmp = find_edge (taken_edge->src, e3->dest);
            if (!tmp || phi_args_equal_on_edges (tmp, e3))
              register_jump_thread (e, taken_edge, e3);
          }

      }
    BITMAP_FREE (visited);
  }

 fail:
  remove_temporary_equivalences (stack);
}