/* Thread edges through blocks and update the control flow and SSA graphs.
   Copyright (C) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "rtl.h"
#include "tm_p.h"
#include "ggc.h"
#include "basic-block.h"
#include "output.h"
#include "expr.h"
#include "function.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "tree-pass.h"
#include "cfgloop.h"

/* Given a block B, update the CFG and SSA graph to reflect redirecting
   one or more in-edges to B to instead reach the destination of an
   out-edge from B while preserving any side effects in B.

   i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
   side effects of executing B.

     1. Make a copy of B (including its outgoing edges and statements).  Call
        the copy B'.  Note B' has no incoming edges or PHIs at this time.

     2. Remove the control statement at the end of B' and all outgoing edges
        except B'->C.

     3. Add a new argument to each PHI in C with the same value as the existing
        argument associated with edge B->C.  Associate the new PHI arguments
        with the edge B'->C.

     4. For each PHI in B, find or create a PHI in B' with an identical
        PHI_RESULT.  Add an argument to the PHI in B' which has the same
        value as the PHI in B associated with the edge A->B.  Associate
        the new argument in the PHI in B' with the edge A->B.

     5. Change the edge A->B to A->B'.

        5a. This automatically deletes any PHI arguments associated with the
            edge A->B in B.

        5b. This automatically associates each new argument added in step 4
            with the edge A->B'.

     6. Repeat for other incoming edges into B.

     7. Put the duplicated resources in B and all the B' blocks into SSA form.

   Note that block duplication can be minimized by first collecting the
   set of unique destination blocks that the incoming edges should
   be threaded to.  Block duplication can be further minimized by using
   B instead of creating B' for one destination if all edges into B are
   going to be threaded to a successor of B.

   We further reduce the number of edges and statements we create by
   not copying all the outgoing edges and the control statement in
   step #1.  We instead create a template block without the outgoing
   edges and duplicate the template.  */
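
/* As an illustrative sketch (not part of the pass itself): source of the
   following shape produces the A->B->C situation described above.  The
   block holding the second test is B; when B is entered from the arm
   where the first test was true, its outcome is already known, so that
   incoming edge can be threaded straight to C.  */
#if 0
void
example (int a, int *x, int *y)
{
  if (a)        /* Block A ends with this test.  */
    *x = 1;
  if (a)        /* Block B retests A.  Along the path where A was true
                   this test is redundant, and the edge into B can be
                   threaded directly to block C.  */
    *y = 1;     /* Block C.  */
}
#endif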


/* Steps #5 and #6 of the above algorithm are best implemented by walking
   all the incoming edges which thread to the same destination edge at
   the same time.  That avoids lots of table lookups to get information
   for the destination edge.

   To realize that implementation we create a list of incoming edges
   which thread to the same outgoing edge.  Thus to implement steps
   #5 and #6 we traverse our hash table of outgoing edge information.
   For each entry we walk the list of incoming edges which thread to
   the current outgoing edge.  */

struct el
{
  edge e;
  struct el *next;
};
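
/* A minimal sketch of how these lists are consumed; the real traversal
   is in redirect_edges below.  PROCESS_INCOMING_EDGE is a hypothetical
   per-edge action, not a function defined in GCC.  */
#if 0
  struct el *iter;
  for (iter = rd->incoming_edges; iter; iter = iter->next)
    process_incoming_edge (iter->e);    /* hypothetical callback */
#endif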

/* Main data structure recording information regarding B's duplicate
   blocks.  */

/* We need to efficiently record the unique thread destinations of this
   block and specific information associated with those destinations.  We
   may have many incoming edges threaded to the same outgoing edge.  This
   can be naturally implemented with a hash table.  */

struct redirection_data
{
  /* A duplicate of B with the trailing control statement removed and which
     targets a single successor of B.  */
  basic_block dup_block;

  /* An outgoing edge from B.  DUP_BLOCK will have OUTGOING_EDGE->dest as
     its single successor.  */
  edge outgoing_edge;

  /* A list of incoming edges which we want to thread to
     OUTGOING_EDGE->dest.  */
  struct el *incoming_edges;

  /* Flag indicating whether or not we should create a duplicate block
     for this thread destination.  This is only true if we are threading
     all incoming edges and thus are using BB itself as a duplicate block.  */
  bool do_not_duplicate;
};

/* Main data structure to hold information for duplicates of BB.  */
static htab_t redirection_data;

/* Data structure of information to pass to hash table traversal routines.  */
struct local_info
{
  /* The current block we are working on.  */
  basic_block bb;

  /* A template copy of BB with no outgoing edges or control statement that
     we use for creating copies.  */
  basic_block template_block;

  /* TRUE if we thread one or more jumps, FALSE otherwise.  */
  bool jumps_threaded;
};

/* Passes which use the jump threading code register jump threading
   opportunities as they are discovered.  We keep the registered
   jump threading opportunities in this vector as edge pairs
   (original_edge, target_edge).  */
DEF_VEC_ALLOC_P(edge,heap);
static VEC(edge,heap) *threaded_edges;


/* Jump threading statistics.  */

struct thread_stats_d
{
  unsigned long num_threaded_edges;
};

struct thread_stats_d thread_stats;


/* Remove the last statement in block BB if it is a control statement.
   Also remove all outgoing edges except the edge which reaches DEST_BB.
   If DEST_BB is NULL, then remove all outgoing edges.  */

static void
remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
{
  block_stmt_iterator bsi;
  edge e;
  edge_iterator ei;

  bsi = bsi_last (bb);

  /* If the duplicate ends with a control statement, then remove it.

     Note that if we are duplicating the template block rather than the
     original basic block, then the duplicate might not have any real
     statements in it.  */
  if (!bsi_end_p (bsi)
      && bsi_stmt (bsi)
      && (TREE_CODE (bsi_stmt (bsi)) == COND_EXPR
          || TREE_CODE (bsi_stmt (bsi)) == GOTO_EXPR
          || TREE_CODE (bsi_stmt (bsi)) == SWITCH_EXPR))
    bsi_remove (&bsi, true);

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (e->dest != dest_bb)
        remove_edge (e);
      else
        ei_next (&ei);
    }
}

/* Create a duplicate of BB which only reaches the destination of the edge
   stored in RD.  Record the duplicate block in RD.  */

static void
create_block_for_threading (basic_block bb, struct redirection_data *rd)
{
  /* We can use the generic block duplication code and simply remove
     the stuff we do not need.  */
  rd->dup_block = duplicate_block (bb, NULL, NULL);

  /* Zero out the profile, since the block is unreachable for now.  */
  rd->dup_block->frequency = 0;
  rd->dup_block->count = 0;

  /* The call to duplicate_block will copy everything, including the
     useless COND_EXPR or SWITCH_EXPR at the end of BB.  We just remove
     the useless COND_EXPR or SWITCH_EXPR here rather than having a
     specialized block copier.  We also remove all outgoing edges
     from the duplicate block.  The appropriate edge will be created
     later.  */
  remove_ctrl_stmt_and_useless_edges (rd->dup_block, NULL);
}

/* Hashing and equality routines for our hash table.  */
static hashval_t
redirection_data_hash (const void *p)
{
  edge e = ((struct redirection_data *)p)->outgoing_edge;
  return e->dest->index;
}

static int
redirection_data_eq (const void *p1, const void *p2)
{
  edge e1 = ((struct redirection_data *)p1)->outgoing_edge;
  edge e2 = ((struct redirection_data *)p2)->outgoing_edge;

  return e1 == e2;
}

/* Given an outgoing edge E, lookup and return its entry in our hash table.

   If INSERT is true, then we insert the entry into the hash table if
   it is not already present.  INCOMING_EDGE is added to the list of incoming
   edges associated with E in the hash table.  */

static struct redirection_data *
lookup_redirection_data (edge e, edge incoming_edge, enum insert_option insert)
{
  void **slot;
  struct redirection_data *elt;

  /* Build a hash table element so we can see if E is already
     in the table.  */
  elt = XNEW (struct redirection_data);
  elt->outgoing_edge = e;
  elt->dup_block = NULL;
  elt->do_not_duplicate = false;
  elt->incoming_edges = NULL;

  slot = htab_find_slot (redirection_data, elt, insert);

  /* This will only happen if INSERT is false and the entry is not
     in the hash table.  */
  if (slot == NULL)
    {
      free (elt);
      return NULL;
    }

  /* This will only happen if E was not in the hash table and
     INSERT is true.  */
  if (*slot == NULL)
    {
      *slot = (void *)elt;
      elt->incoming_edges = XNEW (struct el);
      elt->incoming_edges->e = incoming_edge;
      elt->incoming_edges->next = NULL;
      return elt;
    }
  /* E was in the hash table.  */
  else
    {
      /* Free ELT as we do not need it anymore, we will extract the
         relevant entry from the hash table itself.  */
      free (elt);

      /* Get the entry stored in the hash table.  */
      elt = (struct redirection_data *) *slot;

      /* If insertion was requested, then we need to add INCOMING_EDGE
         to the list of incoming edges associated with E.  */
      if (insert)
        {
          struct el *el = XNEW (struct el);
          el->next = elt->incoming_edges;
          el->e = incoming_edge;
          elt->incoming_edges = el;
        }

      return elt;
    }
}
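
/* A short usage sketch of the two calling patterns used by thread_block
   below; E is an incoming edge and E2 the outgoing edge it threads to.  */
#if 0
  /* Record that incoming edge E threads to outgoing edge E2, creating
     the hash table entry for E2 the first time E2 is seen.  */
  lookup_redirection_data (e2, e, INSERT);

  /* Fetch an existing entry without modifying the table; returns NULL
     if E2 was never entered.  */
  struct redirection_data *rd = lookup_redirection_data (e2, NULL, NO_INSERT);
#endif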

/* Given a duplicate block and its single destination (both stored
   in RD), create an edge between the duplicate and its single
   destination.

   Add an additional argument to any PHI nodes at the single
   destination.  */

static void
create_edge_and_update_destination_phis (struct redirection_data *rd)
{
  edge e = make_edge (rd->dup_block, rd->outgoing_edge->dest, EDGE_FALLTHRU);
  tree phi;

  e->probability = REG_BR_PROB_BASE;
  e->count = rd->dup_block->count;

  /* If there are any PHI nodes at the destination of the outgoing edge
     from the duplicate block, then we will need to add a new argument
     to them.  The argument should have the same value as the argument
     associated with the outgoing edge stored in RD.  */
  for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi))
    {
      int indx = rd->outgoing_edge->dest_idx;
      add_phi_arg (phi, PHI_ARG_DEF (phi, indx), e);
    }
}

/* Hash table traversal callback routine to create duplicate blocks.  */

static int
create_duplicates (void **slot, void *data)
{
  struct redirection_data *rd = (struct redirection_data *) *slot;
  struct local_info *local_info = (struct local_info *)data;

  /* If this entry should not have a duplicate created, then there's
     nothing to do.  */
  if (rd->do_not_duplicate)
    return 1;

  /* Create a template block if we have not done so already.  Otherwise
     use the template to create a new block.  */
  if (local_info->template_block == NULL)
    {
      create_block_for_threading (local_info->bb, rd);
      local_info->template_block = rd->dup_block;

      /* We do not create any outgoing edges for the template.  We will
         take care of that in a later traversal.  That way we do not
         create edges that are going to just be deleted.  */
    }
  else
    {
      create_block_for_threading (local_info->template_block, rd);

      /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
         block.  */
      create_edge_and_update_destination_phis (rd);
    }

  /* Keep walking the hash table.  */
  return 1;
}

/* We did not create any outgoing edges for the template block during
   block creation.  This hash table traversal callback creates the
   outgoing edge for the template block.  */

static int
fixup_template_block (void **slot, void *data)
{
  struct redirection_data *rd = (struct redirection_data *) *slot;
  struct local_info *local_info = (struct local_info *)data;

  /* If this is the template block, then create its outgoing edges
     and halt the hash table traversal.  */
  if (rd->dup_block && rd->dup_block == local_info->template_block)
    {
      create_edge_and_update_destination_phis (rd);
      return 0;
    }

  return 1;
}

/* Not all jump threading requests are useful.  In particular some
   jump threading requests can create irreducible regions which are
   undesirable.

   This routine will examine the BB's incoming edges for jump threading
   requests which, if acted upon, would create irreducible regions.  Any
   such jump threading requests found will be pruned away.  */

static void
prune_undesirable_thread_requests (basic_block bb)
{
  edge e;
  edge_iterator ei;
  bool may_create_irreducible_region = false;
  unsigned int num_outgoing_edges_into_loop = 0;

  /* For the heuristics below, we need to know if BB has more than
     one outgoing edge into a loop.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    num_outgoing_edges_into_loop += ((e->flags & EDGE_LOOP_EXIT) == 0);

  if (num_outgoing_edges_into_loop > 1)
    {
      edge backedge = NULL;

      /* Consider the effect of threading the edge (0, 1) to 2 on the left
         CFG to produce the right CFG:



             |            |
             1<--+        2<--------+
            / \  |        |         |
           2   3 |        4<----+   |
            \ /  |       / \    |   |
             4---+      E   1-- | --+
             |              |   |
             E              3---+


        Threading the (0, 1) edge to 2 effectively creates two loops
        (2, 4, 1) and (4, 1, 3) which are neither disjoint nor nested.
        This is not good.

        However, we do need to be able to thread (0, 1) to 2 or 3
        in the left CFG below (which creates the middle and right
        CFGs with nested loops).


             |          |             |
             1<--+      2<----+       3<-+<-+
            /|   |      |     |       |  |  |
           2 |   |      3<-+  |       1--+  |
            \|   |      |  |  |       |     |
             3---+      1--+--+       2-----+


         A safe heuristic appears to be to only allow threading if BB
         has a single incoming backedge from one of its direct successors.  */

      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (e->flags & EDGE_DFS_BACK)
            {
              if (backedge)
                {
                  backedge = NULL;
                  break;
                }
              else
                {
                  backedge = e;
                }
            }
        }

      if (backedge && find_edge (bb, backedge->src))
        ;
      else
        may_create_irreducible_region = true;
    }
  else
    {
      edge dest = NULL;

      /* If we thread across the loop entry block (BB) into the
         loop and BB is still reached from outside the loop, then
         we would create an irreducible CFG.  Consider the effect
         of threading the edge (1, 4) to 5 on the left CFG to produce
         the right CFG:


            / \             / \
           1   2           1   2
            \ /            |   |
             4<----+       5<->4
            / \    |           |
           E   5---+           E


         Threading the (1, 4) edge to 5 creates two entry points
         into the loop (4, 5) (one from block 1, the other from
         block 2).  A classic irreducible region.

         So look at all of BB's incoming edges which are not
         backedges and which are not threaded to the loop exit.
         If that subset of incoming edges does not all thread
         to the same block, then threading any of them will create
         an irreducible region.  */

      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          edge e2;

          /* We ignore back edges for now.  This may need refinement
             as threading a backedge creates an inner loop which
             we would need to verify has a single entry point.

             If all backedges thread to new locations, then this
             block will no longer have incoming backedges and we
             need not worry about creating irreducible regions
             by threading through BB.  I don't think this happens
             enough in practice to worry about it.  */
          if (e->flags & EDGE_DFS_BACK)
            continue;

          /* If the incoming edge threads to the loop exit, then it
             is clearly safe.  */
          e2 = e->aux;
          if (e2 && (e2->flags & EDGE_LOOP_EXIT))
            continue;

          /* E enters the loop header and is not threaded.  We can
             not allow any other incoming edges to thread into
             the loop as that would create an irreducible region.  */
          if (!e2)
            {
              may_create_irreducible_region = true;
              break;
            }

          /* We know that this incoming edge threads to a block inside
             the loop.  This edge must thread to the same target in
             the loop as any previously seen threaded edges.  Otherwise
             we will create an irreducible region.  */
          if (!dest)
            dest = e2;
          else if (e2 != dest)
            {
              may_create_irreducible_region = true;
              break;
            }
        }
    }

  /* If we might create an irreducible region, then cancel any of
     the jump threading requests for incoming edges which are
     not backedges and which do not thread to the exit block.  */
  if (may_create_irreducible_region)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          edge e2;

          /* Ignore back edges.  */
          if (e->flags & EDGE_DFS_BACK)
            continue;

          e2 = e->aux;

          /* If this incoming edge was not threaded, then there is
             nothing to do.  */
          if (!e2)
            continue;

          /* If this incoming edge threaded to the loop exit,
             then it can be ignored as it is safe.  */
          if (e2->flags & EDGE_LOOP_EXIT)
            continue;

          if (e2)
            {
              /* This edge threaded into the loop and the jump thread
                 request must be cancelled.  */
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "  Not threading jump %d --> %d to %d\n",
                         e->src->index, e->dest->index, e2->dest->index);
              e->aux = NULL;
            }
        }
    }
}

/* Hash table traversal callback to redirect each incoming edge
   associated with this hash table element to its new destination.  */

static int
redirect_edges (void **slot, void *data)
{
  struct redirection_data *rd = (struct redirection_data *) *slot;
  struct local_info *local_info = (struct local_info *)data;
  struct el *next, *el;

  /* Walk over all the incoming edges associated with this
     hash table entry.  */
  for (el = rd->incoming_edges; el; el = next)
    {
      edge e = el->e;

      /* Go ahead and free this element from the list.  Doing this now
         avoids the need for another list walk when we destroy the hash
         table.  */
      next = el->next;
      free (el);

      /* Go ahead and clear E->aux.  It's not needed anymore and failure
         to clear it will cause all kinds of unpleasant problems later.  */
      e->aux = NULL;

      thread_stats.num_threaded_edges++;

      if (rd->dup_block)
        {
          edge e2;

          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
                     e->src->index, e->dest->index, rd->dup_block->index);

          rd->dup_block->count += e->count;
          rd->dup_block->frequency += EDGE_FREQUENCY (e);
          EDGE_SUCC (rd->dup_block, 0)->count += e->count;
          /* Redirect the incoming edge to the appropriate duplicate
             block.  */
          e2 = redirect_edge_and_branch (e, rd->dup_block);
          flush_pending_stmts (e2);

          if ((dump_file && (dump_flags & TDF_DETAILS))
              && e->src != e2->src)
            fprintf (dump_file, "    basic block %d created\n", e2->src->index);
        }
      else
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
                     e->src->index, e->dest->index, local_info->bb->index);

          /* We are using BB as the duplicate.  Remove the unnecessary
             outgoing edges and statements from BB.  */
          remove_ctrl_stmt_and_useless_edges (local_info->bb,
                                              rd->outgoing_edge->dest);

          /* And fixup the flags on the single remaining edge.  */
          single_succ_edge (local_info->bb)->flags
            &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
          single_succ_edge (local_info->bb)->flags |= EDGE_FALLTHRU;
        }
    }

  /* Indicate that we actually threaded one or more jumps.  */
  if (rd->incoming_edges)
    local_info->jumps_threaded = true;

  return 1;
}

/* Return true if this block has no executable statements other than
   a simple ctrl flow instruction.  When the number of outgoing edges
   is one, this is equivalent to a "forwarder" block.  */

static bool
redirection_block_p (basic_block bb)
{
  block_stmt_iterator bsi;

  /* Advance to the first executable statement.  */
  bsi = bsi_start (bb);
  while (!bsi_end_p (bsi)
          && (TREE_CODE (bsi_stmt (bsi)) == LABEL_EXPR
              || IS_EMPTY_STMT (bsi_stmt (bsi))))
    bsi_next (&bsi);

  /* Check if this is an empty block.  */
  if (bsi_end_p (bsi))
    return true;

  /* Test that we've reached the terminating control statement.  */
  return bsi_stmt (bsi)
         && (TREE_CODE (bsi_stmt (bsi)) == COND_EXPR
             || TREE_CODE (bsi_stmt (bsi)) == GOTO_EXPR
             || TREE_CODE (bsi_stmt (bsi)) == SWITCH_EXPR);
}
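
/* For illustration only: a block satisfying this predicate contains
   nothing but labels, empty statements and the terminating control
   statement, e.g. a pure forwarder such as:  */
#if 0
 L1:;           /* LABEL_EXPR and empty statement are skipped over.  */
  goto L2;      /* Terminating GOTO_EXPR; no other side effects.  */
#endif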

/* BB is a block which ends with a COND_EXPR or SWITCH_EXPR.  When BB
   is reached via one or more specific incoming edges, we know which
   outgoing edge from BB will be traversed.

   We want to redirect those incoming edges to the target of the
   appropriate outgoing edge.  Doing so avoids a conditional branch
   and may expose new optimization opportunities.  Note that we have
   to update the dominator tree and SSA graph after such changes.

   The key to keeping the SSA graph update manageable is to duplicate
   the side effects occurring in BB so that those side effects still
   occur on the paths which bypass BB after redirecting edges.

   We accomplish this by creating duplicates of BB and arranging for
   the duplicates to unconditionally pass control to one specific
   successor of BB.  We then revector the incoming edges into BB to
   the appropriate duplicate of BB.

   BB and its duplicates will have assignments to the same set of
   SSA_NAMEs.  Right now, we just call into update_ssa to update the
   SSA graph for those names.

   We are also going to experiment with a true incremental update
   scheme for the duplicated resources.  One of the interesting
   properties we can exploit here is that all the resources set
   in BB will have the same IDFS, so we have one IDFS computation
   per block with incoming threaded edges, which can lower the
   cost of the true incremental update algorithm.  */

static bool
thread_block (basic_block bb)
{
  /* E is an incoming edge into BB that we may or may not want to
     redirect to a duplicate of BB.  */
  edge e;
  edge_iterator ei;
  struct local_info local_info;

  /* FOUND_BACKEDGE indicates that we found an incoming backedge
     into BB, in which case we may ignore certain jump threads
     to avoid creating irreducible regions.  */
  bool found_backedge = false;

  /* ALL indicates whether or not all incoming edges into BB should
     be threaded to a duplicate of BB.  */
  bool all = true;

  /* If optimizing for size, only thread this block if we don't have
     to duplicate it or it's an otherwise empty redirection block.  */
  if (optimize_size
      && EDGE_COUNT (bb->preds) > 1
      && !redirection_block_p (bb))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        e->aux = NULL;
      return false;
    }

  /* To avoid scanning a linear array for the element we need we instead
     use a hash table.  For normal code there should be no noticeable
     difference.  However, if we have a block with a large number of
     incoming and outgoing edges such linear searches can get expensive.  */
  redirection_data = htab_create (EDGE_COUNT (bb->succs),
                                  redirection_data_hash,
                                  redirection_data_eq,
                                  free);

  FOR_EACH_EDGE (e, ei, bb->preds)
    found_backedge |= ((e->flags & EDGE_DFS_BACK) != 0);

  /* If BB has incoming backedges, then threading across BB might
     introduce an irreducible region, which would be undesirable
     as that inhibits various optimizations later.  Prune away
     any jump threading requests which we know will result in
     an irreducible region.  */
  if (found_backedge)
    prune_undesirable_thread_requests (bb);

  /* Record each unique threaded destination into a hash table for
     efficient lookups.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (!e->aux)
        {
          all = false;
        }
      else
        {
          edge e2 = e->aux;
          update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
                                           e->count, e->aux);

          /* Insert the outgoing edge into the hash table if it is not
             already in the hash table.  */
          lookup_redirection_data (e2, e, INSERT);
        }
    }

  /* If we are going to thread all incoming edges to an outgoing edge, then
     BB will become unreachable.  Rather than just throwing it away, use
     it for one of the duplicates.  Mark the first incoming edge with the
     DO_NOT_DUPLICATE attribute.  */
  if (all)
    {
      edge e = EDGE_PRED (bb, 0)->aux;
      lookup_redirection_data (e, NULL, NO_INSERT)->do_not_duplicate = true;
    }

  /* Now create duplicates of BB.

     Note that for a block with a high outgoing degree we can waste
     a lot of time and memory creating and destroying useless edges.

     So we first duplicate BB and remove the control structure at the
     tail of the duplicate as well as all outgoing edges from the
     duplicate.  We then use that duplicate block as a template for
     the rest of the duplicates.  */
  local_info.template_block = NULL;
  local_info.bb = bb;
  local_info.jumps_threaded = false;
  htab_traverse (redirection_data, create_duplicates, &local_info);

  /* The template does not have an outgoing edge.  Create that outgoing
     edge and update PHI nodes at the edge's target as necessary.

     We do this after creating all the duplicates to avoid creating
     unnecessary edges.  */
  htab_traverse (redirection_data, fixup_template_block, &local_info);

  /* The hash table traversals above created the duplicate blocks (and the
     statements within the duplicate blocks).  This loop creates PHI nodes for
     the duplicated blocks and redirects the incoming edges into BB to reach
     the duplicates of BB.  */
  htab_traverse (redirection_data, redirect_edges, &local_info);

  /* Done with this block.  Clear REDIRECTION_DATA.  */
  htab_delete (redirection_data);
  redirection_data = NULL;

  /* Indicate to our caller whether or not any jumps were threaded.  */
  return local_info.jumps_threaded;
}

/* Walk through the registered jump threads and convert them into a
   form convenient for this pass.

   Any block which has incoming edges threaded to outgoing edges
   will have its entry in THREADED_BLOCKS set.

   Any threaded edge will have its new outgoing edge stored in the
   original edge's AUX field.

   This form avoids the need to walk all the edges in the CFG to
   discover blocks which need processing and avoids unnecessary
   hash table lookups to map from threaded edge to new target.  */

static void
mark_threaded_blocks (bitmap threaded_blocks)
{
  unsigned int i;

  for (i = 0; i < VEC_length (edge, threaded_edges); i += 2)
    {
      edge e = VEC_index (edge, threaded_edges, i);
      edge e2 = VEC_index (edge, threaded_edges, i + 1);

      e->aux = e2;
      bitmap_set_bit (threaded_blocks, e->dest->index);
    }
}


/* Walk through all blocks and thread incoming edges to the appropriate
   outgoing edge for each edge pair recorded in THREADED_EDGES.

   It is the caller's responsibility to fix the dominance information
   and rewrite duplicated SSA_NAMEs back into SSA form.

   Returns true if one or more edges were threaded, false otherwise.  */

bool
thread_through_all_blocks (void)
{
  bool retval = false;
  unsigned int i;
  bitmap_iterator bi;
  bitmap threaded_blocks;

  if (threaded_edges == NULL)
    return false;

  threaded_blocks = BITMAP_ALLOC (NULL);
  memset (&thread_stats, 0, sizeof (thread_stats));

  mark_threaded_blocks (threaded_blocks);

  EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
    {
      basic_block bb = BASIC_BLOCK (i);

      if (EDGE_COUNT (bb->preds) > 0)
        retval |= thread_block (bb);
    }

  if (dump_file && (dump_flags & TDF_STATS))
    fprintf (dump_file, "\nJumps threaded: %lu\n",
             thread_stats.num_threaded_edges);

  BITMAP_FREE (threaded_blocks);
  threaded_blocks = NULL;
  VEC_free (edge, heap, threaded_edges);
  threaded_edges = NULL;
  return retval;
}

/* Register a jump threading opportunity.  We queue up all the jump
   threading opportunities discovered by a pass and update the CFG
   and SSA form all at once.

   E is the edge we can thread, E2 is the new target edge; i.e., we
   are effectively recording that E->dest can be changed to E2->dest
   after fixing the SSA graph.  */

void
register_jump_thread (edge e, edge e2)
{
  if (threaded_edges == NULL)
    threaded_edges = VEC_alloc (edge, heap, 10);

  VEC_safe_push (edge, heap, threaded_edges, e);
  VEC_safe_push (edge, heap, threaded_edges, e2);
}
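
/* A minimal sketch of the intended calling convention, assuming a
   client pass that has proven which outgoing edge E2 is taken when
   E->dest is entered via E.  The discovery logic itself lives in the
   client (e.g. the dominator optimizer).  */
#if 0
  /* During analysis, queue each discovered thread as an edge pair.  */
  register_jump_thread (e, e2);

  /* After analysis, commit all queued threads at once.  As noted above
     thread_through_all_blocks, the caller must then fix dominance
     information and rewrite duplicated SSA_NAMEs back into SSA form,
     e.g. via update_ssa.  */
  if (thread_through_all_blocks ())
    update_ssa (TODO_update_ssa);
#endif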
