/* Thread edges through blocks and update the control flow and SSA graphs.
   Copyright (C) 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "rtl.h"
#include "tm_p.h"
#include "ggc.h"
#include "basic-block.h"
#include "output.h"
#include "expr.h"
#include "function.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "tree-pass.h"
#include "cfgloop.h"

/* Given a block B, update the CFG and SSA graph to reflect redirecting
   one or more in-edges to B to instead reach the destination of an
   out-edge from B while preserving any side effects in B.

   i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
   side effects of executing B.

     1. Make a copy of B (including its outgoing edges and statements).  Call
        the copy B'.  Note B' has no incoming edges or PHIs at this time.

     2. Remove the control statement at the end of B' and all outgoing edges
        except B'->C.

     3. Add a new argument to each PHI in C with the same value as the existing
        argument associated with edge B->C.  Associate the new PHI arguments
        with the edge B'->C.

     4. For each PHI in B, find or create a PHI in B' with an identical
        PHI_RESULT.  Add an argument to the PHI in B' which has the same
        value as the PHI in B associated with the edge A->B.  Associate
        the new argument in the PHI in B' with the edge A->B.

     5. Change the edge A->B to A->B'.

        5a. This automatically deletes any PHI arguments associated with the
            edge A->B in B.

        5b. This automatically associates each new argument added in step 4
            with the edge A->B'.

     6. Repeat for other incoming edges into B.

     7. Put the duplicated resources in B and all the B' blocks into SSA form.

   Note that block duplication can be minimized by first collecting the
   set of unique destination blocks that the incoming edges should
   be threaded to.  Block duplication can be further minimized by using
   B instead of creating B' for one destination if all edges into B are
   going to be threaded to a successor of B.

   We further reduce the number of edges and statements we create by
   not copying all the outgoing edges and the control statement in
   step #1.  We instead create a template block without the outgoing
   edges and duplicate the template.  */

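/* The transform described above, restated on a deliberately tiny CFG
   model.  This sketch is not part of the original file: TOY_BLOCK and
   TOY_THREAD_EDGE are hypothetical stand-ins for GCC's basic_block and
   edge machinery, shown only to make the A->B, B->C ==> A->B'->C
   rewrite concrete.  */

struct toy_block
{
  int id;
  struct toy_block *succ;   /* Single successor, for simplicity.  */
  const char *side_effect;  /* Stands in for the statements in B.  */
};

/* Thread A's edge to B so that it reaches C instead, while preserving
   B's side effect on the A path (steps #1, #2 and #5 above).  */

static struct toy_block *
toy_thread_edge (struct toy_block *a, struct toy_block *b,
                 struct toy_block *c)
{
  /* Step #1: copy B, statements included.  */
  struct toy_block *b_prime = xmalloc (sizeof (struct toy_block));
  *b_prime = *b;

  /* Step #2: the copy keeps only the B'->C edge.  */
  b_prime->succ = c;

  /* Step #5: redirect A->B to A->B'.  */
  a->succ = b_prime;
  return b_prime;
}
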
/* Steps #5 and #6 of the above algorithm are best implemented by walking
   all the incoming edges which thread to the same destination edge at
   the same time.  That avoids lots of table lookups to get information
   for the destination edge.

   To realize that implementation we create a list of incoming edges
   which thread to the same outgoing edge.  Thus to implement steps
   #5 and #6 we traverse our hash table of outgoing edge information.
   For each entry we walk the list of incoming edges which thread to
   the current outgoing edge.  */

struct el
{
  edge e;
  struct el *next;
};

/* Main data structure recording information regarding B's duplicate
   blocks.  */

/* We need to efficiently record the unique thread destinations of this
   block and specific information associated with those destinations.  We
   may have many incoming edges threaded to the same outgoing edge.  This
   can be naturally implemented with a hash table.  */

struct redirection_data
{
  /* A duplicate of B with the trailing control statement removed and which
     targets a single successor of B.  */
  basic_block dup_block;

  /* An outgoing edge from B.  DUP_BLOCK will have OUTGOING_EDGE->dest as
     its single successor.  */
  edge outgoing_edge;

  /* A list of incoming edges which we want to thread to
     OUTGOING_EDGE->dest.  */
  struct el *incoming_edges;

  /* Flag indicating whether or not we should create a duplicate block
     for this thread destination.  This is only true if we are threading
     all incoming edges and thus are using BB itself as a duplicate block.  */
  bool do_not_duplicate;
};

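/* Example (not in the original source): INCOMING_EDGES above is a plain
   singly-linked list, so per-destination queries are simple list walks.
   This hypothetical helper counts how many incoming edges have been
   recorded for one thread destination.  */

static unsigned int
count_incoming_edges (struct redirection_data *rd)
{
  unsigned int n = 0;
  struct el *p;

  for (p = rd->incoming_edges; p; p = p->next)
    n++;
  return n;
}
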
/* Main data structure to hold information for duplicates of BB.  */
static htab_t redirection_data;

bool rediscover_loops_after_threading;

/* Data structure of information to pass to hash table traversal routines.  */
struct local_info
{
  /* The current block we are working on.  */
  basic_block bb;

  /* A template copy of BB with no outgoing edges or control statement that
     we use for creating copies.  */
  basic_block template_block;

  /* TRUE if we thread one or more jumps, FALSE otherwise.  */
  bool jumps_threaded;
};

/* Jump threading statistics.  */

struct thread_stats_d
{
  unsigned long num_threaded_edges;
};

struct thread_stats_d thread_stats;

/* Remove the last statement in block BB if it is a control statement.
   Also remove all outgoing edges except the edge which reaches DEST_BB.
   If DEST_BB is NULL, then remove all outgoing edges.  */

static void
remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
{
  block_stmt_iterator bsi;
  edge e;
  edge_iterator ei;

  bsi = bsi_last (bb);

  /* If the duplicate ends with a control statement, then remove it.

     Note that if we are duplicating the template block rather than the
     original basic block, then the duplicate might not have any real
     statements in it.  */
  if (!bsi_end_p (bsi)
      && bsi_stmt (bsi)
      && (TREE_CODE (bsi_stmt (bsi)) == COND_EXPR
          || TREE_CODE (bsi_stmt (bsi)) == GOTO_EXPR
          || TREE_CODE (bsi_stmt (bsi)) == SWITCH_EXPR))
    bsi_remove (&bsi);

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (e->dest != dest_bb)
        remove_edge (e);
      else
        ei_next (&ei);
    }
}

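/* A note on the loop above, with a sketch that is not part of the
   original file: the iterator is advanced only when an edge is kept,
   because removing an edge compacts BB's successor vector, so the same
   iterator position then denotes a different, not yet examined, edge.
   The same idiom on a plain array looks like this; TOY_REMOVE_MATCHING,
   VALS and N are hypothetical.  */

static void
toy_remove_matching (int *vals, unsigned int *n, int doomed)
{
  unsigned int i = 0;

  while (i < *n)
    {
      if (vals[i] == doomed)
        /* Fill the hole with the last element and re-examine slot I.  */
        vals[i] = vals[--*n];
      else
        i++;
    }
}
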
/* Create a duplicate of BB which only reaches the destination of the edge
   stored in RD.  Record the duplicate block in RD.  */

static void
create_block_for_threading (basic_block bb, struct redirection_data *rd)
{
  /* We can use the generic block duplication code and simply remove
     the stuff we do not need.  */
  rd->dup_block = duplicate_block (bb, NULL, NULL);

  /* Zero out the profile, since the block is unreachable for now.  */
  rd->dup_block->frequency = 0;
  rd->dup_block->count = 0;

  /* The call to duplicate_block will copy everything, including the
     useless COND_EXPR or SWITCH_EXPR at the end of BB.  We just remove
     the useless COND_EXPR or SWITCH_EXPR here rather than having a
     specialized block copier.  We also remove all outgoing edges
     from the duplicate block.  The appropriate edge will be created
     later.  */
  remove_ctrl_stmt_and_useless_edges (rd->dup_block, NULL);
}

/* Hashing and equality routines for our hash table.  */
static hashval_t
redirection_data_hash (const void *p)
{
  edge e = ((struct redirection_data *)p)->outgoing_edge;
  return e->dest->index;
}

static int
redirection_data_eq (const void *p1, const void *p2)
{
  edge e1 = ((struct redirection_data *)p1)->outgoing_edge;
  edge e2 = ((struct redirection_data *)p2)->outgoing_edge;

  return e1 == e2;
}

/* Given an outgoing edge E, look up and return its entry in our hash table.

   If INSERT is true, then we insert the entry into the hash table if
   it is not already present.  INCOMING_EDGE is added to the list of incoming
   edges associated with E in the hash table.  */

static struct redirection_data *
lookup_redirection_data (edge e, edge incoming_edge, enum insert_option insert)
{
  void **slot;
  struct redirection_data *elt;

  /* Build a hash table element so we can see if E is already
     in the table.  */
  elt = xmalloc (sizeof (struct redirection_data));
  elt->outgoing_edge = e;
  elt->dup_block = NULL;
  elt->do_not_duplicate = false;
  elt->incoming_edges = NULL;

  slot = htab_find_slot (redirection_data, elt, insert);

  /* This will only happen if INSERT is false and the entry is not
     in the hash table.  */
  if (slot == NULL)
    {
      free (elt);
      return NULL;
    }

  /* This will only happen if E was not in the hash table and
     INSERT is true.  */
  if (*slot == NULL)
    {
      *slot = (void *)elt;
      elt->incoming_edges = xmalloc (sizeof (struct el));
      elt->incoming_edges->e = incoming_edge;
      elt->incoming_edges->next = NULL;
      return elt;
    }
  /* E was in the hash table.  */
  else
    {
      /* Free ELT as we do not need it anymore; we will extract the
         relevant entry from the hash table itself.  */
      free (elt);

      /* Get the entry stored in the hash table.  */
      elt = (struct redirection_data *) *slot;

      /* If insertion was requested, then we need to add INCOMING_EDGE
         to the list of incoming edges associated with E.  */
      if (insert)
        {
          struct el *el = xmalloc (sizeof (struct el));
          el->next = elt->incoming_edges;
          el->e = incoming_edge;
          elt->incoming_edges = el;
        }

      return elt;
    }
}

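/* Example (not in the original source): the INSERT / NO_INSERT protocol
   used above, demonstrated on a one-entry table keyed by pointer.  The
   hash table calls are the libiberty routines already used in this
   file; HTAB_PROTOCOL_DEMO and KEY are hypothetical.  */

static void
htab_protocol_demo (void)
{
  static int key;
  void **slot;
  htab_t t = htab_create (7, htab_hash_pointer, htab_eq_pointer, NULL);

  /* With NO_INSERT, a missing key yields a NULL slot, which is how
     lookup_redirection_data can return NULL.  */
  slot = htab_find_slot (t, &key, NO_INSERT);
  gcc_assert (slot == NULL);

  /* With INSERT we always get a slot back; *SLOT == NULL means the key
     was absent and the caller must fill the slot in.  */
  slot = htab_find_slot (t, &key, INSERT);
  if (*slot == NULL)
    *slot = &key;

  htab_delete (t);
}
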
/* Given a duplicate block and its single destination (both stored
   in RD), create an edge between the duplicate and its single
   destination.

   Add an additional argument to any PHI nodes at the single
   destination.  */

static void
create_edge_and_update_destination_phis (struct redirection_data *rd)
{
  edge e = make_edge (rd->dup_block, rd->outgoing_edge->dest, EDGE_FALLTHRU);
  tree phi;

  e->probability = REG_BR_PROB_BASE;
  e->count = rd->dup_block->count;

  /* If there are any PHI nodes at the destination of the outgoing edge
     from the duplicate block, then we will need to add a new argument
     to them.  The argument should have the same value as the argument
     associated with the outgoing edge stored in RD.  */
  for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi))
    {
      int indx = rd->outgoing_edge->dest_idx;
      add_phi_arg (phi, PHI_ARG_DEF (phi, indx), e);
    }
}

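/* Example (not in the original source): step #3 of the algorithm on a
   toy PHI node.  TOY_PHI is hypothetical; in GCC the argument slot for
   an incoming edge is named by the edge's DEST_IDX, exactly as INDX is
   used above.  */

struct toy_phi
{
  int args[8];            /* One value per incoming edge.  */
  unsigned int num_args;  /* Also the dest_idx of the next new edge.  */
};

/* The new B'->C edge receives a copy of the value the destination
   already sees along B->C (the argument at OLD_IDX).  */

static void
toy_copy_phi_arg (struct toy_phi *phi, unsigned int old_idx)
{
  phi->args[phi->num_args++] = phi->args[old_idx];
}
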
/* Hash table traversal callback routine to create duplicate blocks.  */

static int
create_duplicates (void **slot, void *data)
{
  struct redirection_data *rd = (struct redirection_data *) *slot;
  struct local_info *local_info = (struct local_info *)data;

  /* If this entry should not have a duplicate created, then there's
     nothing to do.  */
  if (rd->do_not_duplicate)
    return 1;

  /* Create a template block if we have not done so already.  Otherwise
     use the template to create a new block.  */
  if (local_info->template_block == NULL)
    {
      create_block_for_threading (local_info->bb, rd);
      local_info->template_block = rd->dup_block;

      /* We do not create any outgoing edges for the template.  We will
         take care of that in a later traversal.  That way we do not
         create edges that are going to just be deleted.  */
    }
  else
    {
      create_block_for_threading (local_info->template_block, rd);

      /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
         block.  */
      create_edge_and_update_destination_phis (rd);
    }

  /* Keep walking the hash table.  */
  return 1;
}

/* We did not create any outgoing edges for the template block during
   block creation.  This hash table traversal callback creates the
   outgoing edge for the template block.  */

static int
fixup_template_block (void **slot, void *data)
{
  struct redirection_data *rd = (struct redirection_data *) *slot;
  struct local_info *local_info = (struct local_info *)data;

  /* If this is the template block, then create its outgoing edges
     and halt the hash table traversal.  */
  if (rd->dup_block && rd->dup_block == local_info->template_block)
    {
      create_edge_and_update_destination_phis (rd);
      return 0;
    }

  return 1;
}

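/* A note on the RETURN 0 above (sketch, not in the original source):
   htab_traverse stops walking as soon as a callback returns zero, and
   only one entry can own the template, so the traversal ends at the
   first match.  A minimal callback using the same convention, here
   grabbing the first live entry; FIND_FIRST_ENTRY is hypothetical and
   would be used as htab_traverse (table, find_first_entry, &result).  */

static int
find_first_entry (void **slot, void *data)
{
  *(void **) data = *slot;
  return 0;   /* Nonzero would mean "keep walking".  */
}
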
/* Not all jump threading requests are useful.  In particular, some
   jump threading requests can create irreducible regions which are
   undesirable.

   This routine will examine the BB's incoming edges for jump threading
   requests which, if acted upon, would create irreducible regions.  Any
   such jump threading requests found will be pruned away.  */

static void
prune_undesirable_thread_requests (basic_block bb)
{
  edge e;
  edge_iterator ei;
  bool may_create_irreducible_region = false;
  unsigned int num_outgoing_edges_into_loop = 0;

  /* For the heuristics below, we need to know if BB has more than
     one outgoing edge into a loop.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    num_outgoing_edges_into_loop += ((e->flags & EDGE_LOOP_EXIT) == 0);

  if (num_outgoing_edges_into_loop > 1)
    {
      edge backedge = NULL;

      /* Consider the effect of threading the edge (0, 1) to 2 on the left
         CFG to produce the right CFG:

             |            |
             1<--+        2<--------+
            / \  |        |         |
           2   3 |        4<----+   |
            \ /  |       / \    |   |
             4---+      E   1-- | --+
             |              |   |
             E              3---+

        Threading the (0, 1) edge to 2 effectively creates two loops
        (2, 4, 1) and (4, 1, 3) which are neither disjoint nor nested.
        This is not good.

        However, we do need to be able to thread (0, 1) to 2 or 3
        in the left CFG below (which creates the middle and right
        CFGs with nested loops).

             |          |             |
             1<--+      2<----+       3<-+<-+
            /|   |      |     |       |  |  |
           2 |   |      3<-+  |       1--+  |
            \|   |      |  |  |       |     |
             3---+      1--+--+       2-----+

         A safe heuristic appears to be to only allow threading if BB
         has a single incoming backedge from one of its direct successors.  */

      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (e->flags & EDGE_DFS_BACK)
            {
              if (backedge)
                {
                  backedge = NULL;
                  break;
                }
              else
                {
                  backedge = e;
                }
            }
        }

      if (backedge && find_edge (bb, backedge->src))
        ;
      else
        may_create_irreducible_region = true;
    }
  else
    {
      edge dest = NULL;

      /* If we thread across the loop entry block (BB) into the
         loop and BB is still reached from outside the loop, then
         we would create an irreducible CFG.  Consider the effect
         of threading the edge (1, 4) to 5 on the left CFG to produce
         the right CFG:

            / \             / \
           1   2           1   2
            \ /            |   |
             4<----+       5<->4
            / \    |           |
           E   5---+           E

         Threading the (1, 4) edge to 5 creates two entry points
         into the loop (4, 5) (one from block 1, the other from
         block 2).  A classic irreducible region.

         So look at all of BB's incoming edges which are not
         backedges and which are not threaded to the loop exit.
         If the edges in that subset do not all thread
         to the same block, then threading any of them will create
         an irreducible region.  */

      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          edge e2;

          /* We ignore back edges for now.  This may need refinement
             as threading a backedge creates an inner loop which
             we would need to verify has a single entry point.

             If all backedges thread to new locations, then this
             block will no longer have incoming backedges and we
             need not worry about creating irreducible regions
             by threading through BB.  I don't think this happens
             enough in practice to worry about it.  */
          if (e->flags & EDGE_DFS_BACK)
            continue;

          /* If the incoming edge threads to the loop exit, then it
             is clearly safe.  */
          e2 = e->aux;
          if (e2 && (e2->flags & EDGE_LOOP_EXIT))
            continue;

          /* E enters the loop header and is not threaded.  We cannot
             allow any other incoming edges to thread into
             the loop as that would create an irreducible region.  */
          if (!e2)
            {
              may_create_irreducible_region = true;
              break;
            }

          /* We know that this incoming edge threads to a block inside
             the loop.  This edge must thread to the same target in
             the loop as any previously seen threaded edges.  Otherwise
             we will create an irreducible region.  */
          if (!dest)
            dest = e2;
          else if (e2 != dest)
            {
              may_create_irreducible_region = true;
              break;
            }
        }
    }

  /* If we might create an irreducible region, then cancel any of
     the jump threading requests for incoming edges which are
     not backedges and which do not thread to the exit block.  */
  if (may_create_irreducible_region)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          edge e2;

          /* Ignore back edges.  */
          if (e->flags & EDGE_DFS_BACK)
            continue;

          e2 = e->aux;

          /* If this incoming edge was not threaded, then there is
             nothing to do.  */
          if (!e2)
            continue;

          /* If this incoming edge threaded to the loop exit,
             then it can be ignored as it is safe.  */
          if (e2->flags & EDGE_LOOP_EXIT)
            continue;

          if (e2)
            {
              /* This edge threaded into the loop and the jump thread
                 request must be cancelled.  */
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "  Not threading jump %d --> %d to %d\n",
                         e->src->index, e->dest->index, e2->dest->index);
              e->aux = NULL;
            }
        }
    }
}

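/* Example (not in the original source): the first heuristic above,
   restated over hypothetical flag arrays instead of GCC's edge lists.
   Threading is considered safe only when BB has exactly one incoming
   backedge and that backedge comes from a direct successor of BB.  */

static bool
toy_single_backedge_from_successor (unsigned int n_preds,
                                    const bool *pred_is_backedge,
                                    const bool *pred_src_is_successor)
{
  int backedge = -1;
  unsigned int i;

  for (i = 0; i < n_preds; i++)
    if (pred_is_backedge[i])
      {
        if (backedge >= 0)
          return false;   /* A second backedge: potentially an
                             irreducible region.  */
        backedge = i;
      }

  return backedge >= 0 && pred_src_is_successor[backedge];
}
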
/* Hash table traversal callback to redirect each incoming edge
   associated with this hash table element to its new destination.  */

static int
redirect_edges (void **slot, void *data)
{
  struct redirection_data *rd = (struct redirection_data *) *slot;
  struct local_info *local_info = (struct local_info *)data;
  struct el *next, *el;

  /* Walk over all the incoming edges associated with this
     hash table entry.  */
  for (el = rd->incoming_edges; el; el = next)
    {
      edge e = el->e;

      /* Go ahead and free this element from the list.  Doing this now
         avoids the need for another list walk when we destroy the hash
         table.  */
      next = el->next;
      free (el);

      /* Go ahead and clear E->aux.  It's not needed anymore and failure
         to clear it will cause all kinds of unpleasant problems later.  */
      e->aux = NULL;

      thread_stats.num_threaded_edges++;

      if (rd->dup_block)
        {
          edge e2;

          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
                     e->src->index, e->dest->index, rd->dup_block->index);

          rd->dup_block->count += e->count;
          rd->dup_block->frequency += EDGE_FREQUENCY (e);
          EDGE_SUCC (rd->dup_block, 0)->count += e->count;
          /* Redirect the incoming edge to the appropriate duplicate
             block.  */
          e2 = redirect_edge_and_branch (e, rd->dup_block);
          flush_pending_stmts (e2);

          if ((dump_file && (dump_flags & TDF_DETAILS))
              && e->src != e2->src)
            fprintf (dump_file, "    basic block %d created\n", e2->src->index);
        }
      else
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
                     e->src->index, e->dest->index, local_info->bb->index);

          /* We are using BB as the duplicate.  Remove the unnecessary
             outgoing edges and statements from BB.  */
          remove_ctrl_stmt_and_useless_edges (local_info->bb,
                                              rd->outgoing_edge->dest);

          /* And fixup the flags on the single remaining edge.  */
          single_succ_edge (local_info->bb)->flags
            &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
          single_succ_edge (local_info->bb)->flags |= EDGE_FALLTHRU;
        }
    }

  /* Indicate that we actually threaded one or more jumps.  */
  if (rd->incoming_edges)
    local_info->jumps_threaded = true;

  return 1;
}

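/* A note on the list walk above (sketch, not in the original source):
   NEXT is read before FREE is called, since touching EL->next after
   freeing EL would be a use after free.  The idiom in isolation, using
   a hypothetical helper over the same struct el:  */

static void
toy_free_el_list (struct el *head)
{
  while (head)
    {
      struct el *next = head->next;  /* Save the link first.  */
      free (head);
      head = next;
    }
}
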
/* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
   is reached via one or more specific incoming edges, we know which
   outgoing edge from BB will be traversed.

   We want to redirect those incoming edges to the target of the
   appropriate outgoing edge.  Doing so avoids a conditional branch
   and may expose new optimization opportunities.  Note that we have
   to update the dominator tree and SSA graph after such changes.

   The key to keeping the SSA graph update manageable is to duplicate
   the side effects occurring in BB so that those side effects still
   occur on the paths which bypass BB after redirecting edges.

   We accomplish this by creating duplicates of BB and arranging for
   the duplicates to unconditionally pass control to one specific
   successor of BB.  We then revector the incoming edges into BB to
   the appropriate duplicate of BB.

   BB and its duplicates will have assignments to the same set of
   SSA_NAMEs.  Right now, we just call into update_ssa to update the
   SSA graph for those names.

   We are also going to experiment with a true incremental update
   scheme for the duplicated resources.  One of the interesting
   properties we can exploit here is that all the resources set
   in BB will have the same IDFS, so we have one IDFS computation
   per block with incoming threaded edges, which can lower the
   cost of the true incremental update algorithm.  */

static bool
thread_block (basic_block bb)
{
  /* E is an incoming edge into BB that we may or may not want to
     redirect to a duplicate of BB.  */
  edge e;
  edge_iterator ei;
  struct local_info local_info;

  /* FOUND_BACKEDGE indicates that we found an incoming backedge
     into BB, in which case we may ignore certain jump threads
     to avoid creating irreducible regions.  */
  bool found_backedge = false;

  /* ALL indicates whether or not all incoming edges into BB should
     be threaded to a duplicate of BB.  */
  bool all = true;

  /* To avoid scanning a linear array for the element we need we instead
     use a hash table.  For normal code there should be no noticeable
     difference.  However, if we have a block with a large number of
     incoming and outgoing edges such linear searches can get expensive.  */
  redirection_data = htab_create (EDGE_COUNT (bb->succs),
                                  redirection_data_hash,
                                  redirection_data_eq,
                                  free);

  FOR_EACH_EDGE (e, ei, bb->preds)
    found_backedge |= ((e->flags & EDGE_DFS_BACK) != 0);

  /* If BB has incoming backedges, then threading across BB might
     introduce an irreducible region, which would be undesirable
     as that inhibits various optimizations later.  Prune away
     any jump threading requests which we know will result in
     an irreducible region.  */
  if (found_backedge)
    prune_undesirable_thread_requests (bb);

  /* Record each unique threaded destination into a hash table for
     efficient lookups.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (!e->aux)
        {
          all = false;
        }
      else
        {
          edge e2 = e->aux;
          update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
                                           e->count, e->aux);

          /* If we thread to a loop exit edge, then we will need to
             rediscover the loop exit edges.  While it may seem that
             the new edge is a loop exit edge, that is not the case.
             Consider threading the edge (5,6) to E in the CFG on the
             left which creates the CFG on the right:

                      0<--+            0<---+
                     / \  |           / \   |
                    1   2 |          1   2  |
                   / \  | |         / \  |  |
                  3   4 | |        3   4 6--+
                   \ /  | |         \ /
                    5   | |          5
                     \ /  |          |
                      6---+          E
                      |
                      E

             After threading, the edge (0, 1) is the loop exit edge and
             the nodes 0, 2, 6 are the only nodes in the loop.  */
          if (e2->flags & EDGE_LOOP_EXIT)
            rediscover_loops_after_threading = true;

          /* Insert the outgoing edge into the hash table if it is not
             already in the hash table.  */
          lookup_redirection_data (e2, e, INSERT);
        }
    }

  /* If we are going to thread all incoming edges to an outgoing edge, then
     BB will become unreachable.  Rather than just throwing it away, use
     it for one of the duplicates.  Mark the first incoming edge with the
     DO_NOT_DUPLICATE attribute.  */
  if (all)
    {
      edge e = EDGE_PRED (bb, 0)->aux;
      lookup_redirection_data (e, NULL, NO_INSERT)->do_not_duplicate = true;
    }

  /* Now create duplicates of BB.

     Note that for a block with a high outgoing degree we can waste
     a lot of time and memory creating and destroying useless edges.

     So we first duplicate BB and remove the control structure at the
     tail of the duplicate as well as all outgoing edges from the
     duplicate.  We then use that duplicate block as a template for
     the rest of the duplicates.  */
  local_info.template_block = NULL;
  local_info.bb = bb;
  local_info.jumps_threaded = false;
  htab_traverse (redirection_data, create_duplicates, &local_info);

  /* The template does not have an outgoing edge.  Create that outgoing
     edge and update PHI nodes at the edge's target as necessary.

     We do this after creating all the duplicates to avoid creating
     unnecessary edges.  */
  htab_traverse (redirection_data, fixup_template_block, &local_info);

  /* The hash table traversals above created the duplicate blocks (and the
     statements within the duplicate blocks).  This loop creates PHI nodes for
     the duplicated blocks and redirects the incoming edges into BB to reach
     the duplicates of BB.  */
  htab_traverse (redirection_data, redirect_edges, &local_info);

  /* Done with this block.  Clear REDIRECTION_DATA.  */
  htab_delete (redirection_data);
  redirection_data = NULL;

  /* Indicate to our caller whether or not any jumps were threaded.  */
  return local_info.jumps_threaded;
}

/* Walk through all blocks and thread incoming edges to the block's
   destinations as requested.  This is the only entry point into this
   file.

   Blocks which have one or more incoming edges have INCOMING_EDGE_THREADED
   set in the block's annotation.

   Each edge that should be threaded has the new destination edge stored in
   the original edge's AUX field.

   This routine (or one of its callees) will clear INCOMING_EDGE_THREADED
   in the block annotations and the AUX field in the edges.

   It is the caller's responsibility to fix the dominance information
   and rewrite duplicated SSA_NAMEs back into SSA form.

   Returns true if one or more edges were threaded, false otherwise.  */

bool
thread_through_all_blocks (bitmap threaded_blocks)
{
  bool retval = false;
  unsigned int i;
  bitmap_iterator bi;

  rediscover_loops_after_threading = false;
  memset (&thread_stats, 0, sizeof (thread_stats));

  EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
    {
      basic_block bb = BASIC_BLOCK (i);

      if (EDGE_COUNT (bb->preds) > 0)
        retval |= thread_block (bb);
    }

  if (dump_file && (dump_flags & TDF_STATS))
    fprintf (dump_file, "\nJumps threaded: %lu\n",
             thread_stats.num_threaded_edges);

  return retval;
}
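
/* Example (not in the original source): EXECUTE_IF_SET_IN_BITMAP above
   visits the index of every set bit in ascending order.  Its effect on
   a single flat word can be written out directly; TOY_VISIT_SET_BITS
   and VISIT are hypothetical.  */

static void
toy_visit_set_bits (unsigned long word, void (*visit) (unsigned int))
{
  unsigned int i;

  for (i = 0; word != 0; i++, word >>= 1)
    if (word & 1)
      visit (i);
}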
