/* Natural loop analysis code for GNU compiler.
   Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "obstack.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "expr.h"
#include "output.h"

/* Checks whether BB is executed exactly once in each LOOP iteration.  */
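/* Intuition for the checks below: dominating the latch means every
   completed iteration passes through BB, so it runs at least once;
   requiring BB to belong directly to LOOP (not to a subloop) and to lie
   outside any irreducible region rules out the ways it could run more
   than once within a single iteration.  */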
 
bool
just_once_each_iteration_p (const struct loop *loop, basic_block bb)
{
  /* It must be executed at least once each iteration.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
    return false;

  /* And just once.  */
  if (bb->loop_father != loop)
    return false;

  /* But this was not enough.  We might have some irreducible loop here.  */
  if (bb->flags & BB_IRREDUCIBLE_LOOP)
    return false;

  return true;
}

/* Structure representing edge of a graph.  */

struct edge
{
  int src, dest;        /* Source and destination.  */
  struct edge *pred_next, *succ_next;
                        /* Next edge in predecessor and successor lists.  */
  void *data;           /* Data attached to the edge.  */
};

/* Structure representing vertex of a graph.  */

struct vertex
{
  struct edge *pred, *succ;
                        /* Lists of predecessors and successors.  */
  int component;        /* Number of dfs restarts before reaching the
                           vertex.  */
  int post;             /* Postorder number.  */
};

/* Structure representing a graph.  */

struct graph
{
  int n_vertices;       /* Number of vertices.  */
  struct vertex *vertices;
                        /* The vertices.  */
};

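/* The structures above describe a throwaway digraph in adjacency-list
   form: each vertex keeps singly linked lists of incoming and outgoing
   edges, and every edge carries a pointer back to the CFG edge it
   represents so that check_irred can mark that CFG edge later.  */
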
/* Dumps graph G into F.  */

extern void dump_graph (FILE *, struct graph *);

void
dump_graph (FILE *f, struct graph *g)
{
  int i;
  struct edge *e;

  for (i = 0; i < g->n_vertices; i++)
    {
      if (!g->vertices[i].pred
          && !g->vertices[i].succ)
        continue;

      fprintf (f, "%d (%d)\t<-", i, g->vertices[i].component);
      for (e = g->vertices[i].pred; e; e = e->pred_next)
        fprintf (f, " %d", e->src);
      fprintf (f, "\n");

      fprintf (f, "\t->");
      for (e = g->vertices[i].succ; e; e = e->succ_next)
        fprintf (f, " %d", e->dest);
      fprintf (f, "\n");
    }
}

/* Creates a new graph with N_VERTICES vertices.  */

static struct graph *
new_graph (int n_vertices)
{
  struct graph *g = XNEW (struct graph);

  g->n_vertices = n_vertices;
  g->vertices = XCNEWVEC (struct vertex, n_vertices);

  return g;
}

/* Adds an edge from F to T to graph G, with DATA attached.  */

static void
add_edge (struct graph *g, int f, int t, void *data)
{
  struct edge *e = xmalloc (sizeof (struct edge));

  e->src = f;
  e->dest = t;
  e->data = data;

  e->pred_next = g->vertices[t].pred;
  g->vertices[t].pred = e;

  e->succ_next = g->vertices[f].succ;
  g->vertices[f].succ = e;
}

/* Runs dfs search over vertices of G, from NQ vertices in queue QS.
   The vertices in postorder are stored into QT.  If FORWARD is false,
   backward dfs is run.  */
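/* The search is iterative: STACK holds the edge through which each
   vertex on the current dfs path was entered, `component' records the
   index of the dfs restart that first reached a vertex (so vertices
   reached from the same queue entry share a component number), and
   `post' is assigned when a vertex is finally left.  With FORWARD false
   the roles of the successor and predecessor lists are swapped, giving
   a dfs over the reversed graph.  */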
 
static void
dfs (struct graph *g, int *qs, int nq, int *qt, bool forward)
{
  int i, tick = 0, v, comp = 0, top;
  struct edge *e;
  struct edge **stack = xmalloc (sizeof (struct edge *) * g->n_vertices);

  for (i = 0; i < g->n_vertices; i++)
    {
      g->vertices[i].component = -1;
      g->vertices[i].post = -1;
    }

#define FST_EDGE(V) (forward ? g->vertices[(V)].succ : g->vertices[(V)].pred)
#define NEXT_EDGE(E) (forward ? (E)->succ_next : (E)->pred_next)
#define EDGE_SRC(E) (forward ? (E)->src : (E)->dest)
#define EDGE_DEST(E) (forward ? (E)->dest : (E)->src)

  for (i = 0; i < nq; i++)
    {
      v = qs[i];
      if (g->vertices[v].post != -1)
        continue;

      g->vertices[v].component = comp++;
      e = FST_EDGE (v);
      top = 0;

      while (1)
        {
          while (e && g->vertices[EDGE_DEST (e)].component != -1)
            e = NEXT_EDGE (e);

          if (!e)
            {
              if (qt)
                qt[tick] = v;
              g->vertices[v].post = tick++;

              if (!top)
                break;

              e = stack[--top];
              v = EDGE_SRC (e);
              e = NEXT_EDGE (e);
              continue;
            }

          stack[top++] = e;
          v = EDGE_DEST (e);
          e = FST_EDGE (v);
          g->vertices[v].component = comp - 1;
        }
    }

  free (stack);
}

/* Marks the edge E in graph G irreducible if it connects two vertices in the
   same scc.  */
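/* After the two dfs passes in mark_irreducible_loops, the `component'
   numbers are such that every edge goes from a component with a higher
   (or equal) number to one with a lower number; an edge whose endpoints
   share a component therefore lies on a cycle that survived the removal
   of latch edges, i.e. on an irreducible region.  */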
 
static void
check_irred (struct graph *g, struct edge *e)
{
  edge real = e->data;

  /* All edges should lead from a component with a higher number to one
     with a lower number.  */
  gcc_assert (g->vertices[e->src].component >= g->vertices[e->dest].component);

  if (g->vertices[e->src].component != g->vertices[e->dest].component)
    return;

  real->flags |= EDGE_IRREDUCIBLE_LOOP;
  if (flow_bb_inside_loop_p (real->src->loop_father, real->dest))
    real->src->flags |= BB_IRREDUCIBLE_LOOP;
}

/* Runs CALLBACK for all edges in G.  */

static void
for_each_edge (struct graph *g,
               void (callback) (struct graph *, struct edge *))
{
  struct edge *e;
  int i;

  for (i = 0; i < g->n_vertices; i++)
    for (e = g->vertices[i].succ; e; e = e->succ_next)
      callback (g, e);
}

/* Releases the memory occupied by G.  */

static void
free_graph (struct graph *g)
{
  struct edge *e, *n;
  int i;

  for (i = 0; i < g->n_vertices; i++)
    for (e = g->vertices[i].succ; e; e = n)
      {
        n = e->succ_next;
        free (e);
      }
  free (g->vertices);
  free (g);
}

/* Marks blocks and edges that are part of non-recognized loops; i.e. we
   throw away all latch edges and mark blocks inside any remaining cycle.
   Everything is a bit complicated due to the fact that we do not want to
   do this for parts of cycles that only "pass" through some loop -- i.e.
   for each cycle, we want to mark the blocks that belong directly to the
   innermost loop containing the whole cycle.

   LOOPS is the loop tree.  */

#define LOOP_REPR(LOOP) ((LOOP)->num + last_basic_block)
#define BB_REPR(BB) ((BB)->index + 1)
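/* The auxiliary graph gives every basic block and every loop its own
   vertex: BB_REPR maps a block to a vertex and LOOP_REPR maps a loop to
   a vertex placed after all of the block vertices.  Because latch edges
   are dropped when the graph is built, the cycle of every recognized
   natural loop is broken; any cycle that still remains belongs to a
   non-recognized loop, and its edges and blocks are the ones to mark.  */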
 
void
mark_irreducible_loops (struct loops *loops)
{
  basic_block act;
  edge e;
  edge_iterator ei;
  int i, src, dest;
  struct graph *g;
  int *queue1 = XNEWVEC (int, last_basic_block + loops->num);
  int *queue2 = XNEWVEC (int, last_basic_block + loops->num);
  int nq, depth;
  struct loop *cloop;

  /* Reset the flags.  */
  FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
    {
      act->flags &= ~BB_IRREDUCIBLE_LOOP;
      FOR_EACH_EDGE (e, ei, act->succs)
        e->flags &= ~EDGE_IRREDUCIBLE_LOOP;
    }

  /* Create the edge lists.  */
  g = new_graph (last_basic_block + loops->num);

  FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
    FOR_EACH_EDGE (e, ei, act->succs)
      {
        /* Ignore edges to exit.  */
        if (e->dest == EXIT_BLOCK_PTR)
          continue;

        /* And latch edges.  */
        if (e->dest->loop_father->header == e->dest
            && e->dest->loop_father->latch == act)
          continue;

        /* Edges inside a single loop should be left where they are.  Edges
           to subloop headers should lead to the representative of the
           subloop, but from the same place.

           Edges exiting loops should lead from the representative of the
           child of the nearest common ancestor of the two loops, namely
           the child in which act lies.  */

        src = BB_REPR (act);
        dest = BB_REPR (e->dest);

        if (e->dest->loop_father->header == e->dest)
          dest = LOOP_REPR (e->dest->loop_father);

        if (!flow_bb_inside_loop_p (act->loop_father, e->dest))
          {
            depth = find_common_loop (act->loop_father,
                                      e->dest->loop_father)->depth + 1;
            if (depth == act->loop_father->depth)
              cloop = act->loop_father;
            else
              cloop = act->loop_father->pred[depth];

            src = LOOP_REPR (cloop);
          }

        add_edge (g, src, dest, e);
      }

  /* Find the strongly connected components.  Use the standard two-pass
     approach (Kosaraju's algorithm) -- first determine the postorder dfs
     numbering in the reversed graph, then run the dfs on the original
     graph in the order given by decreasing numbers assigned by the
     previous pass.  */
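  /* The first pass below fills queue2 with a postorder of the reversed
     graph; processing the vertices in decreasing postorder in the second
     pass keeps each dfs restart within a single strongly connected
     component, so `component' ends up identifying the scc of every
     vertex, which is what check_irred tests.  */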
  nq = 0;
  FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
    {
      queue1[nq++] = BB_REPR (act);
    }
  for (i = 1; i < (int) loops->num; i++)
    if (loops->parray[i])
      queue1[nq++] = LOOP_REPR (loops->parray[i]);
  dfs (g, queue1, nq, queue2, false);
  for (i = 0; i < nq; i++)
    queue1[i] = queue2[nq - i - 1];
  dfs (g, queue1, nq, NULL, true);

  /* Mark the irreducible loops.  */
  for_each_edge (g, check_irred);

  free_graph (g);
  free (queue1);
  free (queue2);

  loops->state |= LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS;
}
 
/* Counts the number of insns inside LOOP.  */
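/* Every block contributes one to the count up front; the walk below runs
   from BB_HEAD up to, but not including, BB_END, so the final instruction
   of each block is effectively accounted for by that initial increment
   rather than by the INSN_P test.  */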
int
num_loop_insns (struct loop *loop)
{
  basic_block *bbs, bb;
  unsigned i, ninsns = 0;
  rtx insn;

  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];
      ninsns++;
      for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
        if (INSN_P (insn))
          ninsns++;
    }
  free (bbs);

  return ninsns;
}

/* Counts the number of insns executed on average per iteration of LOOP.  */
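/* Each block's insn count is weighted by its profile frequency relative
   to the loop header, using BB_FREQ_MAX as a fixed-point scale so the
   computation stays in integer arithmetic; the scale is divided back out
   at the end, and the result is forced to be at least 1.  */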
int
average_num_loop_insns (struct loop *loop)
{
  basic_block *bbs, bb;
  unsigned i, binsns, ninsns, ratio;
  rtx insn;

  ninsns = 0;
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      binsns = 1;
      for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
        if (INSN_P (insn))
          binsns++;

      ratio = loop->header->frequency == 0
              ? BB_FREQ_MAX
              : (bb->frequency * BB_FREQ_MAX) / loop->header->frequency;
      ninsns += binsns * ratio;
    }
  free (bbs);

  ninsns /= BB_FREQ_MAX;
  if (!ninsns)
    ninsns = 1; /* To avoid division by zero.  */

  return ninsns;
}

/* Returns the expected number of LOOP iterations.  If the exact number
   would not fit in an integer, an upper bound is returned instead, to
   help the loop peeling heuristics.  Exact profile counts are preferred
   whenever they are available.  */
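/* With profile counts the estimate is ceil (count_latch / count_in),
   i.e. the average number of latch traversals per entry into the loop;
   for example, count_in = 10 and count_latch = 90 give
   (90 + 10 - 1) / 10 = 9.  The result is capped at REG_BR_PROB_BASE to
   avoid overflows in the callers.  */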
unsigned
expected_loop_iterations (const struct loop *loop)
{
  edge e;
  edge_iterator ei;

  if (loop->latch->count || loop->header->count)
    {
      gcov_type count_in, count_latch, expected;

      count_in = 0;
      count_latch = 0;

      FOR_EACH_EDGE (e, ei, loop->header->preds)
        if (e->src == loop->latch)
          count_latch = e->count;
        else
          count_in += e->count;

      if (count_in == 0)
        expected = count_latch * 2;
      else
        expected = (count_latch + count_in - 1) / count_in;

      /* Avoid overflows.  */
      return (expected > REG_BR_PROB_BASE ? REG_BR_PROB_BASE : expected);
    }
  else
    {
      int freq_in, freq_latch;

      freq_in = 0;
      freq_latch = 0;

      FOR_EACH_EDGE (e, ei, loop->header->preds)
        if (e->src == loop->latch)
          freq_latch = EDGE_FREQUENCY (e);
        else
          freq_in += EDGE_FREQUENCY (e);

      if (freq_in == 0)
        return freq_latch * 2;

      return (freq_latch + freq_in - 1) / freq_in;
    }
}

/* Returns the maximum level of nesting of subloops of LOOP.  */

unsigned
get_loop_level (const struct loop *loop)
{
  const struct loop *ploop;
  unsigned mx = 0, l;

  for (ploop = loop->inner; ploop; ploop = ploop->next)
    {
      l = get_loop_level (ploop);
      if (l >= mx)
        mx = l + 1;
    }
  return mx;
}
 
/* Returns an estimate of the cost of computing SEQ.  */

static unsigned
seq_cost (rtx seq)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
        cost += rtx_cost (set, SET);
      else
        cost++;
    }

  return cost;
}

/* The properties of the target.  */

unsigned target_avail_regs;     /* Number of available registers.  */
unsigned target_res_regs;       /* Number of reserved registers.  */
unsigned target_small_cost;     /* The cost of a register when a free one is available.  */
unsigned target_pres_cost;      /* The cost of a register when there are not too many
                                   free ones.  */
unsigned target_spill_cost;     /* The cost of a register when we need to spill.  */

/* Initialize the constants for computing set costs.  */
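/* Rather than hard-coding numbers, the costs are obtained by generating
   tiny instruction sequences -- a register-to-register move, and a store
   followed by a load through memory -- and pricing them with seq_cost,
   so the values reflect the target's own rtx_cost model.  */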
 
void
init_set_costs (void)
{
  rtx seq;
  rtx reg1 = gen_raw_REG (SImode, FIRST_PSEUDO_REGISTER);
  rtx reg2 = gen_raw_REG (SImode, FIRST_PSEUDO_REGISTER + 1);
  rtx addr = gen_raw_REG (Pmode, FIRST_PSEUDO_REGISTER + 2);
  rtx mem = validize_mem (gen_rtx_MEM (SImode, addr));
  unsigned i;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], i)
        && !fixed_regs[i])
      target_avail_regs++;

  target_res_regs = 3;

  /* These are really just heuristic values.  */

  start_sequence ();
  emit_move_insn (reg1, reg2);
  seq = get_insns ();
  end_sequence ();
  target_small_cost = seq_cost (seq);
  target_pres_cost = 2 * target_small_cost;

  start_sequence ();
  emit_move_insn (mem, reg1);
  emit_move_insn (reg2, mem);
  seq = get_insns ();
  end_sequence ();
  target_spill_cost = seq_cost (seq);
}

/* Calculates the cost of having SIZE new loop global variables.  REGS_USED
   is the number of global registers used in the loop.  N_USES is the number
   of relevant variable uses.  */
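/* Three regimes are distinguished below: if the new registers still fit
   together with the reserved ones, each costs target_small_cost; if they
   fit only by eating into the reserve, each costs target_pres_cost; and
   if they do not fit at all, a spill penalty proportional to the share
   of uses expected to hit spilled registers is added on top.  */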
 
unsigned
global_cost_for_size (unsigned size, unsigned regs_used, unsigned n_uses)
{
  unsigned regs_needed = regs_used + size;
  unsigned cost = 0;

  if (regs_needed + target_res_regs <= target_avail_regs)
    cost += target_small_cost * size;
  else if (regs_needed <= target_avail_regs)
    cost += target_pres_cost * size;
  else
    {
      cost += target_pres_cost * size;
      cost += target_spill_cost * n_uses * (regs_needed - target_avail_regs) / regs_needed;
    }

  return cost;
}

/* Sets the EDGE_LOOP_EXIT flag for all exits of LOOPS.  */
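/* Only edges that leave a loop other than the root of the loop tree are
   marked; when loops->num <= 1 there is nothing but the root, so the
   function returns immediately.  All other edges get the flag cleared,
   which removes any stale EDGE_LOOP_EXIT bits left from earlier.  */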
 
void
mark_loop_exit_edges (struct loops *loops)
{
  basic_block bb;
  edge e;

  if (loops->num <= 1)
    return;

  FOR_EACH_BB (bb)
    {
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          if (bb->loop_father->outer
              && loop_exit_edge_p (bb->loop_father, e))
            e->flags |= EDGE_LOOP_EXIT;
          else
            e->flags &= ~EDGE_LOOP_EXIT;
        }
    }
}