OpenCores
URL https://opencores.org/ocsvn/openrisc_2011-10-31/openrisc_2011-10-31/trunk

Subversion Repositories openrisc_2011-10-31

[/] [openrisc/] [trunk/] [gnu-src/] [gcc-4.2.2/] [gcc/] [loop-invariant.c] - Blame information for rev 165

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 38 julius
/* RTL-level loop invariant motion.
2
   Copyright (C) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
3
 
4
This file is part of GCC.
5
 
6
GCC is free software; you can redistribute it and/or modify it
7
under the terms of the GNU General Public License as published by the
8
Free Software Foundation; either version 3, or (at your option) any
9
later version.
10
 
11
GCC is distributed in the hope that it will be useful, but WITHOUT
12
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14
for more details.
15
 
16
You should have received a copy of the GNU General Public License
17
along with GCC; see the file COPYING3.  If not see
18
<http://www.gnu.org/licenses/>.  */
19
 
20
/* This implements the loop invariant motion pass.  It is very simple
21
   (no calls, libcalls, etc.).  This should be sufficient to cleanup things
22
   like address arithmetics -- other more complicated invariants should be
23
   eliminated on tree level either in tree-ssa-loop-im.c or in tree-ssa-pre.c.
24
 
25
   We proceed loop by loop -- it is simpler than trying to handle things
26
   globally and should not lose much.  First we inspect all sets inside loop
27
   and create a dependency graph on insns (saying "to move this insn, you must
28
   also move the following insns").
29
 
30
   We then need to determine what to move.  We estimate the number of registers
31
   used and move as many invariants as possible while we still have enough free
32
   registers.  We prefer the expensive invariants.
33
 
34
   Then we move the selected invariants out of the loop, creating a new
35
   temporaries for them if necessary.  */
36
 
37
#include "config.h"
38
#include "system.h"
39
#include "coretypes.h"
40
#include "tm.h"
41
#include "rtl.h"
42
#include "tm_p.h"
43
#include "hard-reg-set.h"
44
#include "obstack.h"
45
#include "basic-block.h"
46
#include "cfgloop.h"
47
#include "expr.h"
48
#include "recog.h"
49
#include "output.h"
50
#include "function.h"
51
#include "flags.h"
52
#include "df.h"
53
#include "hashtab.h"
54
#include "except.h"
55
 
56
/* The data stored for the loop.  */

struct loop_data
{
  struct loop *outermost_exit;  /* The outermost exit of the loop.  */
  bool has_call;                /* True if the loop contains a call.  */
};

/* Accessor for the loop_data record stashed in the loop's generic AUX
   pointer (allocated in find_exits).  */
#define LOOP_DATA(LOOP) ((struct loop_data *) (LOOP)->aux)

/* The description of an use.  */

struct use
{
  rtx *pos;                     /* Position of the use.  */
  rtx insn;                     /* The insn in that the use occurs.  */

  struct use *next;             /* Next use in the list.  */
};

/* The description of a def.  */

struct def
{
  struct use *uses;             /* The list of uses that are uniquely reached
                                   by it.  */
  unsigned n_uses;              /* Number of such uses.  */
  unsigned invno;               /* The corresponding invariant.  */
};

/* The data stored for each invariant.  */

struct invariant
{
  /* The number of the invariant.  */
  unsigned invno;

  /* The number of the invariant with the same value.  */
  unsigned eqto;

  /* If we moved the invariant out of the loop, the register that contains its
     value.  */
  rtx reg;

  /* The definition of the invariant.  */
  struct def *def;

  /* The insn in that it is defined.  */
  rtx insn;

  /* Whether it is always executed.  */
  bool always_executed;

  /* Whether to move the invariant.  */
  bool move;

  /* Cost of the invariant.  */
  unsigned cost;

  /* The invariants it depends on.  */
  bitmap depends_on;

  /* Used for detecting already visited invariants during determining
     costs of movements.  */
  unsigned stamp;
};

/* Entry for hash table of invariant expressions.  */

struct invariant_expr_entry
{
  /* The invariant.  */
  struct invariant *inv;

  /* Its value.  */
  rtx expr;

  /* Its mode.  */
  enum machine_mode mode;

  /* Its hash.  */
  hashval_t hash;
};

/* The actual stamp for marking already visited invariants during determining
   costs of movements.  */

static unsigned actual_stamp;

typedef struct invariant *invariant_p;

DEF_VEC_P(invariant_p);
DEF_VEC_ALLOC_P(invariant_p, heap);

/* The invariants.  */

static VEC(invariant_p,heap) *invariants;

/* The dataflow object.  */

static struct df *df = NULL;
157
 
158
/* Test for possibility of invariantness of X.  */
159
 
160
static bool
161
check_maybe_invariant (rtx x)
162
{
163
  enum rtx_code code = GET_CODE (x);
164
  int i, j;
165
  const char *fmt;
166
 
167
  switch (code)
168
    {
169
    case CONST_INT:
170
    case CONST_DOUBLE:
171
    case SYMBOL_REF:
172
    case CONST:
173
    case LABEL_REF:
174
      return true;
175
 
176
    case PC:
177
    case CC0:
178
    case UNSPEC_VOLATILE:
179
    case CALL:
180
      return false;
181
 
182
    case REG:
183
      return true;
184
 
185
    case MEM:
186
      /* Load/store motion is done elsewhere.  ??? Perhaps also add it here?
187
         It should not be hard, and might be faster than "elsewhere".  */
188
 
189
      /* Just handle the most trivial case where we load from an unchanging
190
         location (most importantly, pic tables).  */
191
      if (MEM_READONLY_P (x))
192
        break;
193
 
194
      return false;
195
 
196
    case ASM_OPERANDS:
197
      /* Don't mess with insns declared volatile.  */
198
      if (MEM_VOLATILE_P (x))
199
        return false;
200
      break;
201
 
202
    default:
203
      break;
204
    }
205
 
206
  fmt = GET_RTX_FORMAT (code);
207
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
208
    {
209
      if (fmt[i] == 'e')
210
        {
211
          if (!check_maybe_invariant (XEXP (x, i)))
212
            return false;
213
        }
214
      else if (fmt[i] == 'E')
215
        {
216
          for (j = 0; j < XVECLEN (x, i); j++)
217
            if (!check_maybe_invariant (XVECEXP (x, i, j)))
218
              return false;
219
        }
220
    }
221
 
222
  return true;
223
}
224
 
225
/* Returns the invariant definition for USE, or NULL if USE is not
   invariant.  */

static struct invariant *
invariant_for_use (struct df_ref *use)
{
  struct df_link *defs;
  struct df_ref *def;
  basic_block bb = BLOCK_FOR_INSN (use->insn), def_bb;

  /* A read-write use (e.g. an auto-modified address) is never invariant.  */
  if (use->flags & DF_REF_READ_WRITE)
    return NULL;

  /* The use must be reached by exactly one definition ...  */
  defs = DF_REF_CHAIN (use);
  if (!defs || defs->next)
    return NULL;
  def = defs->ref;
  /* ... that was itself recognized as invariant (find_invariant_insn stores
     the invariant in the def's DF_REF_DATA) ...  */
  if (!DF_REF_DATA (def))
    return NULL;

  /* ... and that dominates the use.  */
  def_bb = DF_REF_BB (def);
  if (!dominated_by_p (CDI_DOMINATORS, bb, def_bb))
    return NULL;
  return DF_REF_DATA (def);
}
250
 
251
/* Computes hash value for invariant expression X in INSN.  */

static hashval_t
hash_invariant_expr_1 (rtx insn, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;
  hashval_t val = code;
  int do_not_record_p;
  struct df_ref *use;
  struct invariant *inv;

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Constants hash by value.  */
      return hash_rtx (x, GET_MODE (x), &do_not_record_p, NULL, false);

    case REG:
      /* A register set by an invariant hashes as that invariant's
         equivalence-class number, so equal invariant values hash equally
         even when held in different registers.  Otherwise fall back to
         hashing the register rtx itself.  */
      use = df_find_use (df, insn, x);
      if (!use)
        return hash_rtx (x, GET_MODE (x), &do_not_record_p, NULL, false);
      inv = invariant_for_use (use);
      if (!inv)
        return hash_rtx (x, GET_MODE (x), &do_not_record_p, NULL, false);

      /* Dependencies are processed before this invariant (see
         find_identical_invariants), so eqto must be valid by now.  */
      gcc_assert (inv->eqto != ~0u);
      return inv->eqto;

    default:
      break;
    }

  /* Compound expression: fold the hashes of all operands into VAL.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        val ^= hash_invariant_expr_1 (insn, XEXP (x, i));
      else if (fmt[i] == 'E')
        {
          for (j = 0; j < XVECLEN (x, i); j++)
            val ^= hash_invariant_expr_1 (insn, XVECEXP (x, i, j));
        }
      else if (fmt[i] == 'i' || fmt[i] == 'n')
        val ^= XINT (x, i);
    }

  return val;
}
304
 
305
/* Returns true if the invariant expressions E1 and E2 used in insns INSN1
   and INSN2 have always the same value.  */

static bool
invariant_expr_equal_p (rtx insn1, rtx e1, rtx insn2, rtx e2)
{
  enum rtx_code code = GET_CODE (e1);
  int i, j;
  const char *fmt;
  struct df_ref *use1, *use2;
  struct invariant *inv1 = NULL, *inv2 = NULL;
  rtx sub1, sub2;

  /* If mode of only one of the operands is VOIDmode, it is not equivalent to
     the other one.  If both are VOIDmode, we rely on the caller of this
     function to verify that their modes are the same.  */
  if (code != GET_CODE (e2) || GET_MODE (e1) != GET_MODE (e2))
    return false;

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Constants compare by value.  */
      return rtx_equal_p (e1, e2);

    case REG:
      /* Registers set by invariants compare by the invariants'
         equivalence-class numbers; if neither register is set by an
         invariant, compare the registers themselves.  A mix of the two
         cases cannot be equal.  */
      use1 = df_find_use (df, insn1, e1);
      use2 = df_find_use (df, insn2, e2);
      if (use1)
        inv1 = invariant_for_use (use1);
      if (use2)
        inv2 = invariant_for_use (use2);

      if (!inv1 && !inv2)
        return rtx_equal_p (e1, e2);

      if (!inv1 || !inv2)
        return false;

      /* Dependencies were processed first, so eqto is known.  */
      gcc_assert (inv1->eqto != ~0u);
      gcc_assert (inv2->eqto != ~0u);
      return inv1->eqto == inv2->eqto;

    default:
      break;
    }

  /* Compound expression: compare operand by operand.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          sub1 = XEXP (e1, i);
          sub2 = XEXP (e2, i);

          if (!invariant_expr_equal_p (insn1, sub1, insn2, sub2))
            return false;
        }

      else if (fmt[i] == 'E')
        {
          if (XVECLEN (e1, i) != XVECLEN (e2, i))
            return false;

          for (j = 0; j < XVECLEN (e1, i); j++)
            {
              sub1 = XVECEXP (e1, i, j);
              sub2 = XVECEXP (e2, i, j);

              if (!invariant_expr_equal_p (insn1, sub1, insn2, sub2))
                return false;
            }
        }
      else if (fmt[i] == 'i' || fmt[i] == 'n')
        {
          if (XINT (e1, i) != XINT (e2, i))
            return false;
        }
      /* Unhandled type of subexpression, we fail conservatively.  */
      else
        return false;
    }

  return true;
}
393
 
394
/* Returns hash value for invariant expression entry E.  */
395
 
396
static hashval_t
397
hash_invariant_expr (const void *e)
398
{
399
  const struct invariant_expr_entry *entry = e;
400
 
401
  return entry->hash;
402
}
403
 
404
/* Compares invariant expression entries E1 and E2.  */
405
 
406
static int
407
eq_invariant_expr (const void *e1, const void *e2)
408
{
409
  const struct invariant_expr_entry *entry1 = e1;
410
  const struct invariant_expr_entry *entry2 = e2;
411
 
412
  if (entry1->mode != entry2->mode)
413
    return 0;
414
 
415
  return invariant_expr_equal_p (entry1->inv->insn, entry1->expr,
416
                                 entry2->inv->insn, entry2->expr);
417
}
418
 
419
/* Checks whether invariant with value EXPR in machine mode MODE is
420
   recorded in EQ.  If this is the case, return the invariant.  Otherwise
421
   insert INV to the table for this expression and return INV.  */
422
 
423
static struct invariant *
424
find_or_insert_inv (htab_t eq, rtx expr, enum machine_mode mode,
425
                    struct invariant *inv)
426
{
427
  hashval_t hash = hash_invariant_expr_1 (inv->insn, expr);
428
  struct invariant_expr_entry *entry;
429
  struct invariant_expr_entry pentry;
430
  PTR *slot;
431
 
432
  pentry.expr = expr;
433
  pentry.inv = inv;
434
  pentry.mode = mode;
435
  slot = htab_find_slot_with_hash (eq, &pentry, hash, INSERT);
436
  entry = *slot;
437
 
438
  if (entry)
439
    return entry->inv;
440
 
441
  entry = XNEW (struct invariant_expr_entry);
442
  entry->inv = inv;
443
  entry->expr = expr;
444
  entry->mode = mode;
445
  entry->hash = hash;
446
  *slot = entry;
447
 
448
  return inv;
449
}
450
 
451
/* Finds invariants identical to INV and records the equivalence.  EQ is the
   hash table of the invariants.  */

static void
find_identical_invariants (htab_t eq, struct invariant *inv)
{
  unsigned depno;
  bitmap_iterator bi;
  struct invariant *dep;
  rtx expr, set;
  enum machine_mode mode;

  /* Already processed (eqto is initialized to ~0u by
     create_new_invariant).  */
  if (inv->eqto != ~0u)
    return;

  /* Process all dependencies first, so that their eqto numbers are
     available when this invariant's expression is hashed and compared.  */
  EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, depno, bi)
    {
      dep = VEC_index (invariant_p, invariants, depno);
      find_identical_invariants (eq, dep);
    }

  set = single_set (inv->insn);
  expr = SET_SRC (set);
  mode = GET_MODE (expr);
  /* VOIDmode sources (e.g. CONST_INT) take the destination's mode.  */
  if (mode == VOIDmode)
    mode = GET_MODE (SET_DEST (set));
  inv->eqto = find_or_insert_inv (eq, expr, mode, inv)->invno;

  if (dump_file && inv->eqto != inv->invno)
    fprintf (dump_file,
             "Invariant %d is equivalent to invariant %d.\n",
             inv->invno, inv->eqto);
}
484
 
485
/* Find invariants with the same value and record the equivalences.  */
486
 
487
static void
488
merge_identical_invariants (void)
489
{
490
  unsigned i;
491
  struct invariant *inv;
492
  htab_t eq = htab_create (VEC_length (invariant_p, invariants),
493
                           hash_invariant_expr, eq_invariant_expr, free);
494
 
495
  for (i = 0; VEC_iterate (invariant_p, invariants, i, inv); i++)
496
    find_identical_invariants (eq, inv);
497
 
498
  htab_delete (eq);
499
}
500
 
501
/* Determines the basic blocks inside LOOP that are always executed and
502
   stores their bitmap to ALWAYS_REACHED.  MAY_EXIT is a bitmap of
503
   basic blocks that may either exit the loop, or contain the call that
504
   does not have to return.  BODY is body of the loop obtained by
505
   get_loop_body_in_dom_order.  */
506
 
507
static void
508
compute_always_reached (struct loop *loop, basic_block *body,
509
                        bitmap may_exit, bitmap always_reached)
510
{
511
  unsigned i;
512
 
513
  for (i = 0; i < loop->num_nodes; i++)
514
    {
515
      if (dominated_by_p (CDI_DOMINATORS, loop->latch, body[i]))
516
        bitmap_set_bit (always_reached, i);
517
 
518
      if (bitmap_bit_p (may_exit, i))
519
        return;
520
    }
521
}
522
 
523
/* Finds exits out of the LOOP with body BODY.  Marks blocks in that we may
   exit the loop by cfg edge to HAS_EXIT and MAY_EXIT.  In MAY_EXIT
   additionally mark blocks that may exit due to a call.  */

static void
find_exits (struct loop *loop, basic_block *body,
            bitmap may_exit, bitmap has_exit)
{
  unsigned i;
  edge_iterator ei;
  edge e;
  struct loop *outermost_exit = loop, *aexit;
  bool has_call = false;
  rtx insn;

  for (i = 0; i < loop->num_nodes; i++)
    {
      /* Blocks belonging directly to LOOP: scan their insns and edges.  */
      if (body[i]->loop_father == loop)
        {
          FOR_BB_INSNS (body[i], insn)
            {
              /* A call that is not const or pure may never return, so the
                 block may effectively leave the loop through it.  */
              if (CALL_P (insn)
                  && !CONST_OR_PURE_CALL_P (insn))
                {
                  has_call = true;
                  bitmap_set_bit (may_exit, i);
                  break;
                }
            }

          FOR_EACH_EDGE (e, ei, body[i]->succs)
            {
              if (flow_bb_inside_loop_p (loop, e->dest))
                continue;

              /* Edge leaving LOOP: record it and track the outermost loop
                 an exit edge can land in.  */
              bitmap_set_bit (may_exit, i);
              bitmap_set_bit (has_exit, i);
              outermost_exit = find_common_loop (outermost_exit,
                                                 e->dest->loop_father);
            }
          continue;
        }

      /* Use the data stored for the subloop to decide whether we may exit
         through it.  It is sufficient to do this for header of the loop,
         as other basic blocks inside it must be dominated by it.  */
      if (body[i]->loop_father->header != body[i])
        continue;

      if (LOOP_DATA (body[i]->loop_father)->has_call)
        {
          has_call = true;
          bitmap_set_bit (may_exit, i);
        }
      aexit = LOOP_DATA (body[i]->loop_father)->outermost_exit;
      if (aexit != loop)
        {
          bitmap_set_bit (may_exit, i);
          bitmap_set_bit (has_exit, i);

          if (flow_loop_nested_p (aexit, outermost_exit))
            outermost_exit = aexit;
        }
    }

  /* Record the summary on LOOP's aux pointer; it is consulted here when the
     enclosing loops are processed (loops are handled inner-to-outer).  */
  loop->aux = xcalloc (1, sizeof (struct loop_data));
  LOOP_DATA (loop)->outermost_exit = outermost_exit;
  LOOP_DATA (loop)->has_call = has_call;
}
592
 
593
/* Check whether we may assign a value to X from a register.  */
594
 
595
static bool
596
may_assign_reg_p (rtx x)
597
{
598
  return (GET_MODE (x) != VOIDmode
599
          && GET_MODE (x) != BLKmode
600
          && can_copy_p (GET_MODE (x))
601
          && (!REG_P (x)
602
              || !HARD_REGISTER_P (x)
603
              || REGNO_REG_CLASS (REGNO (x)) != NO_REGS));
604
}
605
 
606
/* Finds definitions that may correspond to invariants in LOOP with body
607
   BODY.  */
608
 
609
static void
610
find_defs (struct loop *loop, basic_block *body)
611
{
612
  unsigned i;
613
  bitmap blocks = BITMAP_ALLOC (NULL);
614
 
615
  for (i = 0; i < loop->num_nodes; i++)
616
    bitmap_set_bit (blocks, body[i]->index);
617
 
618
  df_set_blocks (df, blocks);
619
  df_analyze (df);
620
  BITMAP_FREE (blocks);
621
}
622
 
623
/* Creates a new invariant for definition DEF in INSN, depending on invariants
624
   in DEPENDS_ON.  ALWAYS_EXECUTED is true if the insn is always executed,
625
   unless the program ends due to a function call.  The newly created invariant
626
   is returned.  */
627
 
628
static struct invariant *
629
create_new_invariant (struct def *def, rtx insn, bitmap depends_on,
630
                      bool always_executed)
631
{
632
  struct invariant *inv = XNEW (struct invariant);
633
  rtx set = single_set (insn);
634
 
635
  inv->def = def;
636
  inv->always_executed = always_executed;
637
  inv->depends_on = depends_on;
638
 
639
  /* If the set is simple, usually by moving it we move the whole store out of
640
     the loop.  Otherwise we save only cost of the computation.  */
641
  if (def)
642
    inv->cost = rtx_cost (set, SET);
643
  else
644
    inv->cost = rtx_cost (SET_SRC (set), SET);
645
 
646
  inv->move = false;
647
  inv->reg = NULL_RTX;
648
  inv->stamp = 0;
649
  inv->insn = insn;
650
 
651
  inv->invno = VEC_length (invariant_p, invariants);
652
  inv->eqto = ~0u;
653
  if (def)
654
    def->invno = inv->invno;
655
  VEC_safe_push (invariant_p, heap, invariants, inv);
656
 
657
  if (dump_file)
658
    {
659
      fprintf (dump_file,
660
               "Set in insn %d is invariant (%d), cost %d, depends on ",
661
               INSN_UID (insn), inv->invno, inv->cost);
662
      dump_bitmap (dump_file, inv->depends_on);
663
    }
664
 
665
  return inv;
666
}
667
 
668
/* Record USE at DEF.  */
669
 
670
static void
671
record_use (struct def *def, rtx *use, rtx insn)
672
{
673
  struct use *u = XNEW (struct use);
674
 
675
  if (GET_CODE (*use) == SUBREG)
676
    use = &SUBREG_REG (*use);
677
  gcc_assert (REG_P (*use));
678
 
679
  u->pos = use;
680
  u->insn = insn;
681
  u->next = def->uses;
682
  def->uses = u;
683
  def->n_uses++;
684
}
685
 
686
/* Finds the invariants INSN depends on and store them to the DEPENDS_ON
   bitmap.  Returns true if all dependencies of INSN are known to be
   loop invariants, false otherwise.  */

static bool
check_dependencies (rtx insn, bitmap depends_on)
{
  struct df_link *defs;
  struct df_ref *use, *def;
  basic_block bb = BLOCK_FOR_INSN (insn), def_bb;
  struct def *def_data;
  struct invariant *inv;

  for (use = DF_INSN_GET (df, insn)->uses; use; use = use->next_ref)
    {
      /* Read-write operands (e.g. auto-modified addresses) disqualify
         the insn.  */
      if (use->flags & DF_REF_READ_WRITE)
        return false;

      /* No reaching definition inside the analyzed region means the use is
         invariant by itself.  */
      defs = DF_REF_CHAIN (use);
      if (!defs)
        continue;

      /* More than one reaching definition: value may vary.  */
      if (defs->next)
        return false;

      def = defs->ref;
      /* The single definition must itself have been recognized as invariant
         (find_invariant_insn stores it in DF_REF_DATA).  */
      inv = DF_REF_DATA (def);
      if (!inv)
        return false;

      def_data = inv->def;
      gcc_assert (def_data != NULL);

      def_bb = DF_REF_BB (def);
      /* Note that in case bb == def_bb, we know that the definition dominates
         insn, because def has DF_REF_DATA defined and we process the insns
         in the basic block bb sequentially.  */
      if (!dominated_by_p (CDI_DOMINATORS, bb, def_bb))
        return false;

      bitmap_set_bit (depends_on, def_data->invno);
    }

  return true;
}
731
 
732
/* Finds invariant in INSN.  ALWAYS_REACHED is true if the insn is always
   executed.  ALWAYS_EXECUTED is true if the insn is always executed,
   unless the program ends due to a function call.  */

static void
find_invariant_insn (rtx insn, bool always_reached, bool always_executed)
{
  struct df_ref *ref;
  struct def *def;
  bitmap depends_on;
  rtx set, dest;
  bool simple = true;
  struct invariant *inv;

  /* Until we get rid of LIBCALLS.  */
  if (find_reg_note (insn, REG_RETVAL, NULL_RTX)
      || find_reg_note (insn, REG_LIBCALL, NULL_RTX)
      || find_reg_note (insn, REG_NO_CONFLICT, NULL_RTX))
    return;

#ifdef HAVE_cc0
  /* We can't move a CC0 setter without the user.  */
  if (sets_cc0_p (insn))
    return;
#endif

  /* Only single-set insns are candidates.  */
  set = single_set (insn);
  if (!set)
    return;
  dest = SET_DEST (set);

  /* A "simple" set writes a pseudo register; only those get a struct def
     attached, which later allows moving the whole store.  */
  if (!REG_P (dest)
      || HARD_REGISTER_P (dest))
    simple = false;

  if (!may_assign_reg_p (SET_DEST (set))
      || !check_maybe_invariant (SET_SRC (set)))
    return;

  /* If the insn can throw exception, we cannot move it at all without changing
     cfg.  */
  if (can_throw_internal (insn))
    return;

  /* We cannot make trapping insn executed, unless it was executed before.  */
  if (may_trap_after_code_motion_p (PATTERN (insn)) && !always_reached)
    return;

  depends_on = BITMAP_ALLOC (NULL);
  if (!check_dependencies (insn, depends_on))
    {
      BITMAP_FREE (depends_on);
      return;
    }

  if (simple)
    def = XCNEW (struct def);
  else
    def = NULL;

  inv = create_new_invariant (def, insn, depends_on, always_executed);

  if (simple)
    {
      /* Attach the invariant to its definition so that uses reached by it
         can be recognized later (see invariant_for_use).  */
      ref = df_find_def (df, insn, dest);
      DF_REF_DATA (ref) = inv;
    }
}
800
 
801
/* Record registers used in INSN that have a unique invariant definition.  */
802
 
803
static void
804
record_uses (rtx insn)
805
{
806
  struct df_ref *use;
807
  struct invariant *inv;
808
 
809
  for (use = DF_INSN_GET (df, insn)->uses; use; use = use->next_ref)
810
    {
811
      inv = invariant_for_use (use);
812
      if (inv)
813
        record_use (inv->def, DF_REF_LOC (use), DF_REF_INSN (use));
814
    }
815
}
816
 
817
/* Finds invariants in INSN.  ALWAYS_REACHED is true if the insn is always
   executed.  ALWAYS_EXECUTED is true if the insn is always executed,
   unless the program ends due to a function call.  */

static void
find_invariants_insn (rtx insn, bool always_reached, bool always_executed)
{
  /* First check whether INSN itself defines an invariant (which attaches
     data to its definition), then record INSN's uses of previously found
     invariants; the order matters.  */
  find_invariant_insn (insn, always_reached, always_executed);
  record_uses (insn);
}
827
 
828
/* Finds invariants in basic block BB.  ALWAYS_REACHED is true if the
829
   basic block is always executed.  ALWAYS_EXECUTED is true if the basic
830
   block is always executed, unless the program ends due to a function
831
   call.  */
832
 
833
static void
834
find_invariants_bb (basic_block bb, bool always_reached, bool always_executed)
835
{
836
  rtx insn;
837
 
838
  FOR_BB_INSNS (bb, insn)
839
    {
840
      if (!INSN_P (insn))
841
        continue;
842
 
843
      find_invariants_insn (insn, always_reached, always_executed);
844
 
845
      if (always_reached
846
          && CALL_P (insn)
847
          && !CONST_OR_PURE_CALL_P (insn))
848
        always_reached = false;
849
    }
850
}
851
 
852
/* Finds invariants in LOOP with body BODY.  ALWAYS_REACHED is the bitmap of
853
   basic blocks in BODY that are always executed.  ALWAYS_EXECUTED is the
854
   bitmap of basic blocks in BODY that are always executed unless the program
855
   ends due to a function call.  */
856
 
857
static void
858
find_invariants_body (struct loop *loop, basic_block *body,
859
                      bitmap always_reached, bitmap always_executed)
860
{
861
  unsigned i;
862
 
863
  for (i = 0; i < loop->num_nodes; i++)
864
    find_invariants_bb (body[i],
865
                        bitmap_bit_p (always_reached, i),
866
                        bitmap_bit_p (always_executed, i));
867
}
868
 
869
/* Finds invariants in LOOP.  */
870
 
871
static void
872
find_invariants (struct loop *loop)
873
{
874
  bitmap may_exit = BITMAP_ALLOC (NULL);
875
  bitmap always_reached = BITMAP_ALLOC (NULL);
876
  bitmap has_exit = BITMAP_ALLOC (NULL);
877
  bitmap always_executed = BITMAP_ALLOC (NULL);
878
  basic_block *body = get_loop_body_in_dom_order (loop);
879
 
880
  find_exits (loop, body, may_exit, has_exit);
881
  compute_always_reached (loop, body, may_exit, always_reached);
882
  compute_always_reached (loop, body, has_exit, always_executed);
883
 
884
  find_defs (loop, body);
885
  find_invariants_body (loop, body, always_reached, always_executed);
886
  merge_identical_invariants ();
887
 
888
  BITMAP_FREE (always_reached);
889
  BITMAP_FREE (always_executed);
890
  BITMAP_FREE (may_exit);
891
  BITMAP_FREE (has_exit);
892
  free (body);
893
}
894
 
895
/* Frees a list of uses USE.  */
896
 
897
static void
898
free_use_list (struct use *use)
899
{
900
  struct use *next;
901
 
902
  for (; use; use = next)
903
    {
904
      next = use->next;
905
      free (use);
906
    }
907
}
908
 
909
/* Calculates cost and number of registers needed for moving invariant INV
   out of the loop and stores them to *COMP_COST and *REGS_NEEDED.  */

static void
get_inv_cost (struct invariant *inv, int *comp_cost, unsigned *regs_needed)
{
  int acomp_cost;
  unsigned aregs_needed;
  unsigned depno;
  struct invariant *dep;
  bitmap_iterator bi;

  /* Find the representative of the class of the equivalent invariants.  */
  inv = VEC_index (invariant_p, invariants, inv->eqto);

  *comp_cost = 0;
  *regs_needed = 0;
  /* Nothing to pay if the invariant is already being moved, or was already
     counted in the current costing round (marked by actual_stamp).  */
  if (inv->move
      || inv->stamp == actual_stamp)
    return;
  inv->stamp = actual_stamp;

  (*regs_needed)++;
  (*comp_cost) += inv->cost;

#ifdef STACK_REGS
  {
    /* Hoisting constant pool constants into stack regs may cost more than
       just single register.  On x87, the balance is affected both by the
       small number of FP registers, and by its register stack organization,
       that forces us to add compensation code in and around the loop to
       shuffle the operands to the top of stack before use, and pop them
       from the stack after the loop finishes.

       To model this effect, we increase the number of registers needed for
       stack registers by two: one register push, and one register pop.
       This usually has the effect that FP constant loads from the constant
       pool are not moved out of the loop.

       Note that this also means that dependent invariants can not be moved.
       However, the primary purpose of this pass is to move loop invariant
       address arithmetic out of loops, and address arithmetic that depends
       on floating point constants is unlikely to ever occur.  */
    rtx set = single_set (inv->insn);
    if (set
       && IS_STACK_MODE (GET_MODE (SET_SRC (set)))
       && constant_pool_constant_p (SET_SRC (set)))
      (*regs_needed) += 2;
  }
#endif

  /* Accumulate the (transitive) costs of all dependencies.  */
  EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, depno, bi)
    {
      dep = VEC_index (invariant_p, invariants, depno);

      get_inv_cost (dep, &acomp_cost, &aregs_needed);

      if (aregs_needed
          /* We need to check always_executed, since if the original value of
             the invariant may be preserved, we may need to keep it in a
             separate register.  TODO check whether the register has an
             use outside of the loop.  */
          && dep->always_executed
          && !dep->def->uses->next)
        {
          /* If this is a single use, after moving the dependency we will not
             need a new register.  */
          aregs_needed--;
        }

      (*regs_needed) += aregs_needed;
      (*comp_cost) += acomp_cost;
    }
}
983
 
984
/* Calculates gain for eliminating invariant INV.  REGS_USED is the number
985
   of registers used in the loop, N_INV_USES is the number of uses of
986
   invariants, NEW_REGS is the number of new variables already added due to
987
   the invariant motion.  The number of registers needed for it is stored in
988
   *REGS_NEEDED.  */
989
 
990
static int
991
gain_for_invariant (struct invariant *inv, unsigned *regs_needed,
992
                    unsigned new_regs, unsigned regs_used, unsigned n_inv_uses)
993
{
994
  int comp_cost, size_cost;
995
 
996
  get_inv_cost (inv, &comp_cost, regs_needed);
997
  actual_stamp++;
998
 
999
  size_cost = (global_cost_for_size (new_regs + *regs_needed,
1000
                                     regs_used, n_inv_uses)
1001
               - global_cost_for_size (new_regs, regs_used, n_inv_uses));
1002
 
1003
  return comp_cost - size_cost;
1004
}
1005
 
1006
/* Finds invariant with best gain for moving.  Returns the gain, stores
1007
   the invariant in *BEST and number of registers needed for it to
1008
   *REGS_NEEDED.  REGS_USED is the number of registers used in
1009
   the loop, N_INV_USES is the number of uses of invariants.  NEW_REGS
1010
   is the number of new variables already added due to invariant motion.  */
1011
 
1012
static int
1013
best_gain_for_invariant (struct invariant **best, unsigned *regs_needed,
1014
                         unsigned new_regs, unsigned regs_used,
1015
                         unsigned n_inv_uses)
1016
{
1017
  struct invariant *inv;
1018
  int gain = 0, again;
1019
  unsigned aregs_needed, invno;
1020
 
1021
  for (invno = 0; VEC_iterate (invariant_p, invariants, invno, inv); invno++)
1022
    {
1023
      if (inv->move)
1024
        continue;
1025
 
1026
      /* Only consider the "representatives" of equivalent invariants.  */
1027
      if (inv->eqto != inv->invno)
1028
        continue;
1029
 
1030
      again = gain_for_invariant (inv, &aregs_needed,
1031
                                  new_regs, regs_used, n_inv_uses);
1032
      if (again > gain)
1033
        {
1034
          gain = again;
1035
          *best = inv;
1036
          *regs_needed = aregs_needed;
1037
        }
1038
    }
1039
 
1040
  return gain;
1041
}
1042
 
1043
/* Marks invariant INVNO and all its dependencies for moving.  */
1044
 
1045
static void
1046
set_move_mark (unsigned invno)
1047
{
1048
  struct invariant *inv = VEC_index (invariant_p, invariants, invno);
1049
  bitmap_iterator bi;
1050
 
1051
  /* Find the representative of the class of the equivalent invariants.  */
1052
  inv = VEC_index (invariant_p, invariants, inv->eqto);
1053
 
1054
  if (inv->move)
1055
    return;
1056
  inv->move = true;
1057
 
1058
  if (dump_file)
1059
    fprintf (dump_file, "Decided to move invariant %d\n", invno);
1060
 
1061
  EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, invno, bi)
1062
    {
1063
      set_move_mark (invno);
1064
    }
1065
}
1066
 
1067
/* Determines which invariants to move out of the current loop.  Estimates
   register usage, then greedily selects the invariant with the best gain
   until no candidate has a positive gain, marking each selection (and its
   dependencies) via set_move_mark.  */

static void
find_invariants_to_move (void)
{
  unsigned i, regs_used, n_inv_uses, regs_needed = 0, new_regs;
  struct invariant *inv = NULL;
  unsigned int n_regs = DF_REG_SIZE (df);

  /* No invariants were found in this loop; nothing to do.  */
  if (!VEC_length (invariant_p, invariants))
    return;

  /* Now something slightly more involved.  First estimate the number of used
     registers.  */
  n_inv_uses = 0;

  /* We do not really do a good job in this estimation; put some initial bound
     here to stand for induction variables etc. that we do not detect.  */
  regs_used = 2;

  for (i = 0; i < n_regs; i++)
    {
      if (!DF_REGNO_FIRST_DEF (df, i) && DF_REGNO_LAST_USE (df, i))
        {
          /* This is a value that is used but not changed inside loop.  */
          regs_used++;
        }
    }

  /* Total the in-loop uses over all invariants that have a def record.  */
  for (i = 0; VEC_iterate (invariant_p, invariants, i, inv); i++)
    {
      if (inv->def)
        n_inv_uses += inv->def->n_uses;
    }

  /* Greedy selection: repeatedly take the most profitable remaining
     invariant, charging for the new registers the already-selected
     invariants will consume.  */
  new_regs = 0;
  while (best_gain_for_invariant (&inv, &regs_needed,
                                  new_regs, regs_used, n_inv_uses) > 0)
    {
      set_move_mark (inv->invno);
      new_regs += regs_needed;
    }
}
1110
 
1111
/* Returns true if all insns in SEQ are valid.  */
1112
 
1113
static bool
1114
seq_insns_valid_p (rtx seq)
1115
{
1116
  rtx x;
1117
 
1118
  for (x = seq; x; x = NEXT_INSN (x))
1119
    if (insn_invalid_p (x))
1120
      return false;
1121
 
1122
  return true;
1123
}
1124
 
1125
/* Move invariant INVNO out of the LOOP.  Returns true if this succeeds,
   false otherwise.  On success the invariant's value is held in inv->reg
   and the uses recorded in inv->def have been rewritten to that register.  */

static bool
move_invariant_reg (struct loop *loop, unsigned invno)
{
  struct invariant *inv = VEC_index (invariant_p, invariants, invno);
  struct invariant *repr = VEC_index (invariant_p, invariants, inv->eqto);
  unsigned i;
  basic_block preheader = loop_preheader_edge (loop)->src;
  rtx reg, set, dest, seq, op;
  struct use *use;
  bitmap_iterator bi;

  /* Already moved by an earlier (possibly recursive) call.  */
  if (inv->reg)
    return true;
  /* The representative of this equivalence class was not selected for
     moving, so this invariant stays in the loop.  */
  if (!repr->move)
    return false;

  /* If this is a representative of the class of equivalent invariants,
     really move the invariant.  Otherwise just replace its use with
     the register used for the representative.  */
  if (inv == repr)
    {
      /* All dependencies must be moved first; if any of them cannot be,
         this invariant cannot be moved either.  */
      if (inv->depends_on)
        {
          EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, i, bi)
            {
              if (!move_invariant_reg (loop, i))
                goto fail;
            }
        }

      /* Move the set out of the loop.  If the set is always executed (we could
         omit this condition if we know that the register is unused outside of the
         loop, but it does not seem worth finding out) and it has no uses that
         would not be dominated by it, we may just move it (TODO).  Otherwise we
         need to create a temporary register.  */
      set = single_set (inv->insn);
      dest = SET_DEST (set);
      reg = gen_reg_rtx (GET_MODE (dest));

      /* If the SET_DEST of the invariant insn is a pseudo, we can just move
         the insn out of the loop.  Otherwise, we have to use gen_move_insn
         to let emit_move_insn produce a valid instruction stream.  */
      if (REG_P (dest) && !HARD_REGISTER_P (dest))
        {
          /* Emit a copy from the new pseudo back into the original
             destination after the invariant insn, retarget the insn to set
             the pseudo, and hoist it to the end of the preheader.  */
          emit_insn_after (gen_move_insn (dest, reg), inv->insn);
          SET_DEST (set) = reg;
          reorder_insns (inv->insn, inv->insn, BB_END (preheader));
        }
      else
        {
          /* Recompute the invariant value into the new pseudo in a fresh
             sequence, since moving the original insn verbatim might not
             produce valid instructions.  */
          start_sequence ();
          op = force_operand (SET_SRC (set), reg);
          if (!op)
            {
              end_sequence ();
              goto fail;
            }
          if (op != reg)
            emit_move_insn (reg, op);
          seq = get_insns ();
          end_sequence ();

          /* Give up if any insn in the materialized sequence is invalid.  */
          if (!seq_insns_valid_p (seq))
            goto fail;
          emit_insn_after (seq, BB_END (preheader));

          /* Replace the original computation by a copy from the pseudo.  */
          emit_insn_after (gen_move_insn (dest, reg), inv->insn);
          delete_insn (inv->insn);
        }
    }
  else
    {
      /* Non-representative: make sure the representative is moved, then
         turn this insn into a simple copy from its register.  */
      if (!move_invariant_reg (loop, repr->invno))
        goto fail;
      reg = repr->reg;
      set = single_set (inv->insn);
      emit_insn_after (gen_move_insn (SET_DEST (set), reg), inv->insn);
      delete_insn (inv->insn);
    }

  inv->reg = reg;

  /* Replace the uses we know to be dominated.  It saves work for copy
     propagation, and also it is necessary so that dependent invariants
     are computed right.  */
  if (inv->def)
    {
      for (use = inv->def->uses; use; use = use->next)
        *use->pos = reg;
    }

  return true;

fail:
  /* If we failed, clear move flag, so that we do not try to move inv
     again.  */
  if (dump_file)
    fprintf (dump_file, "Failed to move invariant %d\n", invno);
  inv->move = false;
  inv->reg = NULL_RTX;
  return false;
}
1230
 
1231
/* Move selected invariant out of the LOOP.  Newly created regs are marked
1232
   in TEMPORARY_REGS.  */
1233
 
1234
static void
1235
move_invariants (struct loop *loop)
1236
{
1237
  struct invariant *inv;
1238
  unsigned i;
1239
 
1240
  for (i = 0; VEC_iterate (invariant_p, invariants, i, inv); i++)
1241
    move_invariant_reg (loop, i);
1242
}
1243
 
1244
/* Initializes invariant motion data.  */
1245
 
1246
static void
1247
init_inv_motion_data (void)
1248
{
1249
  actual_stamp = 1;
1250
 
1251
  invariants = VEC_alloc (invariant_p, heap, 100);
1252
}
1253
 
1254
/* Frees the data allocated by invariant motion: the def/use records hung
   off the dataflow def references, and the invariant vector itself.  */

static void
free_inv_motion_data (void)
{
  unsigned i;
  struct def *def;
  struct invariant *inv;

  /* Walk all DF def references; those belonging to invariant insns carry
     an invariant record whose def/use lists must be released.  */
  for (i = 0; i < DF_DEFS_SIZE (df); i++)
    {
      struct df_ref * ref = DF_DEFS_GET (df, i);
      if (!ref)
        continue;

      /* Defs of non-invariant insns carry no data.  */
      inv = DF_REF_DATA (ref);
      if (!inv)
        continue;

      def = inv->def;
      gcc_assert (def != NULL);

      free_use_list (def->uses);
      free (def);
      /* Clear the back-pointer so stale data is never seen later.  */
      DF_REF_DATA (ref) = NULL;
    }

  /* Free the invariants themselves (and their dependency bitmaps) and
     then the vector that holds them.  */
  for (i = 0; VEC_iterate (invariant_p, invariants, i, inv); i++)
    {
      BITMAP_FREE (inv->depends_on);
      free (inv);
    }
  VEC_free (invariant_p, heap, invariants);
}
1288
 
1289
/* Move the invariants out of the LOOP.  Runs the full pipeline for one
   loop: collect invariants, decide which are profitable to move, and move
   them to the loop preheader.  */

static void
move_single_loop_invariants (struct loop *loop)
{
  init_inv_motion_data ();

  find_invariants (loop);
  find_invariants_to_move ();
  move_invariants (loop);

  free_inv_motion_data ();
}
1302
 
1303
/* Releases the auxiliary data for LOOP.  */
1304
 
1305
static void
1306
free_loop_data (struct loop *loop)
1307
{
1308
  struct loop_data *data = LOOP_DATA (loop);
1309
 
1310
  free (data);
1311
  loop->aux = NULL;
1312
}
1313
 
1314
/* Move the invariants out of the LOOPS.  Entry point of the pass: sets up
   the dataflow (use-def chain) information, processes every loop in the
   tree innermost-first, then releases per-loop data and the df state.  */

void
move_loop_invariants (struct loops *loops)
{
  struct loop *loop;
  unsigned i;

  /* Build the dataflow information the invariant analysis relies on.  */
  df = df_init (DF_HARD_REGS | DF_EQUIV_NOTES);
  df_chain_add_problem (df, DF_UD_CHAIN);

  /* Process the loops, innermost first.  */
  loop = loops->tree_root;
  while (loop->inner)
    loop = loop->inner;

  /* Depth-first walk of the loop tree: after handling a loop, descend to
     the innermost child of its next sibling, or otherwise step out to the
     parent.  */
  while (loop != loops->tree_root)
    {
      move_single_loop_invariants (loop);

      if (loop->next)
        {
          loop = loop->next;
          while (loop->inner)
            loop = loop->inner;
        }
      else
        loop = loop->outer;
    }

  /* Release the auxiliary data of all loops.  Index 0 is skipped —
     presumably the root pseudo-loop, which gets no data; confirm against
     the loops structure.  */
  for (i = 1; i < loops->num; i++)
    if (loops->parray[i])
      free_loop_data (loops->parray[i]);

  df_finish (df);
  df = NULL;

#ifdef ENABLE_CHECKING
  verify_flow_info ();
#endif
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.