OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [trunk/] [gnu-dev/] [or1k-gcc/] [gcc/] [fwprop.c] - Blame information for rev 714

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 684 jeremybenn
/* RTL-based forward propagation pass for GNU compiler.
2
   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
3
   Free Software Foundation, Inc.
4
   Contributed by Paolo Bonzini and Steven Bosscher.
5
 
6
This file is part of GCC.
7
 
8
GCC is free software; you can redistribute it and/or modify it under
9
the terms of the GNU General Public License as published by the Free
10
Software Foundation; either version 3, or (at your option) any later
11
version.
12
 
13
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14
WARRANTY; without even the implied warranty of MERCHANTABILITY or
15
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16
for more details.
17
 
18
You should have received a copy of the GNU General Public License
19
along with GCC; see the file COPYING3.  If not see
20
<http://www.gnu.org/licenses/>.  */
21
 
22
#include "config.h"
23
#include "system.h"
24
#include "coretypes.h"
25
#include "tm.h"
26
#include "diagnostic-core.h"
27
 
28
#include "sparseset.h"
29
#include "timevar.h"
30
#include "rtl.h"
31
#include "tm_p.h"
32
#include "insn-config.h"
33
#include "recog.h"
34
#include "flags.h"
35
#include "obstack.h"
36
#include "basic-block.h"
37
#include "output.h"
38
#include "df.h"
39
#include "target.h"
40
#include "cfgloop.h"
41
#include "tree-pass.h"
42
#include "domwalk.h"
43
#include "emit-rtl.h"
44
 
45
 
46
/* This pass does simple forward propagation and simplification when an
47
   operand of an insn can only come from a single def.  This pass uses
48
   df.c, so it is global.  However, we only do limited analysis of
49
   available expressions.
50
 
51
   1) The pass tries to propagate the source of the def into the use,
52
   and checks if the result is independent of the substituted value.
53
   For example, the high word of a (zero_extend:DI (reg:SI M)) is always
54
   zero, independent of the source register.
55
 
56
   In particular, we propagate constants into the use site.  Sometimes
57
   RTL expansion did not put the constant in the same insn on purpose,
58
   to satisfy a predicate, and the result will fail to be recognized;
59
   but this happens rarely and in this case we can still create a
60
   REG_EQUAL note.  For multi-word operations, this
61
 
62
      (set (subreg:SI (reg:DI 120) 0) (const_int 0))
63
      (set (subreg:SI (reg:DI 120) 4) (const_int -1))
64
      (set (subreg:SI (reg:DI 122) 0)
65
         (ior:SI (subreg:SI (reg:DI 119) 0) (subreg:SI (reg:DI 120) 0)))
66
      (set (subreg:SI (reg:DI 122) 4)
67
         (ior:SI (subreg:SI (reg:DI 119) 4) (subreg:SI (reg:DI 120) 4)))
68
 
69
   can be simplified to the much simpler
70
 
71
      (set (subreg:SI (reg:DI 122) 0) (subreg:SI (reg:DI 119) 0))
72
      (set (subreg:SI (reg:DI 122) 4) (const_int -1))
73
 
74
   This particular propagation is also effective at putting together
75
   complex addressing modes.  We are more aggressive inside MEMs, in
76
   that all definitions are propagated if the use is in a MEM; if the
77
   result is a valid memory address we check address_cost to decide
78
   whether the substitution is worthwhile.
79
 
80
   2) The pass propagates register copies.  This is not as effective as
81
   the copy propagation done by CSE's canon_reg, which works by walking
82
   the instruction chain, but it can help the other transformations.
83
 
84
   We should consider removing this optimization, and instead reorder the
85
   RTL passes, because GCSE does this transformation too.  With some luck,
86
   the CSE pass at the end of rest_of_handle_gcse could also go away.
87
 
88
   3) The pass looks for paradoxical subregs that are actually unnecessary.
89
   Things like this:
90
 
91
     (set (reg:QI 120) (subreg:QI (reg:SI 118) 0))
92
     (set (reg:QI 121) (subreg:QI (reg:SI 119) 0))
93
     (set (reg:SI 122) (plus:SI (subreg:SI (reg:QI 120) 0)
94
                                (subreg:SI (reg:QI 121) 0)))
95
 
96
   are very common on machines that can only do word-sized operations.
97
   For each use of a paradoxical subreg (subreg:WIDER (reg:NARROW N) 0),
98
   if it has a single def and it is (subreg:NARROW (reg:WIDE M) 0),
99
   we can replace the paradoxical subreg with simply (reg:WIDE M).  The
100
   above will simplify this to
101
 
102
     (set (reg:QI 120) (subreg:QI (reg:SI 118) 0))
103
     (set (reg:QI 121) (subreg:QI (reg:SI 119) 0))
104
     (set (reg:SI 122) (plus:SI (reg:SI 118) (reg:SI 119)))
105
 
106
   where the first two insns are now dead.
107
 
108
   We used to use reaching definitions to find which uses have a
109
   single reaching definition (sounds obvious...), but this is too
110
   complex a problem in nasty testcases like PR33928.  Now we use the
111
   multiple definitions problem in df-problems.c.  The similarity
112
   between that problem and SSA form creation is taken further, in
113
   that fwprop does a dominator walk to create its chains; however,
114
   instead of creating a PHI function where multiple definitions meet
115
   I just punt and record only singleton use-def chains, which is
116
   all that is needed by fwprop.  */
117
 
118
 
119
/* Count of successful propagations; maintained by code outside this
   excerpt (presumably the per-insn propagation routines).  */
static int num_changes;

DEF_VEC_P(df_ref);
DEF_VEC_ALLOC_P(df_ref,heap);
/* use_def_ref[DF_REF_ID (use)] is the single dominating definition of
   that use, or NULL if no unique def is known (see get_def_for_use).  */
static VEC(df_ref,heap) *use_def_ref;
/* reg_defs[REGNO] is the definition currently active for REGNO during
   the dominator walk, or NULL (see process_defs).  */
static VEC(df_ref,heap) *reg_defs;
/* Undo stack of overwritten reg_defs entries, with NULL markers pushed
   at each basic-block entry; unwound in single_def_use_leave_block.  */
static VEC(df_ref,heap) *reg_defs_stack;

/* The MD bitmaps are trimmed to include only live registers to cut
   memory usage on testcases like insn-recog.c.  Track live registers
   in the basic block and do not perform forward propagation if the
   destination is a dead pseudo occurring in a note.  */
static bitmap local_md;
static bitmap local_lr;
133
 
134
/* Return the only def in USE's use-def chain, or NULL if there is
135
   more than one def in the chain.  */
136
 
137
static inline df_ref
138
get_def_for_use (df_ref use)
139
{
140
  return VEC_index (df_ref, use_def_ref, DF_REF_ID (use));
141
}
142
 
143
 
144
/* Update the reg_defs vector with non-partial definitions in DEF_REC.
   TOP_FLAG says which artificials uses should be used, when DEF_REC
   is an artificial def vector.  LOCAL_MD is modified as after a
   df_md_simulate_* function; we do more or less the same processing
   done there, so we do not use those functions.  */

/* Flags marking a def that does not unconditionally overwrite the
   whole register (partial, conditional, or maybe-clobber): such defs
   invalidate any single known definition of the register.  */
#define DF_MD_GEN_FLAGS \
        (DF_REF_PARTIAL | DF_REF_CONDITIONAL | DF_REF_MAY_CLOBBER)

static void
process_defs (df_ref *def_rec, int top_flag)
{
  df_ref def;
  while ((def = *def_rec++) != NULL)
    {
      /* The def currently recorded for this register, if any.  */
      df_ref curr_def = VEC_index (df_ref, reg_defs, DF_REF_REGNO (def));
      unsigned int dregno;

      /* Only handle defs on the requested side (top vs. bottom) of an
         artificial def vector; for insn defs TOP_FLAG is 0.  */
      if ((DF_REF_FLAGS (def) & DF_REF_AT_TOP) != top_flag)
        continue;

      dregno = DF_REF_REGNO (def);
      if (curr_def)
        /* Save the previous def so leave_block can restore it.  */
        VEC_safe_push (df_ref, heap, reg_defs_stack, curr_def);
      else
        {
          /* Do not store anything if "transitioning" from NULL to NULL.  But
             otherwise, push a special entry on the stack to tell the
             leave_block callback that the entry in reg_defs was NULL.  */
          if (DF_REF_FLAGS (def) & DF_MD_GEN_FLAGS)
            ;
          else
            VEC_safe_push (df_ref, heap, reg_defs_stack, def);
        }

      if (DF_REF_FLAGS (def) & DF_MD_GEN_FLAGS)
        {
          /* Partial/conditional def: the register no longer has a
             single known definition.  */
          bitmap_set_bit (local_md, dregno);
          VEC_replace (df_ref, reg_defs, dregno, NULL);
        }
      else
        {
          /* Full def: it becomes the active definition.  */
          bitmap_clear_bit (local_md, dregno);
          VEC_replace (df_ref, reg_defs, dregno, def);
        }
    }
}
191
 
192
 
193
/* Fill the use_def_ref vector with values for the uses in USE_REC,
194
   taking reaching definitions info from LOCAL_MD and REG_DEFS.
195
   TOP_FLAG says which artificials uses should be used, when USE_REC
196
   is an artificial use vector.  */
197
 
198
static void
199
process_uses (df_ref *use_rec, int top_flag)
200
{
201
  df_ref use;
202
  while ((use = *use_rec++) != NULL)
203
    if ((DF_REF_FLAGS (use) & DF_REF_AT_TOP) == top_flag)
204
      {
205
        unsigned int uregno = DF_REF_REGNO (use);
206
        if (VEC_index (df_ref, reg_defs, uregno)
207
            && !bitmap_bit_p (local_md, uregno)
208
            && bitmap_bit_p (local_lr, uregno))
209
          VEC_replace (df_ref, use_def_ref, DF_REF_ID (use),
210
                       VEC_index (df_ref, reg_defs, uregno));
211
      }
212
}
213
 
214
 
215
/* Dominator-walk "before children" callback: simulate BB forwards,
   updating reg_defs with the defs seen and recording use->def links
   for the uses encountered, via process_defs/process_uses.  */

static void
single_def_use_enter_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
                            basic_block bb)
{
  int bb_index = bb->index;
  struct df_md_bb_info *md_bb_info = df_md_get_bb_info (bb_index);
  struct df_lr_bb_info *lr_bb_info = df_lr_get_bb_info (bb_index);
  rtx insn;

  /* Seed the local multiple-def and liveness state from the dataflow
     solutions at block entry.  */
  bitmap_copy (local_md, &md_bb_info->in);
  bitmap_copy (local_lr, &lr_bb_info->in);

  /* Push a marker for the leave_block callback.  */
  VEC_safe_push (df_ref, heap, reg_defs_stack, NULL);

  /* Artificial refs at the top of the block come first.  */
  process_uses (df_get_artificial_uses (bb_index), DF_REF_AT_TOP);
  process_defs (df_get_artificial_defs (bb_index), DF_REF_AT_TOP);

  /* We don't call df_simulate_initialize_forwards, as it may overestimate
     the live registers if there are unused artificial defs.  We prefer
     liveness to be underestimated.  */

  FOR_BB_INSNS (bb, insn)
    if (INSN_P (insn))
      {
        unsigned int uid = INSN_UID (insn);
        /* Resolve this insn's uses (including note uses) against the
           defs active so far, then let its own defs take effect.  */
        process_uses (DF_INSN_UID_USES (uid), 0);
        process_uses (DF_INSN_UID_EQ_USES (uid), 0);
        process_defs (DF_INSN_UID_DEFS (uid), 0);
        df_simulate_one_insn_forwards (bb, insn, local_lr);
      }

  /* Finally, artificial refs at the bottom of the block.  */
  process_uses (df_get_artificial_uses (bb_index), 0);
  process_defs (df_get_artificial_defs (bb_index), 0);
}
250
 
251
/* Pop the definitions created in this basic block when leaving its
252
   dominated parts.  */
253
 
254
static void
255
single_def_use_leave_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
256
                            basic_block bb ATTRIBUTE_UNUSED)
257
{
258
  df_ref saved_def;
259
  while ((saved_def = VEC_pop (df_ref, reg_defs_stack)) != NULL)
260
    {
261
      unsigned int dregno = DF_REF_REGNO (saved_def);
262
 
263
      /* See also process_defs.  */
264
      if (saved_def == VEC_index (df_ref, reg_defs, dregno))
265
        VEC_replace (df_ref, reg_defs, dregno, NULL);
266
      else
267
        VEC_replace (df_ref, reg_defs, dregno, saved_def);
268
    }
269
}
270
 
271
 
272
/* Build a vector holding the reaching definitions of uses reached by a
   single dominating definition.  */

static void
build_single_def_use_links (void)
{
  struct dom_walk_data walk_data;

  /* We use the multiple definitions problem to compute our restricted
     use-def chains.  */
  df_set_flags (DF_EQ_NOTES);
  df_md_add_problem ();
  df_note_add_problem ();
  df_analyze ();
  df_maybe_reorganize_use_refs (DF_REF_ORDER_BY_INSN_WITH_NOTES);

  /* use_def_ref is indexed by DF_REF_ID, hence sized by the use table.  */
  use_def_ref = VEC_alloc (df_ref, heap, DF_USES_TABLE_SIZE ());
  VEC_safe_grow_cleared (df_ref, heap, use_def_ref, DF_USES_TABLE_SIZE ());

  /* reg_defs is indexed by register number.  */
  reg_defs = VEC_alloc (df_ref, heap, max_reg_num ());
  VEC_safe_grow_cleared (df_ref, heap, reg_defs, max_reg_num ());

  /* Initial capacity guess for the undo stack; it grows as needed.  */
  reg_defs_stack = VEC_alloc (df_ref, heap, n_basic_blocks * 10);
  local_md = BITMAP_ALLOC (NULL);
  local_lr = BITMAP_ALLOC (NULL);

  /* Walk the dominator tree looking for single reaching definitions
     dominating the uses.  This is similar to how SSA form is built.  */
  walk_data.dom_direction = CDI_DOMINATORS;
  walk_data.initialize_block_local_data = NULL;
  walk_data.before_dom_children = single_def_use_enter_block;
  walk_data.after_dom_children = single_def_use_leave_block;

  init_walk_dominator_tree (&walk_data);
  walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
  fini_walk_dominator_tree (&walk_data);

  /* Only use_def_ref survives the walk; free the scratch state.  */
  BITMAP_FREE (local_lr);
  BITMAP_FREE (local_md);
  VEC_free (df_ref, heap, reg_defs);
  VEC_free (df_ref, heap, reg_defs_stack);
}
314
 
315
 
316
/* Do not try to replace constant addresses or addresses of local and
317
   argument slots.  These MEM expressions are made only once and inserted
318
   in many instructions, as well as being used to control symbol table
319
   output.  It is not safe to clobber them.
320
 
321
   There are some uncommon cases where the address is already in a register
322
   for some reason, but we cannot take advantage of that because we have
323
   no easy way to unshare the MEM.  In addition, looking up all stack
324
   addresses is costly.  */
325
 
326
static bool
327
can_simplify_addr (rtx addr)
328
{
329
  rtx reg;
330
 
331
  if (CONSTANT_ADDRESS_P (addr))
332
    return false;
333
 
334
  if (GET_CODE (addr) == PLUS)
335
    reg = XEXP (addr, 0);
336
  else
337
    reg = addr;
338
 
339
  return (!REG_P (reg)
340
          || (REGNO (reg) != FRAME_POINTER_REGNUM
341
              && REGNO (reg) != HARD_FRAME_POINTER_REGNUM
342
              && REGNO (reg) != ARG_POINTER_REGNUM));
343
}
344
 
345
/* Returns a canonical version of X for the address, from the point of view,
346
   that all multiplications are represented as MULT instead of the multiply
347
   by a power of 2 being represented as ASHIFT.
348
 
349
   Every ASHIFT we find has been made by simplify_gen_binary and was not
350
   there before, so it is not shared.  So we can do this in place.  */
351
 
352
static void
353
canonicalize_address (rtx x)
354
{
355
  for (;;)
356
    switch (GET_CODE (x))
357
      {
358
      case ASHIFT:
359
        if (CONST_INT_P (XEXP (x, 1))
360
            && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (GET_MODE (x))
361
            && INTVAL (XEXP (x, 1)) >= 0)
362
          {
363
            HOST_WIDE_INT shift = INTVAL (XEXP (x, 1));
364
            PUT_CODE (x, MULT);
365
            XEXP (x, 1) = gen_int_mode ((HOST_WIDE_INT) 1 << shift,
366
                                        GET_MODE (x));
367
          }
368
 
369
        x = XEXP (x, 0);
370
        break;
371
 
372
      case PLUS:
373
        if (GET_CODE (XEXP (x, 0)) == PLUS
374
            || GET_CODE (XEXP (x, 0)) == ASHIFT
375
            || GET_CODE (XEXP (x, 0)) == CONST)
376
          canonicalize_address (XEXP (x, 0));
377
 
378
        x = XEXP (x, 1);
379
        break;
380
 
381
      case CONST:
382
        x = XEXP (x, 0);
383
        break;
384
 
385
      default:
386
        return;
387
      }
388
}
389
 
390
/* OLD is a memory address.  Return whether it is good to use NEW instead,
391
   for a memory access in the given MODE.  */
392
 
393
static bool
394
should_replace_address (rtx old_rtx, rtx new_rtx, enum machine_mode mode,
395
                        addr_space_t as, bool speed)
396
{
397
  int gain;
398
 
399
  if (rtx_equal_p (old_rtx, new_rtx)
400
      || !memory_address_addr_space_p (mode, new_rtx, as))
401
    return false;
402
 
403
  /* Copy propagation is always ok.  */
404
  if (REG_P (old_rtx) && REG_P (new_rtx))
405
    return true;
406
 
407
  /* Prefer the new address if it is less expensive.  */
408
  gain = (address_cost (old_rtx, mode, as, speed)
409
          - address_cost (new_rtx, mode, as, speed));
410
 
411
  /* If the addresses have equivalent cost, prefer the new address
412
     if it has the highest `set_src_cost'.  That has the potential of
413
     eliminating the most insns without additional costs, and it
414
     is the same that cse.c used to do.  */
415
  if (gain == 0)
416
    gain = set_src_cost (new_rtx, speed) - set_src_cost (old_rtx, speed);
417
 
418
  return (gain > 0);
419
}
420
 
421
 
422
/* Flags for the last parameter of propagate_rtx_1.  These are bit
   flags and may be ORed together.  */

enum {
  /* If PR_CAN_APPEAR is true, propagate_rtx_1 always returns true;
     if it is false, propagate_rtx_1 returns false if, for at least
     one occurrence OLD, it failed to collapse the result to a constant.
     For example, (mult:M (reg:M A) (minus:M (reg:M B) (reg:M A))) may
     collapse to zero if replacing (reg:M B) with (reg:M A).

     PR_CAN_APPEAR is disregarded inside MEMs: in that case,
     propagate_rtx_1 just tries to make cheaper and valid memory
     addresses.  */
  PR_CAN_APPEAR = 1,

  /* If PR_HANDLE_MEM is not set, propagate_rtx_1 won't attempt any replacement
     outside memory addresses.  This is needed because propagate_rtx_1 does
     not do any analysis on memory; thus it is very conservative and in general
     it will fail if non-read-only MEMs are found in the source expression.

     PR_HANDLE_MEM is set when the source of the propagation was not
     another MEM.  Then, it is safe not to treat non-read-only MEMs as
     ``opaque'' objects.  */
  PR_HANDLE_MEM = 2,

  /* Set when costs should be optimized for speed.  */
  PR_OPTIMIZE_FOR_SPEED = 4
};
449
 
450
 
451
/* Replace all occurrences of OLD_RTX in *PX with NEW_RTX and try to
   simplify the resulting expression.  Replace *PX with a new RTL
   expression if an occurrence of OLD_RTX was found.

   Return true when every replacement made is valid (per the FLAGS
   semantics documented on the PR_* enum above), false otherwise.

   This is only a wrapper around simplify-rtx.c: do not add any pattern
   matching code here.  (The sole exception is the handling of LO_SUM, but
   that is because there is no simplify_gen_* function for LO_SUM).  */

static bool
propagate_rtx_1 (rtx *px, rtx old_rtx, rtx new_rtx, int flags)
{
  rtx x = *px, tem = NULL_RTX, op0, op1, op2;
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  bool can_appear = (flags & PR_CAN_APPEAR) != 0;
  bool valid_ops = true;

  if (!(flags & PR_HANDLE_MEM) && MEM_P (x) && !MEM_READONLY_P (x))
    {
      /* If unsafe, change MEMs to CLOBBERs or SCRATCHes (to preserve whether
         they have side effects or not).  */
      *px = (side_effects_p (x)
             ? gen_rtx_CLOBBER (GET_MODE (x), const0_rtx)
             : gen_rtx_SCRATCH (GET_MODE (x)));
      return false;
    }

  /* If X is OLD_RTX, return NEW_RTX.  But not if replacing only within an
     address, and we are *not* inside one.  */
  if (x == old_rtx)
    {
      *px = new_rtx;
      return can_appear;
    }

  /* If this is an expression, try recursive substitution.  */
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      valid_ops &= propagate_rtx_1 (&op0, old_rtx, new_rtx, flags);
      /* Pointer equality means nothing was replaced below here.  */
      if (op0 == XEXP (x, 0))
        return true;
      tem = simplify_gen_unary (code, mode, op0, op_mode);
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      valid_ops &= propagate_rtx_1 (&op0, old_rtx, new_rtx, flags);
      valid_ops &= propagate_rtx_1 (&op1, old_rtx, new_rtx, flags);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return true;
      tem = simplify_gen_binary (code, mode, op0, op1);
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      /* For comparisons, take the operand mode from whichever operand
         is not VOIDmode (e.g. when comparing against a constant).  */
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      valid_ops &= propagate_rtx_1 (&op0, old_rtx, new_rtx, flags);
      valid_ops &= propagate_rtx_1 (&op1, old_rtx, new_rtx, flags);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return true;
      tem = simplify_gen_relational (code, mode, op_mode, op0, op1);
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op2 = XEXP (x, 2);
      op_mode = GET_MODE (op0);
      valid_ops &= propagate_rtx_1 (&op0, old_rtx, new_rtx, flags);
      valid_ops &= propagate_rtx_1 (&op1, old_rtx, new_rtx, flags);
      valid_ops &= propagate_rtx_1 (&op2, old_rtx, new_rtx, flags);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return true;
      /* Re-read the mode in case the substitution gave op0 one.  */
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      tem = simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
      break;

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = XEXP (x, 0);
          valid_ops &= propagate_rtx_1 (&op0, old_rtx, new_rtx, flags);
          if (op0 == XEXP (x, 0))
            return true;
          tem = simplify_gen_subreg (mode, op0, GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
        }
      break;

    case RTX_OBJ:
      if (code == MEM && x != new_rtx)
        {
          rtx new_op0;
          op0 = XEXP (x, 0);

          /* There are some addresses that we cannot work on.  */
          if (!can_simplify_addr (op0))
            return true;

          op0 = new_op0 = targetm.delegitimize_address (op0);
          /* Inside a MEM we are always allowed to make replacements
             (PR_CAN_APPEAR); validity is decided by address costs.  */
          valid_ops &= propagate_rtx_1 (&new_op0, old_rtx, new_rtx,
                                        flags | PR_CAN_APPEAR);

          /* Dismiss transformation that we do not want to carry on.  */
          if (!valid_ops
              || new_op0 == op0
              || !(GET_MODE (new_op0) == GET_MODE (op0)
                   || GET_MODE (new_op0) == VOIDmode))
            return true;

          canonicalize_address (new_op0);

          /* Copy propagations are always ok.  Otherwise check the costs.  */
          if (!(REG_P (old_rtx) && REG_P (new_rtx))
              && !should_replace_address (op0, new_op0, GET_MODE (x),
                                          MEM_ADDR_SPACE (x),
                                          flags & PR_OPTIMIZE_FOR_SPEED))
            return true;

          tem = replace_equiv_address_nv (x, new_op0);
        }

      else if (code == LO_SUM)
        {
          op0 = XEXP (x, 0);
          op1 = XEXP (x, 1);

          /* The only simplification we do attempts to remove references to op0
             or make it constant -- in both cases, op0's invalidity will not
             make the result invalid.  */
          propagate_rtx_1 (&op0, old_rtx, new_rtx, flags | PR_CAN_APPEAR);
          valid_ops &= propagate_rtx_1 (&op1, old_rtx, new_rtx, flags);
          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return true;

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            tem = op1;
          else
            tem = gen_rtx_LO_SUM (mode, op0, op1);

          /* OP1 is likely not a legitimate address, otherwise there would have
             been no LO_SUM.  We want it to disappear if it is invalid, return
             false in that case.  */
          return memory_address_p (mode, tem);
        }

      else if (code == REG)
        {
          /* Unlike the top-of-function test, this compares by value,
             catching distinct REG rtxes for the same register.  */
          if (rtx_equal_p (x, old_rtx))
            {
              *px = new_rtx;
              return can_appear;
            }
        }
      break;

    default:
      break;
    }

  /* No change, no trouble.  */
  if (tem == NULL_RTX)
    return true;

  *px = tem;

  /* The replacement we made so far is valid, if all of the recursive
     replacements were valid, or we could simplify everything to
     a constant.  */
  return valid_ops || can_appear || CONSTANT_P (tem);
}
634
 
635
 
636
/* for_each_rtx traversal function that returns 1 if BODY points to
637
   a non-constant mem.  */
638
 
639
static int
640
varying_mem_p (rtx *body, void *data ATTRIBUTE_UNUSED)
641
{
642
  rtx x = *body;
643
  return MEM_P (x) && !MEM_READONLY_P (x);
644
}
645
 
646
 
647
/* Replace all occurrences of OLD in X with NEW and try to simplify the
648
   resulting expression (in mode MODE).  Return a new expression if it is
649
   a constant, otherwise X.
650
 
651
   Simplifications where occurrences of NEW collapse to a constant are always
652
   accepted.  All simplifications are accepted if NEW is a pseudo too.
653
   Otherwise, we accept simplifications that have a lower or equal cost.  */
654
 
655
static rtx
656
propagate_rtx (rtx x, enum machine_mode mode, rtx old_rtx, rtx new_rtx,
657
               bool speed)
658
{
659
  rtx tem;
660
  bool collapsed;
661
  int flags;
662
 
663
  if (REG_P (new_rtx) && REGNO (new_rtx) < FIRST_PSEUDO_REGISTER)
664
    return NULL_RTX;
665
 
666
  flags = 0;
667
  if (REG_P (new_rtx) || CONSTANT_P (new_rtx))
668
    flags |= PR_CAN_APPEAR;
669
  if (!for_each_rtx (&new_rtx, varying_mem_p, NULL))
670
    flags |= PR_HANDLE_MEM;
671
 
672
  if (speed)
673
    flags |= PR_OPTIMIZE_FOR_SPEED;
674
 
675
  tem = x;
676
  collapsed = propagate_rtx_1 (&tem, old_rtx, copy_rtx (new_rtx), flags);
677
  if (tem == x || !collapsed)
678
    return NULL_RTX;
679
 
680
  /* gen_lowpart_common will not be able to process VOIDmode entities other
681
     than CONST_INTs.  */
682
  if (GET_MODE (tem) == VOIDmode && !CONST_INT_P (tem))
683
    return NULL_RTX;
684
 
685
  if (GET_MODE (tem) == VOIDmode)
686
    tem = rtl_hooks.gen_lowpart_no_emit (mode, tem);
687
  else
688
    gcc_assert (GET_MODE (tem) == mode);
689
 
690
  return tem;
691
}
692
 
693
 
694
 
695
 
696
/* Return true if the register from reference REF is killed
697
   between FROM to (but not including) TO.  */
698
 
699
static bool
700
local_ref_killed_between_p (df_ref ref, rtx from, rtx to)
701
{
702
  rtx insn;
703
 
704
  for (insn = from; insn != to; insn = NEXT_INSN (insn))
705
    {
706
      df_ref *def_rec;
707
      if (!INSN_P (insn))
708
        continue;
709
 
710
      for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
711
        {
712
          df_ref def = *def_rec;
713
          if (DF_REF_REGNO (ref) == DF_REF_REGNO (def))
714
            return true;
715
        }
716
    }
717
  return false;
718
}
719
 
720
 
721
/* Check if the given DEF is available in INSN.  This would require full
   computation of available expressions; we check only restricted conditions:
   - if DEF is the sole definition of its register, go ahead;
   - in the same basic block, we check for no definitions killing the
     definition of DEF_INSN;
   - if USE's basic block has DEF's basic block as the sole predecessor,
     we check if the definition is killed after DEF_INSN or before
     TARGET_INSN insn, in their respective basic blocks.
   Return true when the def may be unavailable ("killed"), false when it
   is known to reach TARGET_INSN.  */
static bool
use_killed_between (df_ref use, rtx def_insn, rtx target_insn)
{
  basic_block def_bb = BLOCK_FOR_INSN (def_insn);
  basic_block target_bb = BLOCK_FOR_INSN (target_insn);
  int regno;
  df_ref def;

  /* We used to have a def reaching a use that is _before_ the def,
     with the def not dominating the use even though the use and def
     are in the same basic block, when a register may be used
     uninitialized in a loop.  This should not happen anymore since
     we do not use reaching definitions, but still we test for such
     cases and assume that DEF is not available.  */
  if (def_bb == target_bb
      ? DF_INSN_LUID (def_insn) >= DF_INSN_LUID (target_insn)
      : !dominated_by_p (CDI_DOMINATORS, target_bb, def_bb))
    return true;

  /* Check if the reg in USE has only one definition.  We already
     know that this definition reaches use, or we wouldn't be here.
     However, this is invalid for hard registers because if they are
     live at the beginning of the function it does not mean that we
     have an uninitialized access.  */
  regno = DF_REF_REGNO (use);
  def = DF_REG_DEF_CHAIN (regno);
  if (def
      && DF_REF_NEXT_REG (def) == NULL
      && regno >= FIRST_PSEUDO_REGISTER)
    return false;

  /* Check locally if we are in the same basic block.  */
  if (def_bb == target_bb)
    return local_ref_killed_between_p (use, def_insn, target_insn);

  /* Finally, if DEF_BB is the sole predecessor of TARGET_BB.  */
  if (single_pred_p (target_bb)
      && single_pred (target_bb) == def_bb)
    {
      df_ref x;

      /* See if USE is killed between DEF_INSN and the last insn in the
         basic block containing DEF_INSN.  */
      x = df_bb_regno_last_def_find (def_bb, regno);
      if (x && DF_INSN_LUID (DF_REF_INSN (x)) >= DF_INSN_LUID (def_insn))
        return true;

      /* See if USE is killed between TARGET_INSN and the first insn in the
         basic block containing TARGET_INSN.  */
      x = df_bb_regno_first_def_find (target_bb, regno);
      if (x && DF_INSN_LUID (DF_REF_INSN (x)) < DF_INSN_LUID (target_insn))
        return true;

      return false;
    }

  /* Otherwise assume the worst case.  */
  return true;
}
788
 
789
 
790
/* Check if all uses in DEF_INSN can be used in TARGET_INSN.  This
791
   would require full computation of available expressions;
792
   we check only restricted conditions, see use_killed_between.  */
793
static bool
794
all_uses_available_at (rtx def_insn, rtx target_insn)
795
{
796
  df_ref *use_rec;
797
  struct df_insn_info *insn_info = DF_INSN_INFO_GET (def_insn);
798
  rtx def_set = single_set (def_insn);
799
 
800
  gcc_assert (def_set);
801
 
802
  /* If target_insn comes right after def_insn, which is very common
803
     for addresses, we can use a quicker test.  */
804
  if (NEXT_INSN (def_insn) == target_insn
805
      && REG_P (SET_DEST (def_set)))
806
    {
807
      rtx def_reg = SET_DEST (def_set);
808
 
809
      /* If the insn uses the reg that it defines, the substitution is
810
         invalid.  */
811
      for (use_rec = DF_INSN_INFO_USES (insn_info); *use_rec; use_rec++)
812
        {
813
          df_ref use = *use_rec;
814
          if (rtx_equal_p (DF_REF_REG (use), def_reg))
815
            return false;
816
        }
817
      for (use_rec = DF_INSN_INFO_EQ_USES (insn_info); *use_rec; use_rec++)
818
        {
819
          df_ref use = *use_rec;
820
          if (rtx_equal_p (DF_REF_REG (use), def_reg))
821
            return false;
822
        }
823
    }
824
  else
825
    {
826
      rtx def_reg = REG_P (SET_DEST (def_set)) ? SET_DEST (def_set) : NULL_RTX;
827
 
828
      /* Look at all the uses of DEF_INSN, and see if they are not
829
         killed between DEF_INSN and TARGET_INSN.  */
830
      for (use_rec = DF_INSN_INFO_USES (insn_info); *use_rec; use_rec++)
831
        {
832
          df_ref use = *use_rec;
833
          if (def_reg && rtx_equal_p (DF_REF_REG (use), def_reg))
834
            return false;
835
          if (use_killed_between (use, def_insn, target_insn))
836
            return false;
837
        }
838
      for (use_rec = DF_INSN_INFO_EQ_USES (insn_info); *use_rec; use_rec++)
839
        {
840
          df_ref use = *use_rec;
841
          if (def_reg && rtx_equal_p (DF_REF_REG (use), def_reg))
842
            return false;
843
          if (use_killed_between (use, def_insn, target_insn))
844
            return false;
845
        }
846
    }
847
 
848
  return true;
849
}
850
 
851
 
/* Map from register number to the definition currently "active" for it,
   i.e. the use->def link recorded by register_active_defs.  Indexed by
   regno; allocated to max_reg_num () entries in fwprop_init.  */
static df_ref *active_defs;
#ifdef ENABLE_CHECKING
/* Set of regnos that have a valid entry in ACTIVE_DEFS; used by
   update_uses to assert that no stale slot is ever read.  */
static sparseset active_defs_check;
#endif
856
 
857
/* Fill the ACTIVE_DEFS array with the use->def link for the registers
858
   mentioned in USE_REC.  Register the valid entries in ACTIVE_DEFS_CHECK
859
   too, for checking purposes.  */
860
 
861
static void
862
register_active_defs (df_ref *use_rec)
863
{
864
  while (*use_rec)
865
    {
866
      df_ref use = *use_rec++;
867
      df_ref def = get_def_for_use (use);
868
      int regno = DF_REF_REGNO (use);
869
 
870
#ifdef ENABLE_CHECKING
871
      sparseset_set_bit (active_defs_check, regno);
872
#endif
873
      active_defs[regno] = def;
874
    }
875
}
/* Build the use->def links that we use to update the dataflow info
   for new uses.  Note that building the links is very cheap and if
   it were done earlier, they could be used to rule out invalid
   propagations (in addition to what is done in all_uses_available_at).
   I'm not doing this yet, though.  */

static void
update_df_init (rtx def_insn, rtx insn)
{
#ifdef ENABLE_CHECKING
  sparseset_clear (active_defs_check);
#endif
  /* Registration order matters: a later call overwrites the ACTIVE_DEFS
     entry of a regno registered earlier, so INSN's own uses take
     precedence over DEF_INSN's for shared registers.  */
  register_active_defs (DF_INSN_USES (def_insn));
  register_active_defs (DF_INSN_USES (insn));
  register_active_defs (DF_INSN_EQ_USES (insn));
}
/* Update the USE_DEF_REF array for the given use, using the active definitions
   in the ACTIVE_DEFS array to match pseudos to their def.  */

static inline void
update_uses (df_ref *use_rec)
{
  while (*use_rec)
    {
      df_ref use = *use_rec++;
      int regno = DF_REF_REGNO (use);

      /* Set up the use-def chain.  Newly created uses may have an id past
         the end of the vector, so grow (zero-filled) on demand first.  */
      if (DF_REF_ID (use) >= (int) VEC_length (df_ref, use_def_ref))
        VEC_safe_grow_cleared (df_ref, heap, use_def_ref,
                               DF_REF_ID (use) + 1);

#ifdef ENABLE_CHECKING
      /* Every regno looked up here must have been registered by a prior
         update_df_init call.  */
      gcc_assert (sparseset_bit_p (active_defs_check, regno));
#endif
      VEC_replace (df_ref, use_def_ref, DF_REF_ID (use), active_defs[regno]);
    }
}
/* Update the USE_DEF_REF array for the uses in INSN.  If NOTE is nonnull,
   create and update only the uses rooted in that REG_EQUAL/REG_EQUIV note;
   otherwise rescan the whole pattern of INSN and update its regular uses.
   Equivalence-note uses are updated in both cases.  */

static void
update_df (rtx insn, rtx note)
{
  struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);

  if (note)
    {
      df_uses_create (&XEXP (note, 0), insn, DF_REF_IN_NOTE);
      df_notes_rescan (insn);
    }
  else
    {
      df_uses_create (&PATTERN (insn), insn, 0);
      df_insn_rescan (insn);
      update_uses (DF_INSN_INFO_USES (insn_info));
    }

  update_uses (DF_INSN_INFO_EQ_USES (insn_info));
}
/* Try substituting NEW_RTX into LOC, which originated from forward
   propagation of USE's value from DEF_INSN.  SET_REG_EQUAL says whether we
   are substituting the whole SET_SRC, so we can set a REG_EQUAL note if the
   new insn is not recognized.  Return whether the substitution was
   performed.  */

static bool
try_fwprop_subst (df_ref use, rtx *loc, rtx new_rtx, rtx def_insn, bool set_reg_equal)
{
  rtx insn = DF_REF_INSN (use);
  rtx set = single_set (insn);
  rtx note = NULL_RTX;
  bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));
  int old_cost = 0;
  bool ok;

  /* Prime ACTIVE_DEFS so update_df below can rebuild use->def links.  */
  update_df_init (def_insn, insn);

  /* forward_propagate_subreg may be operating on an instruction with
     multiple sets.  If so, assume the cost of the new instruction is
     not greater than the old one.  */
  if (set)
    old_cost = set_src_cost (SET_SRC (set), speed);
  if (dump_file)
    {
      fprintf (dump_file, "\nIn insn %d, replacing\n ", INSN_UID (insn));
      print_inline_rtx (dump_file, *loc, 2);
      fprintf (dump_file, "\n with ");
      print_inline_rtx (dump_file, new_rtx, 2);
      fprintf (dump_file, "\n");
    }

  /* Make the change tentatively; it stays pending until either
     confirm_change_group or cancel_changes below.  */
  validate_unshare_change (insn, loc, new_rtx, true);
  if (!verify_changes (0))
    {
      if (dump_file)
        fprintf (dump_file, "Changes to insn %d not recognized\n",
                 INSN_UID (insn));
      ok = false;
    }

  /* Even if recognized, reject the change when it makes a plain register
     use more expensive than before.  */
  else if (DF_REF_TYPE (use) == DF_REF_REG_USE
           && set
           && set_src_cost (SET_SRC (set), speed) > old_cost)
    {
      if (dump_file)
        fprintf (dump_file, "Changes to insn %d not profitable\n",
                 INSN_UID (insn));
      ok = false;
    }

  else
    {
      if (dump_file)
        fprintf (dump_file, "Changed insn %d\n", INSN_UID (insn));
      ok = true;
    }

  if (ok)
    {
      confirm_change_group ();
      num_changes++;
    }
  else
    {
      cancel_changes (0);

      /* Can also record a simplified value in a REG_EQUAL note,
         making a new one if one does not already exist.  */
      if (set_reg_equal)
        {
          if (dump_file)
            fprintf (dump_file, " Setting REG_EQUAL note\n");

          note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (new_rtx));
        }
    }

  /* A constant introduces no new register uses, so the dataflow info
     needs no update in that case.  */
  if ((ok || note) && !CONSTANT_P (new_rtx))
    update_df (insn, note);

  return ok;
}
/* For the given single_set INSN, containing SRC known to be a
   ZERO_EXTEND or SIGN_EXTEND of a register, return true if INSN
   is redundant due to the register being set by a LOAD_EXTEND_OP
   load from memory.  */

static bool
free_load_extend (rtx src, rtx insn)
{
  rtx reg;
  df_ref *use_vec;
  df_ref use = 0, def;

  reg = XEXP (src, 0);
#ifdef LOAD_EXTEND_OP
  /* Without LOAD_EXTEND_OP the #ifdef drops the condition and the
     function unconditionally returns false.  */
  if (LOAD_EXTEND_OP (GET_MODE (reg)) != GET_CODE (src))
#endif
    return false;

  /* Find the use of REG in INSN.  */
  for (use_vec = DF_INSN_USES (insn); *use_vec; use_vec++)
    {
      use = *use_vec;

      if (!DF_REF_IS_ARTIFICIAL (use)
          && DF_REF_TYPE (use) == DF_REF_REG_USE
          && DF_REF_REG (use) == reg)
        break;
    }
  /* NOTE(review): if the loop finishes without taking the 'break', USE is
     left pointing at the last use examined rather than NULL, so this check
     only catches an empty use list.  In practice REG is an operand of INSN
     and is always found — confirm before relying on this elsewhere.  */
  if (!use)
    return false;

  def = get_def_for_use (use);
  if (!def)
    return false;

  if (DF_REF_IS_ARTIFICIAL (def))
    return false;

  /* The extend is free exactly when the reaching def is a plain
     register load from memory (the hardware extends for free).  */
  if (NONJUMP_INSN_P (DF_REF_INSN (def)))
    {
      rtx patt = PATTERN (DF_REF_INSN (def));

      if (GET_CODE (patt) == SET
          && GET_CODE (SET_SRC (patt)) == MEM
          && rtx_equal_p (SET_DEST (patt), reg))
        return true;
    }
  return false;
}
/* If USE is a subreg, see if it can be replaced by a pseudo.  */

static bool
forward_propagate_subreg (df_ref use, rtx def_insn, rtx def_set)
{
  rtx use_reg = DF_REF_REG (use);
  rtx use_insn, src;

  /* Only consider subregs... */
  enum machine_mode use_mode = GET_MODE (use_reg);
  if (GET_CODE (use_reg) != SUBREG
      || !REG_P (SET_DEST (def_set)))
    return false;

  /* If this is a paradoxical SUBREG...  */
  if (GET_MODE_SIZE (use_mode)
      > GET_MODE_SIZE (GET_MODE (SUBREG_REG (use_reg))))
    {
      /* If this is a paradoxical SUBREG, we have no idea what value the
         extra bits would have.  However, if the operand is equivalent to
         a SUBREG whose operand is the same as our mode, and all the modes
         are within a word, we can just use the inner operand because
         these SUBREGs just say how to treat the register.  */
      use_insn = DF_REF_INSN (use);
      src = SET_SRC (def_set);
      if (GET_CODE (src) == SUBREG
          && REG_P (SUBREG_REG (src))
          && REGNO (SUBREG_REG (src)) >= FIRST_PSEUDO_REGISTER
          && GET_MODE (SUBREG_REG (src)) == use_mode
          && subreg_lowpart_p (src)
          && all_uses_available_at (def_insn, use_insn))
        return try_fwprop_subst (use, DF_REF_LOC (use), SUBREG_REG (src),
                                 def_insn, false);
    }

  /* If this is a SUBREG of a ZERO_EXTEND or SIGN_EXTEND, and the SUBREG
     is the low part of the reg being extended then just use the inner
     operand.  Don't do this if the ZERO_EXTEND or SIGN_EXTEND insn will
     be removed due to it matching a LOAD_EXTEND_OP load from memory,
     or due to the operation being a no-op when applied to registers.
     For example, if we have:

         A: (set (reg:DI X) (sign_extend:DI (reg:SI Y)))
         B: (... (subreg:SI (reg:DI X)) ...)

     and mode_rep_extended says that Y is already sign-extended,
     the backend will typically allow A to be combined with the
     definition of Y or, failing that, allow A to be deleted after
     reload through register tying.  Introducing more uses of Y
     prevents both optimisations.  */
  else if (subreg_lowpart_p (use_reg))
    {
      use_insn = DF_REF_INSN (use);
      src = SET_SRC (def_set);
      if ((GET_CODE (src) == ZERO_EXTEND
           || GET_CODE (src) == SIGN_EXTEND)
          && REG_P (XEXP (src, 0))
          && REGNO (XEXP (src, 0)) >= FIRST_PSEUDO_REGISTER
          && GET_MODE (XEXP (src, 0)) == use_mode
          && !free_load_extend (src, def_insn)
          && (targetm.mode_rep_extended (use_mode, GET_MODE (src))
              != (int) GET_CODE (src))
          && all_uses_available_at (def_insn, use_insn))
        return try_fwprop_subst (use, DF_REF_LOC (use), XEXP (src, 0),
                                 def_insn, false);
    }

  return false;
}
/* Try to replace USE with SRC (defined in DEF_INSN) in __asm.  */

static bool
forward_propagate_asm (df_ref use, rtx def_insn, rtx def_set, rtx reg)
{
  rtx use_insn = DF_REF_INSN (use), src, use_pat, asm_operands, new_rtx, *loc;
  int speed_p, i;
  df_ref *use_vec;

  gcc_assert ((DF_REF_FLAGS (use) & DF_REF_IN_NOTE) == 0);

  src = SET_SRC (def_set);
  use_pat = PATTERN (use_insn);

  /* In __asm don't replace if src might need more registers than
     reg, as that could increase register pressure on the __asm.  */
  use_vec = DF_INSN_USES (def_insn);
  if (use_vec[0] && use_vec[1])
    return false;

  update_df_init (def_insn, use_insn);
  speed_p = optimize_bb_for_speed_p (BLOCK_FOR_INSN (use_insn));
  asm_operands = NULL_RTX;
  /* Locate the ASM_OPERANDS inside the use pattern, propagating into any
     MEM destinations we pass on the way.  */
  switch (GET_CODE (use_pat))
    {
    case ASM_OPERANDS:
      asm_operands = use_pat;
      break;
    case SET:
      if (MEM_P (SET_DEST (use_pat)))
        {
          loc = &SET_DEST (use_pat);
          new_rtx = propagate_rtx (*loc, GET_MODE (*loc), reg, src, speed_p);
          if (new_rtx)
            validate_unshare_change (use_insn, loc, new_rtx, true);
        }
      asm_operands = SET_SRC (use_pat);
      break;
    case PARALLEL:
      for (i = 0; i < XVECLEN (use_pat, 0); i++)
        if (GET_CODE (XVECEXP (use_pat, 0, i)) == SET)
          {
            if (MEM_P (SET_DEST (XVECEXP (use_pat, 0, i))))
              {
                loc = &SET_DEST (XVECEXP (use_pat, 0, i));
                new_rtx = propagate_rtx (*loc, GET_MODE (*loc), reg,
                                         src, speed_p);
                if (new_rtx)
                  validate_unshare_change (use_insn, loc, new_rtx, true);
              }
            asm_operands = SET_SRC (XVECEXP (use_pat, 0, i));
          }
        else if (GET_CODE (XVECEXP (use_pat, 0, i)) == ASM_OPERANDS)
          asm_operands = XVECEXP (use_pat, 0, i);
      break;
    default:
      gcc_unreachable ();
    }

  /* Propagate into every input operand of the asm.  */
  gcc_assert (asm_operands && GET_CODE (asm_operands) == ASM_OPERANDS);
  for (i = 0; i < ASM_OPERANDS_INPUT_LENGTH (asm_operands); i++)
    {
      loc = &ASM_OPERANDS_INPUT (asm_operands, i);
      new_rtx = propagate_rtx (*loc, GET_MODE (*loc), reg, src, speed_p);
      if (new_rtx)
        validate_unshare_change (use_insn, loc, new_rtx, true);
    }

  /* Commit all pending replacements as a group, or fail as a whole.  */
  if (num_changes_pending () == 0 || !apply_change_group ())
    return false;

  update_df (use_insn, NULL);
  num_changes++;
  return true;
}
/* Try to replace USE with SRC (defined in DEF_INSN) and simplify the
   result.  */

static bool
forward_propagate_and_simplify (df_ref use, rtx def_insn, rtx def_set)
{
  rtx use_insn = DF_REF_INSN (use);
  rtx use_set = single_set (use_insn);
  rtx src, reg, new_rtx, *loc;
  bool set_reg_equal;
  enum machine_mode mode;
  int asm_use = -1;

  if (INSN_CODE (use_insn) < 0)
    asm_use = asm_noperands (PATTERN (use_insn));

  /* Only handle single sets, asms, and debug insns.  */
  if (!use_set && asm_use < 0 && !DEBUG_INSN_P (use_insn))
    return false;

  /* Do not propagate into PC, CC0, etc.  */
  if (use_set && GET_MODE (SET_DEST (use_set)) == VOIDmode)
    return false;

  /* If def and use are subreg, check if they match.  */
  reg = DF_REF_REG (use);
  if (GET_CODE (reg) == SUBREG && GET_CODE (SET_DEST (def_set)) == SUBREG)
    {
      if (SUBREG_BYTE (SET_DEST (def_set)) != SUBREG_BYTE (reg))
        return false;
    }
  /* Check if the def had a subreg, but the use has the whole reg.  */
  else if (REG_P (reg) && GET_CODE (SET_DEST (def_set)) == SUBREG)
    return false;
  /* Check if the use has a subreg, but the def had the whole reg.  Unlike the
     previous case, the optimization is possible and often useful indeed.  */
  else if (GET_CODE (reg) == SUBREG && REG_P (SET_DEST (def_set)))
    reg = SUBREG_REG (reg);

  /* Make sure that we can treat REG as having the same mode as the
     source of DEF_SET.  */
  if (GET_MODE (SET_DEST (def_set)) != GET_MODE (reg))
    return false;

  /* Check if the substitution is valid (last, because it's the most
     expensive check!).  */
  src = SET_SRC (def_set);
  if (!CONSTANT_P (src) && !all_uses_available_at (def_insn, use_insn))
    return false;

  /* Check if the def is loading something from the constant pool; in this
     case we would undo optimization such as compress_float_constant.
     Still, we can set a REG_EQUAL note.  */
  if (MEM_P (src) && MEM_READONLY_P (src))
    {
      rtx x = avoid_constant_pool_reference (src);
      if (x != src && use_set)
        {
          rtx note = find_reg_note (use_insn, REG_EQUAL, NULL_RTX);
          rtx old_rtx = note ? XEXP (note, 0) : SET_SRC (use_set);
          rtx new_rtx = simplify_replace_rtx (old_rtx, src, x);
          if (old_rtx != new_rtx)
            set_unique_reg_note (use_insn, REG_EQUAL, copy_rtx (new_rtx));
        }
      return false;
    }

  if (asm_use >= 0)
    return forward_propagate_asm (use, def_insn, def_set, reg);

  /* Else try simplifying.  */

  if (DF_REF_TYPE (use) == DF_REF_REG_MEM_STORE)
    {
      loc = &SET_DEST (use_set);
      set_reg_equal = false;
    }
  else if (!use_set)
    {
      /* Debug insn: propagate into the location expression.  */
      loc = &INSN_VAR_LOCATION_LOC (use_insn);
      set_reg_equal = false;
    }
  else
    {
      rtx note = find_reg_note (use_insn, REG_EQUAL, NULL_RTX);
      if (DF_REF_FLAGS (use) & DF_REF_IN_NOTE)
        loc = &XEXP (note, 0);
      else
        loc = &SET_SRC (use_set);

      /* Do not replace an existing REG_EQUAL note if the insn is not
         recognized.  Either we're already replacing in the note, or we'll
         separately try plugging the definition in the note and simplifying.
         And only install a REG_EQUAL note when the destination is a REG,
         as the note would be invalid otherwise.  */
      set_reg_equal = (note == NULL_RTX && REG_P (SET_DEST (use_set)));
    }

  /* A VOIDmode destination (e.g. a constant in a note) inherits the mode
     of the set destination.  */
  if (GET_MODE (*loc) == VOIDmode)
    mode = GET_MODE (SET_DEST (use_set));
  else
    mode = GET_MODE (*loc);

  new_rtx = propagate_rtx (*loc, mode, reg, src,
                           optimize_bb_for_speed_p (BLOCK_FOR_INSN (use_insn)));

  if (!new_rtx)
    return false;

  return try_fwprop_subst (use, loc, new_rtx, def_insn, set_reg_equal);
}
/* Given a use USE of an insn, if it has a single reaching
   definition, try to forward propagate it into that insn.
   Return true if cfg cleanup will be needed.  */

static bool
forward_propagate_into (df_ref use)
{
  df_ref def;
  rtx def_insn, def_set, use_insn;
  rtx parent;

  /* Read-write uses (e.g. pre/post-modify) cannot be replaced.  */
  if (DF_REF_FLAGS (use) & DF_REF_READ_WRITE)
    return false;
  if (DF_REF_IS_ARTIFICIAL (use))
    return false;

  /* Only consider uses that have a single definition.  */
  def = get_def_for_use (use);
  if (!def)
    return false;
  if (DF_REF_FLAGS (def) & DF_REF_READ_WRITE)
    return false;
  if (DF_REF_IS_ARTIFICIAL (def))
    return false;

  /* Do not propagate loop invariant definitions inside the loop.  */
  if (DF_REF_BB (def)->loop_father != DF_REF_BB (use)->loop_father)
    return false;

  /* Check if the use is still present in the insn!  A previous
     propagation into the same insn may already have removed it.  */
  use_insn = DF_REF_INSN (use);
  if (DF_REF_FLAGS (use) & DF_REF_IN_NOTE)
    parent = find_reg_note (use_insn, REG_EQUAL, NULL_RTX);
  else
    parent = PATTERN (use_insn);

  if (!reg_mentioned_p (DF_REF_REG (use), parent))
    return false;

  def_insn = DF_REF_INSN (def);
  if (multiple_sets (def_insn))
    return false;
  def_set = single_set (def_insn);
  if (!def_set)
    return false;

  /* Only try one kind of propagation.  If two are possible, we'll
     do it on the following iterations.  */
  if (forward_propagate_and_simplify (use, def_insn, def_set)
      || forward_propagate_subreg (use, def_insn, def_set))
    {
      /* Propagation may have made an EH edge dead; tell the caller to
         clean up the CFG if purging actually removed edges.  */
      if (cfun->can_throw_non_call_exceptions
          && find_reg_note (use_insn, REG_EH_REGION, NULL_RTX)
          && purge_dead_edges (DF_REF_BB (use)))
        return true;
    }
  return false;
}
/* Shared setup for both fwprop passes: compute dominators, loops, the
   single def-use links, and allocate the ACTIVE_DEFS scratch array.  */

static void
fwprop_init (void)
{
  num_changes = 0;
  calculate_dominance_info (CDI_DOMINATORS);

  /* We do not always want to propagate into loops, so we have to find
     loops and be careful about them.  But we have to call flow_loops_find
     before df_analyze, because flow_loops_find may introduce new jump
     insns (sadly) if we are not working in cfglayout mode.  */
  loop_optimizer_init (0);

  build_single_def_use_links ();
  df_set_flags (DF_DEFER_INSN_RESCAN);

  /* One active-definition slot per (pseudo) register.  */
  active_defs = XNEWVEC (df_ref, max_reg_num ());
#ifdef ENABLE_CHECKING
  active_defs_check = sparseset_alloc (max_reg_num ());
#endif
}
/* Shared teardown for both fwprop passes: release loop/dominator info and
   pass-local data, clean up the CFG, and delete insns made dead by the
   propagations.  */

static void
fwprop_done (void)
{
  loop_optimizer_finalize ();

  VEC_free (df_ref, heap, use_def_ref);
  free (active_defs);
#ifdef ENABLE_CHECKING
  sparseset_free (active_defs_check);
#endif

  free_dominance_info (CDI_DOMINATORS);
  cleanup_cfg (0);
  delete_trivially_dead_insns (get_insns (), max_reg_num ());

  if (dump_file)
    fprintf (dump_file,
             "\nNumber of successful forward propagations: %d\n\n",
             num_changes);
}
1436
 
1437
 
1438
/* Main entry point.  */
1439
 
1440
static bool
1441
gate_fwprop (void)
1442
{
1443
  return optimize > 0 && flag_forward_propagate;
1444
}
1445
 
1446
static unsigned int
1447
fwprop (void)
1448
{
1449
  unsigned i;
1450
  bool need_cleanup = false;
1451
 
1452
  fwprop_init ();
1453
 
1454
  /* Go through all the uses.  df_uses_create will create new ones at the
1455
     end, and we'll go through them as well.
1456
 
1457
     Do not forward propagate addresses into loops until after unrolling.
1458
     CSE did so because it was able to fix its own mess, but we are not.  */
1459
 
1460
  for (i = 0; i < DF_USES_TABLE_SIZE (); i++)
1461
    {
1462
      df_ref use = DF_USES_GET (i);
1463
      if (use)
1464
        if (DF_REF_TYPE (use) == DF_REF_REG_USE
1465
            || DF_REF_BB (use)->loop_father == NULL
1466
            /* The outer most loop is not really a loop.  */
1467
            || loop_outer (DF_REF_BB (use)->loop_father) == NULL)
1468
          need_cleanup |= forward_propagate_into (use);
1469
    }
1470
 
1471
  fwprop_done ();
1472
  if (need_cleanup)
1473
    cleanup_cfg (0);
1474
  return 0;
1475
}
/* Pass descriptor for the first forward-propagation pass ("fwprop1"),
   which handles register uses; see fwprop above.  */

struct rtl_opt_pass pass_rtl_fwprop =
{
 {
  RTL_PASS,
  "fwprop1",                            /* name */
  gate_fwprop,                          /* gate */
  fwprop,                               /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_FWPROP,                            /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish
    | TODO_verify_flow
    | TODO_verify_rtl_sharing           /* todo_flags_finish */
 }
};
1497
 
1498
static unsigned int
1499
fwprop_addr (void)
1500
{
1501
  unsigned i;
1502
  bool need_cleanup = false;
1503
 
1504
  fwprop_init ();
1505
 
1506
  /* Go through all the uses.  df_uses_create will create new ones at the
1507
     end, and we'll go through them as well.  */
1508
  for (i = 0; i < DF_USES_TABLE_SIZE (); i++)
1509
    {
1510
      df_ref use = DF_USES_GET (i);
1511
      if (use)
1512
        if (DF_REF_TYPE (use) != DF_REF_REG_USE
1513
            && DF_REF_BB (use)->loop_father != NULL
1514
            /* The outer most loop is not really a loop.  */
1515
            && loop_outer (DF_REF_BB (use)->loop_father) != NULL)
1516
          need_cleanup |= forward_propagate_into (use);
1517
    }
1518
 
1519
  fwprop_done ();
1520
 
1521
  if (need_cleanup)
1522
    cleanup_cfg (0);
1523
  return 0;
1524
}
/* Pass descriptor for the second forward-propagation pass ("fwprop2"),
   which handles address uses inside loops; see fwprop_addr above.  */

struct rtl_opt_pass pass_rtl_fwprop_addr =
{
 {
  RTL_PASS,
  "fwprop2",                            /* name */
  gate_fwprop,                          /* gate */
  fwprop_addr,                          /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_FWPROP,                            /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing  /* todo_flags_finish */
 }
};

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.