File: openrisc/trunk/gnu-dev/or1k-gcc/gcc/alias.c (rev 849)
Repository: https://opencores.org/ocsvn/openrisc/openrisc/trunk

/* Alias analysis for GNU C
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
   2007, 2008, 2009, 2010 Free Software Foundation, Inc.
   Contributed by John Carr (jfc@mit.edu).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "function.h"
#include "alias.h"
#include "emit-rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "flags.h"
#include "output.h"
#include "diagnostic-core.h"
#include "cselib.h"
#include "splay-tree.h"
#include "ggc.h"
#include "langhooks.h"
#include "timevar.h"
#include "target.h"
#include "cgraph.h"
#include "tree-pass.h"
#include "df.h"
#include "tree-ssa-alias.h"
#include "pointer-set.h"
#include "tree-flow.h"

/* The aliasing API provided here solves related but different problems:

   Say there exists (in C)

   struct X {
     struct Y y1;
     struct Z z2;
   } x1, *px1, *px2;

   struct Y y2, *py;
   struct Z z2, *pz;


   py = &x1.y1;
   px2 = &x1;

   Consider the four questions:

   Can a store to x1 interfere with px2->y1?
   Can a store to x1 interfere with px2->z2 (i.e. (*px2).z2)?
   Can a store to x1 change the value pointed to by py?
   Can a store to x1 change the value pointed to by pz?

   The answer to these questions can be yes, yes, yes, and maybe.

   The first two questions can be answered with a simple examination
   of the type system.  If structure X contains a field of type Y then
   a store through a pointer to an X can overwrite any field that is
   contained (recursively) in an X (unless we know that px1 != px2).

   The last two questions can be solved in the same way as the first
   two questions, but this is too conservative.  The observation is
   that in some cases analysis can tell us which (if any) fields are
   addressed and whether those addresses are used in bad ways.  This
   analysis may be language specific.  In C, arbitrary operations may
   be applied to pointers.  However, there is some indication that
   this may be too conservative for some C++ types.

   The pass ipa-type-escape does this analysis for the types whose
   instances do not escape across the compilation boundary.

   Historically in GCC, these two problems were combined and a single
   data structure was used to represent the solution to these
   problems.  We now have two similar but different data structures;
   the data structure to solve the last two questions is similar to
   the first, but does not have the fields whose addresses are never
   taken.  For types that do escape the compilation unit, the two
   data structures will have identical information.
*/
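
/* A compilable restatement of the example above (an illustrative
   sketch, not part of the original file), with the four answers
   marked on a store to the whole of x1.  */
#if 0
struct Y { int i; };
struct Z { int j; };
struct X { struct Y y1; struct Z z2; } x1, *px1, *px2;
struct Y *py;
struct Z *pz;

void
example (void)
{
  py = &x1.y1;
  px2 = &x1;
  x1 = *px1;   /* Interferes with px2->y1: yes.  With px2->z2: yes.
                  Changes the value pointed to by py: yes, since py
                  points into x1.  Changes the value pointed to by pz:
                  maybe, since pz may or may not point into x1.  */
}
#endif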

/* The alias sets assigned to MEMs assist the back-end in determining
   which MEMs can alias which other MEMs.  In general, two MEMs in
   different alias sets cannot alias each other, with one important
   exception.  Consider something like:

     struct S { int i; double d; };

   a store to an `S' can alias something of either type `int' or type
   `double'.  (However, a store to an `int' cannot alias a `double'
   and vice versa.)  We indicate this via a tree structure that looks
   like:
           struct S
            /   \
           /     \
         |/_     _\|
         int    double

   (The arrows are directed and point downwards.)
   In this situation we say the alias set for `struct S' is the
   `superset' and that those for `int' and `double' are `subsets'.

   To see whether two alias sets can point to the same memory, we must
   see if either alias set is a subset of the other.  We need not trace
   past immediate descendants, however, since we propagate all
   grandchildren up one level.

   Alias set zero is implicitly a superset of all other alias sets.
   However, there is no actual entry for alias set zero.  It is an
   error to attempt to explicitly construct a subset of zero.  */
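
/* An illustrative sketch (not part of the original file) of the
   relationships above, using the query routines defined below; the
   tree type for `struct S' is assumed to be in hand.  */
#if 0
static void
example_struct_s_queries (tree s_type)
{
  alias_set_type s_set = get_alias_set (s_type);
  alias_set_type i_set = get_alias_set (integer_type_node);
  alias_set_type d_set = get_alias_set (double_type_node);

  /* A store to an `S' can alias an `int' or a `double' ...  */
  gcc_assert (alias_sets_conflict_p (s_set, i_set));
  gcc_assert (alias_sets_conflict_p (s_set, d_set));
  /* ... but an `int' store cannot alias a `double'.  */
  gcc_assert (!alias_sets_conflict_p (i_set, d_set));
}
#endif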

struct GTY(()) alias_set_entry_d {
  /* The alias set number, as stored in MEM_ALIAS_SET.  */
  alias_set_type alias_set;

  /* Nonzero if this set would have a child of zero: this effectively
     makes this alias set the same as alias set zero.  */
  int has_zero_child;

  /* The children of the alias set.  These are not just the immediate
     children, but, in fact, all descendants.  So, if we have:

       struct T { struct S s; float f; }

     continuing our example above, the children here will be all of
     `int', `double', `float', and `struct S'.  */
  splay_tree GTY((param1_is (int), param2_is (int))) children;
};
typedef struct alias_set_entry_d *alias_set_entry;
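
/* Because descendants are flattened into `children', subset queries
   never need to recurse.  A sketch (not part of the original file),
   assuming a `struct T' type as in the comment above:  */
#if 0
static void
example_flattened_children (tree t_type)
{
  /* `int' is a grandchild of `struct T' (via `struct S'), yet it is
     found directly in T's children splay tree.  */
  gcc_assert (alias_set_subset_of (get_alias_set (integer_type_node),
                                   get_alias_set (t_type)));
}
#endif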

static int rtx_equal_for_memref_p (const_rtx, const_rtx);
static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT);
static void record_set (rtx, const_rtx, void *);
static int base_alias_check (rtx, rtx, enum machine_mode,
                             enum machine_mode);
static rtx find_base_value (rtx);
static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx);
static int insert_subset_children (splay_tree_node, void*);
static alias_set_entry get_alias_set_entry (alias_set_type);
static int aliases_everything_p (const_rtx);
static bool nonoverlapping_component_refs_p (const_tree, const_tree);
static tree decl_for_component_ref (tree);
static int write_dependence_p (const_rtx, const_rtx, int);

static void memory_modified_1 (rtx, const_rtx, void *);

/* Set up all info needed to perform alias analysis on memory references.  */

/* Returns the size in bytes of the mode of X.  */
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Returns nonzero if MEM1 and MEM2 do not alias because they are in
   different alias sets.  We ignore alias sets in functions making use
   of variable arguments because the va_arg macros on some systems are
   not legal ANSI C.  */
#define DIFFERENT_ALIAS_SETS_P(MEM1, MEM2)                      \
  mems_in_disjoint_alias_sets_p (MEM1, MEM2)

/* Cap the number of passes we make over the insns propagating alias
   information through set chains.  10 is a completely arbitrary choice.  */
#define MAX_ALIAS_LOOP_PASSES 10

/* reg_base_value[N] gives an address to which register N is related.
   If all sets after the first add or subtract to the current value
   or otherwise modify it so it does not point to a different top level
   object, reg_base_value[N] is equal to the address part of the source
   of the first set.

   A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF.  ADDRESS
   expressions represent certain special values: function arguments and
   the stack, frame, and argument pointers.

   The contents of an ADDRESS are not normally used; the mode of the
   ADDRESS determines whether the ADDRESS is a function argument or some
   other special value.  Pointer equality, not rtx_equal_p, determines whether
   two ADDRESS expressions refer to the same base address.

   The only use of the contents of an ADDRESS is for determining if the
   current function performs nonlocal memory references for the
   purposes of marking the function as a constant function.  */

static GTY(()) VEC(rtx,gc) *reg_base_value;
static rtx *new_reg_base_value;

/* We preserve a copy of the old array around to reduce the amount of
   garbage produced.  About 8% of the garbage produced was attributed
   to this array.  */
static GTY((deletable)) VEC(rtx,gc) *old_reg_base_value;

#define static_reg_base_value \
  (this_target_rtl->x_static_reg_base_value)

#define REG_BASE_VALUE(X)                               \
  (REGNO (X) < VEC_length (rtx, reg_base_value)         \
   ? VEC_index (rtx, reg_base_value, REGNO (X)) : 0)

/* Vector indexed by N giving the initial (unchanging) value known for
   pseudo-register N.  This array is initialized in init_alias_analysis,
   and does not change until end_alias_analysis is called.  */
static GTY((length("reg_known_value_size"))) rtx *reg_known_value;

/* Indicates number of valid entries in reg_known_value.  */
static GTY(()) unsigned int reg_known_value_size;

/* Vector recording for each reg_known_value whether it is due to a
   REG_EQUIV note.  Future passes (viz., reload) may replace the
   pseudo with the equivalent expression and so we account for the
   dependences that would be introduced if that happens.

   The REG_EQUIV notes created in assign_parms may mention the arg
   pointer, and there are explicit insns in the RTL that modify the
   arg pointer.  Thus we must ensure that such insns don't get
   scheduled across each other because that would invalidate the
   REG_EQUIV notes.  One could argue that the REG_EQUIV notes are
   wrong, but solving the problem in the scheduler will likely give
   better code, so we do it here.  */
static bool *reg_known_equiv_p;

/* True when scanning insns from the start of the rtl to the
   NOTE_INSN_FUNCTION_BEG note.  */
static bool copying_arguments;

DEF_VEC_P(alias_set_entry);
DEF_VEC_ALLOC_P(alias_set_entry,gc);

/* The splay-tree used to store the various alias set entries.  */
static GTY (()) VEC(alias_set_entry,gc) *alias_sets;

/* Build a decomposed reference object for querying the alias-oracle
   from the MEM rtx and store it in *REF.
   Returns false if MEM is not suitable for the alias-oracle.  */

static bool
ao_ref_from_mem (ao_ref *ref, const_rtx mem)
{
  tree expr = MEM_EXPR (mem);
  tree base;

  if (!expr)
    return false;

  ao_ref_init (ref, expr);

  /* Get the base of the reference and see if we have to reject or
     adjust it.  */
  base = ao_ref_base (ref);
  if (base == NULL_TREE)
    return false;

  /* The tree oracle doesn't like to have these.  */
  if (TREE_CODE (base) == FUNCTION_DECL
      || TREE_CODE (base) == LABEL_DECL)
    return false;

  /* If this is a pointer dereference of a non-SSA_NAME, punt.
     ???  We could replace it with a pointer to anything.  */
  if ((INDIRECT_REF_P (base)
       || TREE_CODE (base) == MEM_REF)
      && TREE_CODE (TREE_OPERAND (base, 0)) != SSA_NAME)
    return false;
  if (TREE_CODE (base) == TARGET_MEM_REF
      && TMR_BASE (base)
      && TREE_CODE (TMR_BASE (base)) != SSA_NAME)
    return false;

  /* If this is a reference based on a partitioned decl replace the
     base with an INDIRECT_REF of the pointer representative we
     created during stack slot partitioning.  */
  if (TREE_CODE (base) == VAR_DECL
      && ! TREE_STATIC (base)
      && cfun->gimple_df->decls_to_pointers != NULL)
    {
      void *namep;
      namep = pointer_map_contains (cfun->gimple_df->decls_to_pointers, base);
      if (namep)
        ref->base = build_simple_mem_ref (*(tree *)namep);
    }
  else if (TREE_CODE (base) == TARGET_MEM_REF
           && TREE_CODE (TMR_BASE (base)) == ADDR_EXPR
           && TREE_CODE (TREE_OPERAND (TMR_BASE (base), 0)) == VAR_DECL
           && ! TREE_STATIC (TREE_OPERAND (TMR_BASE (base), 0))
           && cfun->gimple_df->decls_to_pointers != NULL)
    {
      void *namep;
      namep = pointer_map_contains (cfun->gimple_df->decls_to_pointers,
                                    TREE_OPERAND (TMR_BASE (base), 0));
      if (namep)
        ref->base = build_simple_mem_ref (*(tree *)namep);
    }

  ref->ref_alias_set = MEM_ALIAS_SET (mem);

  /* If MEM_OFFSET or MEM_SIZE are unknown we have to punt.
     Keep points-to related information though.  */
  if (!MEM_OFFSET_KNOWN_P (mem)
      || !MEM_SIZE_KNOWN_P (mem))
    {
      ref->ref = NULL_TREE;
      ref->offset = 0;
      ref->size = -1;
      ref->max_size = -1;
      return true;
    }

  /* If the base decl is a parameter we can have negative MEM_OFFSET in
     case of promoted subregs on big-endian targets.  Trust the MEM_EXPR
     here.  */
  if (MEM_OFFSET (mem) < 0
      && (MEM_SIZE (mem) + MEM_OFFSET (mem)) * BITS_PER_UNIT == ref->size)
    return true;

  ref->offset += MEM_OFFSET (mem) * BITS_PER_UNIT;
  ref->size = MEM_SIZE (mem) * BITS_PER_UNIT;

  /* The MEM may extend into adjacent fields, so adjust max_size if
     necessary.  */
  if (ref->max_size != -1
      && ref->size > ref->max_size)
    ref->max_size = ref->size;

  /* If MEM_OFFSET and MEM_SIZE get us outside of the base object of
     the MEM_EXPR punt.  This happens for STRICT_ALIGNMENT targets a lot.  */
  if (MEM_EXPR (mem) != get_spill_slot_decl (false)
      && (ref->offset < 0
          || (DECL_P (ref->base)
              && (!host_integerp (DECL_SIZE (ref->base), 1)
                  || (TREE_INT_CST_LOW (DECL_SIZE ((ref->base)))
                      < (unsigned HOST_WIDE_INT)(ref->offset + ref->size))))))
    return false;

  return true;
}

/* Query the alias-oracle on whether the two memory rtx X and MEM may
   alias.  If TBAA_P is set also apply TBAA.  Returns true if the
   two rtxen may alias, false otherwise.  */

static bool
rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p)
{
  ao_ref ref1, ref2;

  if (!ao_ref_from_mem (&ref1, x)
      || !ao_ref_from_mem (&ref2, mem))
    return true;

  return refs_may_alias_p_1 (&ref1, &ref2,
                             tbaa_p
                             && MEM_ALIAS_SET (x) != 0
                             && MEM_ALIAS_SET (mem) != 0);
}

/* Returns a pointer to the alias set entry for ALIAS_SET, if there is
   such an entry, or NULL otherwise.  */

static inline alias_set_entry
get_alias_set_entry (alias_set_type alias_set)
{
  return VEC_index (alias_set_entry, alias_sets, alias_set);
}

/* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
   the two MEMs cannot alias each other.  */

static inline int
mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2)
{
/* Perform a basic sanity check.  Namely, that there are no alias sets
   if we're not using strict aliasing.  This helps to catch bugs
   whereby someone uses PUT_CODE, but doesn't clear MEM_ALIAS_SET, or
   where a MEM is allocated in some way other than by the use of
   gen_rtx_MEM, and the MEM_ALIAS_SET is not cleared.  If we begin to
   use alias sets to indicate that spilled registers cannot alias each
   other, we might need to remove this check.  */
  gcc_assert (flag_strict_aliasing
              || (!MEM_ALIAS_SET (mem1) && !MEM_ALIAS_SET (mem2)));

  return ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1), MEM_ALIAS_SET (mem2));
}

/* Insert the NODE into the splay tree given by DATA.  Used by
   record_alias_subset via splay_tree_foreach.  */

static int
insert_subset_children (splay_tree_node node, void *data)
{
  splay_tree_insert ((splay_tree) data, node->key, node->value);

  return 0;
}

/* Return true if the first alias set is a subset of the second.  */

bool
alias_set_subset_of (alias_set_type set1, alias_set_type set2)
{
  alias_set_entry ase;

  /* Everything is a subset of the "aliases everything" set.  */
  if (set2 == 0)
    return true;

  /* Otherwise, check if set1 is a subset of set2.  */
  ase = get_alias_set_entry (set2);
  if (ase != 0
      && (ase->has_zero_child
          || splay_tree_lookup (ase->children,
                                (splay_tree_key) set1)))
    return true;
  return false;
}

/* Return 1 if the two specified alias sets may conflict.  */

int
alias_sets_conflict_p (alias_set_type set1, alias_set_type set2)
{
  alias_set_entry ase;

  /* The easy case.  */
  if (alias_sets_must_conflict_p (set1, set2))
    return 1;

  /* See if the first alias set is a subset of the second.  */
  ase = get_alias_set_entry (set1);
  if (ase != 0
      && (ase->has_zero_child
          || splay_tree_lookup (ase->children,
                                (splay_tree_key) set2)))
    return 1;

  /* Now do the same, but with the alias sets reversed.  */
  ase = get_alias_set_entry (set2);
  if (ase != 0
      && (ase->has_zero_child
          || splay_tree_lookup (ase->children,
                                (splay_tree_key) set1)))
    return 1;

  /* The two alias sets are distinct and neither one is the
     child of the other.  Therefore, they cannot conflict.  */
  return 0;
}

/* Return 1 if the two specified alias sets will always conflict.  */

int
alias_sets_must_conflict_p (alias_set_type set1, alias_set_type set2)
{
  if (set1 == 0 || set2 == 0 || set1 == set2)
    return 1;

  return 0;
}

/* Return 1 if any MEM object of type T1 will always conflict (using the
   dependency routines in this file) with any MEM object of type T2.
   This is used when allocating temporary storage.  If T1 and/or T2 are
   NULL_TREE, it means we know nothing about the storage.  */

int
objects_must_conflict_p (tree t1, tree t2)
{
  alias_set_type set1, set2;

  /* If neither has a type specified, we don't know if they'll conflict
     because we may be using them to store objects of various types, for
     example the argument and local variables areas of inlined functions.  */
  if (t1 == 0 && t2 == 0)
    return 0;

  /* If they are the same type, they must conflict.  */
  if (t1 == t2
      /* Likewise if both are volatile.  */
      || (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2)))
    return 1;

  set1 = t1 ? get_alias_set (t1) : 0;
  set2 = t2 ? get_alias_set (t2) : 0;

  /* We can't use alias_sets_conflict_p because we must make sure
     that every subtype of t1 will conflict with every subtype of
     t2 for which a pair of subobjects of these respective subtypes
     overlaps on the stack.  */
  return alias_sets_must_conflict_p (set1, set2);
}
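
/* A sketch (not part of the original file) of the distinction above:
   two `int' objects must conflict, while `int' and `double' objects
   need not, so a temporary slot of one type cannot blindly be reused
   for the other.  */
#if 0
static void
example_objects_must_conflict (void)
{
  gcc_assert (objects_must_conflict_p (integer_type_node,
                                       integer_type_node));
  gcc_assert (!objects_must_conflict_p (integer_type_node,
                                        double_type_node));
}
#endif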

/* Return true if all nested component references handled by
   get_inner_reference in T are such that we should use the alias set
   provided by the object at the heart of T.

   This is true for non-addressable components (which don't have their
   own alias set), as well as components of objects in alias set zero.
   This latter point is a special case wherein we wish to override the
   alias set used by the component, but we don't have per-FIELD_DECL
   assignable alias sets.  */

bool
component_uses_parent_alias_set (const_tree t)
{
  while (1)
    {
      /* If we're at the end, it vacuously uses its own alias set.  */
      if (!handled_component_p (t))
        return false;

      switch (TREE_CODE (t))
        {
        case COMPONENT_REF:
          if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
            return true;
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
          if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
            return true;
          break;

        case REALPART_EXPR:
        case IMAGPART_EXPR:
          break;

        default:
          /* Bitfields and casts are never addressable.  */
          return true;
        }

      t = TREE_OPERAND (t, 0);
      if (get_alias_set (TREE_TYPE (t)) == 0)
        return true;
    }
}

/* Return the alias set for the memory pointed to by T, which may be
   either a type or an expression.  Return -1 if there is nothing
   special about dereferencing T.  */

static alias_set_type
get_deref_alias_set_1 (tree t)
{
  /* If we're not doing any alias analysis, just assume everything
     aliases everything else.  */
  if (!flag_strict_aliasing)
    return 0;

  /* All we care about is the type.  */
  if (! TYPE_P (t))
    t = TREE_TYPE (t);

  /* If we have an INDIRECT_REF via a void pointer, we don't
     know anything about what that might alias.  Likewise if the
     pointer is marked that way.  */
  if (TREE_CODE (TREE_TYPE (t)) == VOID_TYPE
      || TYPE_REF_CAN_ALIAS_ALL (t))
    return 0;

  return -1;
}

/* Return the alias set for the memory pointed to by T, which may be
   either a type or an expression.  */

alias_set_type
get_deref_alias_set (tree t)
{
  alias_set_type set = get_deref_alias_set_1 (t);

  /* Fall back to the alias-set of the pointed-to type.  */
  if (set == -1)
    {
      if (! TYPE_P (t))
        t = TREE_TYPE (t);
      set = get_alias_set (TREE_TYPE (t));
    }

  return set;
}
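
/* A sketch (not part of the original file): dereferencing an `int *'
   uses `int's alias set, while dereferencing a `void *' yields alias
   set zero, which conflicts with everything.  */
#if 0
static void
example_deref_alias_sets (void)
{
  tree int_ptr = build_pointer_type (integer_type_node);
  tree void_ptr = build_pointer_type (void_type_node);

  gcc_assert (get_deref_alias_set (int_ptr)
              == get_alias_set (integer_type_node));
  gcc_assert (get_deref_alias_set (void_ptr) == 0);
}
#endif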

/* Return the alias set for T, which may be either a type or an
   expression.  Call language-specific routine for help, if needed.  */

alias_set_type
get_alias_set (tree t)
{
  alias_set_type set;

  /* If we're not doing any alias analysis, just assume everything
     aliases everything else.  Also return 0 if this or its type is
     an error.  */
  if (! flag_strict_aliasing || t == error_mark_node
      || (! TYPE_P (t)
          && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
    return 0;

  /* We can be passed either an expression or a type.  This and the
     language-specific routine may make mutually-recursive calls to each other
     to figure out what to do.  At each juncture, we see if this is a tree
     that the language may need to handle specially.  First handle things that
     aren't types.  */
  if (! TYPE_P (t))
    {
      tree inner;

      /* Give the language a chance to do something with this tree
         before we look at it.  */
      STRIP_NOPS (t);
      set = lang_hooks.get_alias_set (t);
      if (set != -1)
        return set;

      /* Get the base object of the reference.  */
      inner = t;
      while (handled_component_p (inner))
        {
          /* If there is a VIEW_CONVERT_EXPR in the chain we cannot use
             the type of any component references that wrap it to
             determine the alias-set.  */
          if (TREE_CODE (inner) == VIEW_CONVERT_EXPR)
            t = TREE_OPERAND (inner, 0);
          inner = TREE_OPERAND (inner, 0);
        }

      /* Handle pointer dereferences here, they can override the
         alias-set.  */
      if (INDIRECT_REF_P (inner))
        {
          set = get_deref_alias_set_1 (TREE_OPERAND (inner, 0));
          if (set != -1)
            return set;
        }
      else if (TREE_CODE (inner) == TARGET_MEM_REF)
        return get_deref_alias_set (TMR_OFFSET (inner));
      else if (TREE_CODE (inner) == MEM_REF)
        {
          set = get_deref_alias_set_1 (TREE_OPERAND (inner, 1));
          if (set != -1)
            return set;
        }

      /* If the innermost reference is a MEM_REF that has a
         conversion embedded treat it like a VIEW_CONVERT_EXPR above,
         using the memory access type for determining the alias-set.  */
      if (TREE_CODE (inner) == MEM_REF
          && TYPE_MAIN_VARIANT (TREE_TYPE (inner))
             != TYPE_MAIN_VARIANT
                (TREE_TYPE (TREE_TYPE (TREE_OPERAND (inner, 1)))))
        return get_deref_alias_set (TREE_OPERAND (inner, 1));

      /* Otherwise, pick up the outermost object that we could have a pointer
         to, processing conversions as above.  */
      while (component_uses_parent_alias_set (t))
        {
          t = TREE_OPERAND (t, 0);
          STRIP_NOPS (t);
        }

      /* If we've already determined the alias set for a decl, just return
         it.  This is necessary for C++ anonymous unions, whose component
         variables don't look like union members (boo!).  */
      if (TREE_CODE (t) == VAR_DECL
          && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
        return MEM_ALIAS_SET (DECL_RTL (t));

      /* Now all we care about is the type.  */
      t = TREE_TYPE (t);
    }

  /* Variant qualifiers don't affect the alias set, so get the main
     variant.  */
  t = TYPE_MAIN_VARIANT (t);

  /* Always use the canonical type as well.  If this is a type that
     requires structural comparisons to identify compatible types
     use alias set zero.  */
  if (TYPE_STRUCTURAL_EQUALITY_P (t))
    {
      /* Allow the language to specify another alias set for this
         type.  */
      set = lang_hooks.get_alias_set (t);
      if (set != -1)
        return set;
      return 0;
    }

  t = TYPE_CANONICAL (t);

  /* The canonical type should not require structural equality checks.  */
  gcc_checking_assert (!TYPE_STRUCTURAL_EQUALITY_P (t));

  /* If this is a type with a known alias set, return it.  */
  if (TYPE_ALIAS_SET_KNOWN_P (t))
    return TYPE_ALIAS_SET (t);

  /* We don't want to set TYPE_ALIAS_SET for incomplete types.  */
  if (!COMPLETE_TYPE_P (t))
    {
      /* For arrays with unknown size the conservative answer is the
         alias set of the element type.  */
      if (TREE_CODE (t) == ARRAY_TYPE)
        return get_alias_set (TREE_TYPE (t));

      /* But return zero as a conservative answer for incomplete types.  */
      return 0;
    }

  /* See if the language has special handling for this type.  */
  set = lang_hooks.get_alias_set (t);
  if (set != -1)
    return set;

  /* There are no objects of FUNCTION_TYPE, so there's no point in
     using up an alias set for them.  (There are, of course, pointers
     and references to functions, but that's different.)  */
  else if (TREE_CODE (t) == FUNCTION_TYPE || TREE_CODE (t) == METHOD_TYPE)
    set = 0;

  /* Unless the language specifies otherwise, let vector types alias
     their components.  This avoids some nasty type punning issues in
     normal usage.  And indeed lets vectors be treated more like an
     array slice.  */
  else if (TREE_CODE (t) == VECTOR_TYPE)
    set = get_alias_set (TREE_TYPE (t));

  /* Unless the language specifies otherwise, treat array types the
     same as their components.  This avoids the asymmetry we get
     through recording the components.  Consider accessing a
     character(kind=1) through a reference to a character(kind=1)[1:1].
     Or consider if we want to assign integer(kind=4)[0:D.1387] and
     integer(kind=4)[4] the same alias set or not.
     Just be pragmatic here and make sure the array and its element
     type get the same alias set assigned.  */
  else if (TREE_CODE (t) == ARRAY_TYPE && !TYPE_NONALIASED_COMPONENT (t))
    set = get_alias_set (TREE_TYPE (t));

  /* From the former common C and C++ langhook implementation:

     Unfortunately, there is no canonical form of a pointer type.
     In particular, if we have `typedef int I', then `int *', and
     `I *' are different types.  So, we have to pick a canonical
     representative.  We do this below.

     Technically, this approach is actually more conservative than
     it needs to be.  In particular, `const int *' and `int *'
     should be in different alias sets, according to the C and C++
     standard, since their types are not the same, and so,
     technically, an `int **' and `const int **' cannot point at
     the same thing.

     But, the standard is wrong.  In particular, this code is
     legal C++:

     int *ip;
     int **ipp = &ip;
     const int* const* cipp = ipp;
     And, it doesn't make sense for that to be legal unless you
     can dereference IPP and CIPP.  So, we ignore cv-qualifiers on
     the pointed-to types.  This issue has been reported to the
     C++ committee.

     In addition to the above canonicalization issue, with LTO
     we should also canonicalize `T (*)[]' to `T *' avoiding
     alias issues with pointer-to element types and pointer-to
     array types.

     Likewise we need to deal with the situation of incomplete
     pointed-to types and make `*(struct X **)&a' and
     `*(struct X {} **)&a' alias.  Otherwise we will have to
     guarantee that all pointer-to incomplete type variants
     will be replaced by pointer-to complete type variants if
     they are available.

     With LTO the convenient situation of using `void *' to
     access and store any pointer type will also become
     more apparent (and `void *' is just another pointer-to
     incomplete type).  Assigning alias-set zero to `void *'
     and all pointer-to incomplete types is not an appealing
     solution.  Assigning an effective alias-set zero only
     affecting pointers might be - by recording proper subset
     relationships of all pointer alias-sets.

     Pointer-to function types are another grey area which
     needs caution.  Globbing them all into one alias-set
     or the above effective zero set would work.

     For now just assign the same alias-set to all pointers.
     That's simple and avoids all the above problems.  */
  else if (POINTER_TYPE_P (t)
           && t != ptr_type_node)
    set = get_alias_set (ptr_type_node);

  /* Otherwise make a new alias set for this type.  */
  else
    {
      /* Each canonical type gets its own alias set, so canonical types
         shouldn't form a tree.  It doesn't really matter for types
         we handle specially above, so only check it where it possibly
         would result in a bogus alias set.  */
      gcc_checking_assert (TYPE_CANONICAL (t) == t);

      set = new_alias_set ();
    }

  TYPE_ALIAS_SET (t) = set;

  /* If this is an aggregate type or a complex type, we must record any
     component aliasing information.  */
  if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
    record_component_aliases (t);

  return set;
}
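
/* A consequence of the pointer handling above, as a sketch (not part
   of the original file): every pointer type shares the alias set of
   ptr_type_node, so stores through `int **' and `double **' may
   alias.  */
#if 0
static void
example_pointer_alias_sets (void)
{
  tree int_ptr = build_pointer_type (integer_type_node);
  tree dbl_ptr = build_pointer_type (double_type_node);

  gcc_assert (get_alias_set (int_ptr) == get_alias_set (ptr_type_node));
  gcc_assert (alias_sets_conflict_p (get_alias_set (int_ptr),
                                     get_alias_set (dbl_ptr)));
}
#endif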

/* Return a brand-new alias set.  */

alias_set_type
new_alias_set (void)
{
  if (flag_strict_aliasing)
    {
      if (alias_sets == 0)
        VEC_safe_push (alias_set_entry, gc, alias_sets, 0);
      VEC_safe_push (alias_set_entry, gc, alias_sets, 0);
      return VEC_length (alias_set_entry, alias_sets) - 1;
    }
  else
    return 0;
}

/* Indicate that things in SUBSET can alias things in SUPERSET, but that
   not everything that aliases SUPERSET also aliases SUBSET.  For example,
   in C, a store to an `int' can alias a load of a structure containing an
   `int', and vice versa.  But it can't alias a load of a 'double' member
   of the same structure.  Here, the structure would be the SUPERSET and
   `int' the SUBSET.  This relationship is also described in the comment at
   the beginning of this file.

   This function should be called only once per SUPERSET/SUBSET pair.

   It is illegal for SUPERSET to be zero; everything is implicitly a
   subset of alias set zero.  */

void
record_alias_subset (alias_set_type superset, alias_set_type subset)
{
  alias_set_entry superset_entry;
  alias_set_entry subset_entry;

  /* It is possible in complex type situations for both sets to be the same,
     in which case we can ignore this operation.  */
  if (superset == subset)
    return;

  gcc_assert (superset);

  superset_entry = get_alias_set_entry (superset);
  if (superset_entry == 0)
    {
      /* Create an entry for the SUPERSET, so that we have a place to
         attach the SUBSET.  */
      superset_entry = ggc_alloc_cleared_alias_set_entry_d ();
      superset_entry->alias_set = superset;
      superset_entry->children
        = splay_tree_new_ggc (splay_tree_compare_ints,
                              ggc_alloc_splay_tree_scalar_scalar_splay_tree_s,
                              ggc_alloc_splay_tree_scalar_scalar_splay_tree_node_s);
      superset_entry->has_zero_child = 0;
      VEC_replace (alias_set_entry, alias_sets, superset, superset_entry);
    }

  if (subset == 0)
    superset_entry->has_zero_child = 1;
  else
    {
      subset_entry = get_alias_set_entry (subset);
      /* If there is an entry for the subset, enter all of its children
         (if they are not already present) as children of the SUPERSET.  */
      if (subset_entry)
        {
          if (subset_entry->has_zero_child)
            superset_entry->has_zero_child = 1;

          splay_tree_foreach (subset_entry->children, insert_subset_children,
                              superset_entry->children);
        }

      /* Enter the SUBSET itself as a child of the SUPERSET.  */
      splay_tree_insert (superset_entry->children,
                         (splay_tree_key) subset, 0);
    }
}
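
/* A usage sketch (not part of the original file; `struct_type' is a
   hypothetical RECORD_TYPE): record_component_aliases below makes
   exactly this kind of call for each addressable field.  */
#if 0
  record_alias_subset (get_alias_set (struct_type),
                       get_alias_set (integer_type_node));
#endif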

/* Record that component types of TYPE, if any, are part of that type for
   aliasing purposes.  For record types, we only record component types
   for fields that are not marked non-addressable.  For array types, we
   only record the component type if it is not marked non-aliased.  */

void
record_component_aliases (tree type)
{
  alias_set_type superset = get_alias_set (type);
  tree field;

  if (superset == 0)
    return;

  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      /* Recursively record aliases for the base classes, if there are any.  */
      if (TYPE_BINFO (type))
        {
          int i;
          tree binfo, base_binfo;

          for (binfo = TYPE_BINFO (type), i = 0;
               BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
            record_alias_subset (superset,
                                 get_alias_set (BINFO_TYPE (base_binfo)));
        }
      for (field = TYPE_FIELDS (type); field != 0; field = DECL_CHAIN (field))
        if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
          record_alias_subset (superset, get_alias_set (TREE_TYPE (field)));
      break;

    case COMPLEX_TYPE:
      record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
      break;

    /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
       element type.  */

    default:
      break;
    }
}

/* Allocate an alias set for use in storing and reading from the varargs
   spill area.  */

static GTY(()) alias_set_type varargs_set = -1;

alias_set_type
get_varargs_alias_set (void)
{
#if 1
  /* We now lower VA_ARG_EXPR, and there's currently no way to attach the
     varargs alias set to an INDIRECT_REF (FIXME!), so we can't
     consistently use the varargs alias set for loads from the varargs
     area.  So don't use it anywhere.  */
  return 0;
#else
  if (varargs_set == -1)
    varargs_set = new_alias_set ();

  return varargs_set;
#endif
}

/* Likewise, but used for the fixed portions of the frame, e.g., register
   save areas.  */

static GTY(()) alias_set_type frame_set = -1;

alias_set_type
get_frame_alias_set (void)
{
  if (frame_set == -1)
    frame_set = new_alias_set ();

  return frame_set;
}

/* Inside SRC, the source of a SET, find a base address.  */

static rtx
find_base_value (rtx src)
{
  unsigned int regno;

#if defined (FIND_BASE_TERM)
  /* Try machine-dependent ways to find the base term.  */
  src = FIND_BASE_TERM (src);
#endif

  switch (GET_CODE (src))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return src;

    case REG:
      regno = REGNO (src);
      /* At the start of a function, argument registers have known base
         values which may be lost later.  Returning an ADDRESS
         expression here allows optimization based on argument values
         even when the argument registers are used for other purposes.  */
      if (regno < FIRST_PSEUDO_REGISTER && copying_arguments)
        return new_reg_base_value[regno];

      /* If a pseudo has a known base value, return it.  Do not do this
         for non-fixed hard regs since it can result in a circular
         dependency chain for registers which have values at function entry.

         The test above is not sufficient because the scheduler may move
         a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN.  */
      if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
          && regno < VEC_length (rtx, reg_base_value))
        {
          /* If we're inside init_alias_analysis, use new_reg_base_value
             to reduce the number of relaxation iterations.  */
          if (new_reg_base_value && new_reg_base_value[regno]
              && DF_REG_DEF_COUNT (regno) == 1)
            return new_reg_base_value[regno];

          if (VEC_index (rtx, reg_base_value, regno))
            return VEC_index (rtx, reg_base_value, regno);
        }

      return 0;

    case MEM:
      /* Check for an argument passed in memory.  Only record in the
         copying-arguments block; it is too hard to track changes
         otherwise.  */
      if (copying_arguments
          && (XEXP (src, 0) == arg_pointer_rtx
              || (GET_CODE (XEXP (src, 0)) == PLUS
                  && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
        return gen_rtx_ADDRESS (VOIDmode, src);
      return 0;

    case CONST:
      src = XEXP (src, 0);
      if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
        break;

      /* ... fall through ...  */

    case PLUS:
    case MINUS:
      {
        rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);

        /* If either operand is a REG that is a known pointer, then it
           is the base.  */
        if (REG_P (src_0) && REG_POINTER (src_0))
          return find_base_value (src_0);
        if (REG_P (src_1) && REG_POINTER (src_1))
          return find_base_value (src_1);

        /* If either operand is a REG, then see if we already have
           a known value for it.  */
        if (REG_P (src_0))
          {
            temp = find_base_value (src_0);
            if (temp != 0)
              src_0 = temp;
          }

        if (REG_P (src_1))
          {
            temp = find_base_value (src_1);
            if (temp != 0)
              src_1 = temp;
          }

        /* If either base is a named object or a special address
           (like an argument or stack reference), then use it for the
           base term.  */
        if (src_0 != 0
            && (GET_CODE (src_0) == SYMBOL_REF
                || GET_CODE (src_0) == LABEL_REF
                || (GET_CODE (src_0) == ADDRESS
                    && GET_MODE (src_0) != VOIDmode)))
          return src_0;

        if (src_1 != 0
            && (GET_CODE (src_1) == SYMBOL_REF
                || GET_CODE (src_1) == LABEL_REF
                || (GET_CODE (src_1) == ADDRESS
                    && GET_MODE (src_1) != VOIDmode)))
          return src_1;

        /* Guess which operand is the base address:
           If either operand is a symbol, then it is the base.  If
           either operand is a CONST_INT, then the other is the base.  */
        if (CONST_INT_P (src_1) || CONSTANT_P (src_0))
          return find_base_value (src_0);
        else if (CONST_INT_P (src_0) || CONSTANT_P (src_1))
          return find_base_value (src_1);

        return 0;
      }

    case LO_SUM:
      /* The standard form is (lo_sum reg sym) so look only at the
         second operand.  */
      return find_base_value (XEXP (src, 1));

    case AND:
      /* If the second operand is constant, set the base
         address to the first operand.  */
      if (CONST_INT_P (XEXP (src, 1)) && INTVAL (XEXP (src, 1)) != 0)
        return find_base_value (XEXP (src, 0));
      return 0;

    case TRUNCATE:
      /* As we do not know which address space the pointer is referring to, we can
         handle this only if the target does not support different pointer or
         address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
        break;
      if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode))
        break;
      /* Fall through.  */
    case HIGH:
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return find_base_value (XEXP (src, 0));

    case ZERO_EXTEND:
    case SIGN_EXTEND:   /* used for NT/Alpha pointers */
      /* As we do not know which address space the pointer is referring to, we can
         handle this only if the target does not support different pointer or
         address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
        break;

      {
        rtx temp = find_base_value (XEXP (src, 0));

        if (temp != 0 && CONSTANT_P (temp))
          temp = convert_memory_address (Pmode, temp);

        return temp;
      }

    default:
      break;
    }

  return 0;
}
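
/* A behavior sketch (not part of the original file; `ptr_reg' is a
   hypothetical pseudo register): for (plus (reg) (const_int)) the
   constant operand is ignored and the base comes from the register.  */
#if 0
  rtx addr = gen_rtx_PLUS (Pmode, ptr_reg, GEN_INT (8));
  gcc_assert (find_base_value (addr) == find_base_value (ptr_reg));
#endif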

/* Called from init_alias_analysis indirectly through note_stores.  */

/* While scanning insns to find base values, reg_seen[N] is nonzero if
   register N has been set in this function.  */
static char *reg_seen;

/* Addresses which are known not to alias anything else are identified
   by a unique integer.  */
static int unique_id;

static void
record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
{
  unsigned regno;
  rtx src;
  int n;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);

  gcc_checking_assert (regno < VEC_length (rtx, reg_base_value));

  /* If this spans multiple hard registers, then we must indicate that every
     register has an unusable value.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    n = hard_regno_nregs[regno][GET_MODE (dest)];
  else
    n = 1;
  if (n != 1)
    {
      while (--n >= 0)
        {
          reg_seen[regno + n] = 1;
          new_reg_base_value[regno + n] = 0;
        }
      return;
    }

  if (set)
    {
      /* A CLOBBER wipes out any old value but does not prevent a previously
         unset register from acquiring a base address (i.e. reg_seen is not
         set).  */
      if (GET_CODE (set) == CLOBBER)
        {
          new_reg_base_value[regno] = 0;
          return;
        }
      src = SET_SRC (set);
    }
  else
    {
      if (reg_seen[regno])
        {
          new_reg_base_value[regno] = 0;
          return;
        }
      reg_seen[regno] = 1;
      new_reg_base_value[regno] = gen_rtx_ADDRESS (Pmode,
                                                   GEN_INT (unique_id++));
      return;
    }

  /* If this is not the first set of REGNO, see whether the new value
     is related to the old one.  There are two cases of interest:

        (1) The register might be assigned an entirely new value
            that has the same base term as the original set.

        (2) The set might be a simple self-modification that
            cannot change REGNO's base value.

     If neither case holds, reject the original base value as invalid.
     Note that the following situation is not detected:

         extern int x, y;  int *p = &x; p += (&y-&x);

     ANSI C does not allow computing the difference of addresses
     of distinct top level objects.  */
  if (new_reg_base_value[regno] != 0
      && find_base_value (src) != new_reg_base_value[regno])
    switch (GET_CODE (src))
      {
      case LO_SUM:
      case MINUS:
        if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
          new_reg_base_value[regno] = 0;
        break;
      case PLUS:
        /* If the value we add in the PLUS is also a valid base value,
           this might be the actual base value, and the original value
           an index.  */
        {
          rtx other = NULL_RTX;

          if (XEXP (src, 0) == dest)
            other = XEXP (src, 1);
          else if (XEXP (src, 1) == dest)
            other = XEXP (src, 0);

          if (! other || find_base_value (other))
            new_reg_base_value[regno] = 0;
          break;
        }
      case AND:
        if (XEXP (src, 0) != dest || !CONST_INT_P (XEXP (src, 1)))
          new_reg_base_value[regno] = 0;
        break;
      default:
        new_reg_base_value[regno] = 0;
        break;
      }
  /* If this is the first set of a register, record the value.  */
  else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
           && ! reg_seen[regno] && new_reg_base_value[regno] == 0)
    new_reg_base_value[regno] = find_base_value (src);

  reg_seen[regno] = 1;
}

/* Return REG_BASE_VALUE for REGNO.  Selective scheduler uses this to avoid
   using hard registers with non-null REG_BASE_VALUE for renaming.  */
rtx
get_reg_base_value (unsigned int regno)
{
  return VEC_index (rtx, reg_base_value, regno);
}

/* If a value is known for REGNO, return it.  */

rtx
get_reg_known_value (unsigned int regno)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
        return reg_known_value[regno];
    }
  return NULL;
}

/* Set it.  */

static void
set_reg_known_value (unsigned int regno, rtx val)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
        reg_known_value[regno] = val;
    }
}

/* Similarly for reg_known_equiv_p.  */

bool
get_reg_known_equiv_p (unsigned int regno)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
        return reg_known_equiv_p[regno];
    }
  return false;
}

static void
set_reg_known_equiv_p (unsigned int regno, bool val)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
        reg_known_equiv_p[regno] = val;
    }
}


/* Returns a canonical version of X, from the point of view of alias
   analysis.  (For example, if X is a MEM whose address is a register,
   and the register has a known value (say a SYMBOL_REF), then a MEM
   whose address is the SYMBOL_REF is returned.)  */

rtx
canon_rtx (rtx x)
{
  /* Recursively look for equivalences.  */
  if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
    {
      rtx t = get_reg_known_value (REGNO (x));
      if (t == x)
        return x;
      if (t)
        return canon_rtx (t);
    }

  if (GET_CODE (x) == PLUS)
    {
      rtx x0 = canon_rtx (XEXP (x, 0));
      rtx x1 = canon_rtx (XEXP (x, 1));

      if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
        {
          if (CONST_INT_P (x0))
            return plus_constant (x1, INTVAL (x0));
          else if (CONST_INT_P (x1))
            return plus_constant (x0, INTVAL (x1));
          return gen_rtx_PLUS (GET_MODE (x), x0, x1);
        }
    }

  /* This gives us much better alias analysis when called from
     the loop optimizer.  Note we want to leave the original
     MEM alone, but need to return the canonicalized MEM with
     all the flags with their original values.  */
  else if (MEM_P (x))
    x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));

  return x;
}
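
/* A sketch of the canonicalization (not part of the original file;
   `reg' is a hypothetical pseudo whose known value is a SYMBOL_REF):
   the MEM is rebuilt over the known address, flags intact.  */
#if 0
  rtx mem = gen_rtx_MEM (SImode, reg);
  rtx cmem = canon_rtx (mem);   /* (mem (symbol_ref ...)), since
                                   get_reg_known_value (REGNO (reg))
                                   yields the SYMBOL_REF.  */
#endif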
1397
 
1398
/* Return 1 if X and Y are identical-looking rtx's.
1399
   Expect that X and Y has been already canonicalized.
1400
 
1401
   We use the data in reg_known_value above to see if two registers with
1402
   different numbers are, in fact, equivalent.  */
1403
 
1404
static int
1405
rtx_equal_for_memref_p (const_rtx x, const_rtx y)
1406
{
1407
  int i;
1408
  int j;
1409
  enum rtx_code code;
1410
  const char *fmt;
1411
 
1412
  if (x == 0 && y == 0)
1413
    return 1;
1414
  if (x == 0 || y == 0)
1415
    return 0;
1416
 
1417
  if (x == y)
1418
    return 1;
1419
 
1420
  code = GET_CODE (x);
1421
  /* Rtx's of different codes cannot be equal.  */
1422
  if (code != GET_CODE (y))
1423
    return 0;
1424
 
1425
  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Some RTL can be compared without a recursive examination.  */
  switch (code)
    {
    case REG:
      return REGNO (x) == REGNO (y);

    case LABEL_REF:
      return XEXP (x, 0) == XEXP (y, 0);

    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);

    case VALUE:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
      /* There's no need to compare the contents of CONST_DOUBLEs or
         CONST_INTs because pointer equality is a good enough
         comparison for these nodes.  */
      return 0;

    default:
      break;
    }

  /* canon_rtx knows how to handle plus.  No need to canonicalize.  */
  if (code == PLUS)
    return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
             && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
            || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
                && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
  /* For commutative operations, the RTXs match if the operands match in
     either order.  Also handle the simple binary and unary cases without
     a loop.  */
  if (COMMUTATIVE_P (x))
    {
      rtx xop0 = canon_rtx (XEXP (x, 0));
      rtx yop0 = canon_rtx (XEXP (y, 0));
      rtx yop1 = canon_rtx (XEXP (y, 1));

      return ((rtx_equal_for_memref_p (xop0, yop0)
               && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1))
              || (rtx_equal_for_memref_p (xop0, yop1)
                  && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0)));
    }
  else if (NON_COMMUTATIVE_P (x))
    {
      return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
                                      canon_rtx (XEXP (y, 0)))
              && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)),
                                         canon_rtx (XEXP (y, 1))));
    }
  else if (UNARY_P (x))
    return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
                                   canon_rtx (XEXP (y, 0)));

  /* Compare the elements.  If any pair of corresponding elements
     fails to match, return 0 for the whole thing.

     Limit cases to types which actually appear in addresses.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'i':
          if (XINT (x, i) != XINT (y, i))
            return 0;
          break;

        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return 0;

          /* And the corresponding elements must match.  */
          for (j = 0; j < XVECLEN (x, i); j++)
            if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)),
                                        canon_rtx (XVECEXP (y, i, j))) == 0)
              return 0;
          break;

        case 'e':
          if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)),
                                      canon_rtx (XEXP (y, i))) == 0)
            return 0;
          break;

          /* This can happen for asm operands.  */
        case 's':
          if (strcmp (XSTR (x, i), XSTR (y, i)))
            return 0;
          break;

        /* This can happen for an asm which clobbers memory.  */
        case '0':
          break;

          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's,
             except for within LABEL_REFs and SYMBOL_REFs.  */
        default:
          gcc_unreachable ();
        }
    }
  return 1;
}
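
/* For example, under the PLUS and commutativity handling above,
   (plus:SI (reg:SI 100) (const_int 4)) compares equal to
   (plus:SI (const_int 4) (reg:SI 100)), while a NON_COMMUTATIVE_P code
   such as MINUS is only compared operand by operand.  (Illustrative
   example; the register number is arbitrary.)  */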

rtx
find_base_term (rtx x)
{
  cselib_val *val;
  struct elt_loc_list *l, *f;
  rtx ret;

#if defined (FIND_BASE_TERM)
  /* Try machine-dependent ways to find the base term.  */
  x = FIND_BASE_TERM (x);
#endif

  switch (GET_CODE (x))
    {
    case REG:
      return REG_BASE_VALUE (x);

    case TRUNCATE:
      /* As we do not know which address space the pointer is referring
         to, we can handle this only if the target does not support
         different pointer or address modes depending on the address
         space.  */
      if (!target_default_pointer_address_modes_p ())
        return 0;
      if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode))
        return 0;
      /* Fall through.  */
    case HIGH:
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return find_base_term (XEXP (x, 0));

    case ZERO_EXTEND:
    case SIGN_EXTEND:   /* Used for Alpha/NT pointers */
      /* As we do not know which address space the pointer is referring
         to, we can handle this only if the target does not support
         different pointer or address modes depending on the address
         space.  */
      if (!target_default_pointer_address_modes_p ())
        return 0;

      {
        rtx temp = find_base_term (XEXP (x, 0));

        if (temp != 0 && CONSTANT_P (temp))
          temp = convert_memory_address (Pmode, temp);

        return temp;
      }

    case VALUE:
      val = CSELIB_VAL_PTR (x);
      ret = NULL_RTX;

      if (!val)
        return ret;

      f = val->locs;
      /* Temporarily reset val->locs to avoid infinite recursion.  */
      val->locs = NULL;

      for (l = f; l; l = l->next)
        if (GET_CODE (l->loc) == VALUE
            && CSELIB_VAL_PTR (l->loc)->locs
            && !CSELIB_VAL_PTR (l->loc)->locs->next
            && CSELIB_VAL_PTR (l->loc)->locs->loc == x)
          continue;
        else if ((ret = find_base_term (l->loc)) != 0)
          break;

      val->locs = f;
      return ret;

    case LO_SUM:
      /* The standard form is (lo_sum reg sym) so look only at the
         second operand.  */
      return find_base_term (XEXP (x, 1));

    case CONST:
      x = XEXP (x, 0);
      if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
        return 0;
      /* Fall through.  */
    case PLUS:
    case MINUS:
      {
        rtx tmp1 = XEXP (x, 0);
        rtx tmp2 = XEXP (x, 1);

        /* This is a little bit tricky since we have to determine which of
           the two operands represents the real base address.  Otherwise this
           routine may return the index register instead of the base register.

           That may cause us to believe no aliasing was possible, when in
           fact aliasing is possible.

           We use a few simple tests to guess the base register.  Additional
           tests can certainly be added.  For example, if one of the operands
           is a shift or multiply, then it must be the index register and the
           other operand is the base register.  */

        if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
          return find_base_term (tmp2);

        /* If either operand is known to be a pointer, then use it
           to determine the base term.  */
        if (REG_P (tmp1) && REG_POINTER (tmp1))
          {
            rtx base = find_base_term (tmp1);
            if (base)
              return base;
          }

        if (REG_P (tmp2) && REG_POINTER (tmp2))
          {
            rtx base = find_base_term (tmp2);
            if (base)
              return base;
          }

        /* Neither operand was known to be a pointer.  Go ahead and find the
           base term for both operands.  */
        tmp1 = find_base_term (tmp1);
        tmp2 = find_base_term (tmp2);

        /* If either base term is a named object or a special address
           (like an argument or stack reference), then use it for the
           base term.  */
        if (tmp1 != 0
            && (GET_CODE (tmp1) == SYMBOL_REF
                || GET_CODE (tmp1) == LABEL_REF
                || (GET_CODE (tmp1) == ADDRESS
                    && GET_MODE (tmp1) != VOIDmode)))
          return tmp1;

        if (tmp2 != 0
            && (GET_CODE (tmp2) == SYMBOL_REF
                || GET_CODE (tmp2) == LABEL_REF
                || (GET_CODE (tmp2) == ADDRESS
                    && GET_MODE (tmp2) != VOIDmode)))
          return tmp2;

        /* We could not determine which of the two operands was the
           base register and which was the index.  So we can determine
           nothing from the base alias check.  */
        return 0;
      }

    case AND:
      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) != 0)
        return find_base_term (XEXP (x, 0));
      return 0;

    case SYMBOL_REF:
    case LABEL_REF:
      return x;

    default:
      return 0;
    }
}
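
/* Illustrative example: for the address (const (plus (symbol_ref "a")
   (const_int 12))), the CONST and PLUS cases above walk down to the
   SYMBOL_REF, which is returned as the base term; the CONST_INT
   operand yields no base term of its own.  */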

/* Return 0 if the addresses X and Y are known to point to different
   objects, 1 if they might be pointers to the same object.  */

static int
base_alias_check (rtx x, rtx y, enum machine_mode x_mode,
                  enum machine_mode y_mode)
{
  rtx x_base = find_base_term (x);
  rtx y_base = find_base_term (y);

  /* If the address itself has no known base see if a known equivalent
     value has one.  If either address still has no known base, nothing
     is known about aliasing.  */
  if (x_base == 0)
    {
      rtx x_c;

      if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
        return 1;

      x_base = find_base_term (x_c);
      if (x_base == 0)
        return 1;
    }

  if (y_base == 0)
    {
      rtx y_c;
      if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
        return 1;

      y_base = find_base_term (y_c);
      if (y_base == 0)
        return 1;
    }

  /* If the base addresses are equal nothing is known about aliasing.  */
  if (rtx_equal_p (x_base, y_base))
    return 1;

  /* The base addresses are different expressions.  If they are not accessed
     via AND, there is no conflict.  We can bring knowledge of object
     alignment into play here.  For example, on alpha, "char a, b;" can
     alias one another, though "char a; long b;" cannot.  AND addresses may
     implicitly alias surrounding objects; i.e. unaligned access in DImode
     via AND address can alias all surrounding object types except those
     with alignment 8 or higher.  */
  if (GET_CODE (x) == AND && GET_CODE (y) == AND)
    return 1;
  if (GET_CODE (x) == AND
      && (!CONST_INT_P (XEXP (x, 1))
          || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
    return 1;
  if (GET_CODE (y) == AND
      && (!CONST_INT_P (XEXP (y, 1))
          || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
    return 1;

  /* Differing symbols not accessed via AND never alias.  */
  if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
    return 0;

  /* If one address is a stack reference there can be no alias:
     stack references using different base registers do not alias,
     a stack reference cannot alias a parameter, and a stack reference
     cannot alias a global.  */
  if ((GET_CODE (x_base) == ADDRESS && GET_MODE (x_base) == Pmode)
      || (GET_CODE (y_base) == ADDRESS && GET_MODE (y_base) == Pmode))
    return 0;

  return 1;
}
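
/* Illustrative example: if X's base term is (symbol_ref "a") and Y's
   is (symbol_ref "b"), neither base is an ADDRESS and neither address
   uses AND, so the function returns 0: the references cannot alias.
   If both addresses resolve to the same base, it returns 1 and the
   offset-based checks in memrefs_conflict_p must decide.  */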

/* Callback for for_each_rtx that returns 1 upon encountering a VALUE
   whose UID is greater than the int uid that D points to.  */

static int
refs_newer_value_cb (rtx *x, void *d)
{
  if (GET_CODE (*x) == VALUE && CSELIB_VAL_PTR (*x)->uid > *(int *)d)
    return 1;

  return 0;
}

/* Return TRUE if EXPR refers to a VALUE whose uid is greater than
   that of V.  */

static bool
refs_newer_value_p (rtx expr, rtx v)
{
  int minuid = CSELIB_VAL_PTR (v)->uid;

  return for_each_rtx (&expr, refs_newer_value_cb, &minuid);
}

/* Convert the address X into something we can use.  This is done by returning
   it unchanged unless it is a value; in the latter case we call cselib to get
   a more useful rtx.  */

rtx
get_addr (rtx x)
{
  cselib_val *v;
  struct elt_loc_list *l;

  if (GET_CODE (x) != VALUE)
    return x;
  v = CSELIB_VAL_PTR (x);
  if (v)
    {
      v = canonical_cselib_val (v);
      for (l = v->locs; l; l = l->next)
        if (CONSTANT_P (l->loc))
          return l->loc;
      for (l = v->locs; l; l = l->next)
        if (!REG_P (l->loc) && !MEM_P (l->loc) && GET_CODE (l->loc) != VALUE
            && !refs_newer_value_p (l->loc, x))
          return l->loc;
      for (l = v->locs; l; l = l->next)
        if (REG_P (l->loc) || (GET_CODE (l->loc) != VALUE
                               && !refs_newer_value_p (l->loc, x)))
          return l->loc;
      /* Return the canonical value.  */
      return v->val_rtx;
    }
  return x;
}
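
/* Illustrative example: if X is a VALUE whose cselib location list
   contains (const_int 64), the first loop above returns that constant,
   giving the overlap tests below a concrete address to work with.  */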

/* Return the address of the (N_REFS + 1)th memory reference to ADDR
   where SIZE is the size in bytes of the memory reference.  If ADDR
   is not modified by the memory reference then ADDR is returned.  */

static rtx
addr_side_effect_eval (rtx addr, int size, int n_refs)
{
  int offset = 0;

  switch (GET_CODE (addr))
    {
    case PRE_INC:
      offset = (n_refs + 1) * size;
      break;
    case PRE_DEC:
      offset = -(n_refs + 1) * size;
      break;
    case POST_INC:
      offset = n_refs * size;
      break;
    case POST_DEC:
      offset = -n_refs * size;
      break;

    default:
      return addr;
    }

  if (offset)
    addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0),
                         GEN_INT (offset));
  else
    addr = XEXP (addr, 0);
  addr = canon_rtx (addr);

  return addr;
}
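
/* Worked example: for ADDR = (post_inc:SI (reg 100)) with SIZE = 4 and
   N_REFS = 1, the POST_INC case yields offset = 1 * 4, so the result
   is the canonicalized (plus (reg 100) (const_int 4)); with PRE_DEC
   instead, offset = -(1 + 1) * 4 = -8.  (Register number arbitrary.)  */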

/* Return one if X and Y (memory addresses) reference the
   same location in memory or if the references overlap.
   Return zero if they do not overlap, else return
   minus one, in which case they still might reference the same location.

   C is an offset accumulator.  When
   C is nonzero, we are testing aliases between X and Y + C.
   XSIZE is the size in bytes of the X reference,
   similarly YSIZE is the size in bytes for Y.
   Expect that canon_rtx has already been called for X and Y.

   If XSIZE or YSIZE is zero, we do not know the amount of memory being
   referenced (the reference was BLKmode), so make the most pessimistic
   assumptions.

   If XSIZE or YSIZE is negative, we may access memory outside the object
   being referenced as a side effect.  This can happen when using AND to
   align memory references, as is done on the Alpha.

   It would be nice to notice that varying addresses cannot conflict with fp
   if no local variables had their addresses taken, but that's too hard now.

   ???  Contrary to the tree alias oracle this does not return
   one for X + non-constant and Y + non-constant when X and Y are equal.
   If that is fixed the TBAA hack for union type-punning can be removed.  */

static int
memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
{
  if (GET_CODE (x) == VALUE)
    {
      if (REG_P (y))
        {
          struct elt_loc_list *l = NULL;
          if (CSELIB_VAL_PTR (x))
            for (l = canonical_cselib_val (CSELIB_VAL_PTR (x))->locs;
                 l; l = l->next)
              if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, y))
                break;
          if (l)
            x = y;
          else
            x = get_addr (x);
        }
      /* Don't call get_addr if y is the same VALUE.  */
      else if (x != y)
        x = get_addr (x);
    }
  if (GET_CODE (y) == VALUE)
    {
      if (REG_P (x))
        {
          struct elt_loc_list *l = NULL;
          if (CSELIB_VAL_PTR (y))
            for (l = canonical_cselib_val (CSELIB_VAL_PTR (y))->locs;
                 l; l = l->next)
              if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, x))
                break;
          if (l)
            y = x;
          else
            y = get_addr (y);
        }
      /* Don't call get_addr if x is the same VALUE.  */
      else if (y != x)
        y = get_addr (y);
    }
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);
  else if (GET_CODE (x) == LO_SUM)
    x = XEXP (x, 1);
  else
    x = addr_side_effect_eval (x, xsize, 0);
  if (GET_CODE (y) == HIGH)
    y = XEXP (y, 0);
  else if (GET_CODE (y) == LO_SUM)
    y = XEXP (y, 1);
  else
    y = addr_side_effect_eval (y, ysize, 0);

  if (rtx_equal_for_memref_p (x, y))
    {
      if (xsize <= 0 || ysize <= 0)
        return 1;
      if (c >= 0 && xsize > c)
        return 1;
      if (c < 0 && ysize+c > 0)
        return 1;
      return 0;
    }

  /* This code used to check for conflicts involving stack references and
     globals but the base address alias code now handles these cases.  */

  if (GET_CODE (x) == PLUS)
    {
      /* The fact that X is canonicalized means that this
         PLUS rtx is canonicalized.  */
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (GET_CODE (y) == PLUS)
        {
          /* The fact that Y is canonicalized means that this
             PLUS rtx is canonicalized.  */
          rtx y0 = XEXP (y, 0);
          rtx y1 = XEXP (y, 1);

          if (rtx_equal_for_memref_p (x1, y1))
            return memrefs_conflict_p (xsize, x0, ysize, y0, c);
          if (rtx_equal_for_memref_p (x0, y0))
            return memrefs_conflict_p (xsize, x1, ysize, y1, c);
          if (CONST_INT_P (x1))
            {
              if (CONST_INT_P (y1))
                return memrefs_conflict_p (xsize, x0, ysize, y0,
                                           c - INTVAL (x1) + INTVAL (y1));
              else
                return memrefs_conflict_p (xsize, x0, ysize, y,
                                           c - INTVAL (x1));
            }
          else if (CONST_INT_P (y1))
            return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));

          return -1;
        }
      else if (CONST_INT_P (x1))
        return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
    }
  else if (GET_CODE (y) == PLUS)
    {
      /* The fact that Y is canonicalized means that this
         PLUS rtx is canonicalized.  */
      rtx y0 = XEXP (y, 0);
      rtx y1 = XEXP (y, 1);

      if (CONST_INT_P (y1))
        return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
      else
        return -1;
    }

  if (GET_CODE (x) == GET_CODE (y))
    switch (GET_CODE (x))
      {
      case MULT:
        {
          /* Handle cases where we expect the second operands to be the
             same, and check only whether the first operand would conflict
             or not.  */
          rtx x0, y0;
          rtx x1 = canon_rtx (XEXP (x, 1));
          rtx y1 = canon_rtx (XEXP (y, 1));
          if (! rtx_equal_for_memref_p (x1, y1))
            return -1;
          x0 = canon_rtx (XEXP (x, 0));
          y0 = canon_rtx (XEXP (y, 0));
          if (rtx_equal_for_memref_p (x0, y0))
            return (xsize == 0 || ysize == 0
                    || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));

          /* Can't properly adjust our sizes.  */
          if (!CONST_INT_P (x1))
            return -1;
          xsize /= INTVAL (x1);
          ysize /= INTVAL (x1);
          c /= INTVAL (x1);
          return memrefs_conflict_p (xsize, x0, ysize, y0, c);
        }

      default:
        break;
      }

  /* Treat an access through an AND (e.g. a subword access on an Alpha)
     as an access with indeterminate size.  Assume that references
     besides AND are aligned, so if the size of the other reference is
     at least as large as the alignment, assume no other overlap.  */
  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)))
    {
      if (GET_CODE (y) == AND || ysize < -INTVAL (XEXP (x, 1)))
        xsize = -1;
      return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), ysize, y, c);
    }
  if (GET_CODE (y) == AND && CONST_INT_P (XEXP (y, 1)))
    {
      /* ??? If we are indexing far enough into the array/structure, we
         may yet be able to determine that the references cannot overlap.
         But we would also need to know that we are far enough from the
         end not to overlap a following reference, so we do nothing with
         that for now.  */
      if (GET_CODE (x) == AND || xsize < -INTVAL (XEXP (y, 1)))
        ysize = -1;
      return memrefs_conflict_p (xsize, x, ysize, canon_rtx (XEXP (y, 0)), c);
    }

  if (CONSTANT_P (x))
    {
      if (CONST_INT_P (x) && CONST_INT_P (y))
        {
          c += (INTVAL (y) - INTVAL (x));
          return (xsize <= 0 || ysize <= 0
                  || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
        }

      if (GET_CODE (x) == CONST)
        {
          if (GET_CODE (y) == CONST)
            return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
                                       ysize, canon_rtx (XEXP (y, 0)), c);
          else
            return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
                                       ysize, y, c);
        }
      if (GET_CODE (y) == CONST)
        return memrefs_conflict_p (xsize, x, ysize,
                                   canon_rtx (XEXP (y, 0)), c);

      if (CONSTANT_P (y))
        return (xsize <= 0 || ysize <= 0
                || (rtx_equal_for_memref_p (x, y)
                    && ((c >= 0 && xsize > c) || (c < 0 && ysize+c > 0))));

      return -1;
    }

  return -1;
}
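
/* Worked example of the overlap test above: when X and Y canonicalize
   to the same expression with XSIZE = 4, YSIZE = 4 and C = 8 (Y lies
   8 bytes past X), neither c >= 0 && xsize > c nor c < 0 && ysize+c > 0
   holds, so 0 is returned: bytes [0,4) and [8,12) cannot overlap.
   With C = 2 instead, xsize > c holds and 1 is returned.  */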

/* Functions to compute memory dependencies.

   Since we process the insns in execution order, we can build tables
   to keep track of what registers are fixed (and not aliased), what registers
   are varying in known ways, and what registers are varying in unknown
   ways.

   If both memory references are volatile, then there must always be a
   dependence between the two references, since their order cannot be
   changed.  A volatile and a non-volatile reference can be interchanged,
   though.

   We must also allow AND addresses, because they may generate accesses
   outside the object being referenced.  This is used to generate aligned
   addresses from unaligned addresses, for instance, the alpha
   storeqi_unaligned pattern.  */

/* Read dependence: X is read after read in MEM takes place.  There can
   only be a dependence here if both reads are volatile.  */

int
read_dependence (const_rtx mem, const_rtx x)
{
  return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
}

/* Returns nonzero if something about the mode or address format of MEM
   indicates that it might well alias *anything*.  */

static int
aliases_everything_p (const_rtx mem)
{
  if (GET_CODE (XEXP (mem, 0)) == AND)
    /* If the address is an AND, it's very hard to know what it is
       actually pointing at.  */
    return 1;

  return 0;
}

/* Return true if we can determine that the fields referenced cannot
   overlap for any pair of objects.  */

static bool
nonoverlapping_component_refs_p (const_tree x, const_tree y)
{
  const_tree fieldx, fieldy, typex, typey, orig_y;

  if (!flag_strict_aliasing)
    return false;

  do
    {
      /* The comparison has to be done at a common type, since we don't
         know how the inheritance hierarchy works.  */
      orig_y = y;
      do
        {
          fieldx = TREE_OPERAND (x, 1);
          typex = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldx));

          y = orig_y;
          do
            {
              fieldy = TREE_OPERAND (y, 1);
              typey = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldy));

              if (typex == typey)
                goto found;

              y = TREE_OPERAND (y, 0);
            }
          while (y && TREE_CODE (y) == COMPONENT_REF);

          x = TREE_OPERAND (x, 0);
        }
      while (x && TREE_CODE (x) == COMPONENT_REF);
      /* Never found a common type.  */
      return false;

    found:
      /* If we're left with accessing different fields of a structure,
         then there is no overlap.  */
      if (TREE_CODE (typex) == RECORD_TYPE
          && fieldx != fieldy)
        return true;

      /* The comparison on the current field failed.  If we're accessing
         a deeply nested structure, look at the next outer level.  */
      x = TREE_OPERAND (x, 0);
      y = TREE_OPERAND (y, 0);
    }
  while (x && y
         && TREE_CODE (x) == COMPONENT_REF
         && TREE_CODE (y) == COMPONENT_REF);

  return false;
}
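
/* Illustrative example: given

     struct pair { int first; int second; };

   references p->first and q->second reach the found label above with
   typex == typey == struct pair but fieldx != fieldy, so the function
   returns true: the two fields cannot overlap however P and Q alias.
   (Hypothetical type and variable names, for illustration only.)  */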

/* Look at the bottom of the COMPONENT_REF list for a DECL, and return it.  */

static tree
decl_for_component_ref (tree x)
{
  do
    {
      x = TREE_OPERAND (x, 0);
    }
  while (x && TREE_CODE (x) == COMPONENT_REF);

  return x && DECL_P (x) ? x : NULL_TREE;
}

/* Walk up the COMPONENT_REF list in X and adjust *OFFSET to compensate
   for the offset of the field reference.  *KNOWN_P says whether the
   offset is known.  */

static void
adjust_offset_for_component_ref (tree x, bool *known_p,
                                 HOST_WIDE_INT *offset)
{
  if (!*known_p)
    return;
  do
    {
      tree xoffset = component_ref_field_offset (x);
      tree field = TREE_OPERAND (x, 1);

      if (! host_integerp (xoffset, 1))
        {
          *known_p = false;
          return;
        }
      *offset += (tree_low_cst (xoffset, 1)
                  + (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
                     / BITS_PER_UNIT));

      x = TREE_OPERAND (x, 0);
    }
  while (x && TREE_CODE (x) == COMPONENT_REF);
}
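
/* Worked example: for the reference s.second with

     struct pair { int first; int second; };

   and 32-bit int, component_ref_field_offset is 0 while
   DECL_FIELD_BIT_OFFSET of the field is 32, so *OFFSET grows by
   0 + 32 / BITS_PER_UNIT = 4 bytes (assuming BITS_PER_UNIT == 8;
   the type and names are for illustration only).  */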

/* Return nonzero if we can determine the exprs corresponding to memrefs
   X and Y and they do not overlap.
   If LOOP_INVARIANT is set, skip offset-based disambiguation.  */

int
nonoverlapping_memrefs_p (const_rtx x, const_rtx y, bool loop_invariant)
{
  tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
  rtx rtlx, rtly;
  rtx basex, basey;
  bool moffsetx_known_p, moffsety_known_p;
  HOST_WIDE_INT moffsetx = 0, moffsety = 0;
  HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey, tem;

  /* Unless both have exprs, we can't tell anything.  */
  if (exprx == 0 || expry == 0)
    return 0;

  /* For spill-slot accesses make sure we have valid offsets.  */
  if ((exprx == get_spill_slot_decl (false)
       && ! MEM_OFFSET_KNOWN_P (x))
      || (expry == get_spill_slot_decl (false)
          && ! MEM_OFFSET_KNOWN_P (y)))
    return 0;

  /* If both are field references, we may be able to determine something.  */
  if (TREE_CODE (exprx) == COMPONENT_REF
      && TREE_CODE (expry) == COMPONENT_REF
      && nonoverlapping_component_refs_p (exprx, expry))
    return 1;

  /* If the field reference test failed, look at the DECLs involved.  */
  moffsetx_known_p = MEM_OFFSET_KNOWN_P (x);
  if (moffsetx_known_p)
    moffsetx = MEM_OFFSET (x);
  if (TREE_CODE (exprx) == COMPONENT_REF)
    {
      tree t = decl_for_component_ref (exprx);
      if (! t)
        return 0;
      adjust_offset_for_component_ref (exprx, &moffsetx_known_p, &moffsetx);
      exprx = t;
    }

  moffsety_known_p = MEM_OFFSET_KNOWN_P (y);
  if (moffsety_known_p)
    moffsety = MEM_OFFSET (y);
  if (TREE_CODE (expry) == COMPONENT_REF)
    {
      tree t = decl_for_component_ref (expry);
      if (! t)
        return 0;
      adjust_offset_for_component_ref (expry, &moffsety_known_p, &moffsety);
      expry = t;
    }

  if (! DECL_P (exprx) || ! DECL_P (expry))
    return 0;

  /* With invalid code we can end up storing into the constant pool.
     Bail out to avoid ICEing when creating RTL for this.
     See gfortran.dg/lto/20091028-2_0.f90.  */
  if (TREE_CODE (exprx) == CONST_DECL
      || TREE_CODE (expry) == CONST_DECL)
    return 1;

  rtlx = DECL_RTL (exprx);
  rtly = DECL_RTL (expry);

  /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
     can't overlap unless they are the same because we never reuse that part
     of the stack frame used for locals for spilled pseudos.  */
  if ((!MEM_P (rtlx) || !MEM_P (rtly))
      && ! rtx_equal_p (rtlx, rtly))
    return 1;

  /* If we have MEMs referring to different address spaces (which can
     potentially overlap), we cannot easily tell from the addresses
     whether the references overlap.  */
  if (MEM_P (rtlx) && MEM_P (rtly)
      && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly))
    return 0;

  /* Get the base and offsets of both decls.  If either is a register, we
     know both are and are the same, so use that as the base.  The only way
     we can avoid overlap is if we can deduce that they are nonoverlapping
     pieces of that decl, which is very rare.  */
  basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
  if (GET_CODE (basex) == PLUS && CONST_INT_P (XEXP (basex, 1)))
    offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0);

  basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
  if (GET_CODE (basey) == PLUS && CONST_INT_P (XEXP (basey, 1)))
    offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0);

  /* If the bases are different, we know they do not overlap if both
     are constants or if one is a constant and the other a pointer into the
     stack frame.  Otherwise a different base means we can't tell if they
     overlap or not.  */
  if (! rtx_equal_p (basex, basey))
    return ((CONSTANT_P (basex) && CONSTANT_P (basey))
            || (CONSTANT_P (basex) && REG_P (basey)
                && REGNO_PTR_FRAME_P (REGNO (basey)))
            || (CONSTANT_P (basey) && REG_P (basex)
                && REGNO_PTR_FRAME_P (REGNO (basex))));

  /* Offset-based disambiguation is not appropriate for the loop-invariant
     case.  */
  if (loop_invariant)
    return 0;

  sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
           : MEM_SIZE_KNOWN_P (rtlx) ? MEM_SIZE (rtlx)
           : -1);
  sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly))
           : MEM_SIZE_KNOWN_P (rtly) ? MEM_SIZE (rtly)
           : -1);

  /* If we have an offset for either memref, it can update the values computed
     above.  */
  if (moffsetx_known_p)
    offsetx += moffsetx, sizex -= moffsetx;
  if (moffsety_known_p)
    offsety += moffsety, sizey -= moffsety;

  /* If a memref has both a size and an offset, we can use the smaller size.
     We can't do this if the offset isn't known because we must view this
     memref as being anywhere inside the DECL's MEM.  */
  if (MEM_SIZE_KNOWN_P (x) && moffsetx_known_p)
    sizex = MEM_SIZE (x);
  if (MEM_SIZE_KNOWN_P (y) && moffsety_known_p)
    sizey = MEM_SIZE (y);

  /* Put the values of the memref with the lower offset in X's values.  */
  if (offsetx > offsety)
    {
      tem = offsetx, offsetx = offsety, offsety = tem;
      tem = sizex, sizex = sizey, sizey = tem;
    }

  /* If we don't know the size of the lower-offset value, we can't tell
     if they conflict.  Otherwise, we do the test.  */
  return sizex >= 0 && offsety >= offsetx + sizex;
}
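
/* Worked example of the final test: if X covers bytes [0,4) and Y
   bytes [8,12) of the same decl (offsetx = 0, sizex = 4, offsety = 8
   after the swap above), offsety >= offsetx + sizex holds and 1 is
   returned.  For [0,4) against [2,6), 2 >= 4 fails, so the accesses
   are treated as possibly overlapping.  */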

/* Helper for true_dependence and canon_true_dependence.
   Checks for true dependence: X is read after store in MEM takes place.

   If MEM_CANONICALIZED is FALSE, then X_ADDR and MEM_ADDR should be
   NULL_RTX, and the canonical addresses of MEM and X are both computed
   here.  If MEM_CANONICALIZED, then MEM must already be canonicalized.

   If X_ADDR is non-NULL, it is used in preference to XEXP (x, 0).

   Returns 1 if there is a true dependence, 0 otherwise.  */

static int
true_dependence_1 (const_rtx mem, enum machine_mode mem_mode, rtx mem_addr,
                   const_rtx x, rtx x_addr, bool mem_canonicalized)
{
  rtx base;
  int ret;

  gcc_checking_assert (mem_canonicalized ? (mem_addr != NULL_RTX)
                       : (mem_addr == NULL_RTX && x_addr == NULL_RTX));

  if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
    return 1;

  /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
     This is used in epilogue deallocation functions, and in cselib.  */
  if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
    return 1;
  if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
    return 1;
  if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
      || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
    return 1;

  /* Read-only memory is by definition never modified, and therefore can't
     conflict with anything.  We don't expect to find read-only set on MEM,
     but stupid user tricks can produce them, so don't die.  */
  if (MEM_READONLY_P (x))
    return 0;

  /* If we have MEMs referring to different address spaces (which can
     potentially overlap), we cannot easily tell from the addresses
     whether the references overlap.  */
  if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
    return 1;

  if (! mem_addr)
    {
      mem_addr = XEXP (mem, 0);
      if (mem_mode == VOIDmode)
        mem_mode = GET_MODE (mem);
    }

  if (! x_addr)
    {
      x_addr = XEXP (x, 0);
      if (!((GET_CODE (x_addr) == VALUE
             && GET_CODE (mem_addr) != VALUE
             && reg_mentioned_p (x_addr, mem_addr))
            || (GET_CODE (x_addr) != VALUE
                && GET_CODE (mem_addr) == VALUE
                && reg_mentioned_p (mem_addr, x_addr))))
        {
          x_addr = get_addr (x_addr);
          if (! mem_canonicalized)
            mem_addr = get_addr (mem_addr);
        }
    }

  base = find_base_term (x_addr);
  if (base && (GET_CODE (base) == LABEL_REF
               || (GET_CODE (base) == SYMBOL_REF
                   && CONSTANT_POOL_ADDRESS_P (base))))
    return 0;

  if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), mem_mode))
    return 0;

  x_addr = canon_rtx (x_addr);
  if (!mem_canonicalized)
    mem_addr = canon_rtx (mem_addr);

  if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
                                 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
    return ret;

  if (DIFFERENT_ALIAS_SETS_P (x, mem))
    return 0;

  if (nonoverlapping_memrefs_p (mem, x, false))
    return 0;

  if (aliases_everything_p (x))
    return 1;

  /* We cannot use aliases_everything_p to test MEM, since we must look
     at MEM_ADDR, rather than XEXP (mem, 0).  */
  if (GET_CODE (mem_addr) == AND)
    return 1;

  /* ??? In true_dependence we also allow BLKmode to alias anything.  Why
     don't we do this in anti_dependence and output_dependence?  */
  if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
    return 1;

  return rtx_refs_may_alias_p (x, mem, true);
}

/* True dependence: X is read after store in MEM takes place.  */

int
true_dependence (const_rtx mem, enum machine_mode mem_mode, const_rtx x)
{
  return true_dependence_1 (mem, mem_mode, NULL_RTX,
                            x, NULL_RTX, /*mem_canonicalized=*/false);
}

/* Canonical true dependence: X is read after store in MEM takes place.
   Variant of true_dependence which assumes MEM has already been
   canonicalized (hence we no longer do that here).
   The mem_addr argument has been added, since true_dependence_1 computed
   this value prior to canonicalizing.  */

int
canon_true_dependence (const_rtx mem, enum machine_mode mem_mode, rtx mem_addr,
                       const_rtx x, rtx x_addr)
{
  return true_dependence_1 (mem, mem_mode, mem_addr,
                            x, x_addr, /*mem_canonicalized=*/true);
}
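
/* Illustrative example: for MEM = (mem:SI (reg 100)) and
   X = (mem:SI (plus (reg 100) (const_int 4))), true_dependence
   returns 0 (assuming matching alias sets and address spaces): the
   addresses share a base and memrefs_conflict_p proves the two 4-byte
   accesses disjoint.  If X read (mem:SI (reg 100)) itself, it would
   return 1.  */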

/* Returns nonzero if a write to X might alias a previous read from
   (or, if WRITEP is nonzero, a write to) MEM.  */

static int
write_dependence_p (const_rtx mem, const_rtx x, int writep)
{
  rtx x_addr, mem_addr;
  rtx base;
  int ret;

  if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
    return 1;

  /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
     This is used in epilogue deallocation functions.  */
  if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
    return 1;
  if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
    return 1;
  if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
      || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
    return 1;

  /* A read from read-only memory can't conflict with read-write memory.  */
  if (!writep && MEM_READONLY_P (mem))
    return 0;

  /* If we have MEMs referring to different address spaces (which can
     potentially overlap), we cannot easily tell from the addresses
     whether the references overlap.  */
  if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
    return 1;

  x_addr = XEXP (x, 0);
  mem_addr = XEXP (mem, 0);
  if (!((GET_CODE (x_addr) == VALUE
         && GET_CODE (mem_addr) != VALUE
         && reg_mentioned_p (x_addr, mem_addr))
        || (GET_CODE (x_addr) != VALUE
            && GET_CODE (mem_addr) == VALUE
            && reg_mentioned_p (mem_addr, x_addr))))
    {
      x_addr = get_addr (x_addr);
      mem_addr = get_addr (mem_addr);
    }

  if (! writep)
    {
      base = find_base_term (mem_addr);
      if (base && (GET_CODE (base) == LABEL_REF
                   || (GET_CODE (base) == SYMBOL_REF
                       && CONSTANT_POOL_ADDRESS_P (base))))
        return 0;
    }

  if (! base_alias_check (x_addr, mem_addr, GET_MODE (x),
                          GET_MODE (mem)))
    return 0;

  x_addr = canon_rtx (x_addr);
  mem_addr = canon_rtx (mem_addr);

  if ((ret = memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
                                 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
    return ret;

  if (nonoverlapping_memrefs_p (x, mem, false))
    return 0;

  return rtx_refs_may_alias_p (x, mem, false);
}

/* Anti dependence: X is written after read in MEM takes place.  */

int
anti_dependence (const_rtx mem, const_rtx x)
{
  return write_dependence_p (mem, x, /*writep=*/0);
}

/* Output dependence: X is written after store in MEM takes place.  */

int
output_dependence (const_rtx mem, const_rtx x)
{
  return write_dependence_p (mem, x, /*writep=*/1);
}
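
/* Illustrative example: a load from (mem:SI (reg 100)) followed by a
   store to the same address gives anti_dependence of 1 (write after
   read); two stores to that address give output_dependence of 1.  A
   store to (mem:SI (plus (reg 100) (const_int 8))) conflicts with
   neither, since memrefs_conflict_p proves the accesses disjoint.  */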

/* Check whether X may be aliased with MEM.  Don't do offset-based
   memory disambiguation or TBAA.  */
int
may_alias_p (const_rtx mem, const_rtx x)
{
  rtx x_addr, mem_addr;

  if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
    return 1;

  /* ??? In true_dependence we also allow BLKmode to alias anything.  */
  if (GET_MODE (mem) == BLKmode || GET_MODE (x) == BLKmode)
    return 1;

  if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
      || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
    return 1;

  /* Read-only memory is by definition never modified, and therefore can't
     conflict with anything.  We don't expect to find read-only set on MEM,
     but stupid user tricks can produce them, so don't die.  */
  if (MEM_READONLY_P (x))
    return 0;

  /* If we have MEMs referring to different address spaces (which can
     potentially overlap), we cannot easily tell from the addresses
     whether the references overlap.  */
  if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
    return 1;

  x_addr = XEXP (x, 0);
  mem_addr = XEXP (mem, 0);
  if (!((GET_CODE (x_addr) == VALUE
         && GET_CODE (mem_addr) != VALUE
         && reg_mentioned_p (x_addr, mem_addr))
        || (GET_CODE (x_addr) != VALUE
            && GET_CODE (mem_addr) == VALUE
            && reg_mentioned_p (mem_addr, x_addr))))
    {
      x_addr = get_addr (x_addr);
      mem_addr = get_addr (mem_addr);
    }

  if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), GET_MODE (mem_addr)))
    return 0;

  x_addr = canon_rtx (x_addr);
  mem_addr = canon_rtx (mem_addr);

  if (nonoverlapping_memrefs_p (mem, x, true))
    return 0;

  if (aliases_everything_p (x))
    return 1;

  /* We cannot use aliases_everything_p to test MEM, since we must look
     at MEM_ADDR, rather than XEXP (mem, 0).  */
  if (GET_CODE (mem_addr) == AND)
    return 1;

  /* TBAA is not valid for the loop-invariant case.  */
  return rtx_refs_may_alias_p (x, mem, false);
}

void
init_alias_target (void)
{
  int i;

  memset (static_reg_base_value, 0, sizeof static_reg_base_value);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    /* Check whether this register can hold an incoming pointer
       argument.  FUNCTION_ARG_REGNO_P tests outgoing register
       numbers, so translate if necessary due to register windows.  */
    if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
        && HARD_REGNO_MODE_OK (i, Pmode))
      static_reg_base_value[i]
        = gen_rtx_ADDRESS (VOIDmode, gen_rtx_REG (Pmode, i));

  static_reg_base_value[STACK_POINTER_REGNUM]
    = gen_rtx_ADDRESS (Pmode, stack_pointer_rtx);
  static_reg_base_value[ARG_POINTER_REGNUM]
    = gen_rtx_ADDRESS (Pmode, arg_pointer_rtx);
  static_reg_base_value[FRAME_POINTER_REGNUM]
    = gen_rtx_ADDRESS (Pmode, frame_pointer_rtx);
#if !HARD_FRAME_POINTER_IS_FRAME_POINTER
  static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
    = gen_rtx_ADDRESS (Pmode, hard_frame_pointer_rtx);
#endif
}

/* Set MEMORY_MODIFIED when X modifies DATA (which is assumed to be a
   memory reference).  */
static bool memory_modified;
static void
memory_modified_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  if (MEM_P (x))
    {
      if (anti_dependence (x, (const_rtx)data)
          || output_dependence (x, (const_rtx)data))
        memory_modified = true;
    }
}

/* Return true when INSN possibly modifies the memory contents of MEM
   (i.e. the address can be modified).  */
bool
memory_modified_in_insn_p (const_rtx mem, const_rtx insn)
{
  if (!INSN_P (insn))
    return false;
  memory_modified = false;
  note_stores (PATTERN (insn), memory_modified_1, CONST_CAST_RTX(mem));
  return memory_modified;
}
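
/* Illustrative example: for INSN = (set (mem:SI (reg 100)) (reg 101)),
   memory_modified_in_insn_p returns true for any MEM that
   anti_dependence or output_dependence cannot prove disjoint from
   (mem:SI (reg 100)), and false for a MEM known to occupy different
   bytes.  */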

/* Initialize the aliasing machinery.  Initialize the REG_KNOWN_VALUE
   array.  */

void
init_alias_analysis (void)
{
  unsigned int maxreg = max_reg_num ();
  int changed, pass;
  int i;
  unsigned int ui;
  rtx insn;

  timevar_push (TV_ALIAS_ANALYSIS);

  reg_known_value_size = maxreg - FIRST_PSEUDO_REGISTER;
  reg_known_value = ggc_alloc_cleared_vec_rtx (reg_known_value_size);
  reg_known_equiv_p = XCNEWVEC (bool, reg_known_value_size);

  /* If we have memory allocated from the previous run, use it.  */
  if (old_reg_base_value)
    reg_base_value = old_reg_base_value;

  if (reg_base_value)
    VEC_truncate (rtx, reg_base_value, 0);

  VEC_safe_grow_cleared (rtx, gc, reg_base_value, maxreg);

  new_reg_base_value = XNEWVEC (rtx, maxreg);
  reg_seen = XNEWVEC (char, maxreg);

  /* The basic idea is that each pass through this loop will use the
     "constant" information from the previous pass to propagate alias
     information through another level of assignments.

     This could get expensive if the assignment chains are long.  Maybe
     we should throttle the number of iterations, possibly based on
     the optimization level or flag_expensive_optimizations.

     We could propagate more information in the first pass by making use
     of DF_REG_DEF_COUNT to determine immediately that the alias information
     for a pseudo is "constant".

     A program with an uninitialized variable can cause an infinite loop
     here.  Instead of doing a full dataflow analysis to detect such problems
     we just cap the number of iterations for the loop.

     The state of the arrays for the set chain in question does not matter
     since the program has undefined behavior.  */

  pass = 0;
  do
    {
      /* Assume nothing will change this iteration of the loop.  */
      changed = 0;

      /* We want to assign the same IDs each iteration of this loop, so
         start counting from zero each iteration of the loop.  */
      unique_id = 0;

      /* We're at the start of the function each iteration through the
         loop, so we're copying arguments.  */
      copying_arguments = true;

      /* Wipe the potential alias information clean for this pass.  */
      memset (new_reg_base_value, 0, maxreg * sizeof (rtx));

      /* Wipe the reg_seen array clean.  */
      memset (reg_seen, 0, maxreg);

      /* Mark all hard registers which may contain an address.
         The stack, frame and argument pointers may contain an address.
         An argument register which can hold a Pmode value may contain
         an address even if it is not in BASE_REGS.

         The address expression is VOIDmode for an argument and
         Pmode for other registers.  */

      memcpy (new_reg_base_value, static_reg_base_value,
              FIRST_PSEUDO_REGISTER * sizeof (rtx));

      /* Walk the insns adding values to the new_reg_base_value array.  */
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        {
          if (INSN_P (insn))
            {
              rtx note, set;

#if defined (HAVE_prologue) || defined (HAVE_epilogue)
              /* The prologue/epilogue insns are not threaded onto the
                 insn chain until after reload has completed.  Thus,
                 there is no sense wasting time checking if INSN is in
                 the prologue/epilogue until after reload has completed.  */
              if (reload_completed
                  && prologue_epilogue_contains (insn))
                continue;
#endif

              /* If this insn has a noalias note, process it.  Otherwise,
                 scan for sets.  A simple set will have no side effects
                 which could change the base value of any other register.  */

              if (GET_CODE (PATTERN (insn)) == SET
                  && REG_NOTES (insn) != 0
                  && find_reg_note (insn, REG_NOALIAS, NULL_RTX))
                record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
              else
                note_stores (PATTERN (insn), record_set, NULL);

              set = single_set (insn);

              if (set != 0
                  && REG_P (SET_DEST (set))
                  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
                {
                  unsigned int regno = REGNO (SET_DEST (set));
                  rtx src = SET_SRC (set);
                  rtx t;

                  note = find_reg_equal_equiv_note (insn);
                  if (note && REG_NOTE_KIND (note) == REG_EQUAL
                      && DF_REG_DEF_COUNT (regno) != 1)
                    note = NULL_RTX;

                  if (note != NULL_RTX
                      && GET_CODE (XEXP (note, 0)) != EXPR_LIST
                      && ! rtx_varies_p (XEXP (note, 0), 1)
                      && ! reg_overlap_mentioned_p (SET_DEST (set),
                                                    XEXP (note, 0)))
                    {
                      set_reg_known_value (regno, XEXP (note, 0));
                      set_reg_known_equiv_p (regno,
                        REG_NOTE_KIND (note) == REG_EQUIV);
                    }
                  else if (DF_REG_DEF_COUNT (regno) == 1
                           && GET_CODE (src) == PLUS
                           && REG_P (XEXP (src, 0))
                           && (t = get_reg_known_value (REGNO (XEXP (src, 0))))
                           && CONST_INT_P (XEXP (src, 1)))
                    {
                      t = plus_constant (t, INTVAL (XEXP (src, 1)));
                      set_reg_known_value (regno, t);
                      set_reg_known_equiv_p (regno, 0);
                    }
                  else if (DF_REG_DEF_COUNT (regno) == 1
                           && ! rtx_varies_p (src, 1))
                    {
                      set_reg_known_value (regno, src);
                      set_reg_known_equiv_p (regno, 0);
                    }
                }
            }
          else if (NOTE_P (insn)
                   && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG)
            copying_arguments = false;
        }

      /* Now propagate values from new_reg_base_value to reg_base_value.  */
      gcc_assert (maxreg == (unsigned int) max_reg_num ());

      for (ui = 0; ui < maxreg; ui++)
        {
          if (new_reg_base_value[ui]
              && new_reg_base_value[ui] != VEC_index (rtx, reg_base_value, ui)
              && ! rtx_equal_p (new_reg_base_value[ui],
                                VEC_index (rtx, reg_base_value, ui)))
            {
              VEC_replace (rtx, reg_base_value, ui, new_reg_base_value[ui]);
              changed = 1;
            }
        }
    }
  while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);

  /* Fill in the remaining entries.  */
  for (i = 0; i < (int)reg_known_value_size; i++)
    if (reg_known_value[i] == 0)
      reg_known_value[i] = regno_reg_rtx[i + FIRST_PSEUDO_REGISTER];

  /* Clean up.  */
  free (new_reg_base_value);
  new_reg_base_value = 0;
  free (reg_seen);
  reg_seen = 0;
  timevar_pop (TV_ALIAS_ANALYSIS);
}
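
/* Illustrative example of the propagation above: if the only
   definition of pseudo 101 is
   (set (reg 101) (plus (reg 100) (const_int 4))) and reg 100 already
   has a known value, the DF_REG_DEF_COUNT branch records
   known(101) = plus_constant (known(100), 4), so later address
   comparisons can fold both registers down to a common base.  */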
2909
 
2910
/* Equate REG_BASE_VALUE (reg1) to REG_BASE_VALUE (reg2).
2911
   Special API for var-tracking pass purposes.  */
2912
 
2913
void
2914
vt_equate_reg_base_value (const_rtx reg1, const_rtx reg2)
2915
{
2916
  VEC_replace (rtx, reg_base_value, REGNO (reg1), REG_BASE_VALUE (reg2));
2917
}
2918
 
2919
void
2920
end_alias_analysis (void)
2921
{
2922
  old_reg_base_value = reg_base_value;
2923
  ggc_free (reg_known_value);
2924
  reg_known_value = 0;
2925
  reg_known_value_size = 0;
2926
  free (reg_known_equiv_p);
2927
  reg_known_equiv_p = 0;
2928
}

#include "gt-alias.h"
