/* Alias analysis for GNU C
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
   2007, 2008, 2009, 2010 Free Software Foundation, Inc.
   Contributed by John Carr (jfc@mit.edu).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "function.h"
#include "alias.h"
#include "emit-rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "flags.h"
#include "output.h"
#include "toplev.h"
#include "cselib.h"
#include "splay-tree.h"
#include "ggc.h"
#include "langhooks.h"
#include "timevar.h"
#include "target.h"
#include "cgraph.h"
#include "varray.h"
#include "tree-pass.h"
#include "ipa-type-escape.h"
#include "df.h"
#include "tree-ssa-alias.h"
#include "pointer-set.h"
#include "tree-flow.h"

/* The aliasing API provided here solves related but different problems:

   Say there exists (in C)

   struct X {
     struct Y y1;
     struct Z z2;
   } x1, *px1, *px2;

   struct Y y2, *py;
   struct Z z2, *pz;


   py = &px1->y1;
   px2 = &x1;

   Consider the four questions:

   Can a store to x1 interfere with px2->y1?
   Can a store to x1 interfere with px2->z2 (i.e. (*px2).z2)?
   Can a store to x1 change the value pointed to by py?
   Can a store to x1 change the value pointed to by pz?

   The answer to these questions can be yes, yes, yes, and maybe.

   The first two questions can be answered with a simple examination
   of the type system.  If structure X contains a field of type Y then
   a store thru a pointer to an X can overwrite any field that is
   contained (recursively) in an X (unless we know that px1 != px2).

   The last two questions can be solved in the same way as the first
   two, but this is too conservative.  The observation is that in some
   cases we can know which (if any) fields are addressed and whether
   those addresses are used in bad ways.  This analysis may be
   language specific.  In C, arbitrary operations may be applied to
   pointers.  However, there is some indication that this may be too
   conservative for some C++ types.

   The pass ipa-type-escape does this analysis for the types whose
   instances do not escape across the compilation boundary.

   Historically in GCC, these two problems were combined and a single
   data structure was used to represent the solution to these
   problems.  We now have two similar but different data structures.
   The data structure used to solve the last two questions is similar
   to the first, but does not contain the fields whose addresses are
   never taken.  For types that do escape the compilation unit, the
   data structures will have identical information.
*/

/* The alias sets assigned to MEMs assist the back-end in determining
   which MEMs can alias which other MEMs.  In general, two MEMs in
   different alias sets cannot alias each other, with one important
   exception.  Consider something like:

     struct S { int i; double d; };

   a store to an `S' can alias something of either type `int' or type
   `double'.  (However, a store to an `int' cannot alias a `double'
   and vice versa.)  We indicate this via a tree structure that looks
   like:
           struct S
            /   \
           /     \
         |/_     _\|
         int    double

   (The arrows are directed and point downwards.)
   In this situation we say the alias set for `struct S' is the
   `superset' and that those for `int' and `double' are `subsets'.

   To see whether two alias sets can point to the same memory, we must
   see if either alias set is a subset of the other.  We need not trace
   past immediate descendants, however, since we propagate all
   grandchildren up one level.

   Alias set zero is implicitly a superset of all other alias sets.
   However, there is no actual entry for alias set zero.  It is an
   error to attempt to explicitly construct a subset of zero.  */
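
/* For illustration (an editor's sketch, not part of GCC): user code that
   relies on the rules above under -fstrict-aliasing:

     struct S { int i; double d; };

     int
     f (struct S *s, int *i, double *d)
     {
       s->i = 1;      -- a store through an `S' may alias *i and *d,
                         since `int' and `double' are subsets of `S'
       *i = 2;        -- cannot alias *d: `int' and `double' live in
                         disjoint alias sets, neither a subset of the other
       return s->i;
     }
*/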

struct GTY(()) alias_set_entry_d {
  /* The alias set number, as stored in MEM_ALIAS_SET.  */
  alias_set_type alias_set;

  /* Nonzero if this set has a child of zero: this effectively makes this
     alias set the same as alias set zero.  */
  int has_zero_child;

  /* The children of the alias set.  These are not just the immediate
     children, but, in fact, all descendants.  So, if we have:

       struct T { struct S s; float f; }

     continuing our example above, the children here will be all of
     `int', `double', `float', and `struct S'.  */
  splay_tree GTY((param1_is (int), param2_is (int))) children;
};
typedef struct alias_set_entry_d *alias_set_entry;

static int rtx_equal_for_memref_p (const_rtx, const_rtx);
static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT);
static void record_set (rtx, const_rtx, void *);
static int base_alias_check (rtx, rtx, enum machine_mode,
                             enum machine_mode);
static rtx find_base_value (rtx);
static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx);
static int insert_subset_children (splay_tree_node, void*);
static alias_set_entry get_alias_set_entry (alias_set_type);
static const_rtx fixed_scalar_and_varying_struct_p (const_rtx, const_rtx, rtx, rtx,
                                                    bool (*) (const_rtx, bool));
static int aliases_everything_p (const_rtx);
static bool nonoverlapping_component_refs_p (const_tree, const_tree);
static tree decl_for_component_ref (tree);
static rtx adjust_offset_for_component_ref (tree, rtx);
static int write_dependence_p (const_rtx, const_rtx, int);

static void memory_modified_1 (rtx, const_rtx, void *);

/* Set up all info needed to perform alias analysis on memory references.  */

/* Returns the size in bytes of the mode of X.  */
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Returns nonzero if MEM1 and MEM2 do not alias because they are in
   different alias sets.  We ignore alias sets in functions making use
   of variable arguments because the va_arg macros on some systems are
   not legal ANSI C.  */
#define DIFFERENT_ALIAS_SETS_P(MEM1, MEM2)              \
  mems_in_disjoint_alias_sets_p (MEM1, MEM2)

/* Cap the number of passes we make over the insns propagating alias
   information through set chains.  10 is a completely arbitrary choice.  */
#define MAX_ALIAS_LOOP_PASSES 10

/* reg_base_value[N] gives an address to which register N is related.
   If all sets after the first add or subtract to the current value
   or otherwise modify it so it does not point to a different top level
   object, reg_base_value[N] is equal to the address part of the source
   of the first set.

   A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF.  ADDRESS
   expressions represent certain special values: function arguments and
   the stack, frame, and argument pointers.

   The contents of an ADDRESS are not normally used; the mode of the
   ADDRESS determines whether the ADDRESS is a function argument or some
   other special value.  Pointer equality, not rtx_equal_p, determines whether
   two ADDRESS expressions refer to the same base address.

   The only use of the contents of an ADDRESS is for determining if the
   current function performs nonlocal memory references for the
   purposes of marking the function as a constant function.  */

static GTY(()) VEC(rtx,gc) *reg_base_value;
static rtx *new_reg_base_value;

/* We preserve a copy of the old array around to reduce the amount of
   garbage produced; about 8% of garbage produced was attributed to this
   array.  */
static GTY((deletable)) VEC(rtx,gc) *old_reg_base_value;

/* Static hunks of RTL used by the aliasing code; these are initialized
   once per function to avoid unnecessary RTL allocations.  */
static GTY (()) rtx static_reg_base_value[FIRST_PSEUDO_REGISTER];

#define REG_BASE_VALUE(X)                               \
  (REGNO (X) < VEC_length (rtx, reg_base_value)         \
   ? VEC_index (rtx, reg_base_value, REGNO (X)) : 0)

/* Vector indexed by N giving the initial (unchanging) value known for
   pseudo-register N.  This array is initialized in init_alias_analysis,
   and does not change until end_alias_analysis is called.  */
static GTY((length("reg_known_value_size"))) rtx *reg_known_value;

/* Indicates number of valid entries in reg_known_value.  */
static GTY(()) unsigned int reg_known_value_size;

/* Vector recording for each reg_known_value whether it is due to a
   REG_EQUIV note.  Future passes (viz., reload) may replace the
   pseudo with the equivalent expression and so we account for the
   dependences that would be introduced if that happens.

   The REG_EQUIV notes created in assign_parms may mention the arg
   pointer, and there are explicit insns in the RTL that modify the
   arg pointer.  Thus we must ensure that such insns don't get
   scheduled across each other because that would invalidate the
   REG_EQUIV notes.  One could argue that the REG_EQUIV notes are
   wrong, but solving the problem in the scheduler will likely give
   better code, so we do it here.  */
static bool *reg_known_equiv_p;

/* True when scanning insns from the start of the rtl to the
   NOTE_INSN_FUNCTION_BEG note.  */
static bool copying_arguments;

DEF_VEC_P(alias_set_entry);
DEF_VEC_ALLOC_P(alias_set_entry,gc);

/* The vector used to store the various alias set entries.  */
static GTY (()) VEC(alias_set_entry,gc) *alias_sets;

/* Build a decomposed reference object for querying the alias-oracle
   from the MEM rtx and store it in *REF.
   Returns false if MEM is not suitable for the alias-oracle.  */

static bool
ao_ref_from_mem (ao_ref *ref, const_rtx mem)
{
  tree expr = MEM_EXPR (mem);
  tree base;

  if (!expr)
    return false;

  /* If MEM_OFFSET or MEM_SIZE are NULL, punt.  */
  if (!MEM_OFFSET (mem)
      || !MEM_SIZE (mem))
    return false;

  ao_ref_init (ref, expr);

  /* Get the base of the reference and see if we have to reject or
     adjust it.  */
  base = ao_ref_base (ref);
  if (base == NULL_TREE)
    return false;

  /* If this is a pointer dereference of a non-SSA_NAME punt.
     ??? We could replace it with a pointer to anything.  */
  if (INDIRECT_REF_P (base)
      && TREE_CODE (TREE_OPERAND (base, 0)) != SSA_NAME)
    return false;

  /* The tree oracle doesn't like to have these.  */
  if (TREE_CODE (base) == FUNCTION_DECL
      || TREE_CODE (base) == LABEL_DECL)
    return false;

  /* If this is a reference based on a partitioned decl replace the
     base with an INDIRECT_REF of the pointer representative we
     created during stack slot partitioning.  */
  if (TREE_CODE (base) == VAR_DECL
      && ! TREE_STATIC (base)
      && cfun->gimple_df->decls_to_pointers != NULL)
    {
      void *namep;
      namep = pointer_map_contains (cfun->gimple_df->decls_to_pointers, base);
      if (namep)
        {
          ref->base_alias_set = get_alias_set (base);
          ref->base = build1 (INDIRECT_REF, TREE_TYPE (base), *(tree *)namep);
        }
    }

  ref->ref_alias_set = MEM_ALIAS_SET (mem);

  /* If the base decl is a parameter we can have negative MEM_OFFSET in
     case of promoted subregs on big-endian targets.  Trust the MEM_EXPR
     here.  */
  if (INTVAL (MEM_OFFSET (mem)) < 0
      && ((INTVAL (MEM_SIZE (mem)) + INTVAL (MEM_OFFSET (mem)))
          * BITS_PER_UNIT) == ref->size)
    return true;

  ref->offset += INTVAL (MEM_OFFSET (mem)) * BITS_PER_UNIT;
  ref->size = INTVAL (MEM_SIZE (mem)) * BITS_PER_UNIT;

  /* The MEM may extend into adjacent fields, so adjust max_size if
     necessary.  */
  if (ref->max_size != -1
      && ref->size > ref->max_size)
    ref->max_size = ref->size;

  /* If MEM_OFFSET and MEM_SIZE get us outside of the base object of
     the MEM_EXPR punt.  This happens for STRICT_ALIGNMENT targets a lot.  */
  if (MEM_EXPR (mem) != get_spill_slot_decl (false)
      && (ref->offset < 0
          || (DECL_P (ref->base)
              && (!host_integerp (DECL_SIZE (ref->base), 1)
                  || (TREE_INT_CST_LOW (DECL_SIZE ((ref->base)))
                      < (unsigned HOST_WIDE_INT)(ref->offset + ref->size))))))
    return false;

  return true;
}
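
/* A note on units (an editor's clarification, not in the original): the
   MEM_OFFSET and MEM_SIZE attributes are byte quantities, while the
   ao_ref fields offset, size and max_size are measured in bits, hence
   the BITS_PER_UNIT scaling above.  For example, on a target with
   8-bit units a 4-byte MEM at byte offset 8 contributes
   ref->offset += 64 and ref->size = 32.  */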

/* Query the alias-oracle on whether the two memory rtx X and MEM may
   alias.  If TBAA_P is set also apply TBAA.  Returns true if the
   two rtxen may alias, false otherwise.  */

static bool
rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p)
{
  ao_ref ref1, ref2;

  if (!ao_ref_from_mem (&ref1, x)
      || !ao_ref_from_mem (&ref2, mem))
    return true;

  return refs_may_alias_p_1 (&ref1, &ref2, tbaa_p);
}

/* Returns a pointer to the alias set entry for ALIAS_SET, if there is
   such an entry, or NULL otherwise.  */

static inline alias_set_entry
get_alias_set_entry (alias_set_type alias_set)
{
  return VEC_index (alias_set_entry, alias_sets, alias_set);
}

/* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
   the two MEMs cannot alias each other.  */

static inline int
mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2)
{
  /* Perform a basic sanity check.  Namely, that there are no alias sets
     if we're not using strict aliasing.  This helps to catch bugs
     whereby someone uses PUT_CODE, but doesn't clear MEM_ALIAS_SET, or
     where a MEM is allocated in some way other than by the use of
     gen_rtx_MEM, and the MEM_ALIAS_SET is not cleared.  If we begin to
     use alias sets to indicate that spilled registers cannot alias each
     other, we might need to remove this check.  */
  gcc_assert (flag_strict_aliasing
              || (!MEM_ALIAS_SET (mem1) && !MEM_ALIAS_SET (mem2)));

  return ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1), MEM_ALIAS_SET (mem2));
}

/* Insert the NODE into the splay tree given by DATA.  Used by
   record_alias_subset via splay_tree_foreach.  */

static int
insert_subset_children (splay_tree_node node, void *data)
{
  splay_tree_insert ((splay_tree) data, node->key, node->value);

  return 0;
}

/* Return true if the first alias set is a subset of the second.  */

bool
alias_set_subset_of (alias_set_type set1, alias_set_type set2)
{
  alias_set_entry ase;

  /* Everything is a subset of the "aliases everything" set.  */
  if (set2 == 0)
    return true;

  /* Otherwise, check if set1 is a subset of set2.  */
  ase = get_alias_set_entry (set2);
  if (ase != 0
      && (ase->has_zero_child
          || splay_tree_lookup (ase->children,
                                (splay_tree_key) set1)))
    return true;
  return false;
}

/* Return 1 if the two specified alias sets may conflict.  */

int
alias_sets_conflict_p (alias_set_type set1, alias_set_type set2)
{
  alias_set_entry ase;

  /* The easy case.  */
  if (alias_sets_must_conflict_p (set1, set2))
    return 1;

  /* See if the first alias set is a subset of the second.  */
  ase = get_alias_set_entry (set1);
  if (ase != 0
      && (ase->has_zero_child
          || splay_tree_lookup (ase->children,
                                (splay_tree_key) set2)))
    return 1;

  /* Now do the same, but with the alias sets reversed.  */
  ase = get_alias_set_entry (set2);
  if (ase != 0
      && (ase->has_zero_child
          || splay_tree_lookup (ase->children,
                                (splay_tree_key) set1)))
    return 1;

  /* The two alias sets are distinct and neither one is the
     child of the other.  Therefore, they cannot conflict.  */
  return 0;
}
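
/* For illustration (an editor's sketch, not part of GCC): with the
   `struct S' hierarchy from the head of this file, and s_type standing
   for the tree node of `struct S':

     alias_set_type s_set = get_alias_set (s_type);
     alias_set_type i_set = get_alias_set (integer_type_node);
     alias_set_type d_set = get_alias_set (double_type_node);

   alias_sets_conflict_p (s_set, i_set) and alias_sets_conflict_p
   (s_set, d_set) both return 1 through the subset links, while
   alias_sets_conflict_p (i_set, d_set) returns 0: the two leaves are
   distinct and neither records the other as a child.  */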

static int
walk_mems_2 (rtx *x, rtx mem)
{
  if (MEM_P (*x))
    {
      if (alias_sets_conflict_p (MEM_ALIAS_SET(*x), MEM_ALIAS_SET(mem)))
        return 1;

      return -1;
    }
  return 0;
}

static int
walk_mems_1 (rtx *x, rtx *pat)
{
  if (MEM_P (*x))
    {
      /* Visit all MEMs in *PAT and check independence.  */
      if (for_each_rtx (pat, (rtx_function) walk_mems_2, *x))
        /* Indicate that dependence was determined and stop traversal.  */
        return 1;

      return -1;
    }
  return 0;
}

/* Return 1 if the two specified instructions have MEMs with conflicting
   alias sets.  */
bool
insn_alias_sets_conflict_p (rtx insn1, rtx insn2)
{
  /* For each pair of MEMs in INSN1 and INSN2 check their independence.  */
  return for_each_rtx (&PATTERN (insn1), (rtx_function) walk_mems_1,
                       &PATTERN (insn2));
}

/* Return 1 if the two specified alias sets will always conflict.  */

int
alias_sets_must_conflict_p (alias_set_type set1, alias_set_type set2)
{
  if (set1 == 0 || set2 == 0 || set1 == set2)
    return 1;

  return 0;
}
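
/* For illustration (an editor's sketch, not part of GCC): this is the
   "easy" part of the conflict relation used by alias_sets_conflict_p
   above.  With distinct nonzero i_set and d_set:

     alias_sets_must_conflict_p (i_set, i_set)   returns 1
     alias_sets_must_conflict_p (0, d_set)       returns 1 (set zero)
     alias_sets_must_conflict_p (i_set, d_set)   returns 0

   The last pair may still conflict through a subset link, which only
   alias_sets_conflict_p checks.  */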

/* Return 1 if any MEM object of type T1 will always conflict (using the
   dependency routines in this file) with any MEM object of type T2.
   This is used when allocating temporary storage.  If T1 and/or T2 are
   NULL_TREE, it means we know nothing about the storage.  */

int
objects_must_conflict_p (tree t1, tree t2)
{
  alias_set_type set1, set2;

  /* If neither has a type specified, we don't know if they'll conflict
     because we may be using them to store objects of various types, for
     example the argument and local variables areas of inlined functions.  */
  if (t1 == 0 && t2 == 0)
    return 0;

  /* If they are the same type, they must conflict.  */
  if (t1 == t2
      /* Likewise if both are volatile.  */
      || (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2)))
    return 1;

  set1 = t1 ? get_alias_set (t1) : 0;
  set2 = t2 ? get_alias_set (t2) : 0;

  /* We can't use alias_sets_conflict_p because we must make sure
     that every subtype of t1 will conflict with every subtype of
     t2 for which a pair of subobjects of these respective subtypes
     overlaps on the stack.  */
  return alias_sets_must_conflict_p (set1, set2);
}

/* Return true if all nested component references handled by
   get_inner_reference in T are such that we should use the alias set
   provided by the object at the heart of T.

   This is true for non-addressable components (which don't have their
   own alias set), as well as components of objects in alias set zero.
   This latter point is a special case wherein we wish to override the
   alias set used by the component, but we don't have per-FIELD_DECL
   assignable alias sets.  */

bool
component_uses_parent_alias_set (const_tree t)
{
  while (1)
    {
      /* If we're at the end, it vacuously uses its own alias set.  */
      if (!handled_component_p (t))
        return false;

      switch (TREE_CODE (t))
        {
        case COMPONENT_REF:
          if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
            return true;
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
          if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
            return true;
          break;

        case REALPART_EXPR:
        case IMAGPART_EXPR:
          break;

        default:
          /* Bitfields and casts are never addressable.  */
          return true;
        }

      t = TREE_OPERAND (t, 0);
      if (get_alias_set (TREE_TYPE (t)) == 0)
        return true;
    }
}

/* Return the alias set for the memory pointed to by T, which may be
   either a type or an expression.  Return -1 if there is nothing
   special about dereferencing T.  */

static alias_set_type
get_deref_alias_set_1 (tree t)
{
  /* If we're not doing any alias analysis, just assume everything
     aliases everything else.  */
  if (!flag_strict_aliasing)
    return 0;

  /* All we care about is the type.  */
  if (! TYPE_P (t))
    t = TREE_TYPE (t);

  /* If we have an INDIRECT_REF via a void pointer, we don't
     know anything about what that might alias.  Likewise if the
     pointer is marked that way.  */
  if (TREE_CODE (TREE_TYPE (t)) == VOID_TYPE
      || TYPE_REF_CAN_ALIAS_ALL (t))
    return 0;

  return -1;
}

/* Return the alias set for the memory pointed to by T, which may be
   either a type or an expression.  */

alias_set_type
get_deref_alias_set (tree t)
{
  alias_set_type set = get_deref_alias_set_1 (t);

  /* Fall back to the alias-set of the pointed-to type.  */
  if (set == -1)
    {
      if (! TYPE_P (t))
        t = TREE_TYPE (t);
      set = get_alias_set (TREE_TYPE (t));
    }

  return set;
}

/* Return the alias set for T, which may be either a type or an
   expression.  Call language-specific routine for help, if needed.  */

alias_set_type
get_alias_set (tree t)
{
  alias_set_type set;

  /* If we're not doing any alias analysis, just assume everything
     aliases everything else.  Also return 0 if this or its type is
     an error.  */
  if (! flag_strict_aliasing || t == error_mark_node
      || (! TYPE_P (t)
          && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
    return 0;

  /* We can be passed either an expression or a type.  This and the
     language-specific routine may make mutually-recursive calls to each other
     to figure out what to do.  At each juncture, we see if this is a tree
     that the language may need to handle specially.  First handle things that
     aren't types.  */
  if (! TYPE_P (t))
    {
      tree inner;

      /* Remove any nops, then give the language a chance to do
         something with this tree before we look at it.  */
      STRIP_NOPS (t);
      set = lang_hooks.get_alias_set (t);
      if (set != -1)
        return set;

      /* Retrieve the original memory reference if needed.  */
      if (TREE_CODE (t) == TARGET_MEM_REF)
        t = TMR_ORIGINAL (t);

      /* First see if the actual object referenced is an INDIRECT_REF from a
         restrict-qualified pointer or a "void *".  */
      inner = t;
      while (handled_component_p (inner))
        {
          inner = TREE_OPERAND (inner, 0);
          STRIP_NOPS (inner);
        }

      if (INDIRECT_REF_P (inner))
        {
          set = get_deref_alias_set_1 (TREE_OPERAND (inner, 0));
          if (set != -1)
            return set;
        }

      /* Otherwise, pick up the outermost object that we could have a pointer
         to, processing conversions as above.  */
      while (component_uses_parent_alias_set (t))
        {
          t = TREE_OPERAND (t, 0);
          STRIP_NOPS (t);
        }

      /* If we've already determined the alias set for a decl, just return
         it.  This is necessary for C++ anonymous unions, whose component
         variables don't look like union members (boo!).  */
      if (TREE_CODE (t) == VAR_DECL
          && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
        return MEM_ALIAS_SET (DECL_RTL (t));

      /* Now all we care about is the type.  */
      t = TREE_TYPE (t);
    }

  /* Variant qualifiers don't affect the alias set, so get the main
     variant.  */
  t = TYPE_MAIN_VARIANT (t);

  /* Always use the canonical type as well.  If this is a type that
     requires structural comparisons to identify compatible types
     use alias set zero.  */
  if (TYPE_STRUCTURAL_EQUALITY_P (t))
    {
      /* Allow the language to specify another alias set for this
         type.  */
      set = lang_hooks.get_alias_set (t);
      if (set != -1)
        return set;
      return 0;
    }
  t = TYPE_CANONICAL (t);
  /* Canonical types shouldn't form a tree nor should the canonical
     type require structural equality checks.  */
  gcc_assert (!TYPE_STRUCTURAL_EQUALITY_P (t) && TYPE_CANONICAL (t) == t);

  /* If this is a type with a known alias set, return it.  */
  if (TYPE_ALIAS_SET_KNOWN_P (t))
    return TYPE_ALIAS_SET (t);

  /* We don't want to set TYPE_ALIAS_SET for incomplete types.  */
  if (!COMPLETE_TYPE_P (t))
    {
      /* For arrays with unknown size the conservative answer is the
         alias set of the element type.  */
      if (TREE_CODE (t) == ARRAY_TYPE)
        return get_alias_set (TREE_TYPE (t));

      /* But return zero as a conservative answer for incomplete types.  */
      return 0;
    }

  /* See if the language has special handling for this type.  */
  set = lang_hooks.get_alias_set (t);
  if (set != -1)
    return set;

  /* There are no objects of FUNCTION_TYPE, so there's no point in
     using up an alias set for them.  (There are, of course, pointers
     and references to functions, but that's different.)  */
  else if (TREE_CODE (t) == FUNCTION_TYPE
           || TREE_CODE (t) == METHOD_TYPE)
    set = 0;

  /* Unless the language specifies otherwise, let vector types alias
     their components.  This avoids some nasty type punning issues in
     normal usage.  And indeed lets vectors be treated more like an
     array slice.  */
  else if (TREE_CODE (t) == VECTOR_TYPE)
    set = get_alias_set (TREE_TYPE (t));

  /* Unless the language specifies otherwise, treat array types the
     same as their components.  This avoids the asymmetry we get
     through recording the components.  Consider accessing a
     character(kind=1) through a reference to a character(kind=1)[1:1].
     Or consider if we want to assign integer(kind=4)[0:D.1387] and
     integer(kind=4)[4] the same alias set or not.
     Just be pragmatic here and make sure the array and its element
     type get the same alias set assigned.  */
  else if (TREE_CODE (t) == ARRAY_TYPE
           && !TYPE_NONALIASED_COMPONENT (t))
    set = get_alias_set (TREE_TYPE (t));

  else
    /* Otherwise make a new alias set for this type.  */
    set = new_alias_set ();

  TYPE_ALIAS_SET (t) = set;

  /* If this is an aggregate type, we must record any component aliasing
     information.  */
  if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
    record_component_aliases (t);

  return set;
}
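
/* Typical use (an editor's sketch, not part of GCC): when the back-end
   builds a MEM for an access expression EXPR, it tags the MEM with the
   expression's alias set, e.g.

     set_mem_alias_set (mem, get_alias_set (expr));

   after which mems_in_disjoint_alias_sets_p can disambiguate two MEMs
   purely from their recorded sets.  set_mem_alias_set is the existing
   accessor from emit-rtl; `mem' and `expr' are placeholders here.  */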

/* Return a brand-new alias set.  */

alias_set_type
new_alias_set (void)
{
  if (flag_strict_aliasing)
    {
      if (alias_sets == 0)
        VEC_safe_push (alias_set_entry, gc, alias_sets, 0);
      VEC_safe_push (alias_set_entry, gc, alias_sets, 0);
      return VEC_length (alias_set_entry, alias_sets) - 1;
    }
  else
    return 0;
}

/* Indicate that things in SUBSET can alias things in SUPERSET, but that
   not everything that aliases SUPERSET also aliases SUBSET.  For example,
   in C, a store to an `int' can alias a load of a structure containing an
   `int', and vice versa.  But it can't alias a load of a 'double' member
   of the same structure.  Here, the structure would be the SUPERSET and
   `int' the SUBSET.  This relationship is also described in the comment at
   the beginning of this file.

   This function should be called only once per SUPERSET/SUBSET pair.

   It is illegal for SUPERSET to be zero; everything is implicitly a
   subset of alias set zero.  */

void
record_alias_subset (alias_set_type superset, alias_set_type subset)
{
  alias_set_entry superset_entry;
  alias_set_entry subset_entry;

  /* It is possible in complex type situations for both sets to be the same,
     in which case we can ignore this operation.  */
  if (superset == subset)
    return;

  gcc_assert (superset);

  superset_entry = get_alias_set_entry (superset);
  if (superset_entry == 0)
    {
      /* Create an entry for the SUPERSET, so that we have a place to
         attach the SUBSET.  */
      superset_entry = GGC_NEW (struct alias_set_entry_d);
      superset_entry->alias_set = superset;
      superset_entry->children
        = splay_tree_new_ggc (splay_tree_compare_ints);
      superset_entry->has_zero_child = 0;
      VEC_replace (alias_set_entry, alias_sets, superset, superset_entry);
    }

  if (subset == 0)
    superset_entry->has_zero_child = 1;
  else
    {
      subset_entry = get_alias_set_entry (subset);
      /* If there is an entry for the subset, enter all of its children
         (if they are not already present) as children of the SUPERSET.  */
      if (subset_entry)
        {
          if (subset_entry->has_zero_child)
            superset_entry->has_zero_child = 1;

          splay_tree_foreach (subset_entry->children, insert_subset_children,
                              superset_entry->children);
        }

      /* Enter the SUBSET itself as a child of the SUPERSET.  */
      splay_tree_insert (superset_entry->children,
                         (splay_tree_key) subset, 0);
    }
}
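
/* For illustration (an editor's sketch, not part of GCC): for the
   running example

     struct S { int i; double d; };
     struct T { struct S s; float f; };

   record_component_aliases (below) effectively performs

     record_alias_subset (S_set, int_set);
     record_alias_subset (S_set, double_set);
     record_alias_subset (T_set, S_set);
     record_alias_subset (T_set, float_set);

   and, because subset children are propagated, T_set's splay tree ends
   up holding int_set, double_set, float_set and S_set, matching the
   description in alias_set_entry_d.  (The *_set names are placeholders
   for the corresponding get_alias_set results.)  */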

/* Record that component types of TYPE, if any, are part of that type for
   aliasing purposes.  For record types, we only record component types
   for fields that are not marked non-addressable.  For array types, we
   only record the component type if it is not marked non-aliased.  */

void
record_component_aliases (tree type)
{
  alias_set_type superset = get_alias_set (type);
  tree field;

  if (superset == 0)
    return;

  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      /* Recursively record aliases for the base classes, if there are any.  */
      if (TYPE_BINFO (type))
        {
          int i;
          tree binfo, base_binfo;

          for (binfo = TYPE_BINFO (type), i = 0;
               BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
            record_alias_subset (superset,
                                 get_alias_set (BINFO_TYPE (base_binfo)));
        }
      for (field = TYPE_FIELDS (type); field != 0; field = TREE_CHAIN (field))
        if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
          record_alias_subset (superset, get_alias_set (TREE_TYPE (field)));
      break;

    case COMPLEX_TYPE:
      record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
      break;

    /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
       element type.  */

    default:
      break;
    }
}

/* Allocate an alias set for use in storing and reading from the varargs
   spill area.  */

static GTY(()) alias_set_type varargs_set = -1;

alias_set_type
get_varargs_alias_set (void)
{
#if 1
  /* We now lower VA_ARG_EXPR, and there's currently no way to attach the
     varargs alias set to an INDIRECT_REF (FIXME!), so we can't
     consistently use the varargs alias set for loads from the varargs
     area.  So don't use it anywhere.  */
  return 0;
#else
  if (varargs_set == -1)
    varargs_set = new_alias_set ();

  return varargs_set;
#endif
}

/* Likewise, but used for the fixed portions of the frame, e.g., register
   save areas.  */

static GTY(()) alias_set_type frame_set = -1;

alias_set_type
get_frame_alias_set (void)
{
  if (frame_set == -1)
    frame_set = new_alias_set ();

  return frame_set;
}

/* Inside SRC, the source of a SET, find a base address.  */

static rtx
find_base_value (rtx src)
{
  unsigned int regno;

#if defined (FIND_BASE_TERM)
  /* Try machine-dependent ways to find the base term.  */
  src = FIND_BASE_TERM (src);
#endif

  switch (GET_CODE (src))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return src;

    case REG:
      regno = REGNO (src);
      /* At the start of a function, argument registers have known base
         values which may be lost later.  Returning an ADDRESS
         expression here allows optimization based on argument values
         even when the argument registers are used for other purposes.  */
      if (regno < FIRST_PSEUDO_REGISTER && copying_arguments)
        return new_reg_base_value[regno];

      /* If a pseudo has a known base value, return it.  Do not do this
         for non-fixed hard regs since it can result in a circular
         dependency chain for registers which have values at function entry.

         The test above is not sufficient because the scheduler may move
         a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN.  */
      if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
          && regno < VEC_length (rtx, reg_base_value))
        {
          /* If we're inside init_alias_analysis, use new_reg_base_value
             to reduce the number of relaxation iterations.  */
          if (new_reg_base_value && new_reg_base_value[regno]
              && DF_REG_DEF_COUNT (regno) == 1)
            return new_reg_base_value[regno];

          if (VEC_index (rtx, reg_base_value, regno))
            return VEC_index (rtx, reg_base_value, regno);
        }

      return 0;

    case MEM:
      /* Check for an argument passed in memory.  Only record in the
         copying-arguments block; it is too hard to track changes
         otherwise.  */
      if (copying_arguments
          && (XEXP (src, 0) == arg_pointer_rtx
              || (GET_CODE (XEXP (src, 0)) == PLUS
                  && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
        return gen_rtx_ADDRESS (VOIDmode, src);
      return 0;

    case CONST:
      src = XEXP (src, 0);
      if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
        break;

      /* ... fall through ...  */

    case PLUS:
    case MINUS:
      {
        rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);

        /* If either operand is a REG that is a known pointer, then it
           is the base.  */
        if (REG_P (src_0) && REG_POINTER (src_0))
          return find_base_value (src_0);
        if (REG_P (src_1) && REG_POINTER (src_1))
          return find_base_value (src_1);

        /* If either operand is a REG, then see if we already have
           a known value for it.  */
        if (REG_P (src_0))
          {
            temp = find_base_value (src_0);
            if (temp != 0)
              src_0 = temp;
          }

        if (REG_P (src_1))
          {
            temp = find_base_value (src_1);
            if (temp != 0)
              src_1 = temp;
          }

        /* If either base is a named object or a special address
           (like an argument or stack reference), then use it for the
           base term.  */
        if (src_0 != 0
            && (GET_CODE (src_0) == SYMBOL_REF
                || GET_CODE (src_0) == LABEL_REF
                || (GET_CODE (src_0) == ADDRESS
                    && GET_MODE (src_0) != VOIDmode)))
          return src_0;

        if (src_1 != 0
            && (GET_CODE (src_1) == SYMBOL_REF
                || GET_CODE (src_1) == LABEL_REF
                || (GET_CODE (src_1) == ADDRESS
                    && GET_MODE (src_1) != VOIDmode)))
          return src_1;

        /* Guess which operand is the base address:
           If either operand is a symbol, then it is the base.  If
           either operand is a CONST_INT, then the other is the base.  */
        if (CONST_INT_P (src_1) || CONSTANT_P (src_0))
          return find_base_value (src_0);
        else if (CONST_INT_P (src_0) || CONSTANT_P (src_1))
          return find_base_value (src_1);

        return 0;
      }

    case LO_SUM:
      /* The standard form is (lo_sum reg sym) so look only at the
         second operand.  */
      return find_base_value (XEXP (src, 1));

    case AND:
      /* If the second operand is constant set the base
         address to the first operand.  */
      if (CONST_INT_P (XEXP (src, 1)) && INTVAL (XEXP (src, 1)) != 0)
        return find_base_value (XEXP (src, 0));
      return 0;

    case TRUNCATE:
      /* As we do not know which address space the pointer is referring to, we
         can handle this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
        break;
      if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode))
        break;
      /* Fall through.  */
    case HIGH:
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return find_base_value (XEXP (src, 0));

    case ZERO_EXTEND:
    case SIGN_EXTEND:	/* used for NT/Alpha pointers */
      /* As we do not know which address space the pointer is referring to, we
         can handle this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
        break;

      {
        rtx temp = find_base_value (XEXP (src, 0));

        if (temp != 0 && CONSTANT_P (temp))
          temp = convert_memory_address (Pmode, temp);

        return temp;
      }

    default:
      break;
    }

  return 0;
}
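
/* For illustration (an editor's sketch, not part of GCC): given

     (plus:SI (reg:SI 100) (const_int 8))

   where pseudo 100 was first set from `(symbol_ref "x")',
   find_base_value recurses into the REG operand, finds the recorded
   base in reg_base_value, and returns the SYMBOL_REF; the CONST_INT
   operand is treated as an index and ignored.  */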

/* Called from init_alias_analysis indirectly through note_stores.  */

/* While scanning insns to find base values, reg_seen[N] is nonzero if
   register N has been set in this function.  */
static char *reg_seen;

/* Addresses which are known not to alias anything else are identified
   by a unique integer.  */
static int unique_id;

static void
record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
{
  unsigned regno;
  rtx src;
  int n;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);

  gcc_assert (regno < VEC_length (rtx, reg_base_value));

  /* If this spans multiple hard registers, then we must indicate that every
     register has an unusable value.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    n = hard_regno_nregs[regno][GET_MODE (dest)];
  else
    n = 1;
  if (n != 1)
    {
      while (--n >= 0)
        {
          reg_seen[regno + n] = 1;
          new_reg_base_value[regno + n] = 0;
        }
      return;
    }

  if (set)
    {
      /* A CLOBBER wipes out any old value but does not prevent a previously
         unset register from acquiring a base address (i.e. reg_seen is not
         set).  */
      if (GET_CODE (set) == CLOBBER)
        {
          new_reg_base_value[regno] = 0;
          return;
        }
      src = SET_SRC (set);
    }
  else
    {
      if (reg_seen[regno])
        {
          new_reg_base_value[regno] = 0;
          return;
        }
      reg_seen[regno] = 1;
      new_reg_base_value[regno] = gen_rtx_ADDRESS (Pmode,
                                                   GEN_INT (unique_id++));
      return;
    }

  /* If this is not the first set of REGNO, see whether the new value
     is related to the old one.  There are two cases of interest:

     (1) The register might be assigned an entirely new value
         that has the same base term as the original set.

     (2) The set might be a simple self-modification that
         cannot change REGNO's base value.

     If neither case holds, reject the original base value as invalid.
     Note that the following situation is not detected:

         extern int x, y;  int *p = &x;  p += (&y-&x);

     ANSI C does not allow computing the difference of addresses
     of distinct top level objects.  */
  if (new_reg_base_value[regno] != 0
      && find_base_value (src) != new_reg_base_value[regno])
    switch (GET_CODE (src))
      {
      case LO_SUM:
      case MINUS:
        if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
          new_reg_base_value[regno] = 0;
        break;
      case PLUS:
        /* If the value we add in the PLUS is also a valid base value,
           this might be the actual base value, and the original value
           an index.  */
        {
          rtx other = NULL_RTX;

          if (XEXP (src, 0) == dest)
            other = XEXP (src, 1);
          else if (XEXP (src, 1) == dest)
            other = XEXP (src, 0);

          if (! other || find_base_value (other))
            new_reg_base_value[regno] = 0;
          break;
        }
      case AND:
        if (XEXP (src, 0) != dest || !CONST_INT_P (XEXP (src, 1)))
          new_reg_base_value[regno] = 0;
        break;
      default:
        new_reg_base_value[regno] = 0;
        break;
      }
  /* If this is the first set of a register, record the value.  */
  else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
           && ! reg_seen[regno] && new_reg_base_value[regno] == 0)
    new_reg_base_value[regno] = find_base_value (src);

  reg_seen[regno] = 1;
}

/* If a value is known for REGNO, return it.  */

rtx
get_reg_known_value (unsigned int regno)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
        return reg_known_value[regno];
    }
  return NULL;
}

/* Set it.  */

static void
set_reg_known_value (unsigned int regno, rtx val)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
        reg_known_value[regno] = val;
    }
}

/* Similarly for reg_known_equiv_p.  */

bool
get_reg_known_equiv_p (unsigned int regno)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
        return reg_known_equiv_p[regno];
    }
  return false;
}

static void
set_reg_known_equiv_p (unsigned int regno, bool val)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
        reg_known_equiv_p[regno] = val;
    }
}


/* Returns a canonical version of X, from the point of view of alias
   analysis.  (For example, if X is a MEM whose address is a register,
   and the register has a known value (say a SYMBOL_REF), then a MEM
   whose address is the SYMBOL_REF is returned.)  */

rtx
canon_rtx (rtx x)
{
  /* Recursively look for equivalences.  */
  if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
    {
      rtx t = get_reg_known_value (REGNO (x));
      if (t == x)
        return x;
      if (t)
        return canon_rtx (t);
    }

  if (GET_CODE (x) == PLUS)
    {
      rtx x0 = canon_rtx (XEXP (x, 0));
      rtx x1 = canon_rtx (XEXP (x, 1));

      if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
        {
          if (CONST_INT_P (x0))
            return plus_constant (x1, INTVAL (x0));
          else if (CONST_INT_P (x1))
            return plus_constant (x0, INTVAL (x1));
          return gen_rtx_PLUS (GET_MODE (x), x0, x1);
        }
    }

  /* This gives us much better alias analysis when called from
     the loop optimizer.  Note we want to leave the original
     MEM alone, but need to return the canonicalized MEM with
     all the flags with their original values.  */
  else if (MEM_P (x))
    x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));

  return x;
}
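
/* For illustration (an editor's sketch, not part of GCC): if
   reg_known_value records `(symbol_ref "x")' for pseudo 200, then

     canon_rtx (gen_rtx_PLUS (Pmode, regno_reg_rtx[200], GEN_INT (4)))

   folds through get_reg_known_value and plus_constant to
   `(const (plus (symbol_ref "x") (const_int 4)))', giving later
   dependence checks a symbolic base to work with.  */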
|
| 1326 |
|
|
|
| 1327 |
|
|
/* Return 1 if X and Y are identical-looking rtx's.
   Expect that X and Y have already been canonicalized.

   We use the data in reg_known_value above to see if two registers with
   different numbers are, in fact, equivalent.  */

static int
rtx_equal_for_memref_p (const_rtx x, const_rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0 && y == 0)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  if (x == y)
    return 1;

  code = GET_CODE (x);
  /* Rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */
  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Some RTL can be compared without a recursive examination.  */
  switch (code)
    {
    case REG:
      return REGNO (x) == REGNO (y);

    case LABEL_REF:
      return XEXP (x, 0) == XEXP (y, 0);

    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);

    case VALUE:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
      /* There's no need to compare the contents of CONST_DOUBLEs or
         CONST_INTs because pointer equality is a good enough
         comparison for these nodes.  */
      return 0;

    default:
      break;
    }

  /* canon_rtx knows how to handle plus.  No need to canonicalize.  */
  if (code == PLUS)
    return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
             && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
            || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
                && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
  /* For commutative operations, the RTXs match if the operands match in
     either order.  Also handle the simple binary and unary cases without
     a loop.  */
  if (COMMUTATIVE_P (x))
    {
      rtx xop0 = canon_rtx (XEXP (x, 0));
      rtx yop0 = canon_rtx (XEXP (y, 0));
      rtx yop1 = canon_rtx (XEXP (y, 1));

      return ((rtx_equal_for_memref_p (xop0, yop0)
               && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1))
              || (rtx_equal_for_memref_p (xop0, yop1)
                  && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0)));
    }
  else if (NON_COMMUTATIVE_P (x))
    {
      return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
                                      canon_rtx (XEXP (y, 0)))
              && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)),
                                         canon_rtx (XEXP (y, 1))));
    }
  else if (UNARY_P (x))
    return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
                                   canon_rtx (XEXP (y, 0)));

  /* Compare the elements.  If any pair of corresponding elements
     fails to match, return 0 for the whole thing.

     Limit cases to types which actually appear in addresses.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'i':
          if (XINT (x, i) != XINT (y, i))
            return 0;
          break;

        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return 0;

          /* And the corresponding elements must match.  */
          for (j = 0; j < XVECLEN (x, i); j++)
            if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)),
                                        canon_rtx (XVECEXP (y, i, j))) == 0)
              return 0;
          break;

        case 'e':
          if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)),
                                      canon_rtx (XEXP (y, i))) == 0)
            return 0;
          break;

          /* This can happen for asm operands.  */
        case 's':
          if (strcmp (XSTR (x, i), XSTR (y, i)))
            return 0;
          break;

        /* This can happen for an asm which clobbers memory.  */
        case '0':
          break;

          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's,
             except for within LABEL_REFs and SYMBOL_REFs.  */
        default:
          gcc_unreachable ();
        }
    }
  return 1;
}

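/* Illustrative example, not from the original sources: PLUS operands
   are matched in either order, so (plus (reg 100) (reg 101)) compares
   equal to (plus (reg 101) (reg 100)).  For the other commutative
   codes the operands are first passed through canon_rtx, so known
   register equivalences are honored before comparing.  */
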
rtx
find_base_term (rtx x)
{
  cselib_val *val;
  struct elt_loc_list *l;

#if defined (FIND_BASE_TERM)
  /* Try machine-dependent ways to find the base term.  */
  x = FIND_BASE_TERM (x);
#endif

  switch (GET_CODE (x))
    {
    case REG:
      return REG_BASE_VALUE (x);

    case TRUNCATE:
      /* As we do not know which address space the pointer is referring to,
         we can handle this only if the target does not support different
         pointer or address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
        return 0;
      if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode))
        return 0;
      /* Fall through.  */
    case HIGH:
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return find_base_term (XEXP (x, 0));

    case ZERO_EXTEND:
    case SIGN_EXTEND:	/* Used for Alpha/NT pointers */
      /* As we do not know which address space the pointer is referring to,
         we can handle this only if the target does not support different
         pointer or address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
        return 0;

      {
        rtx temp = find_base_term (XEXP (x, 0));

        if (temp != 0 && CONSTANT_P (temp))
          temp = convert_memory_address (Pmode, temp);

        return temp;
      }

    case VALUE:
      val = CSELIB_VAL_PTR (x);
      if (!val)
        return 0;
      for (l = val->locs; l; l = l->next)
        if ((x = find_base_term (l->loc)) != 0)
          return x;
      return 0;

    case LO_SUM:
      /* The standard form is (lo_sum reg sym) so look only at the
         second operand.  */
      return find_base_term (XEXP (x, 1));

    case CONST:
      x = XEXP (x, 0);
      if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
        return 0;
      /* Fall through.  */
    case PLUS:
    case MINUS:
      {
        rtx tmp1 = XEXP (x, 0);
        rtx tmp2 = XEXP (x, 1);

        /* This is a little bit tricky since we have to determine which of
           the two operands represents the real base address.  Otherwise this
           routine may return the index register instead of the base register.

           That may cause us to believe no aliasing was possible, when in
           fact aliasing is possible.

           We use a few simple tests to guess the base register.  Additional
           tests can certainly be added.  For example, if one of the operands
           is a shift or multiply, then it must be the index register and the
           other operand is the base register.  */

        if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
          return find_base_term (tmp2);

        /* If either operand is known to be a pointer, then use it
           to determine the base term.  */
        if (REG_P (tmp1) && REG_POINTER (tmp1))
          {
            rtx base = find_base_term (tmp1);
            if (base)
              return base;
          }

        if (REG_P (tmp2) && REG_POINTER (tmp2))
          {
            rtx base = find_base_term (tmp2);
            if (base)
              return base;
          }

        /* Neither operand was known to be a pointer.  Go ahead and find the
           base term for both operands.  */
        tmp1 = find_base_term (tmp1);
        tmp2 = find_base_term (tmp2);

        /* If either base term is a named object or a special address
           (like an argument or stack reference), then use it for the
           base term.  */
        if (tmp1 != 0
            && (GET_CODE (tmp1) == SYMBOL_REF
                || GET_CODE (tmp1) == LABEL_REF
                || (GET_CODE (tmp1) == ADDRESS
                    && GET_MODE (tmp1) != VOIDmode)))
          return tmp1;

        if (tmp2 != 0
            && (GET_CODE (tmp2) == SYMBOL_REF
                || GET_CODE (tmp2) == LABEL_REF
                || (GET_CODE (tmp2) == ADDRESS
                    && GET_MODE (tmp2) != VOIDmode)))
          return tmp2;

        /* We could not determine which of the two operands was the
           base register and which was the index.  So we can determine
           nothing from the base alias check.  */
        return 0;
      }

    case AND:
      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) != 0)
        return find_base_term (XEXP (x, 0));
      return 0;

    case SYMBOL_REF:
    case LABEL_REF:
      return x;

    default:
      return 0;
    }
}

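/* Illustrative example, not from the original sources: for a stack
   slot address such as (plus (reg/f frame) (const_int -4)), the REG
   operand carries REG_POINTER, so the base term is the
   (address (reg frame)) recorded for the frame pointer; for
   (const (plus (symbol_ref "a") (const_int 8))) the base term is the
   SYMBOL_REF itself.  */
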
/* Return 0 if the addresses X and Y are known to point to different
   objects, 1 if they might be pointers to the same object.  */

static int
base_alias_check (rtx x, rtx y, enum machine_mode x_mode,
                  enum machine_mode y_mode)
{
  rtx x_base = find_base_term (x);
  rtx y_base = find_base_term (y);

  /* If the address itself has no known base see if a known equivalent
     value has one.  If either address still has no known base, nothing
     is known about aliasing.  */
  if (x_base == 0)
    {
      rtx x_c;

      if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
        return 1;

      x_base = find_base_term (x_c);
      if (x_base == 0)
        return 1;
    }

  if (y_base == 0)
    {
      rtx y_c;
      if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
        return 1;

      y_base = find_base_term (y_c);
      if (y_base == 0)
        return 1;
    }

  /* If the base addresses are equal nothing is known about aliasing.  */
  if (rtx_equal_p (x_base, y_base))
    return 1;

  /* The base addresses are different expressions.  If they are not accessed
     via AND, there is no conflict.  We can bring knowledge of object
     alignment into play here.  For example, on alpha, "char a, b;" can
     alias one another, though "char a; long b;" cannot.  AND addresses may
     implicitly alias surrounding objects; i.e. unaligned access in DImode
     via AND address can alias all surrounding object types except those
     with alignment 8 or higher.  */
  if (GET_CODE (x) == AND && GET_CODE (y) == AND)
    return 1;
  if (GET_CODE (x) == AND
      && (!CONST_INT_P (XEXP (x, 1))
          || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
    return 1;
  if (GET_CODE (y) == AND
      && (!CONST_INT_P (XEXP (y, 1))
          || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
    return 1;

  /* Differing symbols not accessed via AND never alias.  */
  if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
    return 0;

  /* If one address is a stack reference there can be no alias:
     stack references using different base registers do not alias,
     a stack reference can not alias a parameter, and a stack reference
     can not alias a global.  */
  if ((GET_CODE (x_base) == ADDRESS && GET_MODE (x_base) == Pmode)
      || (GET_CODE (y_base) == ADDRESS && GET_MODE (y_base) == Pmode))
    return 0;

  if (! flag_argument_noalias)
    return 1;

  if (flag_argument_noalias > 1)
    return 0;

  /* Weak noalias assertion (arguments are distinct, but may match globals).  */
  return ! (GET_MODE (x_base) == VOIDmode && GET_MODE (y_base) == VOIDmode);
}

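/* Illustrative example, not from the original sources: if X is based
   on the frame pointer its base term is an ADDRESS with Pmode, while
   a global Y has a SYMBOL_REF base; the bases differ, X is a stack
   reference, and base_alias_check returns 0: the two references are
   known not to alias.  */
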
/* Convert the address X into something we can use.  This is done by returning
   it unchanged unless it is a value; in the latter case we call cselib to get
   a more useful rtx.  */

rtx
get_addr (rtx x)
{
  cselib_val *v;
  struct elt_loc_list *l;

  if (GET_CODE (x) != VALUE)
    return x;
  v = CSELIB_VAL_PTR (x);
  if (v)
    {
      for (l = v->locs; l; l = l->next)
        if (CONSTANT_P (l->loc))
          return l->loc;
      for (l = v->locs; l; l = l->next)
        if (!REG_P (l->loc) && !MEM_P (l->loc))
          return l->loc;
      if (v->locs)
        return v->locs->loc;
    }
  return x;
}

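/* Illustrative example, not from the original sources: given a cselib
   VALUE whose location list holds (const_int 16), (reg 100) and
   (plus (reg 101) (const_int 4)), get_addr returns (const_int 16),
   the CONSTANT_P location; without it, the PLUS would be chosen,
   being neither a REG nor a MEM.  */
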
/* Return the address of the (N_REFS + 1)th memory reference to ADDR
   where SIZE is the size in bytes of the memory reference.  If ADDR
   is not modified by the memory reference then ADDR is returned.  */

static rtx
addr_side_effect_eval (rtx addr, int size, int n_refs)
{
  int offset = 0;

  switch (GET_CODE (addr))
    {
    case PRE_INC:
      offset = (n_refs + 1) * size;
      break;
    case PRE_DEC:
      offset = -(n_refs + 1) * size;
      break;
    case POST_INC:
      offset = n_refs * size;
      break;
    case POST_DEC:
      offset = -n_refs * size;
      break;

    default:
      return addr;
    }

  if (offset)
    addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0),
                         GEN_INT (offset));
  else
    addr = XEXP (addr, 0);
  addr = canon_rtx (addr);

  return addr;
}

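/* Illustrative example, not from the original sources: for
   (post_inc:SI (reg:SI 100)) with SIZE 4, N_REFS == 0 yields the
   address (reg 100) itself (the increment has not yet happened),
   while N_REFS == 1 yields (plus (reg 100) (const_int 4)).  */
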
/* Return one if X and Y (memory addresses) reference the
   same location in memory or if the references overlap.
   Return zero if they do not overlap, else return
   minus one in which case they still might reference the same location.

   C is an offset accumulator.  When
   C is nonzero, we are testing aliases between X and Y + C.
   XSIZE is the size in bytes of the X reference,
   similarly YSIZE is the size in bytes for Y.
   Expect that canon_rtx has already been called for X and Y.

   If XSIZE or YSIZE is zero, we do not know the amount of memory being
   referenced (the reference was BLKmode), so make the most pessimistic
   assumptions.

   If XSIZE or YSIZE is negative, we may access memory outside the object
   being referenced as a side effect.  This can happen when using AND to
   align memory references, as is done on the Alpha.

   Nice to notice that varying addresses cannot conflict with fp if no
   local variables had their addresses taken, but that's too hard now.

   ???  Contrary to the tree alias oracle this does not return
   one for X + non-constant and Y + non-constant when X and Y are equal.
   If that is fixed the TBAA hack for union type-punning can be removed.  */

static int
memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
{
  if (GET_CODE (x) == VALUE)
    x = get_addr (x);
  if (GET_CODE (y) == VALUE)
    y = get_addr (y);
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);
  else if (GET_CODE (x) == LO_SUM)
    x = XEXP (x, 1);
  else
    x = addr_side_effect_eval (x, xsize, 0);
  if (GET_CODE (y) == HIGH)
    y = XEXP (y, 0);
  else if (GET_CODE (y) == LO_SUM)
    y = XEXP (y, 1);
  else
    y = addr_side_effect_eval (y, ysize, 0);

  if (rtx_equal_for_memref_p (x, y))
    {
      if (xsize <= 0 || ysize <= 0)
        return 1;
      if (c >= 0 && xsize > c)
        return 1;
      if (c < 0 && ysize + c > 0)
        return 1;
      return 0;
    }

  /* This code used to check for conflicts involving stack references and
     globals but the base address alias code now handles these cases.  */

  if (GET_CODE (x) == PLUS)
    {
      /* The fact that X is canonicalized means that this
         PLUS rtx is canonicalized.  */
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (GET_CODE (y) == PLUS)
        {
          /* The fact that Y is canonicalized means that this
             PLUS rtx is canonicalized.  */
          rtx y0 = XEXP (y, 0);
          rtx y1 = XEXP (y, 1);

          if (rtx_equal_for_memref_p (x1, y1))
            return memrefs_conflict_p (xsize, x0, ysize, y0, c);
          if (rtx_equal_for_memref_p (x0, y0))
            return memrefs_conflict_p (xsize, x1, ysize, y1, c);
          if (CONST_INT_P (x1))
            {
              if (CONST_INT_P (y1))
                return memrefs_conflict_p (xsize, x0, ysize, y0,
                                           c - INTVAL (x1) + INTVAL (y1));
              else
                return memrefs_conflict_p (xsize, x0, ysize, y,
                                           c - INTVAL (x1));
            }
          else if (CONST_INT_P (y1))
            return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));

          return -1;
        }
      else if (CONST_INT_P (x1))
        return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
    }
  else if (GET_CODE (y) == PLUS)
    {
      /* The fact that Y is canonicalized means that this
         PLUS rtx is canonicalized.  */
      rtx y0 = XEXP (y, 0);
      rtx y1 = XEXP (y, 1);

      if (CONST_INT_P (y1))
        return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
      else
        return -1;
    }

  if (GET_CODE (x) == GET_CODE (y))
    switch (GET_CODE (x))
      {
      case MULT:
        {
          /* Handle cases where we expect the second operands to be the
             same, and check only whether the first operand would conflict
             or not.  */
          rtx x0, y0;
          rtx x1 = canon_rtx (XEXP (x, 1));
          rtx y1 = canon_rtx (XEXP (y, 1));
          if (! rtx_equal_for_memref_p (x1, y1))
            return -1;
          x0 = canon_rtx (XEXP (x, 0));
          y0 = canon_rtx (XEXP (y, 0));
          if (rtx_equal_for_memref_p (x0, y0))
            return (xsize == 0 || ysize == 0
                    || (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0));

          /* Can't properly adjust our sizes.  */
          if (!CONST_INT_P (x1))
            return -1;
          xsize /= INTVAL (x1);
          ysize /= INTVAL (x1);
          c /= INTVAL (x1);
          return memrefs_conflict_p (xsize, x0, ysize, y0, c);
        }

      default:
        break;
      }

  /* Treat an access through an AND (e.g. a subword access on an Alpha)
     as an access with indeterminate size.  Assume that references
     besides AND are aligned, so if the size of the other reference is
     at least as large as the alignment, assume no other overlap.  */
  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)))
    {
      if (GET_CODE (y) == AND || ysize < -INTVAL (XEXP (x, 1)))
        xsize = -1;
      return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), ysize, y, c);
    }
  if (GET_CODE (y) == AND && CONST_INT_P (XEXP (y, 1)))
    {
      /* ??? If we are indexing far enough into the array/structure, we
         may yet be able to determine that we can not overlap.  But we
         also need to know that we are far enough from the end not to
         overlap a following reference, so we do nothing with that for
         now.  */
      if (GET_CODE (x) == AND || xsize < -INTVAL (XEXP (y, 1)))
        ysize = -1;
      return memrefs_conflict_p (xsize, x, ysize, canon_rtx (XEXP (y, 0)), c);
    }

  if (CONSTANT_P (x))
    {
      if (CONST_INT_P (x) && CONST_INT_P (y))
        {
          c += (INTVAL (y) - INTVAL (x));
          return (xsize <= 0 || ysize <= 0
                  || (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0));
        }

      if (GET_CODE (x) == CONST)
        {
          if (GET_CODE (y) == CONST)
            return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
                                       ysize, canon_rtx (XEXP (y, 0)), c);
          else
            return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
                                       ysize, y, c);
        }
      if (GET_CODE (y) == CONST)
        return memrefs_conflict_p (xsize, x, ysize,
                                   canon_rtx (XEXP (y, 0)), c);

      if (CONSTANT_P (y))
        return (xsize <= 0 || ysize <= 0
                || (rtx_equal_for_memref_p (x, y)
                    && ((c >= 0 && xsize > c) || (c < 0 && ysize + c > 0))));

      return -1;
    }

  return -1;
}

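/* Illustrative example, not from the original sources: comparing a
   4-byte reference at (symbol_ref "a") with a 4-byte reference at
   (plus (symbol_ref "a") (const_int 8)) recurses with C adjusted to
   8; the bases then compare equal and, since XSIZE (4) is not greater
   than C (8), the result is 0 (no overlap).  With an offset of 2
   instead, XSIZE > C and the result is 1.  */
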
/* Functions to compute memory dependencies.

   Since we process the insns in execution order, we can build tables
   to keep track of what registers are fixed (and not aliased), what registers
   are varying in known ways, and what registers are varying in unknown
   ways.

   If both memory references are volatile, then there must always be a
   dependence between the two references, since their order can not be
   changed.  A volatile and non-volatile reference can be interchanged
   though.

   A MEM_IN_STRUCT reference at a non-AND varying address can never
   conflict with a non-MEM_IN_STRUCT reference at a fixed address.  We
   also must allow AND addresses, because they may generate accesses
   outside the object being referenced.  This is used to generate
   aligned addresses from unaligned addresses, for instance, the alpha
   storeqi_unaligned pattern.  */

/* Read dependence: X is read after read in MEM takes place.  There can
   only be a dependence here if both reads are volatile.  */

int
read_dependence (const_rtx mem, const_rtx x)
{
  return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
}

/* Returns MEM1 if and only if MEM1 is a scalar at a fixed address and
   MEM2 is a reference to a structure at a varying address, or returns
   MEM2 if vice versa.  Otherwise, returns NULL_RTX.  If a non-NULL
   value is returned MEM1 and MEM2 can never alias.  VARIES_P is used
   to decide whether or not an address may vary; it should return
   nonzero whenever variation is possible.
   MEM1_ADDR and MEM2_ADDR are the addresses of MEM1 and MEM2.  */

static const_rtx
fixed_scalar_and_varying_struct_p (const_rtx mem1, const_rtx mem2,
                                   rtx mem1_addr, rtx mem2_addr,
                                   bool (*varies_p) (const_rtx, bool))
{
  if (! flag_strict_aliasing)
    return NULL_RTX;

  if (MEM_ALIAS_SET (mem2)
      && MEM_SCALAR_P (mem1) && MEM_IN_STRUCT_P (mem2)
      && !varies_p (mem1_addr, 1) && varies_p (mem2_addr, 1))
    /* MEM1 is a scalar at a fixed address; MEM2 is a struct at a
       varying address.  */
    return mem1;

  if (MEM_ALIAS_SET (mem1)
      && MEM_IN_STRUCT_P (mem1) && MEM_SCALAR_P (mem2)
      && varies_p (mem1_addr, 1) && !varies_p (mem2_addr, 1))
    /* MEM2 is a scalar at a fixed address; MEM1 is a struct at a
       varying address.  */
    return mem2;

  return NULL_RTX;
}

/* Returns nonzero if something about the mode or address format of MEM
   indicates that it might well alias *anything*.  */

static int
aliases_everything_p (const_rtx mem)
{
  if (GET_CODE (XEXP (mem, 0)) == AND)
    /* If the address is an AND, it's very hard to know what it is
       actually pointing at.  */
    return 1;

  return 0;
}

/* Return true if we can determine that the fields referenced cannot
   overlap for any pair of objects.  */

static bool
nonoverlapping_component_refs_p (const_tree x, const_tree y)
{
  const_tree fieldx, fieldy, typex, typey, orig_y;

  if (!flag_strict_aliasing)
    return false;

  do
    {
      /* The comparison has to be done at a common type, since we don't
         know how the inheritance hierarchy works.  */
      orig_y = y;
      do
        {
          fieldx = TREE_OPERAND (x, 1);
          typex = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldx));

          y = orig_y;
          do
            {
              fieldy = TREE_OPERAND (y, 1);
              typey = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldy));

              if (typex == typey)
                goto found;

              y = TREE_OPERAND (y, 0);
            }
          while (y && TREE_CODE (y) == COMPONENT_REF);

          x = TREE_OPERAND (x, 0);
        }
      while (x && TREE_CODE (x) == COMPONENT_REF);
      /* Never found a common type.  */
      return false;

    found:
      /* If we're left with accessing different fields of a structure,
         then no overlap.  */
      if (TREE_CODE (typex) == RECORD_TYPE
          && fieldx != fieldy)
        return true;

      /* The comparison on the current field failed.  If we're accessing
         a very nested structure, look at the next outer level.  */
      x = TREE_OPERAND (x, 0);
      y = TREE_OPERAND (y, 0);
    }
  while (x && y
         && TREE_CODE (x) == COMPONENT_REF
         && TREE_CODE (y) == COMPONENT_REF);

  return false;
}

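/* Illustrative example, not from the original sources: given
   struct S { int a; int b; }, references p->a and q->b meet at the
   common RECORD_TYPE S with distinct FIELD_DECLs, so the function
   returns true: no pair of S objects can make those accesses
   overlap.  Accesses through the same field return false.  */
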
/* Look at the bottom of the COMPONENT_REF list for a DECL, and return it.  */

static tree
decl_for_component_ref (tree x)
{
  do
    {
      x = TREE_OPERAND (x, 0);
    }
  while (x && TREE_CODE (x) == COMPONENT_REF);

  return x && DECL_P (x) ? x : NULL_TREE;
}

/* Walk up the COMPONENT_REF list and adjust OFFSET to compensate for the
   offset of the field reference.  */

static rtx
adjust_offset_for_component_ref (tree x, rtx offset)
{
  HOST_WIDE_INT ioffset;

  if (! offset)
    return NULL_RTX;

  ioffset = INTVAL (offset);
  do
    {
      tree offset = component_ref_field_offset (x);
      tree field = TREE_OPERAND (x, 1);

      if (! host_integerp (offset, 1))
        return NULL_RTX;
      ioffset += (tree_low_cst (offset, 1)
                  + (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
                     / BITS_PER_UNIT));

      x = TREE_OPERAND (x, 0);
    }
  while (x && TREE_CODE (x) == COMPONENT_REF);

  return GEN_INT (ioffset);
}

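/* Illustrative example, not from the original sources: for
   struct S { int a; int b; } and the reference x.b with OFFSET
   (const_int 0), the byte position of field b (4, assuming 4-byte
   int) is accumulated at each COMPONENT_REF level, so the result
   is (const_int 4).  */
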
/* Return nonzero if we can determine the exprs corresponding to memrefs
   X and Y and they do not overlap.  */

int
nonoverlapping_memrefs_p (const_rtx x, const_rtx y)
{
  tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
  rtx rtlx, rtly;
  rtx basex, basey;
  rtx moffsetx, moffsety;
  HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey, tem;

  /* Unless both have exprs, we can't tell anything.  */
  if (exprx == 0 || expry == 0)
    return 0;

  /* For spill-slot accesses make sure we have valid offsets.  */
  if ((exprx == get_spill_slot_decl (false)
       && ! MEM_OFFSET (x))
      || (expry == get_spill_slot_decl (false)
          && ! MEM_OFFSET (y)))
    return 0;

  /* If both are field references, we may be able to determine something.  */
  if (TREE_CODE (exprx) == COMPONENT_REF
      && TREE_CODE (expry) == COMPONENT_REF
      && nonoverlapping_component_refs_p (exprx, expry))
    return 1;

  /* If the field reference test failed, look at the DECLs involved.  */
  moffsetx = MEM_OFFSET (x);
  if (TREE_CODE (exprx) == COMPONENT_REF)
    {
      if (TREE_CODE (expry) == VAR_DECL
          && POINTER_TYPE_P (TREE_TYPE (expry)))
        {
          tree field = TREE_OPERAND (exprx, 1);
          tree fieldcontext = DECL_FIELD_CONTEXT (field);
          if (ipa_type_escape_field_does_not_clobber_p (fieldcontext,
                                                        TREE_TYPE (field)))
            return 1;
        }
      {
        tree t = decl_for_component_ref (exprx);
        if (! t)
          return 0;
        moffsetx = adjust_offset_for_component_ref (exprx, moffsetx);
        exprx = t;
      }
    }
  else if (INDIRECT_REF_P (exprx))
    {
      exprx = TREE_OPERAND (exprx, 0);
      if (flag_argument_noalias < 2
          || TREE_CODE (exprx) != PARM_DECL)
        return 0;
    }

  moffsety = MEM_OFFSET (y);
  if (TREE_CODE (expry) == COMPONENT_REF)
    {
      if (TREE_CODE (exprx) == VAR_DECL
          && POINTER_TYPE_P (TREE_TYPE (exprx)))
        {
          tree field = TREE_OPERAND (expry, 1);
          tree fieldcontext = DECL_FIELD_CONTEXT (field);
          if (ipa_type_escape_field_does_not_clobber_p (fieldcontext,
                                                        TREE_TYPE (field)))
            return 1;
        }
      {
        tree t = decl_for_component_ref (expry);
        if (! t)
          return 0;
        moffsety = adjust_offset_for_component_ref (expry, moffsety);
        expry = t;
      }
    }
  else if (INDIRECT_REF_P (expry))
    {
      expry = TREE_OPERAND (expry, 0);
      if (flag_argument_noalias < 2
          || TREE_CODE (expry) != PARM_DECL)
        return 0;
    }

  if (! DECL_P (exprx) || ! DECL_P (expry))
    return 0;

  /* With invalid code we can end up storing into the constant pool.
     Bail out to avoid ICEing when creating RTL for this.
     See gfortran.dg/lto/20091028-2_0.f90.  */
  if (TREE_CODE (exprx) == CONST_DECL
      || TREE_CODE (expry) == CONST_DECL)
    return 1;

  rtlx = DECL_RTL (exprx);
  rtly = DECL_RTL (expry);

  /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
     can't overlap unless they are the same because we never reuse that part
     of the stack frame used for locals for spilled pseudos.  */
  if ((!MEM_P (rtlx) || !MEM_P (rtly))
      && ! rtx_equal_p (rtlx, rtly))
    return 1;

  /* If we have MEMs referring to different address spaces (which can
     potentially overlap), we cannot easily tell from the addresses
     whether the references overlap.  */
  if (MEM_P (rtlx) && MEM_P (rtly)
      && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly))
    return 0;

  /* Get the base and offsets of both decls.  If either is a register, we
     know both are and are the same, so use that as the base.  The only
     way we can avoid overlap is if we can deduce that they are
     nonoverlapping pieces of that decl, which is very rare.  */
  basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
  if (GET_CODE (basex) == PLUS && CONST_INT_P (XEXP (basex, 1)))
    offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0);

  basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
  if (GET_CODE (basey) == PLUS && CONST_INT_P (XEXP (basey, 1)))
    offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0);

  /* If the bases are different, we know they do not overlap if both
     are constants or if one is a constant and the other a pointer into the
     stack frame.  Otherwise a different base means we can't tell if they
     overlap or not.  */
  if (! rtx_equal_p (basex, basey))
    return ((CONSTANT_P (basex) && CONSTANT_P (basey))
            || (CONSTANT_P (basex) && REG_P (basey)
                && REGNO_PTR_FRAME_P (REGNO (basey)))
            || (CONSTANT_P (basey) && REG_P (basex)
                && REGNO_PTR_FRAME_P (REGNO (basex))));

  sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
           : MEM_SIZE (rtlx) ? INTVAL (MEM_SIZE (rtlx))
           : -1);
  sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly))
           : MEM_SIZE (rtly) ? INTVAL (MEM_SIZE (rtly))
           : -1);

  /* If we have an offset for either memref, it can update the values computed
     above.  */
  if (moffsetx)
    offsetx += INTVAL (moffsetx), sizex -= INTVAL (moffsetx);
  if (moffsety)
    offsety += INTVAL (moffsety), sizey -= INTVAL (moffsety);

  /* If a memref has both a size and an offset, we can use the smaller size.
     We can't do this if the offset isn't known because we must view this
     memref as being anywhere inside the DECL's MEM.  */
  if (MEM_SIZE (x) && moffsetx)
    sizex = INTVAL (MEM_SIZE (x));
  if (MEM_SIZE (y) && moffsety)
    sizey = INTVAL (MEM_SIZE (y));

  /* Put the values of the memref with the lower offset in X's values.  */
  if (offsetx > offsety)
    {
      tem = offsetx, offsetx = offsety, offsety = tem;
      tem = sizex, sizex = sizey, sizey = tem;
    }

  /* If we don't know the size of the lower-offset value, we can't tell
     if they conflict.  Otherwise, we do the test.  */
  return sizex >= 0 && offsety >= offsetx + sizex;
}

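/* Illustrative example, not from the original sources: two references
   into the same DECL at byte offsets 0 and 8, each of size 4, are
   ordered so X names the lower one; since 8 >= 0 + 4, the final test
   succeeds and the references are reported as nonoverlapping.  */
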
/* True dependence: X is read after store in MEM takes place.  */

int
true_dependence (const_rtx mem, enum machine_mode mem_mode, const_rtx x,
                 bool (*varies) (const_rtx, bool))
{
  rtx x_addr, mem_addr;
  rtx base;
  int ret;

  if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
    return 1;

  /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
     This is used in epilogue deallocation functions, and in cselib.  */
  if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
    return 1;
  if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
    return 1;
  if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
      || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
    return 1;

  /* Read-only memory is by definition never modified, and therefore can't
     conflict with anything.  We don't expect to find read-only set on MEM,
     but stupid user tricks can produce them, so don't die.  */
  if (MEM_READONLY_P (x))
    return 0;

  /* If we have MEMs referring to different address spaces (which can
     potentially overlap), we cannot easily tell from the addresses
     whether the references overlap.  */
  if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
    return 1;

  if (mem_mode == VOIDmode)
    mem_mode = GET_MODE (mem);

  x_addr = XEXP (x, 0);
  mem_addr = XEXP (mem, 0);
  if (!((GET_CODE (x_addr) == VALUE
         && GET_CODE (mem_addr) != VALUE
         && reg_mentioned_p (x_addr, mem_addr))
        || (GET_CODE (x_addr) != VALUE
            && GET_CODE (mem_addr) == VALUE
            && reg_mentioned_p (mem_addr, x_addr))))
    {
      x_addr = get_addr (x_addr);
      mem_addr = get_addr (mem_addr);
    }

  base = find_base_term (x_addr);
  if (base && (GET_CODE (base) == LABEL_REF
               || (GET_CODE (base) == SYMBOL_REF
                   && CONSTANT_POOL_ADDRESS_P (base))))
    return 0;

  if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), mem_mode))
    return 0;

  x_addr = canon_rtx (x_addr);
  mem_addr = canon_rtx (mem_addr);

  if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
                                 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
    return ret;

  if (DIFFERENT_ALIAS_SETS_P (x, mem))
    return 0;

  if (nonoverlapping_memrefs_p (mem, x))
    return 0;

  if (aliases_everything_p (x))
    return 1;

  /* We cannot use aliases_everything_p to test MEM, since we must look
     at MEM_MODE, rather than GET_MODE (MEM).  */
  if (mem_mode == QImode || GET_CODE (mem_addr) == AND)
    return 1;

  /* In true_dependence we also allow BLKmode to alias anything.  Why
     don't we do this in anti_dependence and output_dependence?  */
  if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
    return 1;

  if (fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr, varies))
    return 0;

  return rtx_refs_may_alias_p (x, mem, true);
}

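/* A usage note, not from the original sources: schedulers and other
   passes call true_dependence (store, mode-of-store, load, varies)
   to decide whether a later load must stay ordered after an earlier
   store; a nonzero result means the load may observe the stored
   value, so the two may not be reordered.  */
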
/* Canonical true dependence: X is read after store in MEM takes place.
   Variant of true_dependence which assumes MEM has already been
   canonicalized (hence we no longer do that here).
   The mem_addr argument has been added, since true_dependence computed
   this value prior to canonicalizing.
   If x_addr is non-NULL, it is used in preference to XEXP (x, 0).  */

int
canon_true_dependence (const_rtx mem, enum machine_mode mem_mode, rtx mem_addr,
                       const_rtx x, rtx x_addr,
                       bool (*varies) (const_rtx, bool))
{
  int ret;

  if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
    return 1;

  /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
     This is used in epilogue deallocation functions.  */
  if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
    return 1;
  if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
    return 1;
  if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
      || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
    return 1;

  /* Read-only memory is by definition never modified, and therefore can't
     conflict with anything.  We don't expect to find read-only set on MEM,
     but stupid user tricks can produce them, so don't die.  */
  if (MEM_READONLY_P (x))
    return 0;

  /* If we have MEMs referring to different address spaces (which can
     potentially overlap), we cannot easily tell from the addresses
     whether the references overlap.  */
  if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
    return 1;

  if (! x_addr)
    {
      x_addr = XEXP (x, 0);
      if (!((GET_CODE (x_addr) == VALUE
             && GET_CODE (mem_addr) != VALUE
             && reg_mentioned_p (x_addr, mem_addr))
            || (GET_CODE (x_addr) != VALUE
                && GET_CODE (mem_addr) == VALUE
                && reg_mentioned_p (mem_addr, x_addr))))
        x_addr = get_addr (x_addr);
    }

  if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), mem_mode))
    return 0;

  x_addr = canon_rtx (x_addr);
  if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
                                 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
    return ret;

  if (DIFFERENT_ALIAS_SETS_P (x, mem))
    return 0;

  if (nonoverlapping_memrefs_p (x, mem))
    return 0;

  if (aliases_everything_p (x))
    return 1;

  /* We cannot use aliases_everything_p to test MEM, since we must look
     at MEM_MODE, rather than GET_MODE (MEM).  */
  if (mem_mode == QImode || GET_CODE (mem_addr) == AND)
    return 1;

  /* In true_dependence we also allow BLKmode to alias anything.  Why
     don't we do this in anti_dependence and output_dependence?  */
  if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
    return 1;

  if (fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr, varies))
    return 0;

  return rtx_refs_may_alias_p (x, mem, true);
}

/* Returns nonzero if a write to X might alias a previous read from
   (or, if WRITEP is nonzero, a write to) MEM.  */

static int
write_dependence_p (const_rtx mem, const_rtx x, int writep)
{
  rtx x_addr, mem_addr;
  const_rtx fixed_scalar;
  rtx base;
  int ret;

  if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
    return 1;

  /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
     This is used in epilogue deallocation functions.  */
  if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
    return 1;
  if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
    return 1;
  if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
      || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
    return 1;

  /* A read from read-only memory can't conflict with read-write memory.  */
  if (!writep && MEM_READONLY_P (mem))
    return 0;

  /* If we have MEMs referring to different address spaces (which can
     potentially overlap), we cannot easily tell from the addresses
     whether the references overlap.  */
  if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
    return 1;

  x_addr = XEXP (x, 0);
  mem_addr = XEXP (mem, 0);
  if (!((GET_CODE (x_addr) == VALUE
         && GET_CODE (mem_addr) != VALUE
         && reg_mentioned_p (x_addr, mem_addr))
        || (GET_CODE (x_addr) != VALUE
            && GET_CODE (mem_addr) == VALUE
            && reg_mentioned_p (mem_addr, x_addr))))
    {
      x_addr = get_addr (x_addr);
      mem_addr = get_addr (mem_addr);
    }

  if (! writep)
    {
      base = find_base_term (mem_addr);
      if (base && (GET_CODE (base) == LABEL_REF
                   || (GET_CODE (base) == SYMBOL_REF
                       && CONSTANT_POOL_ADDRESS_P (base))))
        return 0;
    }

  if (! base_alias_check (x_addr, mem_addr, GET_MODE (x),
                          GET_MODE (mem)))
    return 0;

  x_addr = canon_rtx (x_addr);
  mem_addr = canon_rtx (mem_addr);

  if ((ret = memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
                                 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
    return ret;

  if (nonoverlapping_memrefs_p (x, mem))
    return 0;

  fixed_scalar
    = fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr,
                                         rtx_addr_varies_p);

  if ((fixed_scalar == mem && !aliases_everything_p (x))
      || (fixed_scalar == x && !aliases_everything_p (mem)))
    return 0;

  return rtx_refs_may_alias_p (x, mem, false);
}

/* Anti dependence: X is written after read in MEM takes place.  */

int
anti_dependence (const_rtx mem, const_rtx x)
{
  return write_dependence_p (mem, x, /*writep=*/0);
}

/* Output dependence: X is written after store in MEM takes place.  */

int
output_dependence (const_rtx mem, const_rtx x)
{
  return write_dependence_p (mem, x, /*writep=*/1);
}

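/* A note for orientation, not from the original sources: of the three
   dependence tests, true_dependence orders a read after a write,
   anti_dependence a write after a read, and output_dependence a write
   after a write; the latter two are thin wrappers around
   write_dependence_p above.  */
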
void
init_alias_target (void)
{
  int i;

  memset (static_reg_base_value, 0, sizeof static_reg_base_value);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    /* Check whether this register can hold an incoming pointer
       argument.  FUNCTION_ARG_REGNO_P tests outgoing register
       numbers, so translate if necessary due to register windows.  */
    if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
        && HARD_REGNO_MODE_OK (i, Pmode))
      static_reg_base_value[i]
        = gen_rtx_ADDRESS (VOIDmode, gen_rtx_REG (Pmode, i));

  static_reg_base_value[STACK_POINTER_REGNUM]
    = gen_rtx_ADDRESS (Pmode, stack_pointer_rtx);
  static_reg_base_value[ARG_POINTER_REGNUM]
    = gen_rtx_ADDRESS (Pmode, arg_pointer_rtx);
  static_reg_base_value[FRAME_POINTER_REGNUM]
    = gen_rtx_ADDRESS (Pmode, frame_pointer_rtx);
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
  static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
    = gen_rtx_ADDRESS (Pmode, hard_frame_pointer_rtx);
#endif
}

/* Set MEMORY_MODIFIED when X modifies DATA (that is assumed
   to be a memory reference).  */
static bool memory_modified;
static void
memory_modified_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  if (MEM_P (x))
    {
      if (anti_dependence (x, (const_rtx) data)
          || output_dependence (x, (const_rtx) data))
        memory_modified = true;
    }
}

/* Return true when INSN possibly modifies memory contents of MEM
   (i.e. address can be modified).  */
bool
memory_modified_in_insn_p (const_rtx mem, const_rtx insn)
{
  if (!INSN_P (insn))
    return false;
  memory_modified = false;
  note_stores (PATTERN (insn), memory_modified_1, CONST_CAST_RTX (mem));
  return memory_modified;
}

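/* Illustrative usage, not from the original sources: a pass holding a
   MEM for *p may ask

       if (memory_modified_in_insn_p (mem, insn))
         ...

   to learn whether INSN contains a store that could write into *p;
   note_stores walks every store in the insn's pattern and applies the
   anti/output dependence tests above.  */
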
/* Initialize the aliasing machinery. Initialize the REG_KNOWN_VALUE
|
| 2633 |
|
|
array. */
|
| 2634 |
|
|
|
| 2635 |
|
|
void
|
| 2636 |
|
|
init_alias_analysis (void)
|
| 2637 |
|
|
{
|
| 2638 |
|
|
unsigned int maxreg = max_reg_num ();
|
| 2639 |
|
|
int changed, pass;
|
| 2640 |
|
|
int i;
|
| 2641 |
|
|
unsigned int ui;
|
| 2642 |
|
|
rtx insn;
|
| 2643 |
|
|
|
| 2644 |
|
|
timevar_push (TV_ALIAS_ANALYSIS);
|
| 2645 |
|
|
|
| 2646 |
|
|
reg_known_value_size = maxreg - FIRST_PSEUDO_REGISTER;
|
| 2647 |
|
|
reg_known_value = GGC_CNEWVEC (rtx, reg_known_value_size);
|
| 2648 |
|
|
reg_known_equiv_p = XCNEWVEC (bool, reg_known_value_size);
|
| 2649 |
|
|
|
| 2650 |
|
|
/* If we have memory allocated from the previous run, use it. */
|
| 2651 |
|
|
if (old_reg_base_value)
|
| 2652 |
|
|
reg_base_value = old_reg_base_value;
|
| 2653 |
|
|
|
| 2654 |
|
|
if (reg_base_value)
|
| 2655 |
|
|
VEC_truncate (rtx, reg_base_value, 0);
|
| 2656 |
|
|
|
| 2657 |
|
|
VEC_safe_grow_cleared (rtx, gc, reg_base_value, maxreg);
|
| 2658 |
|
|
|
| 2659 |
|
|
new_reg_base_value = XNEWVEC (rtx, maxreg);
|
| 2660 |
|
|
reg_seen = XNEWVEC (char, maxreg);
|
| 2661 |
|
|
|
| 2662 |
|
|
/* The basic idea is that each pass through this loop will use the
|
| 2663 |
|
|
"constant" information from the previous pass to propagate alias
|
| 2664 |
|
|
information through another level of assignments.
|
| 2665 |
|
|
|
| 2666 |
|
|
This could get expensive if the assignment chains are long. Maybe
|
| 2667 |
|
|
we should throttle the number of iterations, possibly based on
|
| 2668 |
|
|
the optimization level or flag_expensive_optimizations.
|
| 2669 |
|
|
|
| 2670 |
|
|
We could propagate more information in the first pass by making use
|
| 2671 |
|
|
of DF_REG_DEF_COUNT to determine immediately that the alias information
|
| 2672 |
|
|
for a pseudo is "constant".
|
| 2673 |
|
|
|
| 2674 |
|
|
A program with an uninitialized variable can cause an infinite loop
|
| 2675 |
|
|
here. Instead of doing a full dataflow analysis to detect such problems
|
| 2676 |
|
|
we just cap the number of iterations for the loop.
|
| 2677 |
|
|
|
| 2678 |
|
|
The state of the arrays for the set chain in question does not matter
|
| 2679 |
|
|
since the program has undefined behavior. */

  pass = 0;
  do
    {
      /* Assume nothing will change this iteration of the loop.  */
      changed = 0;

      /* We want to assign the same IDs each iteration of this loop, so
         start counting from zero each iteration of the loop.  */
      unique_id = 0;

      /* We're at the start of the function each iteration through the
         loop, so we're copying arguments.  */
      copying_arguments = true;

      /* Wipe the potential alias information clean for this pass.  */
      memset (new_reg_base_value, 0, maxreg * sizeof (rtx));

      /* Wipe the reg_seen array clean.  */
      memset (reg_seen, 0, maxreg);

      /* Mark all hard registers which may contain an address.
         The stack, frame and argument pointers may contain an address.
         An argument register which can hold a Pmode value may contain
         an address even if it is not in BASE_REGS.

         The address expression is VOIDmode for an argument and
         Pmode for other registers.  */

      memcpy (new_reg_base_value, static_reg_base_value,
              FIRST_PSEUDO_REGISTER * sizeof (rtx));

      /* Walk the insns adding values to the new_reg_base_value array.  */
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        {
          if (INSN_P (insn))
            {
              rtx note, set;

#if defined (HAVE_prologue) || defined (HAVE_epilogue)
              /* The prologue/epilogue insns are not threaded onto the
                 insn chain until after reload has completed.  Thus,
                 there is no sense wasting time checking if INSN is in
                 the prologue/epilogue until after reload has completed.  */
              if (reload_completed
                  && prologue_epilogue_contains (insn))
                continue;
#endif

              /* If this insn has a noalias note, process it.  Otherwise,
                 scan for sets.  A simple set will have no side effects
                 which could change the base value of any other register.  */
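
              /* (A REG_NOALIAS note is attached to the insn copying the
                 return value of a malloc-like call into a pseudo.
                 Passing a null SET to record_set below gives that
                 pseudo a fresh, unique base value, so the newly
                 allocated memory is treated as aliasing nothing
                 else.)  */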

              if (GET_CODE (PATTERN (insn)) == SET
                  && REG_NOTES (insn) != 0
                  && find_reg_note (insn, REG_NOALIAS, NULL_RTX))
                record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
              else
                note_stores (PATTERN (insn), record_set, NULL);

              set = single_set (insn);

              if (set != 0
                  && REG_P (SET_DEST (set))
                  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
                {
                  unsigned int regno = REGNO (SET_DEST (set));
                  rtx src = SET_SRC (set);
                  rtx t;
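
                  /* A REG_EQUIV note is valid throughout the function,
                     but a REG_EQUAL note only describes the value at
                     this particular set, so it can serve as a global
                     known value only when this is the register's sole
                     definition.  */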

                  note = find_reg_equal_equiv_note (insn);
                  if (note && REG_NOTE_KIND (note) == REG_EQUAL
                      && DF_REG_DEF_COUNT (regno) != 1)
                    note = NULL_RTX;

                  if (note != NULL_RTX
                      && GET_CODE (XEXP (note, 0)) != EXPR_LIST
                      && ! rtx_varies_p (XEXP (note, 0), 1)
                      && ! reg_overlap_mentioned_p (SET_DEST (set),
                                                    XEXP (note, 0)))
                    {
                      set_reg_known_value (regno, XEXP (note, 0));
                      set_reg_known_equiv_p (regno,
                                             REG_NOTE_KIND (note) == REG_EQUIV);
                    }
                  else if (DF_REG_DEF_COUNT (regno) == 1
                           && GET_CODE (src) == PLUS
                           && REG_P (XEXP (src, 0))
                           && (t = get_reg_known_value (REGNO (XEXP (src, 0))))
                           && CONST_INT_P (XEXP (src, 1)))
                    {
                      t = plus_constant (t, INTVAL (XEXP (src, 1)));
                      set_reg_known_value (regno, t);
                      set_reg_known_equiv_p (regno, 0);
                    }
                  else if (DF_REG_DEF_COUNT (regno) == 1
                           && ! rtx_varies_p (src, 1))
                    {
                      set_reg_known_value (regno, src);
                      set_reg_known_equiv_p (regno, 0);
                    }
                }
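
              /* As an example of the PLUS case above (pseudo numbers
                 hypothetical): if reg 104's known value is
                 (symbol_ref "buf") and this insn is
                 (set (reg 105) (plus (reg 104) (const_int 12))),
                 plus_constant folds the known value of reg 105 to
                 (const (plus (symbol_ref "buf") (const_int 12))).  */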
            }
          else if (NOTE_P (insn)
                   && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG)
            copying_arguments = false;
        }

      /* Now propagate values from new_reg_base_value to reg_base_value.  */
      gcc_assert (maxreg == (unsigned int) max_reg_num ());

      for (ui = 0; ui < maxreg; ui++)
        {
          if (new_reg_base_value[ui]
              && new_reg_base_value[ui] != VEC_index (rtx, reg_base_value, ui)
              && ! rtx_equal_p (new_reg_base_value[ui],
                                VEC_index (rtx, reg_base_value, ui)))
            {
              VEC_replace (rtx, reg_base_value, ui, new_reg_base_value[ui]);
              changed = 1;
            }
        }
    }
  while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);

  /* Fill in the remaining entries.  */
  for (i = 0; i < (int)reg_known_value_size; i++)
    if (reg_known_value[i] == 0)
      reg_known_value[i] = regno_reg_rtx[i + FIRST_PSEUDO_REGISTER];
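
  /* Every pseudo now has a known value: either something proved above
     or, by default, its own rtx, so lookups never return null.  */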

  /* Clean up.  */
  free (new_reg_base_value);
  new_reg_base_value = 0;
  free (reg_seen);
  reg_seen = 0;
  timevar_pop (TV_ALIAS_ANALYSIS);
}
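
/* Release the per-function alias state.  reg_base_value is deliberately
   not freed here: it is stashed in old_reg_base_value so that the next
   call to init_alias_analysis can reuse the vector.  */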
void
end_alias_analysis (void)
{
  old_reg_base_value = reg_base_value;
  ggc_free (reg_known_value);
  reg_known_value = 0;
  reg_known_value_size = 0;
  free (reg_known_equiv_p);
  reg_known_equiv_p = 0;
}

#include "gt-alias.h"