/* Conditional constant propagation pass for the GNU compiler.
   Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
   2010, 2011, 2012 Free Software Foundation, Inc.
   Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
   Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Conditional constant propagation (CCP) is based on the SSA
   propagation engine (tree-ssa-propagate.c).  Constant assignments of
   the form VAR = CST are propagated from the assignments into uses of
   VAR, which in turn may generate new constants.  The simulation uses
   a four level lattice to keep track of constant values associated
   with SSA names.  Given an SSA name V_i, it may take one of the
   following values:

	UNINITIALIZED  ->  the initial state of the value.  This value
			   is replaced with a correct initial value
			   the first time the value is used, so the
			   rest of the pass does not need to care about
			   it.  Using this value simplifies initialization
			   of the pass, and prevents us from needlessly
			   scanning statements that are never reached.

	UNDEFINED      ->  V_i is a local variable whose definition
			   has not been processed yet.  Therefore we
			   don't yet know if its value is a constant
			   or not.

	CONSTANT       ->  V_i has been found to hold a constant
			   value C.

	VARYING        ->  V_i cannot take a constant value, or if it
			   does, it is not possible to determine it
			   at compile time.

   The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:

   1- In ccp_visit_stmt, we are interested in assignments whose RHS
      evaluates into a constant and conditional jumps whose predicate
      evaluates into a boolean true or false.  When an assignment of
      the form V_i = CONST is found, V_i's lattice value is set to
      CONSTANT and CONST is associated with it.  This causes the
      propagation engine to add all the SSA edges coming out of the
      assignment into the worklists, so that statements that use V_i
      can be visited.

      If the statement is a conditional with a constant predicate, we
      mark the outgoing edges as executable or not executable
      depending on the predicate's value.  This is then used when
      visiting PHI nodes to know when a PHI argument can be ignored.


   2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
      same constant C, then the LHS of the PHI is set to C.  This
      evaluation is known as the "meet operation".  Since one of the
      goals of this evaluation is to optimistically return constant
      values as often as possible, it uses two main shortcuts:

      - If an argument is flowing in through a non-executable edge, it
	is ignored.  This is useful in cases like this:

			if (PRED)
			  a_9 = 3;
			else
			  a_10 = 100;
			a_11 = PHI (a_9, a_10)

	If PRED is known to always evaluate to false, then we can
	assume that a_11 will always take its value from a_10, meaning
	that instead of considering it VARYING (a_9 and a_10 have
	different values), we can consider it CONSTANT 100.

      - If an argument has an UNDEFINED value, then it does not affect
	the outcome of the meet operation.  If a variable V_i has an
	UNDEFINED value, it means that either its defining statement
	hasn't been visited yet or V_i has no defining statement, in
	which case the original symbol 'V' is being used
	uninitialized.  Since 'V' is a local variable, the compiler
	may assume any initial value for it.


   After propagation, every variable V_i that ends up with a lattice
   value of CONSTANT will have the associated constant value in the
   array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
   final substitution and folding.

   References:

     Constant propagation with conditional branches,
     Wegman and Zadeck, ACM TOPLAS 13(2):181-210.

     Building an Optimizing Compiler,
     Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.

     Advanced Compiler Design and Implementation,
     Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "output.h"
#include "function.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "value-prof.h"
#include "langhooks.h"
#include "target.h"
#include "diagnostic-core.h"
#include "dbgcnt.h"
#include "gimple-fold.h"
#include "params.h"


/* Possible lattice values.  */
typedef enum
{
  UNINITIALIZED,
  UNDEFINED,
  CONSTANT,
  VARYING
} ccp_lattice_t;

struct prop_value_d {
    /* Lattice value.  */
    ccp_lattice_t lattice_val;

    /* Propagated value.  */
    tree value;

    /* Mask that applies to the propagated value during CCP.  For
       X with a CONSTANT lattice value X & ~mask == value & ~mask.  */
    double_int mask;
};
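
/* Illustrative note (added commentary, not from the original source):
   a CONSTANT lattice value with VALUE == 0x8 and MASK == 0x3 says that
   bit 3 is known to be set, bits 0 and 1 are unknown, and all higher
   bits are known to be zero.  Only the bits where MASK is zero are
   trusted, which is exactly the X & ~mask == value & ~mask invariant
   stated above.  */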

typedef struct prop_value_d prop_value_t;

/* Array of propagated constant values.  After propagation,
   CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
   the constant is held in an SSA name representing a memory store
   (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
   memory reference used to store (i.e., the LHS of the assignment
   doing the store).  */
static prop_value_t *const_val;

static void canonicalize_float_value (prop_value_t *);
static bool ccp_fold_stmt (gimple_stmt_iterator *);

/* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */

static void
dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val)
{
  switch (val.lattice_val)
    {
    case UNINITIALIZED:
      fprintf (outf, "%sUNINITIALIZED", prefix);
      break;
    case UNDEFINED:
      fprintf (outf, "%sUNDEFINED", prefix);
      break;
    case VARYING:
      fprintf (outf, "%sVARYING", prefix);
      break;
    case CONSTANT:
      fprintf (outf, "%sCONSTANT ", prefix);
      if (TREE_CODE (val.value) != INTEGER_CST
	  || double_int_zero_p (val.mask))
	print_generic_expr (outf, val.value, dump_flags);
      else
	{
	  double_int cval = double_int_and_not (tree_to_double_int (val.value),
						val.mask);
	  fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX,
		   prefix, cval.high, cval.low);
	  fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")",
		   val.mask.high, val.mask.low);
	}
      break;
    default:
      gcc_unreachable ();
    }
}


/* Print lattice value VAL to stderr.  */

void debug_lattice_value (prop_value_t val);

DEBUG_FUNCTION void
debug_lattice_value (prop_value_t val)
{
  dump_lattice_value (stderr, "", val);
  fprintf (stderr, "\n");
}


/* Compute a default value for variable VAR and store it in the
   CONST_VAL array.  The following rules are used to get default
   values:

   1- Global and static variables that are declared constant are
      considered CONSTANT.

   2- Any other value is considered UNDEFINED.  This is useful when
      considering PHI nodes.  PHI arguments that are undefined do not
      change the constant value of the PHI node, which allows for more
      constants to be propagated.

   3- Variables defined by statements other than assignments and PHI
      nodes are considered VARYING.

   4- Initial values of variables that are not GIMPLE registers are
      considered VARYING.  */

static prop_value_t
get_default_value (tree var)
{
  tree sym = SSA_NAME_VAR (var);
  prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } };
  gimple stmt;

  stmt = SSA_NAME_DEF_STMT (var);

  if (gimple_nop_p (stmt))
    {
      /* Variables defined by an empty statement are those used
	 before being initialized.  If VAR is a local variable, we
	 can assume initially that it is UNDEFINED, otherwise we must
	 consider it VARYING.  */
      if (is_gimple_reg (sym)
	  && TREE_CODE (sym) == VAR_DECL)
	val.lattice_val = UNDEFINED;
      else
	{
	  val.lattice_val = VARYING;
	  val.mask = double_int_minus_one;
	}
    }
  else if (is_gimple_assign (stmt)
	   /* Value-returning GIMPLE_CALL statements assign to
	      a variable, and are treated similarly to GIMPLE_ASSIGN.  */
	   || (is_gimple_call (stmt)
	       && gimple_call_lhs (stmt) != NULL_TREE)
	   || gimple_code (stmt) == GIMPLE_PHI)
    {
      tree cst;
      if (gimple_assign_single_p (stmt)
	  && DECL_P (gimple_assign_rhs1 (stmt))
	  && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
	{
	  val.lattice_val = CONSTANT;
	  val.value = cst;
	}
      else
	/* Any other variable defined by an assignment or a PHI node
	   is considered UNDEFINED.  */
	val.lattice_val = UNDEFINED;
    }
  else
    {
      /* Otherwise, VAR will never take on a constant value.  */
      val.lattice_val = VARYING;
      val.mask = double_int_minus_one;
    }

  return val;
}


/* Get the constant value associated with variable VAR.  */

static inline prop_value_t *
get_value (tree var)
{
  prop_value_t *val;

  if (const_val == NULL)
    return NULL;

  val = &const_val[SSA_NAME_VERSION (var)];
  if (val->lattice_val == UNINITIALIZED)
    *val = get_default_value (var);

  canonicalize_float_value (val);

  return val;
}

/* Return the constant tree value associated with VAR.  */

static inline tree
get_constant_value (tree var)
{
  prop_value_t *val;
  if (TREE_CODE (var) != SSA_NAME)
    {
      if (is_gimple_min_invariant (var))
	return var;
      return NULL_TREE;
    }
  val = get_value (var);
  if (val
      && val->lattice_val == CONSTANT
      && (TREE_CODE (val->value) != INTEGER_CST
	  || double_int_zero_p (val->mask)))
    return val->value;
  return NULL_TREE;
}

/* Sets the value associated with VAR to VARYING.  */

static inline void
set_value_varying (tree var)
{
  prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];

  val->lattice_val = VARYING;
  val->value = NULL_TREE;
  val->mask = double_int_minus_one;
}

/* For float types, modify the value of VAL to make ccp work correctly
   for non-standard values (-0, NaN):

   If HONOR_SIGNED_ZEROS is false, and VAL = -0, we canonicalize it to 0.
   If HONOR_NANS is false, and VAL is NaN, we canonicalize it to UNDEFINED.
   This is to fix the following problem (see PR 29921): Suppose we have

   x = 0.0 * y

   and we set the value of y to NaN.  This causes the value of x to be
   set to NaN.  When we later determine that y is in fact VARYING, fold
   uses the fact that HONOR_NANS is false, and we try to change the
   value of x to 0, causing an ICE.  With HONOR_NANS being false, the
   real appearance of NaN would cause undefined behavior, though, so
   claiming that y (and x) are UNDEFINED initially is correct.  */

static void
canonicalize_float_value (prop_value_t *val)
{
  enum machine_mode mode;
  tree type;
  REAL_VALUE_TYPE d;

  if (val->lattice_val != CONSTANT
      || TREE_CODE (val->value) != REAL_CST)
    return;

  d = TREE_REAL_CST (val->value);
  type = TREE_TYPE (val->value);
  mode = TYPE_MODE (type);

  if (!HONOR_SIGNED_ZEROS (mode)
      && REAL_VALUE_MINUS_ZERO (d))
    {
      val->value = build_real (type, dconst0);
      return;
    }

  if (!HONOR_NANS (mode)
      && REAL_VALUE_ISNAN (d))
    {
      val->lattice_val = UNDEFINED;
      val->value = NULL;
      return;
    }
}

/* Return whether the lattice transition is valid.  */

static bool
valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
{
  /* Lattice transitions must always be monotonically increasing in
     value.  */
  if (old_val.lattice_val < new_val.lattice_val)
    return true;

  if (old_val.lattice_val != new_val.lattice_val)
    return false;

  if (!old_val.value && !new_val.value)
    return true;

  /* Now both lattice values are CONSTANT.  */

  /* Allow transitioning from &x to &x & ~3.  */
  if (TREE_CODE (old_val.value) != INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return true;

  /* Bit-lattices have to agree in the still valid bits.  */
  if (TREE_CODE (old_val.value) == INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return double_int_equal_p
	     (double_int_and_not (tree_to_double_int (old_val.value),
				  new_val.mask),
	      double_int_and_not (tree_to_double_int (new_val.value),
				  new_val.mask));

  /* Otherwise constant values have to agree.  */
  return operand_equal_p (old_val.value, new_val.value, 0);
}

/* Set the value for variable VAR to NEW_VAL.  Return true if the new
   value is different from VAR's previous value.  */

static bool
set_lattice_value (tree var, prop_value_t new_val)
{
  /* We can deal with old UNINITIALIZED values just fine here.  */
  prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];

  canonicalize_float_value (&new_val);

  /* We have to be careful to not go up the bitwise lattice
     represented by the mask.
     ??? This doesn't seem to be the best place to enforce this.  */
  if (new_val.lattice_val == CONSTANT
      && old_val->lattice_val == CONSTANT
      && TREE_CODE (new_val.value) == INTEGER_CST
      && TREE_CODE (old_val->value) == INTEGER_CST)
    {
      double_int diff;
      diff = double_int_xor (tree_to_double_int (new_val.value),
			     tree_to_double_int (old_val->value));
      new_val.mask = double_int_ior (new_val.mask,
				     double_int_ior (old_val->mask, diff));
    }

  gcc_assert (valid_lattice_transition (*old_val, new_val));

  /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
     caller that this was a non-transition.  */
  if (old_val->lattice_val != new_val.lattice_val
      || (new_val.lattice_val == CONSTANT
	  && TREE_CODE (new_val.value) == INTEGER_CST
	  && (TREE_CODE (old_val->value) != INTEGER_CST
	      || !double_int_equal_p (new_val.mask, old_val->mask))))
    {
      /* ??? We would like to delay creation of INTEGER_CSTs from
	 partially constant values here.  */

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  dump_lattice_value (dump_file, "Lattice value changed to ", new_val);
	  fprintf (dump_file, ".  Adding SSA edges to worklist.\n");
	}

      *old_val = new_val;

      gcc_assert (new_val.lattice_val != UNINITIALIZED);
      return true;
    }

  return false;
}
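
/* Illustration of the mask widening above (added commentary, not from
   the original source): if the old value was CONSTANT 4 with mask 0 and
   the new value is CONSTANT 6 with mask 0, the XOR of the two values is
   2, so the merged result keeps value 6 but gains mask 2 -- bit 1 is no
   longer trusted -- which is the only way to remain monotone in the
   bitwise lattice.  */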

static prop_value_t get_value_for_expr (tree, bool);
static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *,
			       tree, double_int, double_int,
			       tree, double_int, double_int);

/* Return a double_int that can be used for bitwise simplifications
   from VAL.  */

static double_int
value_to_double_int (prop_value_t val)
{
  if (val.value
      && TREE_CODE (val.value) == INTEGER_CST)
    return tree_to_double_int (val.value);
  else
    return double_int_zero;
}

/* Return the value for the address expression EXPR based on alignment
   information.  */

static prop_value_t
get_value_from_alignment (tree expr)
{
  tree type = TREE_TYPE (expr);
  prop_value_t val;
  unsigned HOST_WIDE_INT bitpos;
  unsigned int align;

  gcc_assert (TREE_CODE (expr) == ADDR_EXPR);

  align = get_object_alignment_1 (TREE_OPERAND (expr, 0), &bitpos);
  val.mask
    = double_int_and_not (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
			  ? double_int_mask (TYPE_PRECISION (type))
			  : double_int_minus_one,
			  uhwi_to_double_int (align / BITS_PER_UNIT - 1));
  val.lattice_val = double_int_minus_one_p (val.mask) ? VARYING : CONSTANT;
  if (val.lattice_val == CONSTANT)
    val.value
      = double_int_to_tree (type, uhwi_to_double_int (bitpos / BITS_PER_UNIT));
  else
    val.value = NULL_TREE;

  return val;
}
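
/* Worked example for get_value_from_alignment (added commentary, not
   from the original source): for &x where x is known to be 16-byte
   aligned and BITPOS is 0, ALIGN / BITS_PER_UNIT - 1 is 15, so the low
   four bits of the mask come out clear -- those address bits are known
   -- while all higher bits stay unknown.  The value then carries the
   byte offset BITPOS / BITS_PER_UNIT within that alignment.  */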

/* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
   return constant bits extracted from alignment information for
   invariant addresses.  */

static prop_value_t
get_value_for_expr (tree expr, bool for_bits_p)
{
  prop_value_t val;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      val = *get_value (expr);
      if (for_bits_p
	  && val.lattice_val == CONSTANT
	  && TREE_CODE (val.value) == ADDR_EXPR)
	val = get_value_from_alignment (val.value);
    }
  else if (is_gimple_min_invariant (expr)
	   && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
    {
      val.lattice_val = CONSTANT;
      val.value = expr;
      val.mask = double_int_zero;
      canonicalize_float_value (&val);
    }
  else if (TREE_CODE (expr) == ADDR_EXPR)
    val = get_value_from_alignment (expr);
  else
    {
      val.lattice_val = VARYING;
      val.mask = double_int_minus_one;
      val.value = NULL_TREE;
    }
  return val;
}

/* Return the likely CCP lattice value for STMT.

   If STMT has no operands, then return CONSTANT.

   Else if undefinedness of operands of STMT causes its value to be
   undefined, then return UNDEFINED.

   Else if any operands of STMT are constants, then return CONSTANT.

   Else return VARYING.  */

static ccp_lattice_t
likely_value (gimple stmt)
{
  bool has_constant_operand, has_undefined_operand, all_undefined_operands;
  tree use;
  ssa_op_iter iter;
  unsigned i;

  enum gimple_code code = gimple_code (stmt);

  /* This function appears to be called only for assignments, calls,
     conditionals, and switches, due to the logic in visit_stmt.  */
  gcc_assert (code == GIMPLE_ASSIGN
	      || code == GIMPLE_CALL
	      || code == GIMPLE_COND
	      || code == GIMPLE_SWITCH);

  /* If the statement has volatile operands, it won't fold to a
     constant value.  */
  if (gimple_has_volatile_ops (stmt))
    return VARYING;

  /* Arrive here for more complex cases.  */
  has_constant_operand = false;
  has_undefined_operand = false;
  all_undefined_operands = true;
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      prop_value_t *val = get_value (use);

      if (val->lattice_val == UNDEFINED)
	has_undefined_operand = true;
      else
	all_undefined_operands = false;

      if (val->lattice_val == CONSTANT)
	has_constant_operand = true;
    }

  /* There may be constants in regular rhs operands.  For calls we
     have to ignore lhs, fndecl and static chain, otherwise only
     the lhs.  */
  for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
       i < gimple_num_ops (stmt); ++i)
    {
      tree op = gimple_op (stmt, i);
      if (!op || TREE_CODE (op) == SSA_NAME)
	continue;
      if (is_gimple_min_invariant (op))
	has_constant_operand = true;
    }

  if (has_constant_operand)
    all_undefined_operands = false;

  /* If the operation combines operands like COMPLEX_EXPR make sure to
     not mark the result UNDEFINED if only one part of the result is
     undefined.  */
  if (has_undefined_operand && all_undefined_operands)
    return UNDEFINED;
  else if (code == GIMPLE_ASSIGN && has_undefined_operand)
    {
      switch (gimple_assign_rhs_code (stmt))
	{
	/* Unary operators are handled with all_undefined_operands.  */
	case PLUS_EXPR:
	case MINUS_EXPR:
	case POINTER_PLUS_EXPR:
	  /* Not MIN_EXPR, MAX_EXPR.  One VARYING operand may be selected.
	     Not bitwise operators, one VARYING operand may specify the
	     result completely.  Not logical operators for the same reason.
	     Not COMPLEX_EXPR as one VARYING operand makes the result partly
	     not UNDEFINED.  Not *DIV_EXPR, comparisons and shifts because
	     the undefined operand may be promoted.  */
	  return UNDEFINED;

	default:
	  ;
	}
    }
  /* If there was an UNDEFINED operand but the result may be not UNDEFINED
     fall back to CONSTANT.  During iteration UNDEFINED may still drop
     to CONSTANT.  */
  if (has_undefined_operand)
    return CONSTANT;

  /* We do not consider virtual operands here -- load from read-only
     memory may have only VARYING virtual operands, but still be
     constant.  */
  if (has_constant_operand
      || gimple_references_memory_p (stmt))
    return CONSTANT;

  return VARYING;
}

/* Returns true if STMT cannot be constant.  */

static bool
surely_varying_stmt_p (gimple stmt)
{
  /* If the statement has operands that we cannot handle, it cannot be
     constant.  */
  if (gimple_has_volatile_ops (stmt))
    return true;

  /* If it is a call and does not return a value or is not a
     builtin and not an indirect call, it is varying.  */
  if (is_gimple_call (stmt))
    {
      tree fndecl;
      if (!gimple_call_lhs (stmt)
	  || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
	      && !DECL_BUILT_IN (fndecl)))
	return true;
    }

  /* Any other store operation is not interesting.  */
  else if (gimple_vdef (stmt))
    return true;

  /* Anything other than assignments and conditional jumps is not
     interesting for CCP.  */
  if (gimple_code (stmt) != GIMPLE_ASSIGN
      && gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_SWITCH
      && gimple_code (stmt) != GIMPLE_CALL)
    return true;

  return false;
}

/* Initialize local data structures for CCP.  */

static void
ccp_initialize (void)
{
  basic_block bb;

  const_val = XCNEWVEC (prop_value_t, num_ssa_names);

  /* Initialize simulation flags for PHI nodes and statements.  */
  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
	{
	  gimple stmt = gsi_stmt (i);
	  bool is_varying;

	  /* If the statement is a control insn, then we always want to
	     simulate the statement at least once.  Failing to do so
	     means that those edges will never get added.  */
	  if (stmt_ends_bb_p (stmt))
	    is_varying = false;
	  else
	    is_varying = surely_varying_stmt_p (stmt);

	  if (is_varying)
	    {
	      tree def;
	      ssa_op_iter iter;

	      /* If the statement will not produce a constant, mark
		 all its outputs VARYING.  */
	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
		set_value_varying (def);
	    }
	  prop_set_simulate_again (stmt, !is_varying);
	}
    }

  /* Now process PHI nodes.  We never clear the simulate_again flag on
     phi nodes, since we do not know which edges are executable yet,
     except for phi nodes for virtual operands when we do not do store ccp.  */
  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
	{
	  gimple phi = gsi_stmt (i);

	  if (!is_gimple_reg (gimple_phi_result (phi)))
	    prop_set_simulate_again (phi, false);
	  else
	    prop_set_simulate_again (phi, true);
	}
    }
}

/* Debug count support.  Reset the values of ssa names to VARYING when
   the total number of ssa names analyzed is beyond the debug count
   specified.  */

static void
do_dbg_cnt (void)
{
  unsigned i;
  for (i = 0; i < num_ssa_names; i++)
    {
      if (!dbg_cnt (ccp))
	{
	  const_val[i].lattice_val = VARYING;
	  const_val[i].mask = double_int_minus_one;
	  const_val[i].value = NULL_TREE;
	}
    }
}


/* Do final substitution of propagated values, cleanup the flowgraph and
   free allocated storage.

   Return TRUE when something was optimized.  */
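
/* Note on the alignment derivation below (added commentary, not from
   the original source): for a pointer whose lattice mask has low bits
   ...fff0, the lowest set mask bit gives align == 16, and the low four
   bits of the value give the misalignment, e.g. a value of ...0004
   records "16-byte aligned plus 4".  */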

static bool
ccp_finalize (void)
{
  bool something_changed;
  unsigned i;

  do_dbg_cnt ();

  /* Derive alignment and misalignment information from partially
     constant pointers in the lattice.  */
  for (i = 1; i < num_ssa_names; ++i)
    {
      tree name = ssa_name (i);
      prop_value_t *val;
      struct ptr_info_def *pi;
      unsigned int tem, align;

      if (!name
	  || !POINTER_TYPE_P (TREE_TYPE (name)))
	continue;

      val = get_value (name);
      if (val->lattice_val != CONSTANT
	  || TREE_CODE (val->value) != INTEGER_CST)
	continue;

      /* Trailing constant bits specify the alignment, trailing value
	 bits the misalignment.  */
      tem = val->mask.low;
      align = (tem & -tem);
      if (align == 1)
	continue;

      pi = get_ptr_info (name);
      pi->align = align;
      pi->misalign = TREE_INT_CST_LOW (val->value) & (align - 1);
    }

  /* Perform substitutions based on the known constant values.  */
  something_changed = substitute_and_fold (get_constant_value,
					   ccp_fold_stmt, true);

  free (const_val);
  const_val = NULL;
  return something_changed;
}


/* Compute the meet operator between *VAL1 and *VAL2.  Store the result
   in VAL1.

		any  M UNDEFINED   = any
		any  M VARYING     = VARYING
		Ci   M Cj          = Ci         if (i == j)
		Ci   M Cj          = VARYING    if (i != j)
   */
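
/* Worked example for the INTEGER_CST meet below (added commentary, not
   from the original source): meeting CONSTANT 2 (binary 10) with
   CONSTANT 3 (binary 11) ORs the XOR of the two values into the mask,
   so the result is CONSTANT with value 2 and mask 1 -- bit 0 unknown,
   bit 1 known set -- rather than dropping straight to VARYING.  */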

static void
ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
{
  if (val1->lattice_val == UNDEFINED)
    {
      /* UNDEFINED M any = any   */
      *val1 = *val2;
    }
  else if (val2->lattice_val == UNDEFINED)
    {
      /* any M UNDEFINED = any
	 Nothing to do.  VAL1 already contains the value we want.  */
      ;
    }
  else if (val1->lattice_val == VARYING
	   || val2->lattice_val == VARYING)
    {
      /* any M VARYING = VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = double_int_minus_one;
      val1->value = NULL_TREE;
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && TREE_CODE (val1->value) == INTEGER_CST
	   && TREE_CODE (val2->value) == INTEGER_CST)
    {
      /* Ci M Cj = Ci           if (i == j)
	 Ci M Cj = VARYING      if (i != j)

	 For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
	 drop to varying.  */
      val1->mask
	= double_int_ior (double_int_ior (val1->mask,
					  val2->mask),
			  double_int_xor (tree_to_double_int (val1->value),
					  tree_to_double_int (val2->value)));
      if (double_int_minus_one_p (val1->mask))
	{
	  val1->lattice_val = VARYING;
	  val1->value = NULL_TREE;
	}
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && simple_cst_equal (val1->value, val2->value) == 1)
    {
      /* Ci M Cj = Ci           if (i == j)
	 Ci M Cj = VARYING      if (i != j)

	 VAL1 already contains the value we want for equivalent values.  */
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && (TREE_CODE (val1->value) == ADDR_EXPR
	       || TREE_CODE (val2->value) == ADDR_EXPR))
    {
      /* When unequal addresses are involved, try meeting for
	 alignment.  */
      prop_value_t tem = *val2;
      if (TREE_CODE (val1->value) == ADDR_EXPR)
	*val1 = get_value_for_expr (val1->value, true);
      if (TREE_CODE (val2->value) == ADDR_EXPR)
	tem = get_value_for_expr (val2->value, true);
      ccp_lattice_meet (val1, &tem);
    }
  else
    {
      /* Any other combination is VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = double_int_minus_one;
      val1->value = NULL_TREE;
    }
}


/* Loop through the PHI_NODE's parameters for BLOCK and compare their
   lattice values to determine PHI_NODE's lattice value.  The value of
   a PHI node is determined by calling ccp_lattice_meet with all the
   arguments of the PHI node that are incoming via executable edges.  */

static enum ssa_prop_result
ccp_visit_phi_node (gimple phi)
{
  unsigned i;
  prop_value_t *old_val, new_val;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  old_val = get_value (gimple_phi_result (phi));
  switch (old_val->lattice_val)
    {
    case VARYING:
      return SSA_PROP_VARYING;

    case CONSTANT:
      new_val = *old_val;
      break;

    case UNDEFINED:
      new_val.lattice_val = UNDEFINED;
      new_val.value = NULL_TREE;
      break;

    default:
      gcc_unreachable ();
    }

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      /* Compute the meet operator over all the PHI arguments flowing
	 through executable edges.  */
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "\n    Argument #%d (%d -> %d %sexecutable)\n",
		   i, e->src->index, e->dest->index,
		   (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
	}

      /* If the incoming edge is executable, compute the meet operator for
	 the existing value of the PHI node and the current PHI argument.  */
      if (e->flags & EDGE_EXECUTABLE)
	{
	  tree arg = gimple_phi_arg (phi, i)->def;
	  prop_value_t arg_val = get_value_for_expr (arg, false);

	  ccp_lattice_meet (&new_val, &arg_val);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t");
	      print_generic_expr (dump_file, arg, dump_flags);
	      dump_lattice_value (dump_file, "\tValue: ", arg_val);
	      fprintf (dump_file, "\n");
	    }

	  if (new_val.lattice_val == VARYING)
	    break;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      dump_lattice_value (dump_file, "\n    PHI node value: ", new_val);
      fprintf (dump_file, "\n\n");
    }

  /* Make the transition to the new value.  */
  if (set_lattice_value (gimple_phi_result (phi), new_val))
    {
      if (new_val.lattice_val == VARYING)
	return SSA_PROP_VARYING;
      else
	return SSA_PROP_INTERESTING;
    }
  else
    return SSA_PROP_NOT_INTERESTING;
}

/* Return the constant value for OP or OP otherwise.  */

static tree
valueize_op (tree op)
{
  if (TREE_CODE (op) == SSA_NAME)
    {
      tree tem = get_constant_value (op);
      if (tem)
	return tem;
    }
  return op;
}

/* CCP specific front-end to the non-destructive constant folding
   routines.

   Attempt to simplify the RHS of STMT knowing that one or more
   operands are constants.

   If simplification is possible, return the simplified RHS,
   otherwise return the original RHS or NULL_TREE.  */

static tree
ccp_fold (gimple stmt)
{
  location_t loc = gimple_location (stmt);
  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	/* Handle comparison operators that can appear in GIMPLE form.  */
	tree op0 = valueize_op (gimple_cond_lhs (stmt));
	tree op1 = valueize_op (gimple_cond_rhs (stmt));
	enum tree_code code = gimple_cond_code (stmt);
	return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
      }

    case GIMPLE_SWITCH:
      {
	/* Return the constant switch index.  */
	return valueize_op (gimple_switch_index (stmt));
      }

    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      return gimple_fold_stmt_to_constant_1 (stmt, valueize_op);

    default:
      gcc_unreachable ();
    }
}

/* Apply the operation CODE in type TYPE to the value, mask pair
   RVAL and RMASK representing a value of type RTYPE and set
   the value, mask pair *VAL and *MASK to the result.  */

static void
bit_value_unop_1 (enum tree_code code, tree type,
		  double_int *val, double_int *mask,
		  tree rtype, double_int rval, double_int rmask)
{
  switch (code)
    {
    case BIT_NOT_EXPR:
      *mask = rmask;
      *val = double_int_not (rval);
      break;

    case NEGATE_EXPR:
      {
	double_int temv, temm;
	/* Return ~rval + 1.  */
	bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
	bit_value_binop_1 (PLUS_EXPR, type, val, mask,
			   type, temv, temm,
			   type, double_int_one, double_int_zero);
	break;
      }

    CASE_CONVERT:
      {
	bool uns;

	/* First extend mask and value according to the original type.  */
	uns = (TREE_CODE (rtype) == INTEGER_TYPE && TYPE_IS_SIZETYPE (rtype)
	       ? 0 : TYPE_UNSIGNED (rtype));
	*mask = double_int_ext (rmask, TYPE_PRECISION (rtype), uns);
	*val = double_int_ext (rval, TYPE_PRECISION (rtype), uns);

	/* Then extend mask and value according to the target type.  */
	uns = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)
	       ? 0 : TYPE_UNSIGNED (type));
	*mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
	*val = double_int_ext (*val, TYPE_PRECISION (type), uns);
	break;
      }

    default:
      *mask = double_int_minus_one;
      break;
    }
}

/* Apply the operation CODE in type TYPE to the value, mask pairs
   R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
   and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */
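
/* Worked example for the bitwise cases below (added commentary, not
   from the original source): AND-ing r1 = value 0xf0, mask 0x0f (high
   nibble known to be 1111, low nibble unknown) with r2 = value 0x0f,
   mask 0 yields value 0x00 with mask 0x0f: the high bits are known
   zero because r2 has a known zero there, while a low bit stays
   unknown only where both operands might be one.  */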

static void
bit_value_binop_1 (enum tree_code code, tree type,
		   double_int *val, double_int *mask,
		   tree r1type, double_int r1val, double_int r1mask,
		   tree r2type, double_int r2val, double_int r2mask)
{
  bool uns = (TREE_CODE (type) == INTEGER_TYPE
	      && TYPE_IS_SIZETYPE (type) ? 0 : TYPE_UNSIGNED (type));
  /* Assume we'll get a constant result.  Use an initial varying value,
     we fall back to varying in the end if necessary.  */
  *mask = double_int_minus_one;
  switch (code)
    {
    case BIT_AND_EXPR:
      /* The mask is constant where there is a known not
	 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2))  */
      *mask = double_int_and (double_int_ior (r1mask, r2mask),
			      double_int_and (double_int_ior (r1val, r1mask),
					      double_int_ior (r2val, r2mask)));
      *val = double_int_and (r1val, r2val);
      break;

    case BIT_IOR_EXPR:
      /* The mask is constant where there is a known
	 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
      *mask = double_int_and_not
		(double_int_ior (r1mask, r2mask),
		 double_int_ior (double_int_and_not (r1val, r1mask),
				 double_int_and_not (r2val, r2mask)));
      *val = double_int_ior (r1val, r2val);
      break;

    case BIT_XOR_EXPR:
      /* m1 | m2  */
      *mask = double_int_ior (r1mask, r2mask);
      *val = double_int_xor (r1val, r2val);
      break;

    case LROTATE_EXPR:
    case RROTATE_EXPR:
      if (double_int_zero_p (r2mask))
	{
	  HOST_WIDE_INT shift = r2val.low;
	  if (code == RROTATE_EXPR)
	    shift = -shift;
	  *mask = double_int_lrotate (r1mask, shift, TYPE_PRECISION (type));
	  *val = double_int_lrotate (r1val, shift, TYPE_PRECISION (type));
	}
      break;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
      /* ??? We can handle partially known shift counts if we know
	 its sign.  That way we can tell that (x << (y | 8)) & 255
	 is zero.  */
      if (double_int_zero_p (r2mask))
	{
	  HOST_WIDE_INT shift = r2val.low;
	  if (code == RSHIFT_EXPR)
	    shift = -shift;
	  /* We need to know if we are doing a left or a right shift
	     to properly shift in zeros for left shift and unsigned
	     right shifts and the sign bit for signed right shifts.
	     For signed right shifts we shift in varying in case
	     the sign bit was varying.  */
	  if (shift > 0)
	    {
	      *mask = double_int_lshift (r1mask, shift,
					 TYPE_PRECISION (type), false);
	      *val = double_int_lshift (r1val, shift,
					TYPE_PRECISION (type), false);
	    }
	  else if (shift < 0)
	    {
	      /* ??? We can have sizetype related inconsistencies in
		 the IL.  */
	      if ((TREE_CODE (r1type) == INTEGER_TYPE
		   && (TYPE_IS_SIZETYPE (r1type)
		       ? 0 : TYPE_UNSIGNED (r1type))) != uns)
		break;

	      shift = -shift;
	      *mask = double_int_rshift (r1mask, shift,
					 TYPE_PRECISION (type), !uns);
	      *val = double_int_rshift (r1val, shift,
					TYPE_PRECISION (type), !uns);
	    }
	  else
	    {
	      *mask = r1mask;
	      *val = r1val;
	    }
	}
      break;
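
    /* Carry illustration for the addition below (added commentary, not
       from the original source): adding 4 (mask 0) to a value known to
       be either 8 or 9 (value 8, mask 1) gives lo = 12 and hi = 13;
       lo ^ hi = 1, so only bit 0 becomes unknown and the result is
       value 12 with mask 1, i.e. "12 or 13".  */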

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      {
	double_int lo, hi;
	/* Do the addition with unknown bits set to zero, to give carry-ins of
	   zero wherever possible.  */
	lo = double_int_add (double_int_and_not (r1val, r1mask),
			     double_int_and_not (r2val, r2mask));
	lo = double_int_ext (lo, TYPE_PRECISION (type), uns);
	/* Do the addition with unknown bits set to one, to give carry-ins of
	   one wherever possible.  */
	hi = double_int_add (double_int_ior (r1val, r1mask),
			     double_int_ior (r2val, r2mask));
	hi = double_int_ext (hi, TYPE_PRECISION (type), uns);
	/* Each bit in the result is known if (a) the corresponding bits in
	   both inputs are known, and (b) the carry-in to that bit position
	   is known.  We can check condition (b) by seeing if we got the same
	   result with minimised carries as with maximised carries.  */
	*mask = double_int_ior (double_int_ior (r1mask, r2mask),
				double_int_xor (lo, hi));
	*mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
	/* It shouldn't matter whether we choose lo or hi here.  */
	*val = lo;
	break;
      }

    case MINUS_EXPR:
      {
	double_int temv, temm;
	bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
			  r2type, r2val, r2mask);
	bit_value_binop_1 (PLUS_EXPR, type, val, mask,
			   r1type, r1val, r1mask,
			   r2type, temv, temm);
	break;
      }
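
    /* Trailing-zero illustration for the multiplication below (added
       commentary, not from the original source): if r1 is known to be
       a multiple of 4 (two trailing zero bits) and r2 a multiple of 2
       (one trailing zero bit), the product is a multiple of 8, so the
       three low result bits are known zero while everything else stays
       unknown.  */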

    case MULT_EXPR:
      {
	/* Just track trailing zeros in both operands and transfer
	   them to the other.  */
	int r1tz = double_int_ctz (double_int_ior (r1val, r1mask));
	int r2tz = double_int_ctz (double_int_ior (r2val, r2mask));
	if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT)
	  {
	    *mask = double_int_zero;
	    *val = double_int_zero;
	  }
	else if (r1tz + r2tz > 0)
	  {
	    *mask = double_int_not (double_int_mask (r1tz + r2tz));
	    *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
	    *val = double_int_zero;
	  }
	break;
      }

    case EQ_EXPR:
    case NE_EXPR:
      {
	double_int m = double_int_ior (r1mask, r2mask);
	if (!double_int_equal_p (double_int_and_not (r1val, m),
				 double_int_and_not (r2val, m)))
	  {
	    *mask = double_int_zero;
	    *val = ((code == EQ_EXPR) ? double_int_zero : double_int_one);
	  }
	else
	  {
	    /* We know the result of a comparison is always one or zero.  */
	    *mask = double_int_one;
	    *val = double_int_zero;
	  }
	break;
      }

    case GE_EXPR:
    case GT_EXPR:
      {
	double_int tem = r1val;
	r1val = r2val;
	r2val = tem;
	tem = r1mask;
	r1mask = r2mask;
	r2mask = tem;
	code = swap_tree_comparison (code);
      }
      /* Fallthru.  */
    case LT_EXPR:
    case LE_EXPR:
      {
	int minmax, maxmin;
	/* If the most significant bits are not known we know nothing.  */
	if (double_int_negative_p (r1mask) || double_int_negative_p (r2mask))
	  break;

	/* For comparisons the signedness is in the comparison operands.  */
	uns = (TREE_CODE (r1type) == INTEGER_TYPE
	       && TYPE_IS_SIZETYPE (r1type) ? 0 : TYPE_UNSIGNED (r1type));
	/* ??? We can have sizetype related inconsistencies in the IL.  */
	if ((TREE_CODE (r2type) == INTEGER_TYPE
	     && TYPE_IS_SIZETYPE (r2type) ? 0 : TYPE_UNSIGNED (r2type)) != uns)
	  break;

	/* If we know the most significant bits we know the value
	   ranges, by means of treating varying bits as zero
	   or one.  Do a cross comparison of the max/min pairs.  */
	maxmin = double_int_cmp (double_int_ior (r1val, r1mask),
				 double_int_and_not (r2val, r2mask), uns);
	minmax = double_int_cmp (double_int_and_not (r1val, r1mask),
				 double_int_ior (r2val, r2mask), uns);
	if (maxmin < 0)  /* r1 is less than r2.  */
	  {
	    *mask = double_int_zero;
	    *val = double_int_one;
	  }
	else if (minmax > 0)  /* r1 is not less or equal to r2.  */
	  {
	    *mask = double_int_zero;
	    *val = double_int_zero;
	  }
	else if (maxmin == minmax)  /* r1 and r2 are equal.  */
	  {
	    /* This probably should never happen as we'd have
	       folded the thing during fully constant value folding.  */
	    *mask = double_int_zero;
	    *val = (code == LE_EXPR ? double_int_one : double_int_zero);
	  }
	else
	  {
	    /* We know the result of a comparison is always one or zero.  */
	    *mask = double_int_one;
	    *val = double_int_zero;
	  }
	break;
      }

    default:;
    }
}

/* Return the propagation value when applying the operation CODE to
   the value RHS yielding type TYPE.  */

static prop_value_t
bit_value_unop (enum tree_code code, tree type, tree rhs)
{
  prop_value_t rval = get_value_for_expr (rhs, true);
  double_int value, mask;
  prop_value_t val;

  if (rval.lattice_val == UNDEFINED)
    return rval;

  gcc_assert ((rval.lattice_val == CONSTANT
	       && TREE_CODE (rval.value) == INTEGER_CST)
	      || double_int_minus_one_p (rval.mask));
  bit_value_unop_1 (code, type, &value, &mask,
		    TREE_TYPE (rhs), value_to_double_int (rval), rval.mask);
  if (!double_int_minus_one_p (mask))
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ??? Delay building trees here.  */
      val.value = double_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
    }
  return val;
}

/* Return the propagation value when applying the operation CODE to
   the values RHS1 and RHS2 yielding type TYPE.  */

static prop_value_t
bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
{
  prop_value_t r1val = get_value_for_expr (rhs1, true);
  prop_value_t r2val = get_value_for_expr (rhs2, true);
  double_int value, mask;
  prop_value_t val;

  if (r1val.lattice_val == UNDEFINED
      || r2val.lattice_val == UNDEFINED)
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
      return val;
    }

  gcc_assert ((r1val.lattice_val == CONSTANT
	       && TREE_CODE (r1val.value) == INTEGER_CST)
	      || double_int_minus_one_p (r1val.mask));
  gcc_assert ((r2val.lattice_val == CONSTANT
	       && TREE_CODE (r2val.value) == INTEGER_CST)
	      || double_int_minus_one_p (r2val.mask));
  bit_value_binop_1 (code, type, &value, &mask,
		     TREE_TYPE (rhs1), value_to_double_int (r1val), r1val.mask,
		     TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask);
  if (!double_int_minus_one_p (mask))
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ??? Delay building trees here.  */
      val.value = double_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
    }
  return val;
}

/* Return the propagation value when applying __builtin_assume_aligned to
   its arguments.  */
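
/* Usage sketch (added commentary, not from the original source): for

     p2 = __builtin_assume_aligned (p, 16);
     p3 = __builtin_assume_aligned (p, 32, 4);

   the lattice value of p2 records "low four bits zero" and that of p3
   records "equal to 4 modulo 32", by AND-ing the pointer's value/mask
   pair with -align and then OR-ing in the misalignment.  */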

static prop_value_t
bit_value_assume_aligned (gimple stmt)
{
  tree ptr = gimple_call_arg (stmt, 0), align, misalign = NULL_TREE;
  tree type = TREE_TYPE (ptr);
  unsigned HOST_WIDE_INT aligni, misaligni = 0;
  prop_value_t ptrval = get_value_for_expr (ptr, true);
  prop_value_t alignval;
  double_int value, mask;
  prop_value_t val;
  if (ptrval.lattice_val == UNDEFINED)
    return ptrval;
  gcc_assert ((ptrval.lattice_val == CONSTANT
	       && TREE_CODE (ptrval.value) == INTEGER_CST)
	      || double_int_minus_one_p (ptrval.mask));
  align = gimple_call_arg (stmt, 1);
  if (!host_integerp (align, 1))
    return ptrval;
  aligni = tree_low_cst (align, 1);
  if (aligni <= 1
      || (aligni & (aligni - 1)) != 0)
    return ptrval;
  if (gimple_call_num_args (stmt) > 2)
    {
      misalign = gimple_call_arg (stmt, 2);
      if (!host_integerp (misalign, 1))
	return ptrval;
      misaligni = tree_low_cst (misalign, 1);
      if (misaligni >= aligni)
	return ptrval;
    }
  align = build_int_cst_type (type, -aligni);
  alignval = get_value_for_expr (align, true);
  bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
		     type, value_to_double_int (ptrval), ptrval.mask,
		     type, value_to_double_int (alignval), alignval.mask);
  if (!double_int_minus_one_p (mask))
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      gcc_assert ((mask.low & (aligni - 1)) == 0);
      gcc_assert ((value.low & (aligni - 1)) == 0);
      value.low |= misaligni;
      /* ??? Delay building trees here.  */
      val.value = double_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
    }
  return val;
}
|
1499 |
|
|
|
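
/* Illustrative example (editorial, with made-up SSA names): for

     q_2 = __builtin_assume_aligned (p_1, 16);

   the pointer value is ANDed with -16 above, so q_2 is modeled with
   its low four bits known to be zero even when nothing is known about
   p_1.  With a third argument of 4, the low bits are instead known to
   be 4.  */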

/* Evaluate statement STMT.
   Valid only for assignments, calls, conditionals, and switches.  */

static prop_value_t
evaluate_stmt (gimple stmt)
{
  prop_value_t val;
  tree simplified = NULL_TREE;
  ccp_lattice_t likelyvalue = likely_value (stmt);
  bool is_constant = false;
  unsigned int align;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "which is likely ");
      switch (likelyvalue)
        {
        case CONSTANT:
          fprintf (dump_file, "CONSTANT");
          break;
        case UNDEFINED:
          fprintf (dump_file, "UNDEFINED");
          break;
        case VARYING:
          fprintf (dump_file, "VARYING");
          break;
        default:;
        }
      fprintf (dump_file, "\n");
    }

  /* If the statement is likely to have a CONSTANT result, then try
     to fold the statement to determine the constant value.  */
  /* FIXME.  This is the only place that we call ccp_fold.
     Since likely_value never returns CONSTANT for calls, we will
     not attempt to fold them, including builtins that may profit.  */
  if (likelyvalue == CONSTANT)
    {
      fold_defer_overflow_warnings ();
      simplified = ccp_fold (stmt);
      is_constant = simplified && is_gimple_min_invariant (simplified);
      fold_undefer_overflow_warnings (is_constant, stmt, 0);
      if (is_constant)
        {
          /* The statement produced a constant value.  */
          val.lattice_val = CONSTANT;
          val.value = simplified;
          val.mask = double_int_zero;
        }
    }
  /* If the statement is likely to have a VARYING result, then do not
     bother folding the statement.  */
  else if (likelyvalue == VARYING)
    {
      enum gimple_code code = gimple_code (stmt);
      if (code == GIMPLE_ASSIGN)
        {
          enum tree_code subcode = gimple_assign_rhs_code (stmt);

          /* Other cases cannot satisfy is_gimple_min_invariant
             without folding.  */
          if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
            simplified = gimple_assign_rhs1 (stmt);
        }
      else if (code == GIMPLE_SWITCH)
        simplified = gimple_switch_index (stmt);
      else
        /* These cannot satisfy is_gimple_min_invariant without folding.  */
        gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
      is_constant = simplified && is_gimple_min_invariant (simplified);
      if (is_constant)
        {
          /* The statement produced a constant value.  */
          val.lattice_val = CONSTANT;
          val.value = simplified;
          val.mask = double_int_zero;
        }
    }

  /* Resort to simplification for bitwise tracking.  */
  if (flag_tree_bit_ccp
      && (likelyvalue == CONSTANT || is_gimple_call (stmt))
      && !is_constant)
    {
      enum gimple_code code = gimple_code (stmt);
      tree fndecl;
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
      if (code == GIMPLE_ASSIGN)
        {
          enum tree_code subcode = gimple_assign_rhs_code (stmt);
          tree rhs1 = gimple_assign_rhs1 (stmt);
          switch (get_gimple_rhs_class (subcode))
            {
            case GIMPLE_SINGLE_RHS:
              if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
                  || POINTER_TYPE_P (TREE_TYPE (rhs1)))
                val = get_value_for_expr (rhs1, true);
              break;

            case GIMPLE_UNARY_RHS:
              if ((INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
                   || POINTER_TYPE_P (TREE_TYPE (rhs1)))
                  && (INTEGRAL_TYPE_P (gimple_expr_type (stmt))
                      || POINTER_TYPE_P (gimple_expr_type (stmt))))
                val = bit_value_unop (subcode, gimple_expr_type (stmt), rhs1);
              break;

            case GIMPLE_BINARY_RHS:
              if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
                  || POINTER_TYPE_P (TREE_TYPE (rhs1)))
                {
                  tree lhs = gimple_assign_lhs (stmt);
                  tree rhs2 = gimple_assign_rhs2 (stmt);
                  val = bit_value_binop (subcode,
                                         TREE_TYPE (lhs), rhs1, rhs2);
                }
              break;

            default:;
            }
        }
      else if (code == GIMPLE_COND)
        {
          enum tree_code code = gimple_cond_code (stmt);
          tree rhs1 = gimple_cond_lhs (stmt);
          tree rhs2 = gimple_cond_rhs (stmt);
          if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
              || POINTER_TYPE_P (TREE_TYPE (rhs1)))
            val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
        }
      else if (code == GIMPLE_CALL
               && (fndecl = gimple_call_fndecl (stmt))
               && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
        {
          switch (DECL_FUNCTION_CODE (fndecl))
            {
            case BUILT_IN_MALLOC:
            case BUILT_IN_REALLOC:
            case BUILT_IN_CALLOC:
            case BUILT_IN_STRDUP:
            case BUILT_IN_STRNDUP:
              val.lattice_val = CONSTANT;
              val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
              val.mask = shwi_to_double_int
                           (~(((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT)
                              / BITS_PER_UNIT - 1));
              break;

            case BUILT_IN_ALLOCA:
            case BUILT_IN_ALLOCA_WITH_ALIGN:
              align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
                       ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
                       : BIGGEST_ALIGNMENT);
              val.lattice_val = CONSTANT;
              val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
              val.mask = shwi_to_double_int
                           (~(((HOST_WIDE_INT) align)
                              / BITS_PER_UNIT - 1));
              break;

            /* These builtins return their first argument, unmodified.  */
            case BUILT_IN_MEMCPY:
            case BUILT_IN_MEMMOVE:
            case BUILT_IN_MEMSET:
            case BUILT_IN_STRCPY:
            case BUILT_IN_STRNCPY:
            case BUILT_IN_MEMCPY_CHK:
            case BUILT_IN_MEMMOVE_CHK:
            case BUILT_IN_MEMSET_CHK:
            case BUILT_IN_STRCPY_CHK:
            case BUILT_IN_STRNCPY_CHK:
              val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
              break;

            case BUILT_IN_ASSUME_ALIGNED:
              val = bit_value_assume_aligned (stmt);
              break;

            default:;
            }
        }
      is_constant = (val.lattice_val == CONSTANT);
    }

  if (!is_constant)
    {
      /* The statement produced a nonconstant value.  If the statement
         had UNDEFINED operands, then the result of the statement
         should be UNDEFINED.  Otherwise, the statement is VARYING.  */
      if (likelyvalue == UNDEFINED)
        {
          val.lattice_val = likelyvalue;
          val.mask = double_int_zero;
        }
      else
        {
          val.lattice_val = VARYING;
          val.mask = double_int_minus_one;
        }

      val.value = NULL_TREE;
    }

  return val;
}
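
/* Illustrative example (editorial): on a target whose
   MALLOC_ABI_ALIGNMENT is 64 bits, p_1 = malloc (n) is given the
   value 0 with mask ~(HOST_WIDE_INT) 7 above, i.e. the low three bits
   of the returned pointer are known to be zero, which lets later
   alignment checks on p_1 fold to constants.  */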

/* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
   each matching BUILT_IN_STACK_RESTORE.  Mark visited phis in VISITED.  */

static void
insert_clobber_before_stack_restore (tree saved_val, tree var, htab_t *visited)
{
  gimple stmt, clobber_stmt;
  tree clobber;
  imm_use_iterator iter;
  gimple_stmt_iterator i;
  gimple *slot;

  FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
    if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
      {
        clobber = build_constructor (TREE_TYPE (var), NULL);
        TREE_THIS_VOLATILE (clobber) = 1;
        clobber_stmt = gimple_build_assign (var, clobber);

        i = gsi_for_stmt (stmt);
        gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
      }
    else if (gimple_code (stmt) == GIMPLE_PHI)
      {
        if (*visited == NULL)
          *visited = htab_create (10, htab_hash_pointer, htab_eq_pointer, NULL);

        slot = (gimple *) htab_find_slot (*visited, stmt, INSERT);
        if (*slot != NULL)
          continue;

        *slot = stmt;
        insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
                                             visited);
      }
    else
      gcc_assert (is_gimple_debug (stmt));
}
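
/* Illustrative example (editorial, with made-up names): given

     sv_1 = __builtin_stack_save ();
     ...
     __builtin_stack_restore (sv_1);

   the walk above inserts, right before the restore, a statement that
   dumps roughly as

     D.1234 = {CLOBBER};

   marking VAR dead at that point so its stack slot can be shared once
   the saved stack pointer is reinstated.  */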

/* Advance the iterator to the previous non-debug gimple statement in the same
   or dominating basic block.  */

static inline void
gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
{
  basic_block dom;

  gsi_prev_nondebug (i);
  while (gsi_end_p (*i))
    {
      dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
      if (dom == NULL || dom == ENTRY_BLOCK_PTR)
        return;

      *i = gsi_last_bb (dom);
    }
}

/* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
   a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.  */

static void
insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
{
  bool save_found;
  gimple stmt;
  tree saved_val;
  htab_t visited = NULL;

  for (save_found = false; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
    {
      stmt = gsi_stmt (i);

      if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
        continue;
      save_found = true;

      saved_val = gimple_call_lhs (stmt);
      if (saved_val == NULL_TREE)
        continue;

      insert_clobber_before_stack_restore (saved_val, var, &visited);
      break;
    }

  if (visited != NULL)
    htab_delete (visited);
  gcc_assert (save_found);
}

/* Detect a __builtin_alloca_with_align with a constant size argument.
   If one is found, declare a fixed-size array in its place and return
   the array's address; otherwise return NULL_TREE.  */

static tree
fold_builtin_alloca_with_align (gimple stmt)
{
  unsigned HOST_WIDE_INT size, threshold, n_elem;
  tree lhs, arg, block, var, elem_type, array_type;

  /* Get lhs.  */
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return NULL_TREE;

  /* Detect constant argument.  */
  arg = get_constant_value (gimple_call_arg (stmt, 0));
  if (arg == NULL_TREE
      || TREE_CODE (arg) != INTEGER_CST
      || !host_integerp (arg, 1))
    return NULL_TREE;

  size = TREE_INT_CST_LOW (arg);

  /* Heuristic: don't fold large allocas.  */
  threshold = (unsigned HOST_WIDE_INT) PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
  /* In case the alloca is located at function entry, it has the same lifetime
     as a declared array, so we allow a larger size.  */
  block = gimple_block (stmt);
  if (!(cfun->after_inlining
        && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
    threshold /= 10;
  if (size > threshold)
    return NULL_TREE;

  /* Declare array.  */
  elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
  n_elem = size * 8 / BITS_PER_UNIT;
  array_type = build_array_type_nelts (elem_type, n_elem);
  var = create_tmp_var (array_type, NULL);
  DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1));
  {
    struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
    if (pi != NULL && !pi->pt.anything)
      {
        bool singleton_p;
        unsigned uid;
        singleton_p = pt_solution_singleton_p (&pi->pt, &uid);
        gcc_assert (singleton_p);
        SET_DECL_PT_UID (var, uid);
      }
  }

  /* Fold alloca to the address of the array.  */
  return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
}
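
/* Illustrative example (editorial; D.2345 is a made-up temporary):
   with a constant size argument,

     p_1 = __builtin_alloca_with_align (16, 64);

   is folded into the address of a 16-byte array whose DECL_ALIGN is
   64 bits:

     p_1 = &D.2345;  */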

/* Fold the stmt at *GSI with CCP specific information that propagating
   and regular folding does not catch.  */

static bool
ccp_fold_stmt (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
        prop_value_t val;
        /* Statement evaluation will handle type mismatches in constants
           more gracefully than the final propagation.  This allows us to
           fold more conditionals here.  */
        val = evaluate_stmt (stmt);
        if (val.lattice_val != CONSTANT
            || !double_int_zero_p (val.mask))
          return false;

        if (dump_file)
          {
            fprintf (dump_file, "Folding predicate ");
            print_gimple_expr (dump_file, stmt, 0, 0);
            fprintf (dump_file, " to ");
            print_generic_expr (dump_file, val.value, 0);
            fprintf (dump_file, "\n");
          }

        if (integer_zerop (val.value))
          gimple_cond_make_false (stmt);
        else
          gimple_cond_make_true (stmt);

        return true;
      }

    case GIMPLE_CALL:
      {
        tree lhs = gimple_call_lhs (stmt);
        int flags = gimple_call_flags (stmt);
        tree val;
        tree argt;
        bool changed = false;
        unsigned i;

        /* If the call was folded into a constant make sure it goes
           away even if we cannot propagate into all uses because of
           type issues.  */
        if (lhs
            && TREE_CODE (lhs) == SSA_NAME
            && (val = get_constant_value (lhs))
            /* Don't optimize away calls that have side-effects.  */
            && (flags & (ECF_CONST|ECF_PURE)) != 0
            && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
          {
            tree new_rhs = unshare_expr (val);
            bool res;
            if (!useless_type_conversion_p (TREE_TYPE (lhs),
                                            TREE_TYPE (new_rhs)))
              new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
            res = update_call_from_tree (gsi, new_rhs);
            gcc_assert (res);
            return true;
          }

        /* Internal calls provide no argument types, so the extra laxity
           for normal calls does not apply.  */
        if (gimple_call_internal_p (stmt))
          return false;

        /* The heuristic of fold_builtin_alloca_with_align differs before and
           after inlining, so we don't require the arg to be changed into a
           constant for folding, but just to be constant.  */
        if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
          {
            tree new_rhs = fold_builtin_alloca_with_align (stmt);
            if (new_rhs)
              {
                bool res = update_call_from_tree (gsi, new_rhs);
                tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0), 0);
                gcc_assert (res);
                insert_clobbers_for_var (*gsi, var);
                return true;
              }
          }

        /* Propagate into the call arguments.  Compared to replace_uses_in
           this can use the argument slot types for type verification
           instead of the current argument type.  We also can safely
           drop qualifiers here as we are dealing with constants anyway.  */
        argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
        for (i = 0; i < gimple_call_num_args (stmt) && argt;
             ++i, argt = TREE_CHAIN (argt))
          {
            tree arg = gimple_call_arg (stmt, i);
            if (TREE_CODE (arg) == SSA_NAME
                && (val = get_constant_value (arg))
                && useless_type_conversion_p
                     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
                      TYPE_MAIN_VARIANT (TREE_TYPE (val))))
              {
                gimple_call_set_arg (stmt, i, unshare_expr (val));
                changed = true;
              }
          }

        return changed;
      }

    case GIMPLE_ASSIGN:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree val;

        /* If we have a load that turned out to be constant replace it
           as we cannot propagate into all uses in all cases.  */
        if (gimple_assign_single_p (stmt)
            && TREE_CODE (lhs) == SSA_NAME
            && (val = get_constant_value (lhs)))
          {
            tree rhs = unshare_expr (val);
            if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
              rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
            gimple_assign_set_rhs_from_tree (gsi, rhs);
            return true;
          }

        return false;
      }

    default:
      return false;
    }
}
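
/* Illustrative example (editorial): if evaluate_stmt proves the
   predicate of 'if (x_1 != 0)' to be the constant 0 with an all-zero
   mask, the GIMPLE_COND case above rewrites it to 'if (0)' via
   gimple_cond_make_false, and CFG cleanup later removes the dead
   arm.  */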

/* Visit the assignment statement STMT.  Set the value of its LHS to the
   value computed by the RHS and store LHS in *OUTPUT_P.  If STMT
   creates virtual definitions, set the value of each new name to that
   of the RHS (if we can derive a constant out of the RHS).
   Value-returning call statements also perform an assignment, and
   are handled here.  */

static enum ssa_prop_result
visit_assignment (gimple stmt, tree *output_p)
{
  prop_value_t val;
  enum ssa_prop_result retval;

  tree lhs = gimple_get_lhs (stmt);

  gcc_assert (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) != NULL_TREE);

  if (gimple_assign_single_p (stmt)
      && gimple_assign_rhs_code (stmt) == SSA_NAME)
    /* For a simple copy operation, we copy the lattice values.  */
    val = *get_value (gimple_assign_rhs1 (stmt));
  else
    /* Evaluate the statement, which could be
       either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
    val = evaluate_stmt (stmt);

  retval = SSA_PROP_NOT_INTERESTING;

  /* Set the lattice value of the statement's output.  */
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      /* If STMT is an assignment to an SSA_NAME, we only have one
         value to set.  */
      if (set_lattice_value (lhs, val))
        {
          *output_p = lhs;
          if (val.lattice_val == VARYING)
            retval = SSA_PROP_VARYING;
          else
            retval = SSA_PROP_INTERESTING;
        }
    }

  return retval;
}
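
/* Illustrative example (editorial): for the copy y_2 = x_1 the lattice
   value of x_1 is reused verbatim above, so if x_1 is CONSTANT 4 then
   y_2 becomes CONSTANT 4 and SSA_PROP_INTERESTING tells the engine to
   revisit the uses of y_2.  */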

/* Visit the conditional statement STMT.  Return SSA_PROP_INTERESTING
   if it can determine which edge will be taken.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
visit_cond_stmt (gimple stmt, edge *taken_edge_p)
{
  prop_value_t val;
  basic_block block;

  block = gimple_bb (stmt);
  val = evaluate_stmt (stmt);
  if (val.lattice_val != CONSTANT
      || !double_int_zero_p (val.mask))
    return SSA_PROP_VARYING;

  /* Find which edge out of the conditional block will be taken and add it
     to the worklist.  If no single edge can be determined statically,
     return SSA_PROP_VARYING to feed all the outgoing edges to the
     propagation engine.  */
  *taken_edge_p = find_taken_edge (block, val.value);
  if (*taken_edge_p)
    return SSA_PROP_INTERESTING;
  else
    return SSA_PROP_VARYING;
}

/* Evaluate statement STMT.  If the statement produces an output value and
   its evaluation changes the lattice value of its output, return
   SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
   output value.

   If STMT is a conditional branch and we can determine its truth
   value, set *TAKEN_EDGE_P accordingly.  If STMT produces a varying
   value, return SSA_PROP_VARYING.  */

static enum ssa_prop_result
ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
{
  tree def;
  ssa_op_iter iter;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting statement:\n");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      /* If the statement is an assignment that produces a single
         output value, evaluate its RHS to see if the lattice value of
         its output has changed.  */
      return visit_assignment (stmt, output_p);

    case GIMPLE_CALL:
      /* A value-returning call also performs an assignment.  */
      if (gimple_call_lhs (stmt) != NULL_TREE)
        return visit_assignment (stmt, output_p);
      break;

    case GIMPLE_COND:
    case GIMPLE_SWITCH:
      /* If STMT is a conditional branch, see if we can determine
         which branch will be taken.  */
      /* FIXME.  It appears that we should be able to optimize
         computed GOTOs here as well.  */
      return visit_cond_stmt (stmt, taken_edge_p);

    default:
      break;
    }

  /* Any other kind of statement is not interesting for constant
     propagation and, therefore, not worth simulating.  */
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "No interesting values produced.  Marked VARYING.\n");

  /* Definitions made by statements other than assignments to
     SSA_NAMEs represent unknown modifications to their outputs.
     Mark them VARYING.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
    {
      prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } };
      set_lattice_value (def, v);
    }

  return SSA_PROP_VARYING;
}

/* Main entry point for SSA Conditional Constant Propagation.  */

static unsigned int
do_ssa_ccp (void)
{
  unsigned int todo = 0;
  calculate_dominance_info (CDI_DOMINATORS);
  ccp_initialize ();
  ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
  if (ccp_finalize ())
    todo = (TODO_cleanup_cfg | TODO_update_ssa | TODO_remove_unused_locals);
  free_dominance_info (CDI_DOMINATORS);
  return todo;
}


static bool
gate_ccp (void)
{
  return flag_tree_ccp != 0;
}


struct gimple_opt_pass pass_ccp =
{
 {
  GIMPLE_PASS,
  "ccp",				/* name */
  gate_ccp,				/* gate */
  do_ssa_ccp,				/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TREE_CCP,				/* tv_id */
  PROP_cfg | PROP_ssa,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_verify_ssa
  | TODO_verify_stmts | TODO_ggc_collect/* todo_flags_finish */
 }
};
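
/* Editorial note: since the pass is named "ccp", its effects can be
   inspected with -fdump-tree-ccp, or -fdump-tree-ccp-details for the
   per-statement lattice output printed by the dump code above.  */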

/* Try to optimize out __builtin_stack_restore.  Optimize it out
   if there is another __builtin_stack_restore in the same basic
   block and no calls or ASM_EXPRs are in between, or if this block's
   only outgoing edge is to EXIT_BLOCK and there are no calls or
   ASM_EXPRs after this __builtin_stack_restore.  */

static tree
optimize_stack_restore (gimple_stmt_iterator i)
{
  tree callee;
  gimple stmt;

  basic_block bb = gsi_bb (i);
  gimple call = gsi_stmt (i);

  if (gimple_code (call) != GIMPLE_CALL
      || gimple_call_num_args (call) != 1
      || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
      || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
    return NULL_TREE;

  for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
    {
      stmt = gsi_stmt (i);
      if (gimple_code (stmt) == GIMPLE_ASM)
        return NULL_TREE;
      if (gimple_code (stmt) != GIMPLE_CALL)
        continue;

      callee = gimple_call_fndecl (stmt);
      if (!callee
          || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
          /* All regular builtins are ok, just obviously not alloca.  */
          || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
          || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
        return NULL_TREE;

      if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
        goto second_stack_restore;
    }

  if (!gsi_end_p (i))
    return NULL_TREE;

  /* Allow a single successor that is the exit block, or no
     successors at all.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      break;
    case 1:
      if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR)
        return NULL_TREE;
      break;
    default:
      return NULL_TREE;
    }
 second_stack_restore:

  /* If there's exactly one use, then zap the call to __builtin_stack_save.
     If there are multiple uses, then the last one should remove the call.
     In any case, whether the call to __builtin_stack_save can be removed
     or not is irrelevant to removing the call to __builtin_stack_restore.  */
  if (has_single_use (gimple_call_arg (call, 0)))
    {
      gimple stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
      if (is_gimple_call (stack_save))
        {
          callee = gimple_call_fndecl (stack_save);
          if (callee
              && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
              && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
            {
              gimple_stmt_iterator stack_save_gsi;
              tree rhs;

              stack_save_gsi = gsi_for_stmt (stack_save);
              rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
              update_call_from_tree (&stack_save_gsi, rhs);
            }
        }
    }

  /* No effect, so the statement will be deleted.  */
  return integer_zero_node;
}
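
/* Illustrative example (editorial): in

     sv_1 = __builtin_stack_save ();
     ...                              (no calls or asms)
     __builtin_stack_restore (sv_1);
     return;

   the restore has no observable effect, so it is replaced by a no-op;
   and since it was the only use of sv_1, the paired
   __builtin_stack_save is rewritten to a constant zero as well.  */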

/* If the va_list type is a simple pointer and nothing special is needed,
   optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
   __builtin_va_end (&ap) out as a NOP and __builtin_va_copy into a simple
   pointer assignment.  */

static tree
optimize_stdarg_builtin (gimple call)
{
  tree callee, lhs, rhs, cfun_va_list;
  bool va_list_simple_ptr;
  location_t loc = gimple_location (call);

  if (gimple_code (call) != GIMPLE_CALL)
    return NULL_TREE;

  callee = gimple_call_fndecl (call);

  cfun_va_list = targetm.fn_abi_va_list (callee);
  va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
                       && (TREE_TYPE (cfun_va_list) == void_type_node
                           || TREE_TYPE (cfun_va_list) == char_type_node);

  switch (DECL_FUNCTION_CODE (callee))
    {
    case BUILT_IN_VA_START:
      /* We can only build the call to __builtin_next_arg if its decl
         is actually available, hence the negated test below.  */
      if (!va_list_simple_ptr
          || targetm.expand_builtin_va_start != NULL
          || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
        return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
        return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
          || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
             != TYPE_MAIN_VARIANT (cfun_va_list))
        return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
                                 1, integer_zero_node);
      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_COPY:
      if (!va_list_simple_ptr)
        return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
        return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
          || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
             != TYPE_MAIN_VARIANT (cfun_va_list))
        return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = gimple_call_arg (call, 1);
      if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
          != TYPE_MAIN_VARIANT (cfun_va_list))
        return NULL_TREE;

      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_END:
      /* No effect, so the statement will be deleted.  */
      return integer_zero_node;

    default:
      gcc_unreachable ();
    }
}
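
/* Illustrative example (editorial): on a target whose va_list is a
   plain pointer, __builtin_va_start (&ap, 0) becomes the assignment
   'ap = __builtin_next_arg (0)', __builtin_va_copy (&dst, src) becomes
   the copy 'dst = src', and __builtin_va_end (&ap) folds away
   entirely.  */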

/* A simple pass that attempts to fold all builtin functions.  This pass
   is run after we've propagated as many constants as we can.  */

static unsigned int
execute_fold_all_builtins (void)
{
  bool cfg_changed = false;
  basic_block bb;
  unsigned int todoflags = 0;

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;
      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
        {
          gimple stmt, old_stmt;
          tree callee, result;
          enum built_in_function fcode;

          stmt = gsi_stmt (i);

          if (gimple_code (stmt) != GIMPLE_CALL)
            {
              gsi_next (&i);
              continue;
            }
          callee = gimple_call_fndecl (stmt);
          if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
            {
              gsi_next (&i);
              continue;
            }
          fcode = DECL_FUNCTION_CODE (callee);

          result = gimple_fold_builtin (stmt);

          if (result)
            gimple_remove_stmt_histograms (cfun, stmt);

          if (!result)
            switch (DECL_FUNCTION_CODE (callee))
              {
              case BUILT_IN_CONSTANT_P:
                /* Resolve __builtin_constant_p.  If it hasn't been
                   folded to integer_one_node by now, it's fairly
                   certain that the value simply isn't constant.  */
                result = integer_zero_node;
                break;

              case BUILT_IN_ASSUME_ALIGNED:
                /* Remove __builtin_assume_aligned.  */
                result = gimple_call_arg (stmt, 0);
                break;

              case BUILT_IN_STACK_RESTORE:
                result = optimize_stack_restore (i);
                if (result)
                  break;
                gsi_next (&i);
                continue;

              case BUILT_IN_VA_START:
              case BUILT_IN_VA_END:
              case BUILT_IN_VA_COPY:
                /* These shouldn't be folded before pass_stdarg.  */
                result = optimize_stdarg_builtin (stmt);
                if (result)
                  break;
                /* FALLTHRU */

              default:
                gsi_next (&i);
                continue;
              }

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Simplified\n  ");
              print_gimple_stmt (dump_file, stmt, 0, dump_flags);
            }

          old_stmt = stmt;
          if (!update_call_from_tree (&i, result))
            {
              gimplify_and_update_call_from_tree (&i, result);
              todoflags |= TODO_update_address_taken;
            }

          stmt = gsi_stmt (i);
          update_stmt (stmt);

          if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
              && gimple_purge_dead_eh_edges (bb))
            cfg_changed = true;

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "to\n  ");
              print_gimple_stmt (dump_file, stmt, 0, dump_flags);
              fprintf (dump_file, "\n");
            }

          /* Retry the same statement if it changed into another
             builtin, there might be new opportunities now.  */
          if (gimple_code (stmt) != GIMPLE_CALL)
            {
              gsi_next (&i);
              continue;
            }
          callee = gimple_call_fndecl (stmt);
          if (!callee
              || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
              || DECL_FUNCTION_CODE (callee) == fcode)
            gsi_next (&i);
        }
    }

  /* Delete unreachable blocks.  */
  if (cfg_changed)
    todoflags |= TODO_cleanup_cfg;

  return todoflags;
}


struct gimple_opt_pass pass_fold_builtins =
{
 {
  GIMPLE_PASS,
  "fab",				/* name */
  NULL,					/* gate */
  execute_fold_all_builtins,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_cfg | PROP_ssa,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_verify_ssa
  | TODO_update_ssa			/* todo_flags_finish */
 }
};
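
/* Editorial note: like pass_ccp above, this pass dumps under its name,
   so the folded builtin calls can be inspected with -fdump-tree-fab.  */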