/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Inlining decision heuristics

   We separate inlining decisions from the inliner itself and store them
   inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
   documentation for the particular representation of inline plans in the
   callgraph.

   There are three major parts of this file:

   cgraph_mark_inline implementation

     This function marks a given call inline and performs the necessary
     modifications of the cgraph (production of the clones and updating of
     the overall statistics).

   inlining heuristics limits

     These functions check that a particular inlining is allowed by the
     limits specified by the user (allowed function growth, overall unit
     growth and so on).

   inlining heuristics

     This is the implementation of the IPA pass that aims to get as much
     benefit from inlining as possible while obeying the limits checked
     above.

   The implementation of the particular heuristics is separated from the
   rest of the code to make it easier to replace with a more complicated
   implementation in the future.  The rest of the inlining code acts as a
   library aimed at modifying the callgraph and verifying that the
   parameters on code size growth fit.

   To mark a given call inline, use the cgraph_mark_inline function; the
   verification is performed by cgraph_default_inline_p and
   cgraph_check_inline_limits.

   The heuristics implement a simple knapsack-style algorithm, ordering
   all functions by their "profitability" (estimated by code size growth)
   and inlining them in priority order.

   cgraph_decide_inlining implements the heuristics taking the whole
   callgraph into account, while cgraph_decide_inlining_incrementally
   considers only one function at a time and is used in
   non-unit-at-a-time mode.  */
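
/* In outline, the greedy core below (cgraph_decide_inlining_of_small_functions)
   behaves roughly like the following sketch; see the function itself for
   the exact control flow and the various bail-out conditions:

       seed a fibheap with all inlinable edges keyed by cgraph_edge_badness;
       while (overall_insns <= max_insns && the heap is not empty)
	 extract the edge with the smallest badness,
	 re-check the limits, inline it,
	 and recompute the keys of the affected edges.  */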

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "timevar.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "coverage.h"
#include "ggc.h"

/* Statistics we collect about the inlining algorithm.  */
static int ncalls_inlined;
static int nfunctions_inlined;
static int initial_insns;
static int overall_insns;
static int max_insns;
static gcov_type max_count;

/* Estimate the size of the function after inlining WHAT into TO.  */

static int
cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
                                     struct cgraph_node *what)
{
  int size;
  tree fndecl = what->decl, arg;
  int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);

  for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
    call_insns += estimate_move_cost (TREE_TYPE (arg));
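  /* Each of the TIMES inlined copies costs the callee body minus the call
     overhead it replaces (the base call cost plus one move per argument);
     e.g. a 20-insn callee with 4 insns of call overhead inlined twice into
     a 50-insn caller gives 50 + 2 * (20 - 4) = 82.  */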
  size = (what->global.insns - call_insns) * times + to->global.insns;
  gcc_assert (size >= 0);
  return size;
}

/* E is expected to be an edge being inlined.  Clone the destination node
   of the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating
   new clones or re-using the node originally representing an out-of-line
   function call.  */
void
cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
                            bool update_original)
{
  if (duplicate)
    {
      /* We may eliminate the need for the out-of-line copy to be output.
         In that case just go ahead and re-use it.  */
      if (!e->callee->callers->next_caller
          && !e->callee->needed
          && flag_unit_at_a_time)
        {
          gcc_assert (!e->callee->global.inlined_to);
          if (DECL_SAVED_TREE (e->callee->decl))
            {
              overall_insns -= e->callee->global.insns;
              nfunctions_inlined++;
            }
          duplicate = false;
        }
      else
        {
          struct cgraph_node *n;
          n = cgraph_clone_node (e->callee, e->count, e->loop_nest,
                                 update_original);
          cgraph_redirect_edge_callee (e, n);
        }
    }
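
  /* Record which function the callee's body now ultimately lands in: the
     root of the caller's own inline tree when the caller was itself
     inlined somewhere, otherwise the caller itself.  */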
  if (e->caller->global.inlined_to)
    e->callee->global.inlined_to = e->caller->global.inlined_to;
  else
    e->callee->global.inlined_to = e->caller;

  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, duplicate, update_original);
}

/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  */

void
cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original)
{
  int old_insns = 0, new_insns = 0;
  struct cgraph_node *to = NULL, *what;

  if (e->callee->inline_decl)
    cgraph_redirect_edge_callee (e, cgraph_node (e->callee->inline_decl));

  gcc_assert (e->inline_failed);
  e->inline_failed = NULL;

  if (!e->callee->global.inlined && flag_unit_at_a_time)
    DECL_POSSIBLY_INLINED (e->callee->decl) = true;
  e->callee->global.inlined = true;

  cgraph_clone_inlined_nodes (e, true, update_original);

  what = e->callee;

  /* Now update the size of the caller and of all functions the caller is
     inlined into.  */
  for (; e && !e->inline_failed; e = e->caller->callers)
    {
      old_insns = e->caller->global.insns;
      new_insns = cgraph_estimate_size_after_inlining (1, e->caller,
                                                       what);
      gcc_assert (new_insns >= 0);
      to = e->caller;
      to->global.insns = new_insns;
    }
  gcc_assert (what->global.inlined_to == to);
  if (new_insns > old_insns)
    overall_insns += new_insns - old_insns;
  ncalls_inlined++;
}

/* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
   Return the following unredirected edge in the list of callers
   of EDGE->CALLEE.  */

static struct cgraph_edge *
cgraph_mark_inline (struct cgraph_edge *edge)
{
  struct cgraph_node *to = edge->caller;
  struct cgraph_node *what = edge->callee;
  struct cgraph_edge *e, *next;
  int times = 0;

  /* Look for all calls, mark them inline and clone recursively
     all inlined functions.  */
  for (e = what->callers; e; e = next)
    {
      next = e->next_caller;
      if (e->caller == to && e->inline_failed)
        {
          cgraph_mark_inline_edge (e, true);
          if (e == edge)
            edge = next;
          times++;
        }
    }
  gcc_assert (times);
  return edge;
}

/* Estimate the growth caused by inlining NODE into all of its callers.  */

static int
cgraph_estimate_growth (struct cgraph_node *node)
{
  int growth = 0;
  struct cgraph_edge *e;
  if (node->global.estimated_growth != INT_MIN)
    return node->global.estimated_growth;

  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
                 - e->caller->global.insns);

  /* ??? Wrong for self-recursive functions, or for cases where we decide
     not to inline for other reasons, but it is not a big deal: in that
     case we keep the body around, but we also avoid some inlining.  */
  if (!node->needed && !DECL_EXTERNAL (node->decl))
    growth -= node->global.insns;

  node->global.estimated_growth = growth;
  return growth;
}

/* Return false when inlining WHAT into TO is not a good idea because it
   would cause too large a growth of the function body.  When ONE_ONLY is
   true, assume that only one call site is going to be inlined; otherwise
   figure out how many call sites in TO call WHAT and verify that all of
   them can be inlined.  */

static bool
cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
                            const char **reason, bool one_only)
{
  int times = 0;
  struct cgraph_edge *e;
  int newsize;
  int limit;

  if (one_only)
    times = 1;
  else
    for (e = to->callees; e; e = e->next_callee)
      if (e->callee == what)
        times++;

  if (to->global.inlined_to)
    to = to->global.inlined_to;

  /* When inlining a large function body called once into a small function,
     take the inlined function as the base for limiting the growth.  */
  if (to->local.self_insns > what->local.self_insns)
    limit = to->local.self_insns;
  else
    limit = what->local.self_insns;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
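
  /* The base is the larger of the caller and callee bodies; e.g. with
     --param large-function-growth=100 the combined body may grow to at
     most twice that base.  */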

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = cgraph_estimate_size_after_inlining (times, to, what);
  if (newsize >= to->global.insns
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      if (reason)
        *reason = N_("--param large-function-growth limit reached");
      return false;
    }
  return true;
}

/* Return true when function N is small enough to be inlined.  */

bool
cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
{
  tree decl = n->decl;

  if (n->inline_decl)
    decl = n->inline_decl;
  if (!DECL_INLINE (decl))
    {
      if (reason)
        *reason = N_("function not inlinable");
      return false;
    }

  if (!DECL_STRUCT_FUNCTION (decl)->cfg)
    {
      if (reason)
        *reason = N_("function body not available");
      return false;
    }

  if (DECL_DECLARED_INLINE_P (decl))
    {
      if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
        {
          if (reason)
            *reason = N_("--param max-inline-insns-single limit reached");
          return false;
        }
    }
  else
    {
      if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
        {
          if (reason)
            *reason = N_("--param max-inline-insns-auto limit reached");
          return false;
        }
    }

  return true;
}

/* Return true when inlining WHAT would create recursive inlining.
   We call recursive inlining all cases where the same function appears
   more than once on a single recursion nest path in the inline graph.  */

static bool
cgraph_recursive_inlining_p (struct cgraph_node *to,
                             struct cgraph_node *what,
                             const char **reason)
{
  bool recursive;
  if (to->global.inlined_to)
    recursive = what->decl == to->global.inlined_to->decl;
  else
    recursive = what->decl == to->decl;
  /* Marking a recursive function inline has sane semantics and thus we
     should not warn on it.  */
  if (recursive && reason)
    *reason = (what->local.disregard_inline_limits
               ? N_("recursive inlining") : "");
  return recursive;
}

/* Return true if the call can be hot.  */
static bool
cgraph_maybe_hot_edge_p (struct cgraph_edge *edge)
{
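  /* With profile feedback, consider a call cold when its execution count
     does not exceed the program's maximal count divided by
     --param hot-bb-count-fraction; without feedback, optimistically
     assume every call may be hot.  */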
  if (profile_info && flag_branch_probabilities
      && (edge->count
          <= profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION)))
    return false;
  return true;
}

/* A cost model driving the inlining heuristics so that the edges with the
   smallest badness are inlined first.  After each inlining the costs of
   all caller edges of the affected nodes are recomputed, so the metric
   may accurately depend on values such as the number of inlinable callers
   of the function or the function body size.

   With profiling we use the number of executions of each edge to drive
   the cost.  We should also distinguish hot and cold calls, where the
   cold calls are inlined only when code size improves overall.  */

static int
cgraph_edge_badness (struct cgraph_edge *edge)
{
  if (max_count)
    {
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
      growth -= edge->caller->global.insns;
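
      /* Badness is the key of a min-heap, so more profitable edges must
	 map to more negative values: size-shrinking edges land near
	 INT_MIN, and for the rest the execution count is scaled into the
	 negative int range and divided by the growth, so frequent calls
	 with small growth come out first.  */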
      /* Always prefer inlining that saves code size.  */
      if (growth <= 0)
        return INT_MIN - growth;
      return ((int)((double)edge->count * INT_MIN / max_count)) / growth;
    }
  else
    {
      int nest = MIN (edge->loop_nest, 8);
      int badness = cgraph_estimate_growth (edge->callee) * 256;

      /* Decrease badness if the call is nested.  */
      if (badness > 0)
        badness >>= nest;
      else
        badness <<= nest;

      /* Make recursive inlining always happen after other inlining is
         done.  */
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
        return badness + 1;
      else
        return badness;
    }
}

/* Recompute the heap nodes for each caller edge of NODE.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *edge;
  const char *failed_reason;

  if (!node->local.inlinable || node->local.disregard_inline_limits
      || node->global.inlined_to)
    return;
  if (bitmap_bit_p (updated_nodes, node->uid))
    return;
  bitmap_set_bit (updated_nodes, node->uid);
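  /* Setting the cached growth estimate to INT_MIN marks it invalid;
     cgraph_estimate_growth recomputes it on the next query.  */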
  node->global.estimated_growth = INT_MIN;

  if (!node->local.inlinable)
    return;
  /* Prune out edges we won't inline into anymore.  */
  if (!cgraph_default_inline_p (node, &failed_reason))
    {
      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->aux)
          {
            fibheap_delete_node (heap, edge->aux);
            edge->aux = NULL;
            if (edge->inline_failed)
              edge->inline_failed = failed_reason;
          }
      return;
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
        int badness = cgraph_edge_badness (edge);
        if (edge->aux)
          {
            fibnode_t n = edge->aux;
            gcc_assert (n->data == edge);
            if (n->key == badness)
              continue;

            /* fibheap_replace_key only increases the keys; otherwise we
               must delete the node and re-insert it with the new key.  */
            if (fibheap_replace_key (heap, n, badness))
              continue;
            fibheap_delete_node (heap, edge->aux);
          }
        edge->aux = fibheap_insert (heap, badness, edge);
      }
}

/* Recompute the heap nodes for the caller edges of each callee of NODE.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *e;
  node->global.estimated_growth = INT_MIN;

  for (e = node->callees; e; e = e->next_callee)
    if (e->inline_failed)
      update_caller_keys (heap, e->callee, updated_nodes);
    else
      update_callee_keys (heap, e->callee, updated_nodes);
}

/* Enqueue all recursive calls from NODE into the priority queue, depending
   on how likely we are to want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
                        fibheap_t heap)
{
  static int priority;
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
      {
        /* When profile feedback is available, prioritize by the expected
           number of calls.  Without profile feedback we maintain a simple
           queue ordering the candidates by recursion depth.  */
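        /* The negation makes hotter call sites come out of the min-heap
           first; the count is divided down so that even the largest gcov
           counts fit into the fibheap's int key (max_count is mapped onto
           roughly a 1<<24 range).  */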
        fibheap_insert (heap,
                        !max_count ? priority++
                        : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
                        e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}

/* Find the callgraph nodes that close a cycle in the graph.  The resulting
   hashtab can be used to avoid walking the cycles.  Uses the cgraph nodes'
   ->aux field, which must be zero before and will be zero after the
   operation.  */

static void
cgraph_find_cycles (struct cgraph_node *node, htab_t cycles)
{
  struct cgraph_edge *e;

  if (node->aux)
    {
      void **slot;
      slot = htab_find_slot (cycles, node, INSERT);
      if (!*slot)
        {
          if (dump_file)
            fprintf (dump_file, "Cycle contains %s\n", cgraph_node_name (node));
          *slot = node;
        }
      return;
    }

  node->aux = node;
  for (e = node->callees; e; e = e->next_callee)
    cgraph_find_cycles (e->callee, cycles);
  node->aux = 0;
}

/* Flatten the cgraph node.  We have to be careful when recursing so as
   not to run endlessly in cycles of the callgraph.  We do so by using the
   hashtab of cycle-entering nodes generated by cgraph_find_cycles.  */

static void
cgraph_flatten_node (struct cgraph_node *node, htab_t cycles)
{
  struct cgraph_edge *e;

  for (e = node->callees; e; e = e->next_callee)
    {
      /* Inline the call, if possible, and recurse.  Be sure we are not
         entering callgraph cycles here.  */
      if (e->inline_failed
          && e->callee->local.inlinable
          && !cgraph_recursive_inlining_p (node, e->callee,
                                           &e->inline_failed)
          && !htab_find (cycles, e->callee))
        {
          if (dump_file)
            fprintf (dump_file, " inlining %s", cgraph_node_name (e->callee));
          cgraph_mark_inline_edge (e, true);
          cgraph_flatten_node (e->callee, cycles);
        }
      else if (dump_file)
        fprintf (dump_file, " !inlining %s", cgraph_node_name (e->callee));
    }
}

/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until the body size reaches the given limit.  */

static bool
cgraph_decide_recursive_inlining (struct cgraph_node *node)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
  int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
  fibheap_t heap;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone, *next;
  int depth = 0;
  int n = 0;

  if (DECL_DECLARED_INLINE_P (node->decl))
    {
      limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
      max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
    }

  /* Make sure that the function is small enough to be considered for
     inlining.  */
  if (!max_depth
      || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
             " Performing recursive inlining on %s\n",
             cgraph_node_name (node));

  /* We need the original clone to copy around.  */
  master_clone = cgraph_clone_node (node, node->count, 1, false);
  master_clone->needed = true;
  for (e = master_clone->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, true, false);
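
  /* Recursive call sites are redirected to the master clone below; it
     keeps an unmodified copy of the body while NODE itself grows, and is
     removed again once the inlining is done.  */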

  /* Do the inlining, updating the list of recursive calls as we go.  */
  while (!fibheap_empty (heap)
         && (cgraph_estimate_size_after_inlining (1, node, master_clone)
             <= limit))
    {
      struct cgraph_edge *curr = fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      depth = 1;
      for (cnode = curr->caller;
           cnode->global.inlined_to; cnode = cnode->callers->caller)
        if (node->decl == cnode->decl)
          depth++;
      if (depth > max_depth)
        {
          if (dump_file)
            fprintf (dump_file,
                     " maximal depth reached\n");
          continue;
        }

      if (max_count)
        {
          if (!cgraph_maybe_hot_edge_p (curr))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining cold call\n");
              continue;
            }
          if (curr->count * 100 / node->count < probability)
            {
              if (dump_file)
                fprintf (dump_file,
                         " Probability of edge is too small\n");
              continue;
            }
        }

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlining call of depth %i", depth);
          if (node->count)
            {
              fprintf (dump_file, " called approx. %.2f times per call",
                       (double)curr->count / node->count);
            }
          fprintf (dump_file, "\n");
        }
      cgraph_redirect_edge_callee (curr, master_clone);
      cgraph_mark_inline_edge (curr, false);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }
  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, " Recursive inlining growth limit met.\n");

  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
             "\n Inlined %i times, body grown from %i to %i insns\n", n,
             master_clone->global.insns, node->global.insns);

  /* Remove the master clone we used for inlining.  We rely on the fact
     that clones inlined into the master clone get queued just before the
     master clone itself, so we don't need recursion.  */
  for (node = cgraph_nodes; node != master_clone;
       node = next)
    {
      next = node->next;
      if (node->global.inlined_to == master_clone)
        cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  /* FIXME: Recursive inlining actually reduces the number of calls of the
     function.  At this point we should probably walk the function's
     inline clones and compensate the counts accordingly.  This probably
     doesn't matter much in practice.  */
  return n > 0;
}

/* Set inline_failed for all callers of the given function to REASON.  */

static void
cgraph_set_inline_failed (struct cgraph_node *node, const char *reason)
{
  struct cgraph_edge *e;

  if (dump_file)
    fprintf (dump_file, "Inlining failed: %s\n", reason);
  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      e->inline_failed = reason;
}

/* We use a greedy algorithm for inlining small functions: all inline
   candidates are put into a prioritized heap based on the estimated
   growth of the overall number of instructions, and the estimates are
   updated after each inlining.  */

static void
cgraph_decide_inlining_of_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  const char *failed_reason;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);

  if (dump_file)
    fprintf (dump_file, "\nDeciding on smaller functions:\n");

  /* Put all inline candidates into the heap.  */

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->local.inlinable || !node->callers
          || node->local.disregard_inline_limits)
        continue;
      if (dump_file)
        fprintf (dump_file, "Considering inline candidate %s.\n",
                 cgraph_node_name (node));

      node->global.estimated_growth = INT_MIN;
      if (!cgraph_default_inline_p (node, &failed_reason))
        {
          cgraph_set_inline_failed (node, failed_reason);
          continue;
        }

      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->inline_failed)
          {
            gcc_assert (!edge->aux);
            edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge),
                                        edge);
          }
    }
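
  /* Greedily inline the cheapest remaining edge until the unit growth
     budget (max_insns, computed in cgraph_decide_inlining) is spent.  */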
  while (overall_insns <= max_insns && (edge = fibheap_extract_min (heap)))
    {
      int old_insns = overall_insns;
      struct cgraph_node *where;
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);

      growth -= edge->caller->global.insns;

      if (dump_file)
        {
          fprintf (dump_file,
                   "\nConsidering %s with %i insns\n",
                   cgraph_node_name (edge->callee),
                   edge->callee->global.insns);
          fprintf (dump_file,
                   " to be inlined into %s\n"
                   " Estimated growth after inlined into all callees is %+i insns.\n"
                   " Estimated badness is %i.\n",
                   cgraph_node_name (edge->caller),
                   cgraph_estimate_growth (edge->callee),
                   cgraph_edge_badness (edge));
          if (edge->count)
            fprintf (dump_file, " Called "HOST_WIDEST_INT_PRINT_DEC"x\n",
                     edge->count);
        }
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
        continue;

      /* When we don't have profile info ready, the position of the call
         inside the procedure is not weighted in any way.  This means that
         if a call of function A from function B seems profitable to
         inline, the recursive call of A in the inlined copy of A in B
         will look profitable too, and we would end up inlining until the
         maximal function growth is reached.  That is not a good idea, so
         prohibit the recursive inlining here.

         ??? When the frequencies are taken into account we might not need
         this restriction.  */
      if (!max_count)
        {
          where = edge->caller;
          while (where->global.inlined_to)
            {
              if (where->decl == edge->callee->decl)
                break;
              where = where->callers->caller;
            }
          if (where->global.inlined_to)
            {
              edge->inline_failed
                = (edge->callee->local.disregard_inline_limits
                   ? N_("recursive inlining") : "");
              if (dump_file)
                fprintf (dump_file,
                         " inline_failed:Recursive inlining performed only for function itself.\n");
              continue;
            }
        }

      if (!cgraph_maybe_hot_edge_p (edge) && growth > 0)
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              edge->inline_failed = N_("call is unlikely");
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         edge->inline_failed);
            }
          continue;
        }
      if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         edge->inline_failed);
            }
          continue;
        }
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                       &edge->inline_failed))
        {
          where = edge->caller;
          if (where->global.inlined_to)
            where = where->global.inlined_to;
          if (!cgraph_decide_recursive_inlining (where))
            continue;
          update_callee_keys (heap, where, updated_nodes);
        }
      else
        {
          struct cgraph_node *callee;
          if (!cgraph_check_inline_limits (edge->caller, edge->callee,
                                           &edge->inline_failed, true))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining into %s:%s.\n",
                         cgraph_node_name (edge->caller),
                         edge->inline_failed);
              continue;
            }
          callee = edge->callee;
          cgraph_mark_inline_edge (edge, true);
          update_callee_keys (heap, callee, updated_nodes);
        }
      where = edge->caller;
      if (where->global.inlined_to)
        where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties such as
         the number of inlinable calls and the size of the function body.
         After inlining, these properties might change for the function we
         inlined into (since its body size changed) and for the functions
         called by the function we inlined (since the number of their
         inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);
      bitmap_clear (updated_nodes);

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlined into %s which now has %i insns,"
                   " net change of %+i insns.\n",
                   cgraph_node_name (edge->caller),
                   edge->caller->global.insns,
                   overall_insns - old_insns);
        }
    }
  while ((edge = fibheap_extract_min (heap)) != NULL)
    {
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
          && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                           &edge->inline_failed))
        edge->inline_failed = N_("--param inline-unit-growth limit reached");
    }
  fibheap_delete (heap);
  BITMAP_FREE (updated_nodes);
}

/* Decide on the inlining.  We do so in topological order so that updates
   to the data structures stay cheap.  */

static unsigned int
cgraph_decide_inlining (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int old_insns = 0;
  int i;

  timevar_push (TV_INLINE_HEURISTICS);
  max_count = 0;
  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed && (node->needed || node->reachable))
      {
        struct cgraph_edge *e;

        /* At the moment, no IPA passes change function bodies before
           inlining.  Save some time by not recomputing function body
           sizes if early inlining already did so.  */
        if (!flag_early_inlining)
          node->local.self_insns = node->global.insns
            = estimate_num_insns (node->decl);

        initial_insns += node->local.self_insns;
        gcc_assert (node->local.self_insns == node->global.insns);
        for (e = node->callees; e; e = e->next_callee)
          if (max_count < e->count)
            max_count = e->count;
      }
  overall_insns = initial_insns;
  gcc_assert (!max_count || (profile_info && flag_branch_probabilities));

  max_insns = overall_insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  max_insns = ((HOST_WIDEST_INT) max_insns
               * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
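
  /* E.g. with --param inline-unit-growth=50 a unit of 10000 insns may
     grow to at most 15000 insns.  */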

  nnodes = cgraph_postorder (order);

  if (dump_file)
    fprintf (dump_file,
             "\nDeciding on inlining.  Starting with %i insns.\n",
             initial_insns);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nInlining always_inline functions:\n");

  /* In the first pass mark all always_inline edges.  Do this with a
     priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      struct cgraph_edge *e, *next;

      node = order[i];

      /* Handle nodes to be flattened, but don't update the overall unit
         size.  */
      if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
        {
          int old_overall_insns = overall_insns;
          htab_t cycles;
          if (dump_file)
            fprintf (dump_file,
                     "Flattening %s\n", cgraph_node_name (node));
          cycles = htab_create (7, htab_hash_pointer, htab_eq_pointer, NULL);
          cgraph_find_cycles (node, cycles);
          cgraph_flatten_node (node, cycles);
          htab_delete (cycles);
          overall_insns = old_overall_insns;
          /* We don't need to consider always_inline functions inside the
             flattened function anymore.  */
          continue;
        }

      if (!node->local.disregard_inline_limits)
        continue;
      if (dump_file)
        fprintf (dump_file,
                 "\nConsidering %s %i insns (always inline)\n",
                 cgraph_node_name (node), node->global.insns);
      old_insns = overall_insns;
      for (e = node->callers; e; e = next)
        {
          next = e->next_caller;
          if (!e->inline_failed)
            continue;
          if (cgraph_recursive_inlining_p (e->caller, e->callee,
                                           &e->inline_failed))
            continue;
          cgraph_mark_inline_edge (e, true);
          if (dump_file)
            fprintf (dump_file,
                     " Inlined into %s which now has %i insns.\n",
                     cgraph_node_name (e->caller),
                     e->caller->global.insns);
        }
      if (dump_file)
        fprintf (dump_file,
                 " Inlined for a net change of %+i insns.\n",
                 overall_insns - old_insns);
    }

  if (!flag_really_no_inline)
    cgraph_decide_inlining_of_small_functions ();

  if (!flag_really_no_inline
      && flag_inline_functions_called_once)
    {
      if (dump_file)
        fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* And finally decide what functions are called once.  */

      for (i = nnodes - 1; i >= 0; i--)
        {
          node = order[i];

          if (node->callers && !node->callers->next_caller && !node->needed
              && node->local.inlinable && node->callers->inline_failed
              && !DECL_EXTERNAL (node->decl) && !DECL_COMDAT (node->decl))
            {
              bool ok = true;
              struct cgraph_node *node1;

              /* Verify that we won't duplicate the caller.  */
              for (node1 = node->callers->caller;
                   node1->callers && !node1->callers->inline_failed
                   && ok; node1 = node1->callers->caller)
                if (node1->callers->next_caller || node1->needed)
                  ok = false;
              if (ok)
                {
                  if (dump_file)
                    {
                      fprintf (dump_file,
                               "\nConsidering %s %i insns.\n",
                               cgraph_node_name (node), node->global.insns);
                      fprintf (dump_file,
                               " Called once from %s %i insns.\n",
                               cgraph_node_name (node->callers->caller),
                               node->callers->caller->global.insns);
                    }

                  old_insns = overall_insns;

                  if (cgraph_check_inline_limits (node->callers->caller, node,
                                                  NULL, false))
                    {
                      cgraph_mark_inline (node->callers);
                      if (dump_file)
                        fprintf (dump_file,
                                 " Inlined into %s which now has %i insns"
                                 " for a net change of %+i insns.\n",
                                 cgraph_node_name (node->callers->caller),
                                 node->callers->caller->global.insns,
                                 overall_insns - old_insns);
                    }
                  else
                    {
                      if (dump_file)
                        fprintf (dump_file,
                                 " Inline limit reached, not inlined.\n");
                    }
                }
            }
        }
    }

  if (dump_file)
    fprintf (dump_file,
             "\nInlined %i calls, eliminated %i functions, "
             "%i insns turned to %i insns.\n\n",
             ncalls_inlined, nfunctions_inlined, initial_insns,
             overall_insns);
  free (order);
  timevar_pop (TV_INLINE_HEURISTICS);
  return 0;
}

/* Decide on the inlining within a single function NODE.  This is used by
   the early inliner and in non-unit-at-a-time mode.  */

bool
cgraph_decide_inlining_incrementally (struct cgraph_node *node, bool early)
{
  struct cgraph_edge *e;
  bool inlined = false;
  const char *failed_reason;

  /* First of all look for always_inline functions.  */
  for (e = node->callees; e; e = e->next_callee)
    if (e->callee->local.disregard_inline_limits
        && e->inline_failed
        && !cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed)
        /* ??? It is possible that renaming a variable removed the function
           body in duplicate_decls.  See gcc.c-torture/compile/20011119-2.c.  */
        && (DECL_SAVED_TREE (e->callee->decl) || e->callee->inline_decl))
      {
        if (dump_file && early)
          {
            fprintf (dump_file, " Early inlining %s",
                     cgraph_node_name (e->callee));
            fprintf (dump_file, " into %s\n", cgraph_node_name (node));
          }
        cgraph_mark_inline (e);
        inlined = true;
      }

  /* Now do the automatic inlining.  */
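  /* In early mode, additionally require that inlining not grow the caller:
     the estimated size after inlining must not exceed the caller's current
     size.  */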
  if (!flag_really_no_inline)
    for (e = node->callees; e; e = e->next_callee)
      if (e->callee->local.inlinable
          && e->inline_failed
          && !e->callee->local.disregard_inline_limits
          && !cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed)
          && (!early
              || (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
                  <= e->caller->global.insns))
          && cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
                                         false)
          && (DECL_SAVED_TREE (e->callee->decl) || e->callee->inline_decl))
        {
          if (cgraph_default_inline_p (e->callee, &failed_reason))
            {
              if (dump_file && early)
                {
                  fprintf (dump_file, " Early inlining %s",
                           cgraph_node_name (e->callee));
                  fprintf (dump_file, " into %s\n", cgraph_node_name (node));
                }
              cgraph_mark_inline (e);
              inlined = true;
            }
          else if (!early)
            e->inline_failed = failed_reason;
        }
  if (early && inlined)
    {
      push_cfun (DECL_STRUCT_FUNCTION (node->decl));
      tree_register_cfg_hooks ();
      current_function_decl = node->decl;
      optimize_inline_calls (current_function_decl);
      node->local.self_insns = node->global.insns;
      current_function_decl = NULL;
      pop_cfun ();
    }
  return inlined;
}

/* When inlining shall be performed.  */
static bool
cgraph_gate_inlining (void)
{
  return flag_inline_trees;
}

struct tree_opt_pass pass_ipa_inline =
{
  "inline",                             /* name */
  cgraph_gate_inlining,                 /* gate */
  cgraph_decide_inlining,               /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INTEGRATION,                       /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func,    /* todo_flags_finish */
  0                                     /* letter */
};

/* Because inlining might remove no-longer-reachable nodes, we need to
   keep the array visible to the garbage collector to avoid reading
   collected-out nodes.  */
static int nnodes;
static GTY ((length ("nnodes"))) struct cgraph_node **order;

/* Do inlining of small functions.  Doing so early helps profiling and
   other passes to be somewhat more effective and avoids some code
   duplication in the later real inlining pass for testcases with very
   many function calls.  */
static unsigned int
cgraph_early_inlining (void)
{
  struct cgraph_node *node;
  int i;

  if (sorrycount || errorcount)
    return 0;
#ifdef ENABLE_CHECKING
  for (node = cgraph_nodes; node; node = node->next)
    gcc_assert (!node->aux);
#endif

  order = ggc_alloc (sizeof (*order) * cgraph_n_nodes);
  nnodes = cgraph_postorder (order);
  for (i = nnodes - 1; i >= 0; i--)
    {
      node = order[i];
      if (node->analyzed && (node->needed || node->reachable))
        node->local.self_insns = node->global.insns
          = estimate_num_insns (node->decl);
    }
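  /* Sizes must be known for all nodes before any decision is made, since
     the incremental decisions below read size estimates across function
     boundaries.  */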
  for (i = nnodes - 1; i >= 0; i--)
    {
      node = order[i];
      if (node->analyzed && node->local.inlinable
          && (node->needed || node->reachable)
          && node->callers)
        {
          if (cgraph_decide_inlining_incrementally (node, true))
            ggc_collect ();
        }
    }
  cgraph_remove_unreachable_nodes (true, dump_file);
#ifdef ENABLE_CHECKING
  for (node = cgraph_nodes; node; node = node->next)
    gcc_assert (!node->global.inlined_to);
#endif
  ggc_free (order);
  order = NULL;
  nnodes = 0;
  return 0;
}

/* When inlining shall be performed.  */
static bool
cgraph_gate_early_inlining (void)
{
  return flag_inline_trees && flag_early_inlining;
}

struct tree_opt_pass pass_early_ipa_inline =
{
  "einline",                            /* name */
  cgraph_gate_early_inlining,           /* gate */
  cgraph_early_inlining,                /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INTEGRATION,                       /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func,    /* todo_flags_finish */
  0                                     /* letter */
};

#include "gt-ipa-inline.h"