/* Inlining decision heuristics.
   Copyright (C) 2003-2022 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Inlining decision heuristics

   The implementation of the inliner is organized as follows:

   inlining heuristics limits

     can_inline_edge_p allows one to check that a particular inlining is
     allowed by the limits specified by the user (allowed function growth,
     stack growth and so on).

     Functions are inlined when it is obvious the result is profitable (such
     as functions called once or when inlining reduces code size).
     In addition to that we perform inlining of small functions and recursive
     inlining.

   inlining heuristics

     The inliner itself is split into two passes:

     pass_early_inlining

       Simple local inlining pass inlining callees into the current function.
       This pass makes no use of whole unit analysis and thus it can do only
       very simple decisions based on local properties.

       The strength of the pass is that it is run in topological order
       (reverse postorder) on the callgraph.  Functions are converted into SSA
       form just before this pass and optimized subsequently.  As a result,
       the callees of the function seen by the early inliner were already
       optimized, and the results of early inlining add a lot of optimization
       opportunities for the local optimization.

       The pass handles the obvious inlining decisions within the compilation
       unit - inlining auto inline functions, inlining for size and
       flattening.

       The main strength of the pass is the ability to eliminate abstraction
       penalty in C++ code (via a combination of inlining and early
       optimization) and thus improve the quality of analysis done by real
       IPA optimizers.

       Because of the lack of whole unit knowledge, the pass cannot really
       make good code size/performance tradeoffs.  It however does very
       simple speculative inlining allowing code size to grow by
       EARLY_INLINING_INSNS when the callee is a leaf function.  In this case
       the optimizations performed later are very likely to eliminate the
       cost.

   pass_ipa_inline

     This is the real inliner able to handle inlining with whole program
     knowledge.  It performs the following steps:

     1) Inlining of small functions.  This is implemented by a greedy
     algorithm ordering all inlinable cgraph edges by their badness and
     inlining them in this order as long as the inline limits allow doing so.

     This heuristic is not very good at inlining recursive calls.  Recursive
     calls can be inlined with results similar to loop unrolling.  To do so,
     a special purpose recursive inliner is executed on a function when a
     recursive edge is met as a viable candidate.

     2) Unreachable functions are removed from the callgraph.  Inlining leads
     to devirtualization and other modifications of the callgraph, so
     functions may become unreachable during the process.  Also functions
     declared as extern inline or virtual functions are removed, since after
     inlining we no longer need the offline bodies.

     3) Functions called once and not exported from the unit are inlined.
     This should almost always lead to a reduction of code size by
     eliminating the need for an offline copy of the function.  */
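
/* For illustration (a hypothetical example, not taken from the GCC
   sources): in a unit like

     static int helper (int x) { return x * x; }
     int user (int x) { return helper (x) + 1; }

   step 3 of pass_ipa_inline inlines helper into its only caller and
   step 2 then removes the now unreachable offline body of helper, so
   the unit usually shrinks.  */
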
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "lto-streamer.h"
#include "trans-mem.h"
#include "calls.h"
#include "tree-inline.h"
#include "profile.h"
#include "symbol-summary.h"
#include "tree-vrp.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
#include "ipa-inline.h"
#include "ipa-utils.h"
#include "sreal.h"
#include "auto-profile.h"
#include "builtins.h"
#include "fibonacci_heap.h"
#include "stringpool.h"
#include "attribs.h"
#include "asan.h"

typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;

/* Statistics we collect about inlining algorithm.  */
static int overall_size;
static profile_count max_count;
static profile_count spec_rem;

/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into a tiny wrapper, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the growth on the stack usage
   of the callers.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */

static bool
caller_growth_limits (struct cgraph_edge *e)
{
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = e->callee->ultimate_alias_target ();
  int newsize;
  int limit = 0;
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  ipa_size_summary *outer_info = ipa_size_summaries->get (to);

  /* Look for the function e->caller is inlined to.  While doing
     so work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits on that.  Not on the self size of the
     outer function, not on the self size of inline code
     we immediately inline to.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent compiler from exploding".  */
  while (true)
    {
      ipa_size_summary *size_info = ipa_size_summaries->get (to);
      if (limit < size_info->self_size)
        limit = size_info->self_size;
      if (stack_size_limit < size_info->estimated_self_stack_size)
        stack_size_limit = size_info->estimated_self_stack_size;
      if (to->inlined_to)
        to = to->callers->caller;
      else
        break;
    }

  ipa_fn_summary *what_info = ipa_fn_summaries->get (what);
  ipa_size_summary *what_size_info = ipa_size_summaries->get (what);

  if (limit < what_size_info->self_size)
    limit = what_size_info->self_size;

  limit += limit * opt_for_fn (to->decl, param_large_function_growth) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= ipa_size_summaries->get (what)->size
      && newsize > opt_for_fn (to->decl, param_large_function_insns)
      && newsize > limit)
    {
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  if (!what_info->estimated_stack_size)
    return true;

  /* FIXME: Stack size limit often prevents inlining in Fortran programs
     due to large i/o datastructures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     the exit block).  We do not track this information, yet.  */
  stack_size_limit += ((gcov_type)stack_size_limit
                       * opt_for_fn (to->decl, param_stack_frame_growth)
                       / 100);

  inlined_stack = (ipa_get_stack_frame_offset (to)
                   + outer_info->estimated_self_stack_size
                   + what_info->estimated_stack_size);
  /* Check new stack consumption with stack consumption at the place
     stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If function already has large stack usage from sibling
         inline call, we can inline, too.
         This bit overoptimistically assumes that we are good at stack
         packing.  */
      && inlined_stack > ipa_fn_summaries->get (to)->estimated_stack_size
      && inlined_stack > opt_for_fn (to->decl, param_large_stack_frame))
    {
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}

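/* A worked example of the limit computation above, with made-up numbers:
   if the largest self size seen on the way to the outermost function is
   1000 insns and --param large-function-growth is 100 (its documented
   default), LIMIT becomes 1000 + 1000 * 100 / 100 = 2000 insns; the edge
   is rejected only when the estimated size after inlining is at least
   the callee's size and exceeds both --param large-function-insns and
   this limit.  */
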
/* Dump info about why inlining has failed.  */

static void
report_inline_failed_reason (struct cgraph_edge *e)
{
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
                       "  not inlinable: %C -> %C, %s\n",
                       e->caller, e->callee,
                       cgraph_inline_failed_string (e->inline_failed));
      if ((e->inline_failed == CIF_TARGET_OPTION_MISMATCH
           || e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
          && e->caller->lto_file_data
          && e->callee->ultimate_alias_target ()->lto_file_data)
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
                           "  LTO objects: %s, %s\n",
                           e->caller->lto_file_data->file_name,
                           e->callee->ultimate_alias_target ()->lto_file_data->file_name);
        }
      if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH)
        if (dump_file)
          cl_target_option_print_diff
            (dump_file, 2, target_opts_for_fn (e->caller->decl),
             target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
      if (e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
        if (dump_file)
          cl_optimization_print_diff
            (dump_file, 2, opts_for_fn (e->caller->decl),
             opts_for_fn (e->callee->ultimate_alias_target ()->decl));
    }
}

/* Decide whether sanitizer-related attributes allow inlining.  */

static bool
sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
{
  if (!caller || !callee)
    return true;

  /* Follow clang and allow inlining for always_inline functions.  */
  if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (callee)))
    return true;

  const sanitize_code codes[] =
    {
      SANITIZE_ADDRESS,
      SANITIZE_THREAD,
      SANITIZE_UNDEFINED,
      SANITIZE_UNDEFINED_NONDEFAULT,
      SANITIZE_POINTER_COMPARE,
      SANITIZE_POINTER_SUBTRACT
    };

  for (unsigned i = 0; i < sizeof (codes) / sizeof (codes[0]); i++)
    if (sanitize_flags_p (codes[i], caller)
        != sanitize_flags_p (codes[i], callee))
      return false;

  if (sanitize_coverage_p (caller) != sanitize_coverage_p (callee))
    return false;

  return true;
}

/* Used for flags where it is safe to inline when caller's value is
   greater than callee's.  */
#define check_maybe_up(flag) \
      (opts_for_fn (caller->decl)->x_##flag \
       != opts_for_fn (callee->decl)->x_##flag \
       && (!always_inline \
           || opts_for_fn (caller->decl)->x_##flag \
              < opts_for_fn (callee->decl)->x_##flag))
/* Used for flags where it is safe to inline when caller's value is
   smaller than callee's.  */
#define check_maybe_down(flag) \
      (opts_for_fn (caller->decl)->x_##flag \
       != opts_for_fn (callee->decl)->x_##flag \
       && (!always_inline \
           || opts_for_fn (caller->decl)->x_##flag \
              > opts_for_fn (callee->decl)->x_##flag))
/* Used for flags where exact match is needed for correctness.  */
#define check_match(flag) \
      (opts_for_fn (caller->decl)->x_##flag \
       != opts_for_fn (callee->decl)->x_##flag)

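/* For example, check_maybe_up (flag_trapping_math) evaluates to true -
   and therefore blocks inlining in can_inline_edge_by_limits_p below
   (for the FP flags, only when both functions contain FP expressions) -
   whenever the two settings differ, except that an always_inline callee
   is still accepted when the caller's value is the greater one:
   inlining code compiled with -fno-trapping-math into a -ftrapping-math
   caller is conservatively safe, while the reverse could drop FP traps
   the callee depends on.  */
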
/* Decide if we can inline the edge and possibly update
   inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output reason to the dump file.  */

static bool
can_inline_edge_p (struct cgraph_edge *e, bool report,
                   bool early = false)
{
  gcc_checking_assert (e->inline_failed);

  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    {
      if (report)
        report_inline_failed_reason (e);
      return false;
    }

  bool inlinable = true;
  enum availability avail;
  cgraph_node *caller = (e->caller->inlined_to
                         ? e->caller->inlined_to : e->caller);
  cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);

  if (!callee->definition)
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      inlinable = false;
    }
  if (!early && (!opt_for_fn (callee->decl, optimize)
                 || !opt_for_fn (caller->decl, optimize)))
    {
      e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
      inlinable = false;
    }
  else if (callee->calls_comdat_local)
    {
      e->inline_failed = CIF_USES_COMDAT_LOCAL;
      inlinable = false;
    }
  else if (avail <= AVAIL_INTERPOSABLE)
    {
      e->inline_failed = CIF_OVERWRITABLE;
      inlinable = false;
    }
  /* All edges with call_stmt_cannot_inline_p should have inline_failed
     initialized to one of FINAL_ERROR reasons.  */
  else if (e->call_stmt_cannot_inline_p)
    gcc_unreachable ();
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (caller->decl)
           && DECL_FUNCTION_PERSONALITY (callee->decl)
           && (DECL_FUNCTION_PERSONALITY (caller->decl)
               != DECL_FUNCTION_PERSONALITY (callee->decl)))
    {
      e->inline_failed = CIF_EH_PERSONALITY;
      inlinable = false;
    }
  /* TM pure functions should not be inlined into non-TM_pure
     functions.  */
  else if (is_tm_pure (callee->decl) && !is_tm_pure (caller->decl))
    {
      e->inline_failed = CIF_UNSPECIFIED;
      inlinable = false;
    }
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (caller->decl,
                                                callee->decl))
    {
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
      inlinable = false;
    }
  else if (ipa_fn_summaries->get (callee) == NULL
           || !ipa_fn_summaries->get (callee)->inlinable)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
      inlinable = false;
    }
  /* Don't inline a function with mismatched sanitization attributes.  */
  else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
    {
      e->inline_failed = CIF_SANITIZE_ATTRIBUTE_MISMATCH;
      inlinable = false;
    }

  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}

/* Return inlining_insns_single limit for function N.  If HINT or HINT2
   is true, scale up the bound.  */

static int
inline_insns_single (cgraph_node *n, bool hint, bool hint2)
{
  if (hint && hint2)
    {
      int64_t spd = opt_for_fn (n->decl, param_inline_heuristics_hint_percent);
      spd = spd * spd;
      if (spd > 1000000)
        spd = 1000000;
      return opt_for_fn (n->decl, param_max_inline_insns_single) * spd / 100;
    }
  if (hint || hint2)
    return opt_for_fn (n->decl, param_max_inline_insns_single)
           * opt_for_fn (n->decl, param_inline_heuristics_hint_percent) / 100;
  return opt_for_fn (n->decl, param_max_inline_insns_single);
}

/* Return inlining_insns_auto limit for function N.  If HINT or HINT2
   is true, scale up the bound.  */

static int
inline_insns_auto (cgraph_node *n, bool hint, bool hint2)
{
  int max_inline_insns_auto = opt_for_fn (n->decl, param_max_inline_insns_auto);
  if (hint && hint2)
    {
      int64_t spd = opt_for_fn (n->decl, param_inline_heuristics_hint_percent);
      spd = spd * spd;
      if (spd > 1000000)
        spd = 1000000;
      return max_inline_insns_auto * spd / 100;
    }
  if (hint || hint2)
    return max_inline_insns_auto
           * opt_for_fn (n->decl, param_inline_heuristics_hint_percent) / 100;
  return max_inline_insns_auto;
}

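/* Both helpers above follow the same pattern: one matching hint group
   scales the base bound by param_inline_heuristics_hint_percent / 100,
   while two matching groups apply the squared (and capped) percentage
   as coded above, giving strongly hinted calls a still larger size
   budget.  */
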
/* Decide if we can inline the edge and possibly update
   inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output reason to the dump file.

   If DISREGARD_LIMITS is true, ignore size limits.  */

static bool
can_inline_edge_by_limits_p (struct cgraph_edge *e, bool report,
                             bool disregard_limits = false, bool early = false)
{
  gcc_checking_assert (e->inline_failed);

  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    {
      if (report)
        report_inline_failed_reason (e);
      return false;
    }

  bool inlinable = true;
  enum availability avail;
  cgraph_node *caller = (e->caller->inlined_to
                         ? e->caller->inlined_to : e->caller);
  cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
  tree callee_tree
    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
  /* Check if caller growth allows the inlining.  */
  if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
      && !disregard_limits
      && !lookup_attribute ("flatten",
                            DECL_ATTRIBUTES (caller->decl))
      && !caller_growth_limits (e))
    inlinable = false;
  else if (callee->externally_visible
           && !DECL_DISREGARD_INLINE_LIMITS (callee->decl)
           && flag_live_patching == LIVE_PATCHING_INLINE_ONLY_STATIC)
    {
      e->inline_failed = CIF_EXTERN_LIVE_ONLY_STATIC;
      inlinable = false;
    }
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of handling
     the optimization attribute.  */
  else if (caller_tree != callee_tree)
    {
      bool always_inline =
             (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
              && lookup_attribute ("always_inline",
                                   DECL_ATTRIBUTES (callee->decl)));
      ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
      ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);

      /* Until GCC 4.9 we did not check the semantics-altering flags
         below and inlined across optimization boundaries.
         Enabling checks below breaks several packages by refusing
         to inline library always_inline functions.  See PR65873.
         Disable the check for early inlining for now until a better
         solution is found.  */
      if (always_inline && early)
        ;
      /* There are some options that change IL semantics which means
         we cannot inline in these cases for correctness reasons.
         Not even for always_inline declared functions.  */
      else if (check_match (flag_wrapv)
               || check_match (flag_trapv)
               || check_match (flag_pcc_struct_return)
               || check_maybe_down (optimize_debug)
               /* When caller or callee does FP math, be sure FP codegen flags
                  are compatible.  */
               || ((caller_info->fp_expressions && callee_info->fp_expressions)
                   && (check_maybe_up (flag_rounding_math)
                       || check_maybe_up (flag_trapping_math)
                       || check_maybe_down (flag_unsafe_math_optimizations)
                       || check_maybe_down (flag_finite_math_only)
                       || check_maybe_up (flag_signaling_nans)
                       || check_maybe_down (flag_cx_limited_range)
                       || check_maybe_up (flag_signed_zeros)
                       || check_maybe_down (flag_associative_math)
                       || check_maybe_down (flag_reciprocal_math)
                       || check_maybe_down (flag_fp_int_builtin_inexact)
                       /* Strictly speaking only when the callee contains
                          function calls that may end up setting errno.  */
                       || check_maybe_up (flag_errno_math)))
               /* We do not want to make code compiled with exceptions to be
                  brought into a non-EH function unless we know that the callee
                  does not throw.
                  This is tracked by DECL_FUNCTION_PERSONALITY.  */
               || (check_maybe_up (flag_non_call_exceptions)
                   && DECL_FUNCTION_PERSONALITY (callee->decl))
               || (check_maybe_up (flag_exceptions)
                   && DECL_FUNCTION_PERSONALITY (callee->decl))
               /* When devirtualization is disabled for the callee, it is not
                  safe to inline it as we possibly mangled the type info.
                  Allow early inlining of always inlines.  */
               || (!early && check_maybe_down (flag_devirtualize)))
        {
          e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
          inlinable = false;
        }
      /* gcc.dg/pr43564.c.  Apply user-forced inline even at -O0.  */
      else if (always_inline)
        ;
      /* When the user added an attribute to the callee honor it.  */
      else if (lookup_attribute ("optimize", DECL_ATTRIBUTES (callee->decl))
               && opts_for_fn (caller->decl) != opts_for_fn (callee->decl))
        {
          e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
          inlinable = false;
        }
      /* If explicit optimize attributes are not used, the mismatch is caused
         by different command line options used to build different units.
         Do not care about COMDAT functions - those are intended to be
         optimized with the optimization flags of the module they are used in.
         Also do not care about mixing up size/speed optimization when
         DECL_DISREGARD_INLINE_LIMITS is set.  */
      else if ((callee->merged_comdat
                && !lookup_attribute ("optimize",
                                      DECL_ATTRIBUTES (caller->decl)))
               || DECL_DISREGARD_INLINE_LIMITS (callee->decl))
        ;
      /* If the mismatch is caused by merging two LTO units with different
         optimization flags we want to be a bit nicer.  However never inline
         if one of the functions is not optimized at all.  */
      else if (!opt_for_fn (callee->decl, optimize)
               || !opt_for_fn (caller->decl, optimize))
        {
          e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
          inlinable = false;
        }
      /* If callee is optimized for size and caller is not, allow inlining if
         code shrinks or we are in param_max_inline_insns_single limit and
         callee is inline (and thus likely a unified comdat).
         This will allow the caller to run faster.  */
      else if (opt_for_fn (callee->decl, optimize_size)
               > opt_for_fn (caller->decl, optimize_size))
        {
          int growth = estimate_edge_growth (e);
          if (growth > opt_for_fn (caller->decl, param_max_inline_insns_size)
              && (!DECL_DECLARED_INLINE_P (callee->decl)
                  && growth >= MAX (inline_insns_single (caller, false, false),
                                    inline_insns_auto (caller, false, false))))
            {
              e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
              inlinable = false;
            }
        }
      /* If callee is more aggressively optimized for performance than caller,
         we generally want to inline only cheap (runtime wise) functions.  */
      else if (opt_for_fn (callee->decl, optimize_size)
               < opt_for_fn (caller->decl, optimize_size)
               || (opt_for_fn (callee->decl, optimize)
                   > opt_for_fn (caller->decl, optimize)))
        {
          if (estimate_edge_time (e)
              >= 20 + ipa_call_summaries->get (e)->call_stmt_time)
            {
              e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
              inlinable = false;
            }
        }

    }

  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}


/* Return true if the edge E is inlinable during early inlining.  */

static bool
can_early_inline_edge_p (struct cgraph_edge *e)
{
  cgraph_node *caller = (e->caller->inlined_to
                         ? e->caller->inlined_to : e->caller);
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();
  /* Early inliner might get called at WPA stage when IPA pass adds a new
     function.  In this case we cannot really do any of early inlining
     because function bodies are missing.  */
  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    return false;
  if (!gimple_has_body_p (callee->decl))
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  /* In the early inliner some of the callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee by the early inliner, yet).  We don't have CIF code for this
     case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
                         "  edge not inlinable: not in SSA form\n");
      return false;
    }
  else if (profile_arc_flag
           && ((lookup_attribute ("no_profile_instrument_function",
                                  DECL_ATTRIBUTES (caller->decl)) == NULL_TREE)
               != (lookup_attribute ("no_profile_instrument_function",
                                     DECL_ATTRIBUTES (callee->decl)) == NULL_TREE)))
    return false;

  if (!can_inline_edge_p (e, true, true)
      || !can_inline_edge_by_limits_p (e, true, false, true))
    return false;
  return true;
}


/* Return number of calls in N.  Ignore cheap builtins.  */

static int
num_calls (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  int num = 0;

  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->decl))
      num++;
  return num;
}


/* Return true if we are interested in inlining the small function.  */

static bool
want_early_inline_function_p (struct cgraph_edge *e)
{
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  /* For AutoFDO, we need to make sure that before the profile summary, all
     hot paths' IR looks exactly the same as in the profiled binary.  As a
     result, in the einliner, we will disregard the size limit and inline
     those callsites that are:
       * inlined in the profiled binary, and
       * the cloned callee has enough samples to be considered "hot".  */
  else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
           && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      want_inline = false;
    }
  else
    {
      /* First take care of very large functions.  */
      int min_growth = estimate_min_edge_growth (e), growth = 0;
      int n;
      int early_inlining_insns = param_early_inlining_insns;

      if (min_growth > early_inlining_insns)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
                             "  will not early inline: %C->%C, "
                             "call is cold and code would grow "
                             "at least by %i\n",
                             e->caller, callee,
                             min_growth);
          want_inline = false;
        }
      else
        growth = estimate_edge_growth (e);


      if (!want_inline || growth <= param_max_inline_insns_size)
        ;
      else if (!e->maybe_hot_p ())
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
                             "  will not early inline: %C->%C, "
                             "call is cold and code would grow by %i\n",
                             e->caller, callee,
                             growth);
          want_inline = false;
        }
      else if (growth > early_inlining_insns)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
                             "  will not early inline: %C->%C, "
                             "growth %i exceeds --param early-inlining-insns\n",
                             e->caller, callee, growth);
          want_inline = false;
        }
      else if ((n = num_calls (callee)) != 0
               && growth * (n + 1) > early_inlining_insns)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
                             "  will not early inline: %C->%C, "
                             "growth %i exceeds --param early-inlining-insns "
                             "divided by number of calls\n",
                             e->caller, callee, growth);
          want_inline = false;
        }
    }
  return want_inline;
}

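/* To illustrate the last test above with made-up numbers: with
   --param early-inlining-insns=14, a callee whose inlining grows code
   by 6 insns and which itself contains two non-builtin calls is
   rejected, since 6 * (2 + 1) = 18 exceeds 14, while the same growth
   for a leaf callee (n == 0) is accepted.  */
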
/* Compute time of the edge->caller + edge->callee execution when inlining
   does not happen.  */

inline sreal
compute_uninlined_call_time (struct cgraph_edge *edge,
                             sreal uninlined_call_time,
                             sreal freq)
{
  cgraph_node *caller = (edge->caller->inlined_to
                         ? edge->caller->inlined_to
                         : edge->caller);

  if (freq > 0)
    uninlined_call_time *= freq;
  else
    uninlined_call_time = uninlined_call_time >> 11;

  sreal caller_time = ipa_fn_summaries->get (caller)->time;
  return uninlined_call_time + caller_time;
}

/* Same as compute_uninlined_call_time but compute time when inlining
   does happen.  */

inline sreal
compute_inlined_call_time (struct cgraph_edge *edge,
                           sreal time,
                           sreal freq)
{
  cgraph_node *caller = (edge->caller->inlined_to
                         ? edge->caller->inlined_to
                         : edge->caller);
  sreal caller_time = ipa_fn_summaries->get (caller)->time;

  if (freq > 0)
    time *= freq;
  else
    time = time >> 11;

  /* This calculation should match one in ipa-inline-analysis.cc
     (estimate_edge_size_and_time).  */
  time -= (sreal)ipa_call_summaries->get (edge)->call_stmt_time * freq;
  time += caller_time;
  if (time <= 0)
    time = ((sreal) 1) >> 8;
  gcc_checking_assert (time >= 0);
  return time;
}

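/* A small worked example of the two helpers above, with made-up
   numbers: for caller time 1000, specialized callee time 40,
   unspecialized callee time 50, call_stmt_time 5 and edge frequency
   0.5,

     uninlined = 50 * 0.5 + 1000            = 1025
     inlined   = 40 * 0.5 - 5 * 0.5 + 1000  = 1017.5

   so inlining this edge is estimated to save 7.5 time units.  */
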
/* Determine time saved by inlining EDGE of frequency FREQ
   where callee's runtime w/o inlining is UNINLINED_TIME
   and with inlining is INLINED_TIME.  */

inline sreal
inlining_speedup (struct cgraph_edge *edge,
                  sreal freq,
                  sreal uninlined_time,
                  sreal inlined_time)
{
  sreal speedup = uninlined_time - inlined_time;
  /* Handling of call_time should match one in ipa-inline-fnsummary.c
     (estimate_edge_size_and_time).  */
  sreal call_time = ipa_call_summaries->get (edge)->call_stmt_time;

  if (freq > 0)
    {
      speedup = (speedup + call_time);
      if (freq != 1)
        speedup = speedup * freq;
    }
  else if (freq == 0)
    speedup = speedup >> 11;
  gcc_checking_assert (speedup >= 0);
  return speedup;
}

/* Return true if the speedup for inlining E is bigger than
   param_inline_min_speedup.  */

static bool
big_speedup_p (struct cgraph_edge *e)
{
  sreal unspec_time;
  sreal spec_time = estimate_edge_time (e, &unspec_time);
  sreal freq = e->sreal_frequency ();
  sreal time = compute_uninlined_call_time (e, unspec_time, freq);
  sreal inlined_time = compute_inlined_call_time (e, spec_time, freq);
  cgraph_node *caller = (e->caller->inlined_to
                         ? e->caller->inlined_to
                         : e->caller);
  int limit = opt_for_fn (caller->decl, param_inline_min_speedup);

  if ((time - inlined_time) * 100 > time * limit)
    return true;
  return false;
}

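/* Continuing the example above: with a hypothetical --param
   inline-min-speedup of 15, the saving of 7.5 out of 1025 time units
   is well below 15%, so big_speedup_p returns false for that edge.  */
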
/* Return true if we are interested in inlining the small function.
   When REPORT is true, report reason to dump file.  */

static bool
want_inline_small_function_p (struct cgraph_edge *e, bool report)
{
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();
  cgraph_node *to = (e->caller->inlined_to
                     ? e->caller->inlined_to : e->caller);

  /* Allow this function to be called before can_inline_edge_p,
     since it's usually cheaper.  */
  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    want_inline = false;
  else if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
           && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      want_inline = false;
    }
  /* Do a fast and conservative check if the function can be a good
     inline candidate.  */
  else if ((!DECL_DECLARED_INLINE_P (callee->decl)
            && (!e->count.ipa ().initialized_p () || !e->maybe_hot_p ()))
           && ipa_fn_summaries->get (callee)->min_size
              - ipa_call_summaries->get (e)->call_stmt_size
              > inline_insns_auto (e->caller, true, true))
    {
      e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
      want_inline = false;
    }
  else if ((DECL_DECLARED_INLINE_P (callee->decl)
            || e->count.ipa ().nonzero_p ())
           && ipa_fn_summaries->get (callee)->min_size
              - ipa_call_summaries->get (e)->call_stmt_size
              > inline_insns_single (e->caller, true, true))
    {
      e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
                          ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
                          : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      ipa_hints hints = estimate_edge_hints (e);
      /* We have two independent groups of hints.  If one matches in each
         of the groups the limits are increased.  If both groups match,
         the limit is increased even more.  */
      bool apply_hints = (hints & (INLINE_HINT_indirect_call
                                   | INLINE_HINT_known_hot
                                   | INLINE_HINT_loop_iterations
                                   | INLINE_HINT_loop_stride));
      bool apply_hints2 = (hints & INLINE_HINT_builtin_constant_p);

      if (growth <= opt_for_fn (to->decl,
                                param_max_inline_insns_size))
        ;
      /* Apply param_max_inline_insns_single limit.  Do not do so when
         hints suggest that inlining the given function is very profitable.
         Avoid computation of big_speedup_p when not necessary to change
         the outcome of the decision.  */
      else if (DECL_DECLARED_INLINE_P (callee->decl)
               && growth >= inline_insns_single (e->caller, apply_hints,
                                                 apply_hints2)
               && (apply_hints || apply_hints2
                   || growth >= inline_insns_single (e->caller, true,
                                                     apply_hints2)
                   || !big_speedup_p (e)))
        {
          e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
          want_inline = false;
        }
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
               && !opt_for_fn (e->caller->decl, flag_inline_functions)
               && growth >= opt_for_fn (to->decl,
                                        param_max_inline_insns_small))
        {
          /* growth_positive_p is expensive, always test it last.  */
          if (growth >= inline_insns_single (e->caller, false, false)
              || growth_positive_p (callee, e, growth))
            {
              e->inline_failed = CIF_NOT_DECLARED_INLINED;
              want_inline = false;
            }
        }
      /* Apply param_max_inline_insns_auto limit for functions not declared
         inline.  Bypass the limit when speedup seems big.  */
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
               && growth >= inline_insns_auto (e->caller, apply_hints,
                                               apply_hints2)
               && (apply_hints || apply_hints2
                   || growth >= inline_insns_auto (e->caller, true,
                                                   apply_hints2)
                   || !big_speedup_p (e)))
        {
          /* growth_positive_p is expensive, always test it last.  */
          if (growth >= inline_insns_single (e->caller, false, false)
              || growth_positive_p (callee, e, growth))
            {
              e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
              want_inline = false;
            }
        }
      /* If the call is cold, do not inline when the function body would
         grow.  */
      else if (!e->maybe_hot_p ()
               && (growth >= inline_insns_single (e->caller, false, false)
                   || growth_positive_p (callee, e, growth)))
        {
          e->inline_failed = CIF_UNLIKELY_CALL;
          want_inline = false;
        }
    }
  if (!want_inline && report)
    report_inline_failed_reason (e);
  return want_inline;
}

9de21a23 978
4c0f7679 979/* EDGE is self recursive edge.
1e83bd70 980 We handle two cases - when function A is inlining into itself
4c0f7679
JH
981 or when function A is being inlined into another inliner copy of function
982 A within function B.
983
984 In first case OUTER_NODE points to the toplevel copy of A, while
985 in the second case OUTER_NODE points to the outermost copy of A in B.
986
987 In both cases we want to be extra selective since
988 inlining the call will just introduce new recursive calls to appear. */
09a2806f 989
4c0f7679
JH
990static bool
991want_inline_self_recursive_call_p (struct cgraph_edge *edge,
992 struct cgraph_node *outer_node,
993 bool peeling,
994 int depth)
995{
996 char const *reason = NULL;
997 bool want_inline = true;
0009a6c3 998 sreal caller_freq = 1;
1e83bd70
JH
999 int max_depth = opt_for_fn (outer_node->decl,
1000 param_max_inline_recursive_depth_auto);
4c0f7679 1001
67348ccc 1002 if (DECL_DECLARED_INLINE_P (edge->caller->decl))
1e83bd70
JH
1003 max_depth = opt_for_fn (outer_node->decl,
1004 param_max_inline_recursive_depth);
4c0f7679 1005
3dafb85c 1006 if (!edge->maybe_hot_p ())
4c0f7679
JH
1007 {
1008 reason = "recursive call is cold";
1009 want_inline = false;
1010 }
4c0f7679
JH
1011 else if (depth > max_depth)
1012 {
1013 reason = "--param max-inline-recursive-depth exceeded.";
1014 want_inline = false;
1015 }
a62bfab5 1016 else if (outer_node->inlined_to
0009a6c3 1017 && (caller_freq = outer_node->callers->sreal_frequency ()) == 0)
bd936951 1018 {
0009a6c3 1019 reason = "caller frequency is 0";
bd936951
JH
1020 want_inline = false;
1021 }
1022
4c0f7679
JH
1023 if (!want_inline)
1024 ;
0009a6c3
JH
1025 /* Inlining of self recursive function into copy of itself within other
1026 function is transformation similar to loop peeling.
4c0f7679 1027
09a2806f 1028 Peeling is profitable if we can inline enough copies to make probability
4c0f7679
JH
1029 of actual call to the self recursive function very small. Be sure that
1030 the probability of recursion is small.
1031
09a2806f 1032 We ensure that the frequency of recursing is at most 1 - (1/max_depth).
0009a6c3 1033 This way the expected number of recursion is at most max_depth. */
4c0f7679
JH
1034 else if (peeling)
1035 {
0009a6c3 1036 sreal max_prob = (sreal)1 - ((sreal)1 / (sreal)max_depth);
4c0f7679
JH
1037 int i;
1038 for (i = 1; i < depth; i++)
0009a6c3
JH
1039 max_prob = max_prob * max_prob;
1040 if (edge->sreal_frequency () >= max_prob * caller_freq)
4c0f7679
JH
1041 {
1042 reason = "frequency of recursive call is too large";
1043 want_inline = false;
1044 }
1045 }
0009a6c3
JH
1046 /* Recursive inlining, i.e. equivalent of unrolling, is profitable if
1047 recursion depth is large. We reduce function call overhead and increase
1048 chances that things fit in hardware return predictor.
4c0f7679
JH
1049
1050 Recursive inlining might however increase cost of stack frame setup
1051 actually slowing down functions whose recursion tree is wide rather than
1052 deep.
1053
09a2806f 1054 Deciding reliably on when to do recursive inlining without profile feedback
4c0f7679
JH
1055 is tricky. For now we disable recursive inlining when probability of self
1056 recursion is low.
1057
0009a6c3
JH
1058 Recursive inlining of self recursive call within loop also results in
1059 large loop depths that generally optimize badly. We may want to throttle
1060 down inlining in those cases. In particular this seems to happen in one
1061 of libstdc++ rb tree methods. */
4c0f7679
JH
1062 else
1063 {
0009a6c3
JH
1064 if (edge->sreal_frequency () * 100
1065 <= caller_freq
1e83bd70
JH
1066 * opt_for_fn (outer_node->decl,
1067 param_min_inline_recursive_probability))
4c0f7679
JH
1068 {
1069 reason = "frequency of recursive call is too small";
1070 want_inline = false;
1071 }
1072 }
4174a33a
DM
1073 if (!want_inline && dump_enabled_p ())
1074 dump_printf_loc (MSG_MISSED_OPTIMIZATION, edge->call_stmt,
1075 " not inlining recursively: %s\n", reason);
4c0f7679 1076 return want_inline;
ca31b95f
JH
1077}
1078
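/* Note the repeated squaring in the peeling case above: after the loop,
   max_prob equals (1 - 1/max_depth) raised to the power 2^(depth - 1).
   For example, with max_depth 8 and depth 3 the edge frequency must stay
   below (7/8)^4, roughly 0.59 of the caller frequency, for another level
   of peeling to be allowed.  */
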
/* Return true when NODE has an uninlinable caller;
   set HAS_HOT_CALL if it has a hot call.
   Worker for cgraph_for_node_and_aliases.  */

static bool
check_callers (struct cgraph_node *node, void *has_hot_call)
{
  struct cgraph_edge *e;
  for (e = node->callers; e; e = e->next_caller)
    {
      if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once)
          || !opt_for_fn (e->caller->decl, optimize))
        return true;
      if (!can_inline_edge_p (e, true))
        return true;
      if (e->recursive_p ())
        return true;
      if (!can_inline_edge_by_limits_p (e, true))
        return true;
      /* Inlining large functions to large loop depth is often harmful
         because of the register pressure it implies.  */
      if ((int)ipa_call_summaries->get (e)->loop_depth
          > param_inline_functions_called_once_loop_depth)
        return true;
      /* Do not produce gigantic functions.  */
      if (estimate_size_after_inlining (e->caller->inlined_to ?
                                        e->caller->inlined_to : e->caller, e)
          > param_inline_functions_called_once_insns)
        return true;
      if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
        *(bool *)has_hot_call = true;
    }
  return false;
}

/* If NODE has a caller, return true.  */

static bool
has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  if (node->callers)
    return true;
  return false;
}

/* Decide if inlining NODE would reduce unit size by eliminating
   the offline copy of the function.
   When COLD is true the cold calls are considered, too.  */

static bool
want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
{
  bool has_hot_call = false;

  /* Aliases get inlined along with the function they alias.  */
  if (node->alias)
    return false;
  /* Already inlined?  */
  if (node->inlined_to)
    return false;
  /* Does it have callers?  */
  if (!node->call_for_symbol_and_aliases (has_caller_p, NULL, true))
    return false;
  /* Inlining into all callers would increase size?  */
  if (growth_positive_p (node, NULL, INT_MIN) > 0)
    return false;
  /* All inlines must be possible.  */
  if (node->call_for_symbol_and_aliases (check_callers, &has_hot_call,
                                         true))
    return false;
  if (!cold && !has_hot_call)
    return false;
  return true;
}

/* Return true if WHERE of SIZE is a possible candidate for wrapper
   heuristics in estimate_edge_badness.  */

static bool
wrapper_heuristics_may_apply (struct cgraph_node *where, int size)
{
  return size < (DECL_DECLARED_INLINE_P (where->decl)
                 ? inline_insns_single (where, false, false)
                 : inline_insns_auto (where, false, false));
}

/* A cost model driving the inlining heuristics in a way so the edges with
   smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of nodes affected are recomputed so the
   metrics may accurately depend on values such as number of inlinable
   callers of the function or function body size.  */

static sreal
edge_badness (struct cgraph_edge *edge, bool dump)
{
  sreal badness;
  int growth;
  sreal edge_time, unspec_edge_time;
  struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
  class ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
  ipa_hints hints;
  cgraph_node *caller = (edge->caller->inlined_to
                         ? edge->caller->inlined_to
                         : edge->caller);

  growth = estimate_edge_growth (edge);
  edge_time = estimate_edge_time (edge, &unspec_edge_time);
  hints = estimate_edge_hints (edge);
  gcc_checking_assert (edge_time >= 0);
  /* Check that inlined time is better, but tolerate some roundoff issues.
     FIXME: When callee profile drops to 0 we account calls more.  This
     should be fixed by never doing that.  */
  gcc_checking_assert ((edge_time * 100
                        - callee_info->time * 101).to_int () <= 0
                       || callee->count.ipa ().initialized_p ());
  gcc_checking_assert (growth <= ipa_size_summaries->get (callee)->size);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s -> %s\n",
               edge->caller->dump_name (),
               edge->callee->dump_name ());
      fprintf (dump_file, "      size growth %i, time %f unspec %f ",
               growth,
               edge_time.to_double (),
               unspec_edge_time.to_double ());
      ipa_dump_hints (dump_file, hints);
      if (big_speedup_p (edge))
        fprintf (dump_file, " big_speedup");
      fprintf (dump_file, "\n");
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = (sreal) (-SREAL_MIN_SIG + growth) << (SREAL_MAX_EXP / 256);
      if (dump)
        fprintf (dump_file, "      %f: Growth %d <= 0\n", badness.to_double (),
                 growth);
    }
  /* Inlining into EXTERNAL functions is not going to change anything unless
     they are themselves inlined.  */
  else if (DECL_EXTERNAL (caller->decl))
    {
      if (dump)
        fprintf (dump_file, "      max: function is external\n");
      return sreal::max ();
    }
  /* When profile is available, compute badness as:

                time_saved * caller_count
     goodness = -------------------------------------------------
                growth_of_caller * overall_growth * combined_size

     badness = - goodness

     Again use a negative value to make calls with profile appear hotter
     than calls without.  */
  else if (opt_for_fn (caller->decl, flag_guess_branch_prob)
           || caller->count.ipa ().nonzero_p ())
    {
      sreal numerator, denominator;
      int overall_growth;
      sreal freq = edge->sreal_frequency ();

      numerator = inlining_speedup (edge, freq, unspec_edge_time, edge_time);
      if (numerator <= 0)
        numerator = ((sreal) 1 >> 8);
      if (caller->count.ipa ().nonzero_p ())
        numerator *= caller->count.ipa ().to_gcov_type ();
      else if (caller->count.ipa ().initialized_p ())
        numerator = numerator >> 11;
      denominator = growth;

      overall_growth = callee_info->growth;

      /* Look for inliner wrappers of the form:

         inline_caller ()
           {
             do_fast_job...
             if (need_more_work)
               noninline_callee ();
           }
         Without penalizing this case, we usually inline noninline_callee
         into the inline_caller because overall_growth is small, preventing
         further inlining of inline_caller.

         Penalize only callgraph edges to functions with small overall
         growth ...
         */
      if (growth > overall_growth
          /* ... and having only one caller which is not inlined ... */
          && callee_info->single_caller
          && !edge->caller->inlined_to
          /* ... and edges executed only conditionally ... */
          && freq < 1
          /* ... consider case where callee is not inline but caller is ... */
          && ((!DECL_DECLARED_INLINE_P (edge->callee->decl)
               && DECL_DECLARED_INLINE_P (caller->decl))
              /* ... or when early optimizers decided to split and edge
                 frequency still indicates splitting is a win ... */
              || (callee->split_part && !caller->split_part
                  && freq * 100
                     < opt_for_fn (caller->decl,
                                   param_partial_inlining_entry_probability)
                  /* ... and do not overwrite user specified hints.  */
                  && (!DECL_DECLARED_INLINE_P (edge->callee->decl)
                      || DECL_DECLARED_INLINE_P (caller->decl)))))
        {
          ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
          int caller_growth = caller_info->growth;

          /* Only apply the penalty when caller looks like an inline
             candidate, and it is not called once.  */
          if (!caller_info->single_caller && overall_growth < caller_growth
              && caller_info->inlinable
              && wrapper_heuristics_may_apply
                   (caller, ipa_size_summaries->get (caller)->size))
            {
              if (dump)
                fprintf (dump_file,
                         "     Wrapper penalty.  Increasing growth %i to %i\n",
                         overall_growth, caller_growth);
              overall_growth = caller_growth;
            }
        }
      if (overall_growth > 0)
        {
          /* Strongly prefer functions with few callers that can be inlined
             fully.  The square root here leads to smaller binaries on
             average.  Watch however for extreme cases and return to a linear
             function when growth is large.  */
          if (overall_growth < 256)
            overall_growth *= overall_growth;
          else
            overall_growth += 256 * 256 - 256;
          denominator *= overall_growth;
        }
      denominator *= ipa_size_summaries->get (caller)->size + growth;

      badness = - numerator / denominator;

      if (dump)
        {
          fprintf (dump_file,
                   "      %f: guessed profile. frequency %f, count %" PRId64
                   " caller count %" PRId64
                   " time saved %f"
                   " overall growth %i (current) %i (original)"
                   " %i (compensated)\n",
                   badness.to_double (),
                   freq.to_double (),
                   edge->count.ipa ().initialized_p ()
                   ? edge->count.ipa ().to_gcov_type () : -1,
                   caller->count.ipa ().initialized_p ()
                   ? caller->count.ipa ().to_gcov_type () : -1,
                   inlining_speedup (edge, freq, unspec_edge_time,
                                     edge_time).to_double (),
                   estimate_growth (callee),
                   callee_info->growth, overall_growth);
        }
    }
  /* When the function local profile is not available or it does not give
     useful information (i.e. frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for overall number
     of functions fully inlined in the program.  */
  else
    {
      int nest = MIN (ipa_call_summaries->get (edge)->loop_depth, 8);
      badness = growth;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
        badness = badness >> nest;
      else
        badness = badness << nest;
      if (dump)
        fprintf (dump_file, "      %f: no profile. nest %i\n",
                 badness.to_double (), nest);
    }
  gcc_checking_assert (badness != 0);

  if (edge->recursive_p ())
    badness = badness.shift (badness > 0 ? 4 : -4);
  if ((hints & (INLINE_HINT_indirect_call
                | INLINE_HINT_loop_iterations
                | INLINE_HINT_loop_stride))
      || callee_info->growth <= 0)
    badness = badness.shift (badness > 0 ? -2 : 2);
  if (hints & INLINE_HINT_builtin_constant_p)
    badness = badness.shift (badness > 0 ? -4 : 4);
  if (hints & (INLINE_HINT_same_scc))
    badness = badness.shift (badness > 0 ? 3 : -3);
  else if (hints & (INLINE_HINT_in_scc))
    badness = badness.shift (badness > 0 ? 2 : -2);
  else if (hints & (INLINE_HINT_cross_module))
    badness = badness.shift (badness > 0 ? 1 : -1);
  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    badness = badness.shift (badness > 0 ? -4 : 4);
  else if ((hints & INLINE_HINT_declared_inline))
    badness = badness.shift (badness > 0 ? -3 : 3);
  if (dump)
    fprintf (dump_file, "      Adjusted by hints %f\n", badness.to_double ());
  return badness;
}

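/* A worked instance of the guessed-profile formula above, with made-up
   numbers: time saved 2.0, growth 30, callee overall growth 40 (small
   enough to be squared to 1600) and caller size 470 give

     badness = - 2.0 / (30 * 1600 * (470 + 30))

   i.e. roughly -8.3e-8; edges with the smallest badness are extracted
   from the heap first, and the hint-based shifts above move the value
   by further powers of two.  */
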
9b8051b4 1387/* Recompute badness of EDGE and update its key in HEAP if needed. */
4c0f7679 1388static inline void
4a910049 1389update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
9b8051b4 1390{
f0e1509b 1391 sreal badness = edge_badness (edge, false);
9b8051b4
JH
1392 if (edge->aux)
1393 {
4a910049
ML
1394 edge_heap_node_t *n = (edge_heap_node_t *) edge->aux;
1395 gcc_checking_assert (n->get_data () == edge);
9b8051b4 1396
6d4ab5f8 1397 /* fibonacci_heap::replace_key does busy updating of the
956d615d 1398 heap that is unnecessarily expensive.
6d4ab5f8
JH
1399 We do lazy increases: after extracting minimum if the key
1400 turns out to be out of date, it is re-inserted into heap
1401 with correct value. */
4a910049 1402 if (badness < n->get_key ())
9b8051b4 1403 {
4c0f7679
JH
1404 if (dump_file && (dump_flags & TDF_DETAILS))
1405 {
1406 fprintf (dump_file,
464d0118
ML
1407 " decreasing badness %s -> %s, %f to %f\n",
1408 edge->caller->dump_name (),
1409 edge->callee->dump_name (),
6d4ab5f8
JH
1410 n->get_key ().to_double (),
1411 badness.to_double ());
4c0f7679 1412 }
4a910049 1413 heap->decrease_key (n, badness);
9b8051b4
JH
1414 }
1415 }
1416 else
4c0f7679
JH
1417 {
1418 if (dump_file && (dump_flags & TDF_DETAILS))
1419 {
1420 fprintf (dump_file,
464d0118
ML
1421 " enqueuing call %s -> %s, badness %f\n",
1422 edge->caller->dump_name (),
1423 edge->callee->dump_name (),
6d4ab5f8 1424 badness.to_double ());
4c0f7679 1425 }
4a910049 1426 edge->aux = heap->insert (badness, edge);
4c0f7679 1427 }
9b8051b4
JH
1428}
1429
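/* Illustrative sketch, not part of GCC: the lazy-update policy above
   applies key decreases eagerly (decrease_key) but leaves increases in the
   heap as stale entries.  A stale entry is detected after extraction by
   recomputing the key; if it grew, the element is pushed back.  Modelled
   with std::priority_queue (assumes <queue>, <vector> and <utility>; all
   names are hypothetical, and keys are assumed to stabilize between
   extractions so the loop terminates):  */

static int
sketch_extract_valid_min (std::priority_queue<std::pair<double, int>,
			  std::vector<std::pair<double, int> >,
			  std::greater<std::pair<double, int> > > &heap,
			  double (*fresh_key) (int))
{
  while (!heap.empty ())
    {
      std::pair<double, int> top = heap.top ();
      heap.pop ();
      double now = fresh_key (top.second);
      if (now <= top.first)
	return top.second;	/* Key did not grow: entry is still valid.  */
      /* Key grew while queued: re-insert with the fresh value.  */
      heap.push (std::make_pair (now, top.second));
    }
  return -1;			/* Heap exhausted.  */
}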
40fda55b
JH
1430
1431/* NODE was inlined.
956d615d 1432 All caller edges need to be reset because
40fda55b
JH
1433 size estimates change. Similarly, callees need to be reset
1434 because better context may be known. */
1435
1436static void
1437reset_edge_caches (struct cgraph_node *node)
1438{
1439 struct cgraph_edge *edge;
1440 struct cgraph_edge *e = node->callees;
1441 struct cgraph_node *where = node;
e55637b7 1442 struct ipa_ref *ref;
40fda55b 1443
a62bfab5
ML
1444 if (where->inlined_to)
1445 where = where->inlined_to;
40fda55b 1446
ac6f2e59
JH
1447 reset_node_cache (where);
1448
9fb50ad8
ML
1449 if (edge_growth_cache != NULL)
1450 for (edge = where->callers; edge; edge = edge->next_caller)
1451 if (edge->inline_failed)
1452 edge_growth_cache->remove (edge);
e55637b7
ML
1453
1454 FOR_EACH_ALIAS (where, ref)
1455 reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));
40fda55b
JH
1456
1457 if (!e)
1458 return;
1459
1460 while (true)
1461 if (!e->inline_failed && e->callee->callees)
1462 e = e->callee->callees;
1463 else
1464 {
9fb50ad8
ML
1465 if (edge_growth_cache != NULL && e->inline_failed)
1466 edge_growth_cache->remove (e);
40fda55b
JH
1467 if (e->next_callee)
1468 e = e->next_callee;
1469 else
1470 {
1471 do
1472 {
1473 if (e->caller == node)
1474 return;
1475 e = e->caller->callers;
1476 }
1477 while (!e->next_callee);
1478 e = e->next_callee;
1479 }
1480 }
1481}
1482
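/* Illustrative sketch, not part of GCC: reset_edge_caches above walks the
   whole inline-clone tree without recursion or an explicit stack, using
   only first-callee, next-callee and caller links.  A reduced model with
   hypothetical types showing the same traversal shape:  */

struct sketch_node;
struct sketch_edge
{
  struct sketch_node *caller, *callee;
  struct sketch_edge *next_callee;	/* Next edge out of CALLER.  */
  bool inline_failed;			/* False = edge was inlined.  */
};
struct sketch_node
{
  struct sketch_edge *callees;		/* First outgoing edge.  */
  struct sketch_edge *callers;		/* Sole incoming edge in a clone tree.  */
};

static void
sketch_walk_inline_tree (struct sketch_node *node,
			 void (*visit) (struct sketch_edge *))
{
  struct sketch_edge *e = node->callees;
  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;		/* Descend into the inlined body.  */
    else
      {
	visit (e);
	if (e->next_callee)
	  e = e->next_callee;		/* Advance to the next sibling.  */
	else
	  {
	    do				/* Climb until a sibling exists.  */
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}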
1483/* Recompute HEAP nodes for each caller of NODE.
1484 UPDATED_NODES tracks nodes we already visited, to avoid redundant work.
1485 When CHECK_INLINABLITY_FOR is set, re-check only for the specified edge
1486 whether it is inlinable. Otherwise check all edges. */
670cd5c5
JH
1487
1488static void
4a910049 1489update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
40fda55b
JH
1490 bitmap updated_nodes,
1491 struct cgraph_edge *check_inlinablity_for)
670cd5c5
JH
1492{
1493 struct cgraph_edge *edge;
e55637b7 1494 struct ipa_ref *ref;
670cd5c5 1495
56f62793 1496 if ((!node->alias && !ipa_fn_summaries->get (node)->inlinable)
a62bfab5 1497 || node->inlined_to)
670cd5c5 1498 return;
4325656f 1499 if (!bitmap_set_bit (updated_nodes, node->get_uid ()))
670cd5c5 1500 return;
670cd5c5 1501
e55637b7
ML
1502 FOR_EACH_ALIAS (node, ref)
1503 {
1504 struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
1505 update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
1506 }
39e2db00 1507
cdc029b9 1508 for (edge = node->callers; edge; edge = edge->next_caller)
4c0f7679
JH
1509 if (edge->inline_failed)
1510 {
40fda55b
JH
1511 if (!check_inlinablity_for
1512 || check_inlinablity_for == edge)
f10d1a74 1513 {
9a4841a3
JH
1514 if (can_inline_edge_p (edge, false)
1515 && want_inline_small_function_p (edge, false)
1516 && can_inline_edge_by_limits_p (edge, false))
40fda55b
JH
1517 update_edge_key (heap, edge);
1518 else if (edge->aux)
1519 {
1520 report_inline_failed_reason (edge);
4a910049 1521 heap->delete_node ((edge_heap_node_t *) edge->aux);
40fda55b
JH
1522 edge->aux = NULL;
1523 }
f10d1a74 1524 }
40fda55b
JH
1525 else if (edge->aux)
1526 update_edge_key (heap, edge);
4c0f7679 1527 }
9b8051b4
JH
1528}
1529
7c6f2fb9
JH
1530/* Recompute HEAP nodes for each uninlined call in NODE.
1531 If UPDATE_SINCE is non-NULL, check whether edges called within that
1532 function are inlinable (typically UPDATE_SINCE is the inline clone we
1533 introduced where all edges have a new context).
1534
9b8051b4
JH
1535 This is used when we know that edge badnesses are only going to increase
1536 (we introduced a new call site) and thus all we need is to insert newly
1537 created edges into the heap. */
1538
1539static void
4a910049 1540update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
7c6f2fb9 1541 struct cgraph_node *update_since,
9b8051b4
JH
1542 bitmap updated_nodes)
1543{
1544 struct cgraph_edge *e = node->callees;
7c6f2fb9 1545 bool check_inlinability = update_since == node;
09a2806f 1546
9b8051b4
JH
1547 if (!e)
1548 return;
1549 while (true)
1550 if (!e->inline_failed && e->callee->callees)
7c6f2fb9
JH
1551 {
1552 if (e->callee == update_since)
1553 check_inlinability = true;
1554 e = e->callee->callees;
1555 }
9b8051b4 1556 else
670cd5c5 1557 {
a5b1779f
JH
1558 enum availability avail;
1559 struct cgraph_node *callee;
7c6f2fb9
JH
1560 if (!check_inlinability)
1561 {
1562 if (e->aux
1563 && !bitmap_bit_p (updated_nodes,
1564 e->callee->ultimate_alias_target
1565 (&avail, e->caller)->get_uid ()))
1566 update_edge_key (heap, e);
1567 }
58696ce5 1568 /* We do not reset callee growth cache here. Since we added a new call,
956d615d 1569 growth should have just increased and consequently the badness
58696ce5 1570 metric doesn't need updating. */
7c6f2fb9
JH
1571 else if (e->inline_failed
1572 && (callee = e->callee->ultimate_alias_target (&avail,
1573 e->caller))
1574 && avail >= AVAIL_AVAILABLE
1575 && ipa_fn_summaries->get (callee) != NULL
1576 && ipa_fn_summaries->get (callee)->inlinable
1577 && !bitmap_bit_p (updated_nodes, callee->get_uid ()))
670cd5c5 1578 {
9a4841a3
JH
1579 if (can_inline_edge_p (e, false)
1580 && want_inline_small_function_p (e, false)
1581 && can_inline_edge_by_limits_p (e, false))
7c6f2fb9
JH
1582 {
1583 gcc_checking_assert (check_inlinability || can_inline_edge_p (e, false));
1584 gcc_checking_assert (check_inlinability || e->aux);
1585 update_edge_key (heap, e);
1586 }
40fda55b
JH
1587 else if (e->aux)
1588 {
1589 report_inline_failed_reason (e);
4a910049 1590 heap->delete_node ((edge_heap_node_t *) e->aux);
40fda55b
JH
1591 e->aux = NULL;
1592 }
9b8051b4 1593 }
7c6f2fb9
JH
1594 /* In case we redirected to an unreachable node, we only need to remove
1595 the fibheap entry. */
1596 else if (e->aux)
1597 {
1598 heap->delete_node ((edge_heap_node_t *) e->aux);
1599 e->aux = NULL;
1600 }
9b8051b4
JH
1601 if (e->next_callee)
1602 e = e->next_callee;
1603 else
1604 {
1605 do
1ce18dc8 1606 {
9b8051b4
JH
1607 if (e->caller == node)
1608 return;
7c6f2fb9
JH
1609 if (e->caller == update_since)
1610 check_inlinability = false;
9b8051b4 1611 e = e->caller->callers;
1ce18dc8 1612 }
9b8051b4
JH
1613 while (!e->next_callee);
1614 e = e->next_callee;
670cd5c5 1615 }
670cd5c5
JH
1616 }
1617}
1618
670cd5c5 1619/* Enqueue all recursive calls from NODE into the priority queue, ordered
0fa2e4df 1620 by how desirable it is to recursively inline each call. */
670cd5c5 1621
ca31b95f
JH
1622static void
1623lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
4a910049 1624 edge_heap_t *heap)
ca31b95f
JH
1625{
1626 struct cgraph_edge *e;
a5b1779f
JH
1627 enum availability avail;
1628
ca31b95f 1629 for (e = where->callees; e; e = e->next_callee)
a5b1779f 1630 if (e->callee == node
e6007a27 1631 || (e->callee->ultimate_alias_target (&avail, e->caller) == node
d52f5295 1632 && avail > AVAIL_INTERPOSABLE))
0009a6c3 1633 heap->insert (-e->sreal_frequency (), e);
ca31b95f
JH
1634 for (e = where->callees; e; e = e->next_callee)
1635 if (!e->inline_failed)
670cd5c5 1636 lookup_recursive_calls (node, e->callee, heap);
ca31b95f
JH
1637}
1638
1639/* Decide on recursive inlining: in case the function has recursive calls,
3e293154 1640 inline until body size reaches given argument. If any new indirect edges
e56f5f3e
JJ
1641 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
1642 is NULL. */
670cd5c5
JH
1643
1644static bool
4c0f7679 1645recursive_inlining (struct cgraph_edge *edge,
d52f5295 1646 vec<cgraph_edge *> *new_edges)
ca31b95f 1647{
1e83bd70
JH
1648 cgraph_node *to = (edge->caller->inlined_to
1649 ? edge->caller->inlined_to : edge->caller);
1650 int limit = opt_for_fn (to->decl,
1651 param_max_inline_insns_recursive_auto);
d1704358 1652 edge_heap_t heap (sreal::min ());
d7d1d041 1653 struct cgraph_node *node;
ca31b95f 1654 struct cgraph_edge *e;
4c0f7679 1655 struct cgraph_node *master_clone = NULL, *next;
ca31b95f
JH
1656 int depth = 0;
1657 int n = 0;
1658
d7d1d041 1659 node = edge->caller;
a62bfab5
ML
1660 if (node->inlined_to)
1661 node = node->inlined_to;
d7d1d041 1662
67348ccc 1663 if (DECL_DECLARED_INLINE_P (node->decl))
1e83bd70 1664 limit = opt_for_fn (to->decl, param_max_inline_insns_recursive);
ca31b95f
JH
1665
1666 /* Make sure that function is small enough to be considered for inlining. */
4c0f7679 1667 if (estimate_size_after_inlining (node, edge) >= limit)
670cd5c5 1668 return false;
4a910049
ML
1669 lookup_recursive_calls (node, node, &heap);
1670 if (heap.empty ())
1671 return false;
ca31b95f
JH
1672
1673 if (dump_file)
b8698a0f 1674 fprintf (dump_file,
3629ff8a 1675 " Performing recursive inlining on %s\n", node->dump_name ());
ca31b95f 1676
ca31b95f 1677 /* Do the inlining and update list of recursive call during process. */
4a910049 1678 while (!heap.empty ())
ca31b95f 1679 {
4a910049 1680 struct cgraph_edge *curr = heap.extract_min ();
6ced940d 1681 struct cgraph_node *cnode, *dest = curr->callee;
d7d1d041 1682
9a4841a3 1683 if (!can_inline_edge_p (curr, true)
4eb50396 1684 || !can_inline_edge_by_limits_p (curr, true))
4c0f7679
JH
1685 continue;
1686
6ced940d
JH
1687 /* MASTER_CLONE is produced in the case we already started modifying
1688 the function. Be sure to redirect the edge to the original body before
1689 estimating growths; otherwise we will be seeing growths after inlining
1690 the already modified body. */
1691 if (master_clone)
1692 {
3dafb85c 1693 curr->redirect_callee (master_clone);
9fb50ad8
ML
1694 if (edge_growth_cache != NULL)
1695 edge_growth_cache->remove (curr);
6ced940d
JH
1696 }
1697
1698 if (estimate_size_after_inlining (node, curr) > limit)
1699 {
3dafb85c 1700 curr->redirect_callee (dest);
9fb50ad8
ML
1701 if (edge_growth_cache != NULL)
1702 edge_growth_cache->remove (curr);
6ced940d
JH
1703 break;
1704 }
1705
c5a4444c
JH
1706 depth = 1;
1707 for (cnode = curr->caller;
a62bfab5 1708 cnode->inlined_to; cnode = cnode->callers->caller)
67348ccc 1709 if (node->decl
d52f5295 1710 == curr->callee->ultimate_alias_target ()->decl)
f791d333 1711 depth++;
c5a4444c 1712
4c0f7679 1713 if (!want_inline_self_recursive_call_p (curr, node, false, depth))
6ced940d 1714 {
3dafb85c 1715 curr->redirect_callee (dest);
9fb50ad8
ML
1716 if (edge_growth_cache != NULL)
1717 edge_growth_cache->remove (curr);
6ced940d
JH
1718 continue;
1719 }
ca31b95f 1720
670cd5c5 1721 if (dump_file)
c5a4444c 1722 {
b8698a0f 1723 fprintf (dump_file,
c5a4444c 1724 " Inlining call of depth %i", depth);
ae94bb0e 1725 if (node->count.nonzero_p () && curr->count.initialized_p ())
c5a4444c
JH
1726 {
1727 fprintf (dump_file, " called approx. %.2f times per call",
3995f3a2
JH
1728 (double)curr->count.to_gcov_type ()
1729 / node->count.to_gcov_type ());
c5a4444c
JH
1730 }
1731 fprintf (dump_file, "\n");
1732 }
4c0f7679
JH
1733 if (!master_clone)
1734 {
1735 /* We need original clone to copy around. */
d52f5295 1736 master_clone = node->create_clone (node->decl, node->count,
1bad9c18 1737 false, vNULL, true, NULL, NULL);
4c0f7679
JH
1738 for (e = master_clone->callees; e; e = e->next_callee)
1739 if (!e->inline_failed)
1bad9c18 1740 clone_inlined_nodes (e, true, false, NULL);
3dafb85c 1741 curr->redirect_callee (master_clone);
9fb50ad8
ML
1742 if (edge_growth_cache != NULL)
1743 edge_growth_cache->remove (curr);
4c0f7679
JH
1744 }
1745
c170d40f 1746 inline_call (curr, false, new_edges, &overall_size, true);
b914768c 1747 reset_node_cache (node);
4a910049 1748 lookup_recursive_calls (node, curr->callee, &heap);
ca31b95f
JH
1749 n++;
1750 }
4c0f7679 1751
4a910049 1752 if (!heap.empty () && dump_file)
c5a4444c 1753 fprintf (dump_file, " Recursive inlining growth limit met.\n");
4c0f7679
JH
1754
1755 if (!master_clone)
1756 return false;
1757
4174a33a
DM
1758 if (dump_enabled_p ())
1759 dump_printf_loc (MSG_NOTE, edge->call_stmt,
1760 "\n Inlined %i times, "
1761 "body grown from size %i to %i, time %f to %f\n", n,
f658ad30
JH
1762 ipa_size_summaries->get (master_clone)->size,
1763 ipa_size_summaries->get (node)->size,
4174a33a
DM
1764 ipa_fn_summaries->get (master_clone)->time.to_double (),
1765 ipa_fn_summaries->get (node)->time.to_double ());
ca31b95f
JH
1766
1767 /* Remove the master clone we used for inlining. We rely on the fact
1768 that clones inlined into the master clone get queued just before the
1769 master clone, so we don't need recursion. */
3dafb85c 1770 for (node = symtab->first_function (); node != master_clone;
96fc428c
JH
1771 node = next)
1772 {
3dafb85c 1773 next = symtab->next_function (node);
a62bfab5 1774 if (node->inlined_to == master_clone)
d52f5295 1775 node->remove ();
96fc428c 1776 }
d52f5295 1777 master_clone->remove ();
4c0f7679 1778 return true;
ca31b95f
JH
1779}
1780
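/* Worked example with illustrative numbers (not the actual parameter
   defaults): suppose the self-recursive function has an estimated size of
   60 and the recursive-inlining limit is 450.  Each edge accepted from the
   heap splices another copy of the body into the master clone, so after k
   copies the estimated size is roughly 60 * (k + 1); the
   estimate_size_after_inlining check above therefore stops the process
   after about six copies, which is the loop-unrolling-like effect this
   function aims for.  */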
09a2806f 1781
88512ba0 1782/* Given a whole-compilation-unit estimate of INSNS, compute how large we can
b7c27d51 1783 allow the unit to grow. */
09a2806f 1784
8fcfc44f 1785static int64_t
1e83bd70 1786compute_max_insns (cgraph_node *node, int insns)
b7c27d51
JH
1787{
1788 int max_insns = insns;
1e83bd70
JH
1789 if (max_insns < opt_for_fn (node->decl, param_large_unit_insns))
1790 max_insns = opt_for_fn (node->decl, param_large_unit_insns);
b7c27d51 1791
a9243bfc 1792 return ((int64_t) max_insns
1e83bd70 1793 * (100 + opt_for_fn (node->decl, param_inline_unit_growth)) / 100);
b7c27d51
JH
1794}
1795
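/* Worked example with illustrative numbers (not necessarily the parameter
   defaults): with INSNS = 12000, param_large_unit_insns = 10000 and
   param_inline_unit_growth = 40, the cap is
   12000 * (100 + 40) / 100 = 16800 instructions.  For a small unit with
   INSNS = 500 the base is first raised to param_large_unit_insns, giving
   10000 * 140 / 100 = 14000, so tiny units are not over-constrained.  */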
09a2806f 1796
3e293154 1797/* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
09a2806f 1798
3e293154 1799static void
00dcc88a 1800add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> &new_edges)
3e293154 1801{
9771b263 1802 while (new_edges.length () > 0)
3e293154 1803 {
9771b263 1804 struct cgraph_edge *edge = new_edges.pop ();
3e293154
MJ
1805
1806 gcc_assert (!edge->aux);
4517b378 1807 gcc_assert (edge->callee);
a5b1779f 1808 if (edge->inline_failed
4c0f7679 1809 && can_inline_edge_p (edge, true)
9a4841a3
JH
1810 && want_inline_small_function_p (edge, true)
1811 && can_inline_edge_by_limits_p (edge, true))
4a910049 1812 edge->aux = heap->insert (edge_badness (edge, false), edge);
3e293154
MJ
1813 }
1814}
1815
042ae7d2
JH
1816/* Remove EDGE from the fibheap. */
1817
1818static void
1819heap_edge_removal_hook (struct cgraph_edge *e, void *data)
1820{
1821 if (e->aux)
1822 {
4a910049 1823 ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux);
042ae7d2
JH
1824 e->aux = NULL;
1825 }
1826}
3e293154 1827
09ce3660
JH
1828/* Return true if speculation of edge E seems useful.
1829 If ANTICIPATE_INLINING is true, be conservative and hope that E
1830 may get inlined. */
1831
1832bool
1833speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
1834{
4517b378
MJ
1835 /* If we have already decided to inline the edge, it seems useful. */
1836 if (!e->inline_failed)
1837 return true;
1838
09ce3660 1839 enum availability avail;
e6007a27
JH
1840 struct cgraph_node *target = e->callee->ultimate_alias_target (&avail,
1841 e->caller);
09ce3660
JH
1842
1843 gcc_assert (e->speculative && !e->indirect_unknown_callee);
1844
3dafb85c 1845 if (!e->maybe_hot_p ())
09ce3660
JH
1846 return false;
1847
1848 /* See if IP optimizations found something potentially useful about the
1849 function. For now we look only for CONST/PURE flags. Almost everything
1850 else we propagate is useless. */
1851 if (avail >= AVAIL_AVAILABLE)
1852 {
67348ccc 1853 int ecf_flags = flags_from_decl_or_type (target->decl);
09ce3660
JH
1854 if (ecf_flags & ECF_CONST)
1855 {
845bb366
JH
1856 if (!(e->speculative_call_indirect_edge ()->indirect_info
1857 ->ecf_flags & ECF_CONST))
09ce3660
JH
1858 return true;
1859 }
1860 else if (ecf_flags & ECF_PURE)
1861 {
845bb366
JH
1862 if (!(e->speculative_call_indirect_edge ()->indirect_info
1863 ->ecf_flags & ECF_PURE))
09ce3660
JH
1864 return true;
1865 }
1866 }
1867 /* If we did not manage to inline the function nor redirect
1868 to an ipa-cp clone (these are recognized by the local flag being set),
1869 it is probably pointless to inline it unless the hardware is missing
1870 an indirect call predictor. */
87f94429 1871 if (!anticipate_inlining && !target->local)
09ce3660
JH
1872 return false;
1873 /* For overwritable targets there is not much to do. */
4517b378
MJ
1874 if (!can_inline_edge_p (e, false)
1875 || !can_inline_edge_by_limits_p (e, false, true))
09ce3660
JH
1876 return false;
1877 /* OK, speculation seems interesting. */
1878 return true;
1879}
1880
1881/* We know that EDGE is not going to be inlined.
1882 See if we can remove speculation. */
1883
1884static void
4a910049 1885resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
09ce3660
JH
1886{
1887 if (edge->speculative && !speculation_useful_p (edge, false))
1888 {
1889 struct cgraph_node *node = edge->caller;
a62bfab5
ML
1890 struct cgraph_node *where = node->inlined_to
1891 ? node->inlined_to : node;
0e3de1d4 1892 auto_bitmap updated_nodes;
09ce3660 1893
1bad9c18
JH
1894 if (edge->count.ipa ().initialized_p ())
1895 spec_rem += edge->count.ipa ();
27c5a177 1896 cgraph_edge::resolve_speculation (edge);
09ce3660 1897 reset_edge_caches (where);
0bceb671 1898 ipa_update_overall_fn_summary (where);
09ce3660
JH
1899 update_caller_keys (edge_heap, where,
1900 updated_nodes, NULL);
7c6f2fb9 1901 update_callee_keys (edge_heap, where, NULL,
d0b66480 1902 updated_nodes);
09ce3660
JH
1903 }
1904}
1905
bb1e543c
JH
1906/* Return true if NODE should be accounted for in the overall size estimate.
1907 Skip all nodes optimized for size so we can measure the growth of the hot
1908 part of the program regardless of the padding. */
1909
1910bool
1911inline_account_function_p (struct cgraph_node *node)
1912{
1913 return (!DECL_EXTERNAL (node->decl)
1914 && !opt_for_fn (node->decl, optimize_size)
1915 && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED);
1916}
1917
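/* Usage note (taken from inline_small_functions later in this file): this
   predicate gates which functions contribute to the initial unit size,
   e.g.
     if (inline_account_function_p (node))
       initial_size += ipa_size_summaries->get (node)->size;  */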
41f669d8
JH
1918/* Count the number of callers of NODE and store it into DATA (which
1919 points to an int). Worker for cgraph_for_node_and_aliases. */
1920
1921static bool
1922sum_callers (struct cgraph_node *node, void *data)
1923{
1924 struct cgraph_edge *e;
1925 int *num_calls = (int *)data;
1926
1927 for (e = node->callers; e; e = e->next_caller)
1928 (*num_calls)++;
1929 return false;
1930}
1931
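/* Usage note (taken from inline_small_functions below): the worker is
   passed to call_for_symbol_and_aliases so that calls to aliases are
   counted against the target as well, e.g.
     int num_calls = 0;
     node->call_for_symbol_and_aliases (sum_callers, &num_calls, true);
     if (num_calls == 1)
       info->single_caller = true;  */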
97e59627
ML
1932/* We only propagate across edges with non-interposable callee. */
1933
1934inline bool
1935ignore_edge_p (struct cgraph_edge *e)
1936{
1937 enum availability avail;
1938 e->callee->function_or_virtual_thunk_symbol (&avail, e->caller);
1939 return (avail <= AVAIL_INTERPOSABLE);
1940}
1941
ca31b95f 1942/* We use a greedy algorithm for inlining of small functions:
09a2806f
JH
1943 All inline candidates are put into a prioritized heap, ordered by
1944 increasing badness.
ca31b95f 1945
09a2806f 1946 The inlining of small functions is bounded by unit growth parameters. */
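/* Illustrative sketch, not part of GCC: the core shape of the greedy loop
   in inline_small_functions below, written as pseudocode with hypothetical
   helpers.  Candidates are popped in badness order, stale keys are lazily
   re-inserted, and a unit-growth cap bounds the total amount of inlining:

     while (!heap.empty ())
       {
	 candidate c = heap.extract_min ();
	 sreal now = recompute_badness (c);
	 if (now != c.key)
	   {
	     heap.insert (now, c);	// key was stale; retry later
	     continue;
	   }
	 if (overall_size + growth_of (c) > max_unit_size ())
	   continue;			// unit growth limit reached
	 inline_candidate (c);		// updates overall_size
	 update_affected_keys (heap, c);// caller/callee badness changed
       }
*/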
ca31b95f
JH
1947
1948static void
4c0f7679 1949inline_small_functions (void)
ca31b95f
JH
1950{
1951 struct cgraph_node *node;
670cd5c5 1952 struct cgraph_edge *edge;
f0e1509b 1953 edge_heap_t edge_heap (sreal::min ());
0e3de1d4 1954 auto_bitmap updated_nodes;
1e83bd70 1955 int min_size;
d52f5295 1956 auto_vec<cgraph_edge *> new_indirect_edges;
09a2806f 1957 int initial_size = 0;
3dafb85c 1958 struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
042ae7d2 1959 struct cgraph_edge_hook_list *edge_removal_hook_holder;
2bf86c84 1960 new_indirect_edges.create (8);
670cd5c5 1961
042ae7d2 1962 edge_removal_hook_holder
4a910049 1963 = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap);
042ae7d2 1964
1a3118e9
JH
1965 /* Compute overall unit size and other global parameters used by badness
1966 metrics. */
ca31b95f 1967
3995f3a2 1968 max_count = profile_count::uninitialized ();
97e59627 1969 ipa_reduced_postorder (order, true, ignore_edge_p);
68cc8feb 1970 free (order);
1a3118e9 1971
c47d0034 1972 FOR_EACH_DEFINED_FUNCTION (node)
a62bfab5 1973 if (!node->inlined_to)
e7f23018 1974 {
bb1e543c 1975 if (!node->alias && node->analyzed
67f3791f 1976 && (node->has_gimple_body_p () || node->thunk)
29f1e2b1 1977 && opt_for_fn (node->decl, optimize))
a5b1779f 1978 {
99b1c316 1979 class ipa_fn_summary *info = ipa_fn_summaries->get (node);
67348ccc 1980 struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
ca31b95f 1981
5e750dc6
JH
1982 /* Do not account external functions, they will be optimized out
1983 if not inlined. Also only count the non-cold portion of the program. */
bb1e543c 1984 if (inline_account_function_p (node))
f658ad30 1985 initial_size += ipa_size_summaries->get (node)->size;
d59171da 1986 info->growth = estimate_growth (node);
41f669d8
JH
1987
1988 int num_calls = 0;
1989 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
1990 true);
1991 if (num_calls == 1)
1992 info->single_caller = true;
bf3f6510
JH
1993 if (dfs && dfs->next_cycle)
1994 {
1995 struct cgraph_node *n2;
1996 int id = dfs->scc_no + 1;
1997 for (n2 = node; n2;
1a03b929 1998 n2 = ((struct ipa_dfs_info *) n2->aux)->next_cycle)
29f1e2b1
JH
1999 if (opt_for_fn (n2->decl, optimize))
2000 {
f658ad30 2001 ipa_fn_summary *info2 = ipa_fn_summaries->get
a62bfab5 2002 (n2->inlined_to ? n2->inlined_to : n2);
29f1e2b1
JH
2003 if (info2->scc_no)
2004 break;
2005 info2->scc_no = id;
2006 }
bf3f6510 2007 }
a5b1779f 2008 }
09a2806f 2009
e7f23018 2010 for (edge = node->callers; edge; edge = edge->next_caller)
1bad9c18 2011 max_count = max_count.max (edge->count.ipa ());
e7f23018 2012 }
b48ccf0d 2013 ipa_free_postorder_info ();
ac6f2e59 2014 initialize_growth_caches ();
b48ccf0d
JH
2015
2016 if (dump_file)
2017 fprintf (dump_file,
2018 "\nDeciding on inlining of small functions. Starting with size %i.\n",
2019 initial_size);
b7c27d51 2020
8a8dccb2 2021 overall_size = initial_size;
85057983 2022 min_size = overall_size;
1a3118e9 2023
a99be3c9 2024 /* Populate the heap with all edges we might inline. */
1a3118e9 2025
c47d0034 2026 FOR_EACH_DEFINED_FUNCTION (node)
09ce3660
JH
2027 {
2028 bool update = false;
bcda57c1 2029 struct cgraph_edge *next = NULL;
c1eed5a1 2030 bool has_speculative = false;
1a3118e9 2031
c9521268
RB
2032 if (!opt_for_fn (node->decl, optimize)
2033 /* With -Og we do not want to perform IPA inlining of small
2034 functions since there are no scalar cleanups after it
2035 that would realize the anticipated win. All abstraction
2036 is removed during early inlining. */
2037 || opt_for_fn (node->decl, optimize_debug))
29f1e2b1
JH
2038 continue;
2039
09ce3660 2040 if (dump_file)
464d0118 2041 fprintf (dump_file, "Enqueueing calls in %s.\n", node->dump_name ());
09ce3660 2042
8fcfc44f 2043 for (edge = node->callees; edge; edge = edge->next_callee)
09ce3660 2044 {
1a3118e9 2045 if (edge->inline_failed
09ce3660 2046 && !edge->aux
1a3118e9
JH
2047 && can_inline_edge_p (edge, true)
2048 && want_inline_small_function_p (edge, true)
9a4841a3 2049 && can_inline_edge_by_limits_p (edge, true)
1a3118e9
JH
2050 && edge->inline_failed)
2051 {
2052 gcc_assert (!edge->aux);
4a910049 2053 update_edge_key (&edge_heap, edge);
1a3118e9 2054 }
c1eed5a1
JH
2055 if (edge->speculative)
2056 has_speculative = true;
2057 }
2058 if (has_speculative)
2059 for (edge = node->callees; edge; edge = next)
2aae99f7 2060 {
2e98ac86 2061 next = edge->next_callee;
2aae99f7
XHL
2062 if (edge->speculative
2063 && !speculation_useful_p (edge, edge->aux != NULL))
2064 {
27c5a177 2065 cgraph_edge::resolve_speculation (edge);
2aae99f7
XHL
2066 update = true;
2067 }
2aae99f7 2068 }
09ce3660
JH
2069 if (update)
2070 {
a62bfab5
ML
2071 struct cgraph_node *where = node->inlined_to
2072 ? node->inlined_to : node;
0bceb671 2073 ipa_update_overall_fn_summary (where);
09ce3660 2074 reset_edge_caches (where);
4a910049 2075 update_caller_keys (&edge_heap, where,
09ce3660 2076 updated_nodes, NULL);
7c6f2fb9 2077 update_callee_keys (&edge_heap, where, NULL,
2001028a 2078 updated_nodes);
09ce3660
JH
2079 bitmap_clear (updated_nodes);
2080 }
2081 }
1a3118e9 2082
09a2806f 2083 gcc_assert (in_lto_p
3995f3a2 2084 || !(max_count > 0)
09a2806f 2085 || (profile_info && flag_branch_probabilities));
b7c27d51 2086
4a910049 2087 while (!edge_heap.empty ())
ca31b95f 2088 {
85057983 2089 int old_size = overall_size;
1ce18dc8 2090 struct cgraph_node *where, *callee;
f0e1509b
ML
2091 sreal badness = edge_heap.min_key ();
2092 sreal current_badness;
1ce18dc8 2093 int growth;
670cd5c5 2094
4a910049 2095 edge = edge_heap.extract_min ();
1ce18dc8
JH
2096 gcc_assert (edge->aux);
2097 edge->aux = NULL;
9de6f6c3 2098 if (!edge->inline_failed || !edge->callee->analyzed)
1ce18dc8 2099 continue;
cdc029b9 2100
1bad9c18
JH
2101 /* Be sure that caches are maintained consistently.
2102 This check is affected by scaling roundoff errors when compiling for
2103 IPA, thus we skip it in that case. */
b914768c 2104 if (flag_checking && !edge->callee->count.ipa_p ()
517048ce 2105 && (!max_count.initialized_p () || !max_count.nonzero_p ()))
1bad9c18
JH
2106 {
2107 sreal cached_badness = edge_badness (edge, false);
2108
2109 int old_size_est = estimate_edge_size (edge);
2110 sreal old_time_est = estimate_edge_time (edge);
2111 int old_hints_est = estimate_edge_hints (edge);
2112
9fb50ad8
ML
2113 if (edge_growth_cache != NULL)
2114 edge_growth_cache->remove (edge);
b914768c
JH
2115 reset_node_cache (edge->caller->inlined_to
2116 ? edge->caller->inlined_to
2117 : edge->caller);
1bad9c18
JH
2118 gcc_assert (old_size_est == estimate_edge_size (edge));
2119 gcc_assert (old_time_est == estimate_edge_time (edge));
2120 /* FIXME:
2121
2122 gcc_assert (old_hints_est == estimate_edge_hints (edge));
2123
2124 fails with profile feedback because some hints depend on the
2125 maybe_hot_edge_p predicate, and because the callee gets inlined into
2126 other calls, the edge may become cold.
2127 This ought to be fixed by computing relative probabilities
2128 for the given invocation, but that will be better done once the whole
2129 code is converted to sreals. Disable for now and revert to the "wrong"
2130 value so the enable/disable checking paths agree. */
9fb50ad8 2131 edge_growth_cache->get (edge)->hints = old_hints_est + 1;
1bad9c18
JH
2132
2133 /* When updating the edge costs, we only decrease badness in the keys.
956d615d 2134 Increases of badness are handled lazily; when we see a key with an out
1bad9c18
JH
2135 of date value on it, we re-insert it now. */
2136 current_badness = edge_badness (edge, false);
2137 gcc_assert (cached_badness == current_badness);
2138 gcc_assert (current_badness >= badness);
2139 }
f4118c87
JH
2140 else
2141 current_badness = edge_badness (edge, false);
cdc029b9
JH
2142 if (current_badness != badness)
2143 {
d75de25b 2144 if (edge_heap.min () && current_badness > edge_heap.min_key ())
6d4ab5f8
JH
2145 {
2146 edge->aux = edge_heap.insert (current_badness, edge);
2147 continue;
2148 }
2149 else
2150 badness = current_badness;
cdc029b9 2151 }
4c0f7679 2152
9a4841a3
JH
2153 if (!can_inline_edge_p (edge, true)
2154 || !can_inline_edge_by_limits_p (edge, true))
09ce3660 2155 {
4a910049 2156 resolve_noninline_speculation (&edge_heap, edge);
09ce3660
JH
2157 continue;
2158 }
cdc029b9 2159
d52f5295 2160 callee = edge->callee->ultimate_alias_target ();
03dfc36d 2161 growth = estimate_edge_growth (edge);
ca31b95f 2162 if (dump_file)
ca31b95f 2163 {
b8698a0f 2164 fprintf (dump_file,
464d0118
ML
2165 "\nConsidering %s with %i size\n",
2166 callee->dump_name (),
f658ad30 2167 ipa_size_summaries->get (callee)->size);
b8698a0f 2168 fprintf (dump_file,
464d0118 2169 " to be inlined into %s in %s:%i\n"
6d4ab5f8 2170 " Estimated badness is %f, frequency %.2f.\n",
464d0118 2171 edge->caller->dump_name (),
9e145afd 2172 edge->call_stmt
355fe088 2173 && (LOCATION_LOCUS (gimple_location ((const gimple *)
1b7706c8
JJ
2174 edge->call_stmt))
2175 > BUILTINS_LOCATION)
355fe088 2176 ? gimple_filename ((const gimple *) edge->call_stmt)
9e145afd
N
2177 : "unknown",
2178 edge->call_stmt
355fe088 2179 ? gimple_lineno ((const gimple *) edge->call_stmt)
9e145afd 2180 : -1,
6d4ab5f8 2181 badness.to_double (),
0cea1d34 2182 edge->sreal_frequency ().to_double ());
1bad9c18 2183 if (edge->count.ipa ().initialized_p ())
3995f3a2
JH
2184 {
2185 fprintf (dump_file, " Called ");
1bad9c18 2186 edge->count.ipa ().dump (dump_file);
517048ce 2187 fprintf (dump_file, " times\n");
3995f3a2 2188 }
1ce18dc8 2189 if (dump_flags & TDF_DETAILS)
4c0f7679 2190 edge_badness (edge, true);
ca31b95f
JH
2191 }
2192
1e83bd70
JH
2193 where = edge->caller;
2194
2195 if (overall_size + growth > compute_max_insns (where, min_size)
67348ccc 2196 && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
670cd5c5 2197 {
4c0f7679
JH
2198 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
2199 report_inline_failed_reason (edge);
4a910049 2200 resolve_noninline_speculation (&edge_heap, edge);
670cd5c5
JH
2201 continue;
2202 }
4c0f7679
JH
2203
2204 if (!want_inline_small_function_p (edge, true))
09ce3660 2205 {
4a910049 2206 resolve_noninline_speculation (&edge_heap, edge);
09ce3660
JH
2207 continue;
2208 }
09a2806f 2209
7c6f2fb9
JH
2210 profile_count old_count = callee->count;
2211
7ba03e5e
JL
2212 /* Heuristics for inlining small functions work poorly for
2213 recursive calls, where we achieve effects similar to loop unrolling.
2214 When inlining such an edge seems profitable, leave the decision to
2215 the specific recursive inliner. */
3dafb85c 2216 if (edge->recursive_p ())
670cd5c5 2217 {
a62bfab5
ML
2218 if (where->inlined_to)
2219 where = where->inlined_to;
4c0f7679 2220 if (!recursive_inlining (edge,
2bf86c84
JH
2221 opt_for_fn (edge->caller->decl,
2222 flag_indirect_inlining)
4c0f7679 2223 ? &new_indirect_edges : NULL))
d7d1d041
RG
2224 {
2225 edge->inline_failed = CIF_RECURSIVE_INLINING;
4a910049 2226 resolve_noninline_speculation (&edge_heap, edge);
d7d1d041
RG
2227 continue;
2228 }
40fda55b 2229 reset_edge_caches (where);
09a2806f
JH
2230 /* Recursive inliner inlines all recursive calls of the function
2231 at once. Consequently we need to update all callee keys. */
2bf86c84 2232 if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
4a910049 2233 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
7c6f2fb9 2234 update_callee_keys (&edge_heap, where, where, updated_nodes);
09ce3660 2235 bitmap_clear (updated_nodes);
670cd5c5
JH
2236 }
2237 else
2238 {
4c0f7679
JH
2239 struct cgraph_node *outer_node = NULL;
2240 int depth = 0;
2241
7ba03e5e
JL
2242 /* Consider the case where self recursive function A is inlined
2243 into B. This is a desired optimization in some cases, since it
2244 leads to an effect similar to loop peeling and we might completely
2245 optimize out the recursive call. However we must be extra
2246 selective. */
4c0f7679
JH
2247
2248 where = edge->caller;
a62bfab5 2249 while (where->inlined_to)
670cd5c5 2250 {
67348ccc 2251 if (where->decl == callee->decl)
4c0f7679
JH
2252 outer_node = where, depth++;
2253 where = where->callers->caller;
2254 }
2255 if (outer_node
2256 && !want_inline_self_recursive_call_p (edge, outer_node,
2257 true, depth))
2258 {
2259 edge->inline_failed
67348ccc 2260 = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
4c0f7679 2261 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
4a910049 2262 resolve_noninline_speculation (&edge_heap, edge);
670cd5c5
JH
2263 continue;
2264 }
4c0f7679
JH
2265 else if (depth && dump_file)
2266 fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
2267
a62bfab5 2268 gcc_checking_assert (!callee->inlined_to);
041cb615
JH
2269
2270 int old_size = ipa_size_summaries->get (where)->size;
2271 sreal old_time = ipa_fn_summaries->get (where)->time;
2272
c170d40f 2273 inline_call (edge, true, &new_indirect_edges, &overall_size, true);
1f6f9079 2274 reset_edge_caches (edge->callee);
8d890d37 2275 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
40fda55b 2276
041cb615 2277 /* If the caller's size and time increased, we do not need to update
956d615d 2278 all edges because badness is not going to decrease. */
041cb615
JH
2279 if (old_size <= ipa_size_summaries->get (where)->size
2280 && old_time <= ipa_fn_summaries->get (where)->time
2281 /* Wrapper penalty may be non-monotonous in this respect.
2282 Fortunately it only affects small functions. */
2283 && !wrapper_heuristics_may_apply (where, old_size))
7c6f2fb9
JH
2284 update_callee_keys (&edge_heap, edge->callee, edge->callee,
2285 updated_nodes);
041cb615 2286 else
7c6f2fb9
JH
2287 update_callee_keys (&edge_heap, where,
2288 edge->callee,
2289 updated_nodes);
670cd5c5
JH
2290 }
2291 where = edge->caller;
a62bfab5
ML
2292 if (where->inlined_to)
2293 where = where->inlined_to;
670cd5c5
JH
2294
2295 /* Our profitability metric can depend on local properties
2296 such as the number of inlinable calls and the size of the function
2297 body. After inlining these properties might change for the function
2298 we inlined into (since its body size changed) and for the functions
2299 called by the function we inlined (since the number of their
2300 inlinable callers might change). */
4a910049 2301 update_caller_keys (&edge_heap, where, updated_nodes, NULL);
208e5afa
JH
2302 /* Offline copy count has possibly changed, recompute if profile is
2303 available. */
7c6f2fb9
JH
2304 struct cgraph_node *n
2305 = cgraph_node::get (edge->callee->decl)->ultimate_alias_target ();
2306 if (n != edge->callee && n->analyzed && !(n->count == old_count)
2307 && n->count.ipa_p ())
2308 update_callee_keys (&edge_heap, n, NULL, updated_nodes);
670cd5c5 2309 bitmap_clear (updated_nodes);
ca31b95f 2310
4174a33a 2311 if (dump_enabled_p ())
50fe876d 2312 {
f658ad30 2313 ipa_fn_summary *s = ipa_fn_summaries->get (where);
4174a33a
DM
2314
2315 /* dump_printf can't handle %+i. */
2316 char buf_net_change[100];
2317 snprintf (buf_net_change, sizeof buf_net_change, "%+i",
2318 overall_size - old_size);
2319
2320 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, edge->call_stmt,
2321 " Inlined %C into %C which now has time %f and "
b74d8dc4 2322 "size %i, net change of %s%s.\n",
4174a33a 2323 edge->callee, edge->caller,
f658ad30
JH
2324 s->time.to_double (),
2325 ipa_size_summaries->get (edge->caller)->size,
b74d8dc4
JH
2326 buf_net_change,
2327 cross_module_call_p (edge) ? " (cross module)":"");
50fe876d 2328 }
85057983 2329 if (min_size > overall_size)
b7c27d51 2330 {
85057983 2331 min_size = overall_size;
b7c27d51
JH
2332
2333 if (dump_file)
85057983 2334 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
b7c27d51 2335 }
ca31b95f 2336 }
3e293154 2337
632b4f8e 2338 free_growth_caches ();
4174a33a
DM
2339 if (dump_enabled_p ())
2340 dump_printf (MSG_NOTE,
2341 "Unit growth for small function inlining: %i->%i (%i%%)\n",
2342 initial_size, overall_size,
2343 initial_size ? overall_size * 100 / (initial_size) - 100: 0);
3dafb85c 2344 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
ca31b95f
JH
2345}
2346
09a2806f
JH
2347/* Flatten NODE. Performed both during early inlining and
2348 at IPA inlining time. */
af961c7f
RG
2349
2350static void
f6e809c8 2351flatten_function (struct cgraph_node *node, bool early, bool update)
af961c7f
RG
2352{
2353 struct cgraph_edge *e;
2354
2355 /* We shouldn't be called recursively when we are being processed. */
67348ccc 2356 gcc_assert (node->aux == NULL);
af961c7f 2357
67348ccc 2358 node->aux = (void *) node;
af961c7f
RG
2359
2360 for (e = node->callees; e; e = e->next_callee)
2361 {
2362 struct cgraph_node *orig_callee;
d52f5295 2363 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
af961c7f 2364
af961c7f 2365 /* We've hit cycle? It is time to give up. */
67348ccc 2366 if (callee->aux)
af961c7f 2367 {
4174a33a
DM
2368 if (dump_enabled_p ())
2369 dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
2370 "Not inlining %C into %C to avoid cycle.\n",
2371 callee, e->caller);
a99670f9
JH
2372 if (cgraph_inline_failed_type (e->inline_failed) != CIF_FINAL_ERROR)
2373 e->inline_failed = CIF_RECURSIVE_INLINING;
af961c7f
RG
2374 continue;
2375 }
2376
2377 /* When the edge is already inlined, we just need to recurse into
2378 it in order to fully flatten the leaves. */
2379 if (!e->inline_failed)
2380 {
f6e809c8 2381 flatten_function (callee, early, false);
af961c7f
RG
2382 continue;
2383 }
2384
4c0f7679
JH
2385 /* Flatten attribute needs to be processed during late inlining. For
2386 extra code quality we however do flattening during early optimization,
2387 too. */
632b4f8e 2388 if (!early
4c0f7679 2389 ? !can_inline_edge_p (e, true)
9a4841a3 2390 && !can_inline_edge_by_limits_p (e, true)
4c0f7679
JH
2391 : !can_early_inline_edge_p (e))
2392 continue;
2393
3dafb85c 2394 if (e->recursive_p ())
af961c7f 2395 {
4174a33a
DM
2396 if (dump_enabled_p ())
2397 dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
2398 "Not inlining: recursive call.\n");
af961c7f
RG
2399 continue;
2400 }
2401
67348ccc
DM
2402 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
2403 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
59e0c6b7 2404 {
4174a33a
DM
2405 if (dump_enabled_p ())
2406 dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
2407 "Not inlining: SSA form does not match.\n");
59e0c6b7
RG
2408 continue;
2409 }
2410
af961c7f
RG
2411 /* Inline the edge and flatten the inline clone. Avoid
2412 recursing through the original node if the node was cloned. */
4174a33a
DM
2413 if (dump_enabled_p ())
2414 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt,
2415 " Inlining %C into %C.\n",
2416 callee, e->caller);
a5b1779f 2417 orig_callee = callee;
c170d40f 2418 inline_call (e, true, NULL, NULL, false);
af961c7f 2419 if (e->callee != orig_callee)
67348ccc 2420 orig_callee->aux = (void *) node;
f6e809c8 2421 flatten_function (e->callee, early, false);
af961c7f 2422 if (e->callee != orig_callee)
67348ccc 2423 orig_callee->aux = NULL;
af961c7f
RG
2424 }
2425
67348ccc 2426 node->aux = NULL;
7237f93e
JH
2427 cgraph_node *where = node->inlined_to ? node->inlined_to : node;
2428 if (update && opt_for_fn (where->decl, optimize))
2429 ipa_update_overall_fn_summary (where);
af961c7f
RG
2430}
2431
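/* Illustrative sketch, not part of GCC: flatten_function above uses
   NODE->aux as an on-the-current-path marker to cut cycles, the same way a
   depth-first search colors nodes gray.  A reduced model with hypothetical
   types:  */

struct sketch_fnode
{
  void *aux;				/* Non-NULL while on the DFS path.  */
  struct sketch_fnode **callees;	/* NULL-terminated callee array.  */
};

static void
sketch_flatten (struct sketch_fnode *node)
{
  node->aux = node;			/* Mark: being processed.  */
  for (struct sketch_fnode **p = node->callees; *p; p++)
    {
      if ((*p)->aux)
	continue;			/* Hit a cycle: give up on this edge.  */
      sketch_flatten (*p);		/* Flatten the callee recursively.  */
    }
  node->aux = NULL;			/* Unmark on the way out.  */
}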
a81b0a3d
JH
2432/* Inline NODE to all callers. Worker for cgraph_for_node_and_aliases.
2433 DATA points to the number of calls originally found, so we avoid
2434 infinite recursion. */
2435
2436static bool
bddead15
RB
2437inline_to_all_callers_1 (struct cgraph_node *node, void *data,
2438 hash_set<cgraph_node *> *callers)
a81b0a3d
JH
2439{
2440 int *num_calls = (int *)data;
1bbb87c4
JH
2441 bool callee_removed = false;
2442
a62bfab5 2443 while (node->callers && !node->inlined_to)
a81b0a3d
JH
2444 {
2445 struct cgraph_node *caller = node->callers->caller;
2446
1af8bfe5 2447 if (!can_inline_edge_p (node->callers, true)
9a4841a3 2448 || !can_inline_edge_by_limits_p (node->callers, true)
1af8bfe5
JH
2449 || node->callers->recursive_p ())
2450 {
2451 if (dump_file)
2452 fprintf (dump_file, "Uninlinable call found; giving up.\n");
2453 *num_calls = 0;
2454 return false;
2455 }
2456
a81b0a3d
JH
2457 if (dump_file)
2458 {
5e626cd9 2459 cgraph_node *ultimate = node->ultimate_alias_target ();
a81b0a3d
JH
2460 fprintf (dump_file,
2461 "\nInlining %s size %i.\n",
3629ff8a 2462 ultimate->dump_name (),
f658ad30 2463 ipa_size_summaries->get (ultimate)->size);
a81b0a3d
JH
2464 fprintf (dump_file,
2465 " Called once from %s %i insns.\n",
3629ff8a 2466 node->callers->caller->dump_name (),
f658ad30 2467 ipa_size_summaries->get (node->callers->caller)->size);
a81b0a3d
JH
2468 }
2469
bddead15
RB
2470 /* Remember which callers we inlined to, delaying updating the
2471 overall summary. */
2472 callers->add (node->callers->caller);
2473 inline_call (node->callers, true, NULL, NULL, false, &callee_removed);
a81b0a3d
JH
2474 if (dump_file)
2475 fprintf (dump_file,
2476 " Inlined into %s which now has %i size\n",
3629ff8a 2477 caller->dump_name (),
f658ad30 2478 ipa_size_summaries->get (caller)->size);
a81b0a3d
JH
2479 if (!(*num_calls)--)
2480 {
2481 if (dump_file)
2482 fprintf (dump_file, "New calls found; giving up.\n");
1bbb87c4 2483 return callee_removed;
a81b0a3d 2484 }
1bbb87c4
JH
2485 if (callee_removed)
2486 return true;
a81b0a3d
JH
2487 }
2488 return false;
2489}
2490
bddead15
RB
2491/* Wrapper around inline_to_all_callers_1 doing delayed overall summary
2492 update. */
2493
2494static bool
2495inline_to_all_callers (struct cgraph_node *node, void *data)
2496{
2497 hash_set<cgraph_node *> callers;
2498 bool res = inline_to_all_callers_1 (node, data, &callers);
2499 /* Perform the delayed update of the overall summary of all callers
2500 processed. This avoids quadratic behavior in the cases where
2501 we have a lot of calls to the same function. */
2502 for (hash_set<cgraph_node *>::iterator i = callers.begin ();
2503 i != callers.end (); ++i)
7237f93e 2504 ipa_update_overall_fn_summary ((*i)->inlined_to ? (*i)->inlined_to : *i);
bddead15
RB
2505 return res;
2506}
2507
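/* Illustrative sketch, not part of GCC: the wrapper above batches the
   expensive per-caller summary update.  Remembering each caller in a set
   and updating once per distinct caller turns a potentially quadratic
   sequence of updates into a linear one.  Modelled with std::set (assumes
   <set>; all names are hypothetical, and next_caller is assumed to return
   -1 once NODE has no callers left):  */

static void
sketch_inline_all_then_update (int node,
			       int (*next_caller) (int node),
			       void (*inline_one_call) (int caller, int node),
			       void (*update_summary) (int caller))
{
  std::set<int> callers;
  for (int c = next_caller (node); c != -1; c = next_caller (node))
    {
      callers.insert (c);		/* Remember; do not update yet.  */
      inline_one_call (c, node);	/* Consumes one caller edge of NODE.  */
    }
  for (std::set<int>::const_iterator i = callers.begin ();
       i != callers.end (); ++i)
    update_summary (*i);		/* One update per distinct caller.  */
}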
e86a910f
JH
2508/* Output overall time estimate. */
2509static void
2510dump_overall_stats (void)
2511{
ab38481c 2512 sreal sum_weighted = 0, sum = 0;
e86a910f
JH
2513 struct cgraph_node *node;
2514
2515 FOR_EACH_DEFINED_FUNCTION (node)
a62bfab5 2516 if (!node->inlined_to
e86a910f
JH
2517 && !node->alias)
2518 {
56f62793
ML
2519 ipa_fn_summary *s = ipa_fn_summaries->get (node);
2520 if (s != NULL)
2521 {
2522 sum += s->time;
2523 if (node->count.ipa ().initialized_p ())
2524 sum_weighted += s->time * node->count.ipa ().to_gcov_type ();
2525 }
e86a910f
JH
2526 }
2527 fprintf (dump_file, "Overall time estimate: "
ab38481c
JH
2528 "%f weighted by profile: "
2529 "%f\n", sum.to_double (), sum_weighted.to_double ());
e86a910f
JH
2530}
2531
2532/* Output some useful stats about inlining. */
2533
2534static void
2535dump_inline_stats (void)
2536{
a9243bfc
RB
2537 int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
2538 int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
2539 int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
2540 int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
2541 int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
2542 int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
0009a6c3
JH
2543 int64_t reason[CIF_N_REASONS][2];
2544 sreal reason_freq[CIF_N_REASONS];
e86a910f
JH
2545 int i;
2546 struct cgraph_node *node;
2547
2548 memset (reason, 0, sizeof (reason));
0009a6c3
JH
2549 for (i=0; i < CIF_N_REASONS; i++)
2550 reason_freq[i] = 0;
e86a910f
JH
2551 FOR_EACH_DEFINED_FUNCTION (node)
2552 {
2553 struct cgraph_edge *e;
2554 for (e = node->callees; e; e = e->next_callee)
2555 {
2556 if (e->inline_failed)
2557 {
1bad9c18
JH
2558 if (e->count.ipa ().initialized_p ())
2559 reason[(int) e->inline_failed][0] += e->count.ipa ().to_gcov_type ();
0009a6c3
JH
2560 reason_freq[(int) e->inline_failed] += e->sreal_frequency ();
2561 reason[(int) e->inline_failed][1] ++;
3995f3a2 2562 if (DECL_VIRTUAL_P (e->callee->decl)
1bad9c18 2563 && e->count.ipa ().initialized_p ())
e86a910f
JH
2564 {
2565 if (e->indirect_inlining_edge)
1bad9c18 2566 noninlined_virt_indir_cnt += e->count.ipa ().to_gcov_type ();
e86a910f 2567 else
1bad9c18 2568 noninlined_virt_cnt += e->count.ipa ().to_gcov_type ();
e86a910f 2569 }
1bad9c18 2570 else if (e->count.ipa ().initialized_p ())
e86a910f
JH
2571 {
2572 if (e->indirect_inlining_edge)
1bad9c18 2573 noninlined_indir_cnt += e->count.ipa ().to_gcov_type ();
e86a910f 2574 else
1bad9c18 2575 noninlined_cnt += e->count.ipa ().to_gcov_type ();
e86a910f
JH
2576 }
2577 }
1bad9c18 2578 else if (e->count.ipa ().initialized_p ())
e86a910f
JH
2579 {
2580 if (e->speculative)
2581 {
2582 if (DECL_VIRTUAL_P (e->callee->decl))
1bad9c18 2583 inlined_speculative_ply += e->count.ipa ().to_gcov_type ();
e86a910f 2584 else
1bad9c18 2585 inlined_speculative += e->count.ipa ().to_gcov_type ();
e86a910f
JH
2586 }
2587 else if (DECL_VIRTUAL_P (e->callee->decl))
2588 {
2589 if (e->indirect_inlining_edge)
1bad9c18 2590 inlined_virt_indir_cnt += e->count.ipa ().to_gcov_type ();
e86a910f 2591 else
1bad9c18 2592 inlined_virt_cnt += e->count.ipa ().to_gcov_type ();
e86a910f
JH
2593 }
2594 else
2595 {
2596 if (e->indirect_inlining_edge)
1bad9c18 2597 inlined_indir_cnt += e->count.ipa ().to_gcov_type ();
e86a910f 2598 else
1bad9c18 2599 inlined_cnt += e->count.ipa ().to_gcov_type ();
e86a910f
JH
2600 }
2601 }
2602 }
2603 for (e = node->indirect_calls; e; e = e->next_callee)
3995f3a2 2604 if (e->indirect_info->polymorphic
1bad9c18
JH
2605 && e->count.ipa ().initialized_p ())
2606 indirect_poly_cnt += e->count.ipa ().to_gcov_type ();
2607 else if (e->count.ipa ().initialized_p ())
2608 indirect_cnt += e->count.ipa ().to_gcov_type ();
e86a910f 2609 }
3995f3a2 2610 if (max_count.initialized_p ())
e86a910f
JH
2611 {
2612 fprintf (dump_file,
16998094
JM
2613 "Inlined %" PRId64 " + speculative "
2614 "%" PRId64 " + speculative polymorphic "
2615 "%" PRId64 " + previously indirect "
2616 "%" PRId64 " + virtual "
2617 "%" PRId64 " + virtual and previously indirect "
2618 "%" PRId64 "\n" "Not inlined "
2619 "%" PRId64 " + previously indirect "
2620 "%" PRId64 " + virtual "
2621 "%" PRId64 " + virtual and previously indirect "
956d615d 2622 "%" PRId64 " + still indirect "
16998094
JM
2623 "%" PRId64 " + still indirect polymorphic "
2624 "%" PRId64 "\n", inlined_cnt,
e86a910f
JH
2625 inlined_speculative, inlined_speculative_ply,
2626 inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
2627 noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
2628 noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
3995f3a2
JH
2629 fprintf (dump_file, "Removed speculations ");
2630 spec_rem.dump (dump_file);
2631 fprintf (dump_file, "\n");
e86a910f
JH
2632 }
2633 dump_overall_stats ();
2634 fprintf (dump_file, "\nWhy inlining failed?\n");
2635 for (i = 0; i < CIF_N_REASONS; i++)
0009a6c3
JH
2636 if (reason[i][1])
2637 fprintf (dump_file, "%-50s: %8i calls, %8f freq, %" PRId64" count\n",
e86a910f 2638 cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
0009a6c3 2639 (int) reason[i][1], reason_freq[i].to_double (), reason[i][0]);
e86a910f
JH
2640}
2641
d6ea70a0
JJ
2642/* Called when node is removed. */
2643
2644static void
2645flatten_remove_node_hook (struct cgraph_node *node, void *data)
2646{
2647 if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) == NULL)
2648 return;
2649
2650 hash_set<struct cgraph_node *> *removed
2651 = (hash_set<struct cgraph_node *> *) data;
2652 removed->add (node);
2653}
2654
ca31b95f
JH
2655/* Decide on the inlining. We do so in the topological order to avoid
2656 the expense of updating data structures. */
2657
c2924966 2658static unsigned int
4c0f7679 2659ipa_inline (void)
ca31b95f
JH
2660{
2661 struct cgraph_node *node;
2662 int nnodes;
b591a8b7 2663 struct cgraph_node **order;
d6ea70a0 2664 int i, j;
09ce3660 2665 int cold;
8a41354f
JH
2666 bool remove_functions = false;
2667
3dafb85c 2668 order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
b591a8b7 2669
10a5dd5d 2670 if (dump_file)
0bceb671 2671 ipa_dump_fn_summaries (dump_file);
670cd5c5 2672
af8bca3c 2673 nnodes = ipa_reverse_postorder (order);
e7a74006 2674 spec_rem = profile_count::zero ();
ca31b95f 2675
65c70e6b 2676 FOR_EACH_FUNCTION (node)
7ce7e4d4
JH
2677 {
2678 node->aux = 0;
2679
2680 /* Recompute the default reasons for inlining because they may have
2681 changed during merging. */
2682 if (in_lto_p)
2683 {
2684 for (cgraph_edge *e = node->callees; e; e = e->next_callee)
2685 {
2686 gcc_assert (e->inline_failed);
2687 initialize_inline_failed (e);
2688 }
2689 for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
2690 initialize_inline_failed (e);
2691 }
2692 }
ca31b95f
JH
2693
2694 if (dump_file)
af961c7f 2695 fprintf (dump_file, "\nFlattening functions:\n");
ca31b95f 2696
d6ea70a0
JJ
2697 /* First shrink order array, so that it only contains nodes with
2698 flatten attribute. */
2699 for (i = nnodes - 1, j = i; i >= 0; i--)
2700 {
2701 node = order[i];
2895b172 2702 if (node->definition
f22712bd
JH
2703 /* Do not try to flatten aliases. These may happen for example when
2704 creating local aliases. */
2705 && !node->alias
2895b172
JH
2706 && lookup_attribute ("flatten",
2707 DECL_ATTRIBUTES (node->decl)) != NULL)
d6ea70a0
JJ
2708 order[j--] = order[i];
2709 }
2710
2711 /* After the above loop, order[j + 1] ... order[nnodes - 1] contain
2712 nodes with flatten attribute. If there is more than one such
2713 node, we need to register a node removal hook, as flatten_function
2714 could remove other nodes with flatten attribute. See PR82801. */
2715 struct cgraph_node_hook_list *node_removal_hook_holder = NULL;
2716 hash_set<struct cgraph_node *> *flatten_removed_nodes = NULL;
2717 if (j < nnodes - 2)
2718 {
2719 flatten_removed_nodes = new hash_set<struct cgraph_node *>;
2720 node_removal_hook_holder
2721 = symtab->add_cgraph_removal_hook (&flatten_remove_node_hook,
2722 flatten_removed_nodes);
2723 }
2724
af961c7f
RG
2725 /* In the first pass handle functions to be flattened. Do this with
2726 a priority so none of our later choices will make this impossible. */
d6ea70a0 2727 for (i = nnodes - 1; i > j; i--)
ca31b95f 2728 {
af961c7f 2729 node = order[i];
d6ea70a0
JJ
2730 if (flatten_removed_nodes
2731 && flatten_removed_nodes->contains (node))
2732 continue;
af961c7f 2733
09a2806f 2734 /* Handle nodes to be flattened.
af961c7f
RG
2735 Ideally when processing callees we stop inlining at the
2736 entry of cycles, possibly cloning that entry point and
2737 try to flatten itself turning it into a self-recursive
2738 function. */
d6ea70a0 2739 if (dump_file)
3629ff8a 2740 fprintf (dump_file, "Flattening %s\n", node->dump_name ());
f6e809c8 2741 flatten_function (node, false, true);
d6ea70a0
JJ
2742 }
2743
2744 if (j < nnodes - 2)
2745 {
2746 symtab->remove_cgraph_removal_hook (node_removal_hook_holder);
2747 delete flatten_removed_nodes;
ca31b95f 2748 }
d6ea70a0
JJ
2749 free (order);
2750
e86a910f
JH
2751 if (dump_file)
2752 dump_overall_stats ();
ca31b95f 2753
4c0f7679 2754 inline_small_functions ();
e70670cf 2755
17e0fc92
JH
2756 gcc_assert (symtab->state == IPA_SSA);
2757 symtab->state = IPA_SSA_AFTER_INLINING;
2758 /* Do first after-inlining removal. We want to remove all "stale" extern
2759 inline functions and virtual functions so we really know what is called
2760 once. */
2761 symtab->remove_unreachable_nodes (dump_file);
ca31b95f 2762
100411f8
JH
2763 /* Inline functions with a property that after inlining into all callers the
2764 code size will shrink because the out-of-line copy is eliminated.
2765 We do this regardless on the callee size as long as function growth limits
2766 are met. */
09ce3660
JH
2767 if (dump_file)
2768 fprintf (dump_file,
17e0fc92
JH
2769 "\nDeciding on functions to be inlined into all callers and "
2770 "removing useless speculations:\n");
09ce3660
JH
2771
2772 /* Inlining one function called once has good chance of preventing
2773 inlining other function into the same callee. Ideally we should
2774 work in priority order, but probably inlining hot functions first
2775 is good cut without the extra pain of maintaining the queue.
2776
2777 ??? this is not really fitting the bill perfectly: inlining function
2778 into callee often leads to better optimization of callee due to
2779 increased context for optimization.
2780 For example if main() function calls a function that outputs help
956d615d 2781 and then function that does the main optimization, we should inline
09ce3660
JH
2782 the second with priority even if both calls are cold by themselves.
2783
2784 We probably want to implement new predicate replacing our use of
2785 maybe_hot_edge interpreted as maybe_hot_edge || callee is known
2786 to be hot. */
2787 for (cold = 0; cold <= 1; cold ++)
355866de 2788 {
09ce3660 2789 FOR_EACH_DEFINED_FUNCTION (node)
ca31b95f 2790 {
09ce3660
JH
2791 struct cgraph_edge *edge, *next;
2792 bool update=false;
2793
29f1e2b1
JH
2794 if (!opt_for_fn (node->decl, optimize)
2795 || !opt_for_fn (node->decl, flag_inline_functions_called_once))
2796 continue;
2797
09ce3660 2798 for (edge = node->callees; edge; edge = next)
ca31b95f 2799 {
09ce3660
JH
2800 next = edge->next_callee;
2801 if (edge->speculative && !speculation_useful_p (edge, false))
e3c7b49c 2802 {
1bad9c18
JH
2803 if (edge->count.ipa ().initialized_p ())
2804 spec_rem += edge->count.ipa ();
27c5a177 2805 cgraph_edge::resolve_speculation (edge);
09ce3660 2806 update = true;
8a41354f 2807 remove_functions = true;
09ce3660
JH
2808 }
2809 }
2810 if (update)
2811 {
a62bfab5
ML
2812 struct cgraph_node *where = node->inlined_to
2813 ? node->inlined_to : node;
09ce3660 2814 reset_edge_caches (where);
0bceb671 2815 ipa_update_overall_fn_summary (where);
09ce3660 2816 }
2bf86c84 2817 if (want_inline_function_to_all_callers_p (node, cold))
09ce3660
JH
2818 {
2819 int num_calls = 0;
1ede94c5
JH
2820 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
2821 true);
2822 while (node->call_for_symbol_and_aliases
17e0fc92 2823 (inline_to_all_callers, &num_calls, true))
1bbb87c4 2824 ;
f019b607 2825 remove_functions = true;
ca31b95f
JH
2826 }
2827 }
2828 }
2829
4174a33a
DM
2830 if (dump_enabled_p ())
2831 dump_printf (MSG_NOTE,
2832 "\nInlined %i calls, eliminated %i functions\n\n",
2833 ncalls_inlined, nfunctions_inlined);
ca31b95f 2834 if (dump_file)
4174a33a 2835 dump_inline_stats ();
09a2806f 2836
898b8927 2837 if (dump_file)
0bceb671 2838 ipa_dump_fn_summaries (dump_file);
8a41354f 2839 return remove_functions ? TODO_remove_functions : 0;
ca31b95f
JH
2840}
2841
275b4baa
RG
2842/* Inline always-inline function calls in NODE. */
2843
2844static bool
4c0f7679 2845inline_always_inline_functions (struct cgraph_node *node)
275b4baa
RG
2846{
2847 struct cgraph_edge *e;
2848 bool inlined = false;
2849
2850 for (e = node->callees; e; e = e->next_callee)
2851 {
d52f5295 2852 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
67348ccc 2853 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
275b4baa
RG
2854 continue;
2855
3dafb85c 2856 if (e->recursive_p ())
275b4baa 2857 {
4174a33a
DM
2858 if (dump_enabled_p ())
2859 dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
2860 " Not inlining recursive call to %C.\n",
2861 e->callee);
275b4baa
RG
2862 e->inline_failed = CIF_RECURSIVE_INLINING;
2863 continue;
2864 }
2865
4c0f7679 2866 if (!can_early_inline_edge_p (e))
bef8491a
ST
2867 {
2868 /* Set inlined to true if the callee is marked "always_inline" but
2869 is not inlinable. This will allow flagging an error later in
e53b6e56 2870 expand_call_inline in tree-inline.cc. */
bef8491a 2871 if (lookup_attribute ("always_inline",
67348ccc 2872 DECL_ATTRIBUTES (callee->decl)) != NULL)
bef8491a
ST
2873 inlined = true;
2874 continue;
2875 }
275b4baa 2876
4174a33a
DM
2877 if (dump_enabled_p ())
2878 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt,
2879 " Inlining %C into %C (always_inline).\n",
2880 e->callee, e->caller);
c170d40f 2881 inline_call (e, true, NULL, NULL, false);
275b4baa
RG
2882 inlined = true;
2883 }
c170d40f 2884 if (inlined)
0bceb671 2885 ipa_update_overall_fn_summary (node);
275b4baa
RG
2886
2887 return inlined;
2888}
2889
ca31b95f 2890/* Decide on the inlining. We do so in the topological order to avoid
af961c7f 2891 the expense of updating data structures. */
ca31b95f 2892
7fa49e7b 2893static bool
4c0f7679 2894early_inline_small_functions (struct cgraph_node *node)
ca31b95f
JH
2895{
2896 struct cgraph_edge *e;
d63db217 2897 bool inlined = false;
7fa49e7b 2898
275b4baa 2899 for (e = node->callees; e; e = e->next_callee)
c3056c2d 2900 {
d52f5295 2901 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
56f62793 2902
956d615d 2903 /* We can encounter a not-yet-analyzed function during
56f62793
ML
2904 early inlining on callgraphs with strongly
2905 connected components. */
      ipa_fn_summary *s = ipa_fn_summaries->get (callee);
      if (s == NULL || !s->inlinable || !e->inline_failed)
	continue;

      /* Do not consider functions not declared inline.  */
      if (!DECL_DECLARED_INLINE_P (callee->decl)
	  && !opt_for_fn (node->decl, flag_inline_small_functions)
	  && !opt_for_fn (node->decl, flag_inline_functions))
	continue;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, e->call_stmt,
			 "Considering inline candidate %C.\n",
			 callee);

      if (!can_early_inline_edge_p (e))
	continue;

      if (e->recursive_p ())
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "  Not inlining: recursive call.\n");
	  continue;
	}

      if (!want_early_inline_function_p (e))
	continue;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt,
			 "  Inlining %C into %C.\n",
			 callee, e->caller);
      inline_call (e, true, NULL, NULL, false);
      inlined = true;
    }

  if (inlined)
    ipa_update_overall_fn_summary (node);

  return inlined;
}

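/* Illustrative sketch (not part of the original source): given

     static inline int sq (int x) { return x * x; }
     int g (int x) { return sq (x) + 1; }

   the call to sq qualifies above: sq is declared inline, so if
   want_early_inline_function_p judges it cheap enough, it is inlined
   into g already by this early pass.  */
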
unsigned int
early_inliner (function *fun)
{
  struct cgraph_node *node = cgraph_node::get (current_function_decl);
  struct cgraph_edge *edge;
  unsigned int todo = 0;
  int iterations = 0;
  bool inlined = false;

  if (seen_error ())
    return 0;

  /* Do nothing if the data structures for the ipa-inliner are already
     computed.  This happens when some pass decides to construct a new
     function and cgraph_add_new_function calls lowering passes and early
     optimization on it.  This may confuse us when the early inliner decides
     to inline a call to a function clone, because function clones don't have
     parameter lists in ipa-prop matching their signatures.  */
  if (ipa_node_params_sum)
    return 0;

  if (flag_checking)
    node->verify ();
  node->remove_all_references ();

  /* Even when not optimizing, and even when not inlining, do inline
     always-inline functions.  */
  inlined = inline_always_inline_functions (node);

  if (!optimize
      || flag_no_inline
      || !flag_early_inlining
      /* Never inline regular functions into always-inline functions
	 during incremental inlining.  This is unfortunate, as functions
	 calling always-inline functions will get less optimized, but at
	 the same time inlining functions that call always-inline
	 functions into an always-inline function might introduce
	 cycles of edges to be always inlined in the callgraph.

	 We might want to be smarter and just avoid this type of inlining.  */
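      /* Illustrative cycle (not part of the original source):

	   __attribute__ ((always_inline)) inline void a (void) { b (); }
	   void b (void) { a (); }

	 Inlining b into a would leave a calling an always-inline function,
	 namely itself, creating a cycle of edges that must all be inlined.  */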
      || (DECL_DISREGARD_INLINE_LIMITS (node->decl)
	  && lookup_attribute ("always_inline",
			       DECL_ATTRIBUTES (node->decl))))
    ;
  else if (lookup_attribute ("flatten",
			     DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      /* When the function is marked to be flattened, recursively inline
	 all calls in it.  */
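      /* Illustrative sketch (not part of the original source): a function
	 declared as

	   __attribute__ ((flatten)) void top (void) { helper (); }

	 has every call in its body, and in the bodies inlined into it,
	 inlined recursively where possible.  */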
      if (dump_enabled_p ())
	dump_printf (MSG_OPTIMIZED_LOCATIONS,
		     "Flattening %C\n", node);
      flatten_function (node, true, true);
      inlined = true;
    }
  else
    {
      /* If some always_inline functions were inlined, apply the changes.
	 This way we will not account always-inline functions into the growth
	 limits, and moreover we will inline calls from always-inline
	 functions that we skipped previously because of the conditional
	 above.  */
      if (inlined)
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);
	  /* The optimize_inline_calls call above might have introduced new
	     statements that don't have inline parameters computed.  */
	  for (edge = node->callees; edge; edge = edge->next_callee)
	    {
	      /* We can encounter not-yet-analyzed functions during
		 early inlining on callgraphs with strongly
		 connected components.  */
	      ipa_call_summary *es = ipa_call_summaries->get_create (edge);
	      es->call_stmt_size
		= estimate_num_insns (edge->call_stmt, &eni_size_weights);
	      es->call_stmt_time
		= estimate_num_insns (edge->call_stmt, &eni_time_weights);
	    }
	  ipa_update_overall_fn_summary (node);
	  inlined = false;
	  timevar_pop (TV_INTEGRATION);
	}
      /* We iterate incremental inlining to pick up trivial cases of
	 indirect inlining, where inlining exposes a direct call.  */
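      /* Illustrative sketch (not part of the original source): given

	   void g (void);
	   static void run (void (*fn) (void)) { fn (); }
	   void f (void) { run (g); }

	 inlining run into f turns the indirect call fn () into a direct
	 call to g, which the next iteration may then inline.  */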
      while (iterations < opt_for_fn (node->decl,
				      param_early_inliner_max_iterations)
	     && early_inline_small_functions (node))
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);

	  /* Technically we ought to recompute inline parameters so the new
	     iteration of the early inliner works as expected.  We however
	     have values approximately right, and thus we only need to update
	     edge info that might be cleared out for newly discovered edges.  */
	  for (edge = node->callees; edge; edge = edge->next_callee)
	    {
	      /* We have no summary for new bound store calls yet.  */
	      ipa_call_summary *es = ipa_call_summaries->get_create (edge);
	      es->call_stmt_size
		= estimate_num_insns (edge->call_stmt, &eni_size_weights);
	      es->call_stmt_time
		= estimate_num_insns (edge->call_stmt, &eni_time_weights);
	    }
	  if (iterations < opt_for_fn (node->decl,
				       param_early_inliner_max_iterations) - 1)
	    ipa_update_overall_fn_summary (node);
	  timevar_pop (TV_INTEGRATION);
	  iterations++;
	  inlined = false;
	}
      if (dump_file)
	fprintf (dump_file, "Iterations: %i\n", iterations);
    }

  if (inlined)
    {
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }

  fun->always_inline_functions_inlined = true;

  return todo;
}

/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   the later real inlining pass for testcases with very many function calls.  */

namespace {

const pass_data pass_data_early_inline =
{
  GIMPLE_PASS, /* type */
  "einline", /* name */
  OPTGROUP_INLINE, /* optinfo_flags */
  TV_EARLY_INLINING, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_early_inline : public gimple_opt_pass
{
public:
  pass_early_inline (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_early_inline, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_early_inline

unsigned int
pass_early_inline::execute (function *fun)
{
  return early_inliner (fun);
}

} // anon namespace

gimple_opt_pass *
make_pass_early_inline (gcc::context *ctxt)
{
  return new pass_early_inline (ctxt);
}

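/* Note (illustrative, not part of this file): both passes are registered
   with the pass manager, which lists them in passes.def roughly as

     NEXT_PASS (pass_early_inline);
     ...
     NEXT_PASS (pass_ipa_inline);

   so early inlining runs per function early on, while the IPA inliner
   below runs once with whole-program knowledge.  */
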
namespace {

const pass_data pass_data_ipa_inline =
{
  IPA_PASS, /* type */
  "inline", /* name */
  OPTGROUP_INLINE, /* optinfo_flags */
  TV_IPA_INLINING, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_dump_symtab ), /* todo_flags_finish */
};

class pass_ipa_inline : public ipa_opt_pass_d
{
public:
  pass_ipa_inline (gcc::context *ctxt)
    : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
		      NULL, /* generate_summary */
		      NULL, /* write_summary */
		      NULL, /* read_summary */
		      NULL, /* write_optimization_summary */
		      NULL, /* read_optimization_summary */
		      NULL, /* stmt_fixup */
		      0, /* function_transform_todo_flags_start */
		      inline_transform, /* function_transform */
		      NULL) /* variable_transform */
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *) { return ipa_inline (); }

}; // class pass_ipa_inline

} // anon namespace

ipa_opt_pass_d *
make_pass_ipa_inline (gcc::context *ctxt)
{
  return new pass_ipa_inline (ctxt);
}