]> gcc.gnu.org Git - gcc.git/blob - gcc/ipa-inline-analysis.c
Limit AA walking when inlining analysis examines parameters
[gcc.git] / gcc / ipa-inline-analysis.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Analysis used by the inliner and other passes limiting code size growth.
22
23 We estimate for each function
24 - function body size
25 - average function execution time
26 - inlining size benefit (that is how much of function body size
27 and its call sequence is expected to disappear by inlining)
28 - inlining time benefit
29 - function frame size
30 For each call
31 - call statement size and time
32
   inline_summary datastructures store the above information locally (i.e.
34 parameters of the function itself) and globally (i.e. parameters of
35 the function created by applying all the inline decisions already
36 present in the callgraph).
37
   We provide accessors to the inline_summary datastructure and
39 basic logic updating the parameters when inlining is performed.
40
41 The summaries are context sensitive. Context means
42 1) partial assignment of known constant values of operands
43 2) whether function is inlined into the call or not.
44 It is easy to add more variants. To represent function size and time
45 that depends on context (i.e. it is known to be optimized away when
   context is known either by inlining or from IP-CP and cloning),
47 we use predicates. Predicates are logical formulas in
48 conjunctive-disjunctive form consisting of clauses. Clauses are bitmaps
49 specifying what conditions must be true. Conditions are simple test
50 of the form described above.
51
52 In order to make predicate (possibly) true, all of its clauses must
53 be (possibly) true. To make clause (possibly) true, one of conditions
54 it mentions must be (possibly) true. There are fixed bounds on
55 number of clauses and conditions and all the manipulation functions
56 are conservative in positive direction. I.e. we may lose precision
57 by thinking that predicate may be true even when it is not.
58
59 estimate_edge_size and estimate_edge_growth can be used to query
60 function size/time in the given context. inline_merge_summary merges
61 properties of caller and callee after inlining.
62
63 Finally pass_inline_parameters is exported. This is used to drive
64 computation of function parameters used by the early inliner. IPA
   inliner performs analysis via its analyze_function method.  */
66
67 #include "config.h"
68 #include "system.h"
69 #include "coretypes.h"
70 #include "backend.h"
71 #include "tree.h"
72 #include "gimple.h"
73 #include "hard-reg-set.h"
74 #include "ssa.h"
75 #include "alias.h"
76 #include "fold-const.h"
77 #include "stor-layout.h"
78 #include "print-tree.h"
79 #include "tree-inline.h"
80 #include "langhooks.h"
81 #include "flags.h"
82 #include "diagnostic.h"
83 #include "gimple-pretty-print.h"
84 #include "params.h"
85 #include "tree-pass.h"
86 #include "coverage.h"
87 #include "cfganal.h"
88 #include "internal-fn.h"
89 #include "gimple-iterator.h"
90 #include "tree-cfg.h"
91 #include "tree-ssa-loop-niter.h"
92 #include "tree-ssa-loop.h"
93 #include "cgraph.h"
94 #include "alloc-pool.h"
95 #include "symbol-summary.h"
96 #include "ipa-prop.h"
97 #include "lto-streamer.h"
98 #include "data-streamer.h"
99 #include "tree-streamer.h"
100 #include "ipa-inline.h"
101 #include "cfgloop.h"
102 #include "tree-scalar-evolution.h"
103 #include "ipa-utils.h"
104 #include "cilk.h"
105 #include "cfgexpand.h"
106
107 /* Estimate runtime of function can easilly run into huge numbers with many
108 nested loops. Be sure we can compute time * INLINE_SIZE_SCALE * 2 in an
109 integer. For anything larger we use gcov_type. */
110 #define MAX_TIME 500000
111
112 /* Number of bits in integer, but we really want to be stable across different
113 hosts. */
114 #define NUM_CONDITIONS 32
115
116 enum predicate_conditions
117 {
118 predicate_false_condition = 0,
119 predicate_not_inlined_condition = 1,
120 predicate_first_dynamic_condition = 2
121 };
122
123 /* Special condition code we use to represent test that operand is compile time
124 constant. */
125 #define IS_NOT_CONSTANT ERROR_MARK
126 /* Special condition code we use to represent test that operand is not changed
127 across invocation of the function. When operand IS_NOT_CONSTANT it is always
128 CHANGED, however i.e. loop invariants can be NOT_CHANGED given percentage
129 of executions even when they are not compile time constants. */
130 #define CHANGED IDENTIFIER_NODE
131
132 /* Holders of ipa cgraph hooks: */
133 static struct cgraph_2edge_hook_list *edge_duplication_hook_holder;
134 static struct cgraph_edge_hook_list *edge_removal_hook_holder;
135 static void inline_edge_removal_hook (struct cgraph_edge *, void *);
136 static void inline_edge_duplication_hook (struct cgraph_edge *,
137 struct cgraph_edge *, void *);
138
139 /* VECtor holding inline summaries.
140 In GGC memory because conditions might point to constant trees. */
141 function_summary <inline_summary *> *inline_summaries;
142 vec<inline_edge_summary_t> inline_edge_summary_vec;
143
144 /* Cached node/edge growths. */
145 vec<edge_growth_cache_entry> edge_growth_cache;
146
147 /* Edge predicates goes here. */
148 static pool_allocator<predicate> edge_predicate_pool ("edge predicates", 10);
149
150 /* Return true predicate (tautology).
151 We represent it by empty list of clauses. */
152
153 static inline struct predicate
154 true_predicate (void)
155 {
156 struct predicate p;
157 p.clause[0] = 0;
158 return p;
159 }
160
161
162 /* Return predicate testing single condition number COND. */
163
164 static inline struct predicate
165 single_cond_predicate (int cond)
166 {
167 struct predicate p;
168 p.clause[0] = 1 << cond;
169 p.clause[1] = 0;
170 return p;
171 }
172
173
174 /* Return false predicate. First clause require false condition. */
175
176 static inline struct predicate
177 false_predicate (void)
178 {
179 return single_cond_predicate (predicate_false_condition);
180 }
181
182
183 /* Return true if P is (true). */
184
185 static inline bool
186 true_predicate_p (struct predicate *p)
187 {
188 return !p->clause[0];
189 }
190
191
192 /* Return true if P is (false). */
193
194 static inline bool
195 false_predicate_p (struct predicate *p)
196 {
197 if (p->clause[0] == (1 << predicate_false_condition))
198 {
199 gcc_checking_assert (!p->clause[1]
200 && p->clause[0] == 1 << predicate_false_condition);
201 return true;
202 }
203 return false;
204 }
205
206
207 /* Return predicate that is set true when function is not inlined. */
208
209 static inline struct predicate
210 not_inlined_predicate (void)
211 {
212 return single_cond_predicate (predicate_not_inlined_condition);
213 }
214
215 /* Simple description of whether a memory load or a condition refers to a load
216 from an aggregate and if so, how and where from in the aggregate.
217 Individual fields have the same meaning like fields with the same name in
218 struct condition. */
219
220 struct agg_position_info
221 {
222 HOST_WIDE_INT offset;
223 bool agg_contents;
224 bool by_ref;
225 };
226
227 /* Add condition to condition list CONDS. AGGPOS describes whether the used
228 oprand is loaded from an aggregate and where in the aggregate it is. It can
229 be NULL, which means this not a load from an aggregate. */
230
231 static struct predicate
232 add_condition (struct inline_summary *summary, int operand_num,
233 struct agg_position_info *aggpos,
234 enum tree_code code, tree val)
235 {
236 int i;
237 struct condition *c;
238 struct condition new_cond;
239 HOST_WIDE_INT offset;
240 bool agg_contents, by_ref;
241
242 if (aggpos)
243 {
244 offset = aggpos->offset;
245 agg_contents = aggpos->agg_contents;
246 by_ref = aggpos->by_ref;
247 }
248 else
249 {
250 offset = 0;
251 agg_contents = false;
252 by_ref = false;
253 }
254
255 gcc_checking_assert (operand_num >= 0);
256 for (i = 0; vec_safe_iterate (summary->conds, i, &c); i++)
257 {
258 if (c->operand_num == operand_num
259 && c->code == code
260 && c->val == val
261 && c->agg_contents == agg_contents
262 && (!agg_contents || (c->offset == offset && c->by_ref == by_ref)))
263 return single_cond_predicate (i + predicate_first_dynamic_condition);
264 }
265 /* Too many conditions. Give up and return constant true. */
266 if (i == NUM_CONDITIONS - predicate_first_dynamic_condition)
267 return true_predicate ();
268
269 new_cond.operand_num = operand_num;
270 new_cond.code = code;
271 new_cond.val = val;
272 new_cond.agg_contents = agg_contents;
273 new_cond.by_ref = by_ref;
274 new_cond.offset = offset;
275 vec_safe_push (summary->conds, new_cond);
276 return single_cond_predicate (i + predicate_first_dynamic_condition);
277 }
278
279
280 /* Add clause CLAUSE into the predicate P. */
281
282 static inline void
283 add_clause (conditions conditions, struct predicate *p, clause_t clause)
284 {
285 int i;
286 int i2;
287 int insert_here = -1;
288 int c1, c2;
289
290 /* True clause. */
291 if (!clause)
292 return;
293
294 /* False clause makes the whole predicate false. Kill the other variants. */
295 if (clause == (1 << predicate_false_condition))
296 {
297 p->clause[0] = (1 << predicate_false_condition);
298 p->clause[1] = 0;
299 return;
300 }
301 if (false_predicate_p (p))
302 return;
303
304 /* No one should be silly enough to add false into nontrivial clauses. */
305 gcc_checking_assert (!(clause & (1 << predicate_false_condition)));
306
307 /* Look where to insert the clause. At the same time prune out
308 clauses of P that are implied by the new clause and thus
309 redundant. */
310 for (i = 0, i2 = 0; i <= MAX_CLAUSES; i++)
311 {
312 p->clause[i2] = p->clause[i];
313
314 if (!p->clause[i])
315 break;
316
317 /* If p->clause[i] implies clause, there is nothing to add. */
318 if ((p->clause[i] & clause) == p->clause[i])
319 {
320 /* We had nothing to add, none of clauses should've become
321 redundant. */
322 gcc_checking_assert (i == i2);
323 return;
324 }
325
326 if (p->clause[i] < clause && insert_here < 0)
327 insert_here = i2;
328
329 /* If clause implies p->clause[i], then p->clause[i] becomes redundant.
330 Otherwise the p->clause[i] has to stay. */
331 if ((p->clause[i] & clause) != clause)
332 i2++;
333 }
334
335 /* Look for clauses that are obviously true. I.e.
336 op0 == 5 || op0 != 5. */
337 for (c1 = predicate_first_dynamic_condition; c1 < NUM_CONDITIONS; c1++)
338 {
339 condition *cc1;
340 if (!(clause & (1 << c1)))
341 continue;
342 cc1 = &(*conditions)[c1 - predicate_first_dynamic_condition];
343 /* We have no way to represent !CHANGED and !IS_NOT_CONSTANT
344 and thus there is no point for looking for them. */
345 if (cc1->code == CHANGED || cc1->code == IS_NOT_CONSTANT)
346 continue;
347 for (c2 = c1 + 1; c2 < NUM_CONDITIONS; c2++)
348 if (clause & (1 << c2))
349 {
350 condition *cc1 =
351 &(*conditions)[c1 - predicate_first_dynamic_condition];
352 condition *cc2 =
353 &(*conditions)[c2 - predicate_first_dynamic_condition];
354 if (cc1->operand_num == cc2->operand_num
355 && cc1->val == cc2->val
356 && cc2->code != IS_NOT_CONSTANT
357 && cc2->code != CHANGED
358 && cc1->code == invert_tree_comparison (cc2->code,
359 HONOR_NANS (cc1->val)))
360 return;
361 }
362 }
363
364
365 /* We run out of variants. Be conservative in positive direction. */
366 if (i2 == MAX_CLAUSES)
367 return;
368 /* Keep clauses in decreasing order. This makes equivalence testing easy. */
369 p->clause[i2 + 1] = 0;
370 if (insert_here >= 0)
371 for (; i2 > insert_here; i2--)
372 p->clause[i2] = p->clause[i2 - 1];
373 else
374 insert_here = i2;
375 p->clause[insert_here] = clause;
376 }
377
378
379 /* Return P & P2. */
380
381 static struct predicate
382 and_predicates (conditions conditions,
383 struct predicate *p, struct predicate *p2)
384 {
385 struct predicate out = *p;
386 int i;
387
388 /* Avoid busy work. */
389 if (false_predicate_p (p2) || true_predicate_p (p))
390 return *p2;
391 if (false_predicate_p (p) || true_predicate_p (p2))
392 return *p;
393
394 /* See how far predicates match. */
395 for (i = 0; p->clause[i] && p->clause[i] == p2->clause[i]; i++)
396 {
397 gcc_checking_assert (i < MAX_CLAUSES);
398 }
399
400 /* Combine the predicates rest. */
401 for (; p2->clause[i]; i++)
402 {
403 gcc_checking_assert (i < MAX_CLAUSES);
404 add_clause (conditions, &out, p2->clause[i]);
405 }
406 return out;
407 }
408
409
410 /* Return true if predicates are obviously equal. */
411
412 static inline bool
413 predicates_equal_p (struct predicate *p, struct predicate *p2)
414 {
415 int i;
416 for (i = 0; p->clause[i]; i++)
417 {
418 gcc_checking_assert (i < MAX_CLAUSES);
419 gcc_checking_assert (p->clause[i] > p->clause[i + 1]);
420 gcc_checking_assert (!p2->clause[i]
421 || p2->clause[i] > p2->clause[i + 1]);
422 if (p->clause[i] != p2->clause[i])
423 return false;
424 }
425 return !p2->clause[i];
426 }
427
428
429 /* Return P | P2. */
430
431 static struct predicate
432 or_predicates (conditions conditions,
433 struct predicate *p, struct predicate *p2)
434 {
435 struct predicate out = true_predicate ();
436 int i, j;
437
438 /* Avoid busy work. */
439 if (false_predicate_p (p2) || true_predicate_p (p))
440 return *p;
441 if (false_predicate_p (p) || true_predicate_p (p2))
442 return *p2;
443 if (predicates_equal_p (p, p2))
444 return *p;
445
446 /* OK, combine the predicates. */
447 for (i = 0; p->clause[i]; i++)
448 for (j = 0; p2->clause[j]; j++)
449 {
450 gcc_checking_assert (i < MAX_CLAUSES && j < MAX_CLAUSES);
451 add_clause (conditions, &out, p->clause[i] | p2->clause[j]);
452 }
453 return out;
454 }
455
456
457 /* Having partial truth assignment in POSSIBLE_TRUTHS, return false
458 if predicate P is known to be false. */
459
460 static bool
461 evaluate_predicate (struct predicate *p, clause_t possible_truths)
462 {
463 int i;
464
465 /* True remains true. */
466 if (true_predicate_p (p))
467 return true;
468
469 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
470
471 /* See if we can find clause we can disprove. */
472 for (i = 0; p->clause[i]; i++)
473 {
474 gcc_checking_assert (i < MAX_CLAUSES);
475 if (!(p->clause[i] & possible_truths))
476 return false;
477 }
478 return true;
479 }
480
/* Return the probability in range 0...REG_BR_PROB_BASE that the predicated
   instruction will be recomputed per invocation of the inlined call.
   CONDS is the condition table, POSSIBLE_TRUTHS the clause of conditions
   known possibly true, and INLINE_PARAM_SUMMARY gives per-parameter change
   probabilities (may be empty).  */

static int
predicate_probability (conditions conds,
		       struct predicate *p, clause_t possible_truths,
		       vec<inline_param_summary> inline_param_summary)
{
  int i;
  int combined_prob = REG_BR_PROB_BASE;

  /* True remains true.  */
  if (true_predicate_p (p))
    return REG_BR_PROB_BASE;

  if (false_predicate_p (p))
    return 0;

  gcc_assert (!(possible_truths & (1 << predicate_false_condition)));

  /* See if we can find clause we can disprove.  */
  for (i = 0; p->clause[i]; i++)
    {
      gcc_checking_assert (i < MAX_CLAUSES);
      if (!(p->clause[i] & possible_truths))
	return 0;
      else
	{
	  /* A clause holds when at least one of its conditions holds, so
	     its probability is the maximum over the conditions.  */
	  int this_prob = 0;
	  int i2;
	  /* Without per-parameter summaries we cannot do better than
	     "always recomputed".  */
	  if (!inline_param_summary.exists ())
	    return REG_BR_PROB_BASE;
	  for (i2 = 0; i2 < NUM_CONDITIONS; i2++)
	    if ((p->clause[i] & possible_truths) & (1 << i2))
	      {
		if (i2 >= predicate_first_dynamic_condition)
		  {
		    condition *c =
		      &(*conds)[i2 - predicate_first_dynamic_condition];
		    /* Only CHANGED conditions with a recorded change
		       probability for their operand can lower the
		       estimate.  */
		    if (c->code == CHANGED
			&& (c->operand_num <
			    (int) inline_param_summary.length ()))
		      {
			int iprob =
			  inline_param_summary[c->operand_num].change_prob;
			this_prob = MAX (this_prob, iprob);
		      }
		    else
		      this_prob = REG_BR_PROB_BASE;
		  }
		else
		  this_prob = REG_BR_PROB_BASE;
	      }
	  /* All clauses must hold, so clause probabilities combine by
	     taking the minimum.  */
	  combined_prob = MIN (this_prob, combined_prob);
	  if (!combined_prob)
	    return 0;
	}
    }
  return combined_prob;
}
541
542
543 /* Dump conditional COND. */
544
545 static void
546 dump_condition (FILE *f, conditions conditions, int cond)
547 {
548 condition *c;
549 if (cond == predicate_false_condition)
550 fprintf (f, "false");
551 else if (cond == predicate_not_inlined_condition)
552 fprintf (f, "not inlined");
553 else
554 {
555 c = &(*conditions)[cond - predicate_first_dynamic_condition];
556 fprintf (f, "op%i", c->operand_num);
557 if (c->agg_contents)
558 fprintf (f, "[%soffset: " HOST_WIDE_INT_PRINT_DEC "]",
559 c->by_ref ? "ref " : "", c->offset);
560 if (c->code == IS_NOT_CONSTANT)
561 {
562 fprintf (f, " not constant");
563 return;
564 }
565 if (c->code == CHANGED)
566 {
567 fprintf (f, " changed");
568 return;
569 }
570 fprintf (f, " %s ", op_symbol_code (c->code));
571 print_generic_expr (f, c->val, 1);
572 }
573 }
574
575
576 /* Dump clause CLAUSE. */
577
578 static void
579 dump_clause (FILE *f, conditions conds, clause_t clause)
580 {
581 int i;
582 bool found = false;
583 fprintf (f, "(");
584 if (!clause)
585 fprintf (f, "true");
586 for (i = 0; i < NUM_CONDITIONS; i++)
587 if (clause & (1 << i))
588 {
589 if (found)
590 fprintf (f, " || ");
591 found = true;
592 dump_condition (f, conds, i);
593 }
594 fprintf (f, ")");
595 }
596
597
598 /* Dump predicate PREDICATE. */
599
600 static void
601 dump_predicate (FILE *f, conditions conds, struct predicate *pred)
602 {
603 int i;
604 if (true_predicate_p (pred))
605 dump_clause (f, conds, 0);
606 else
607 for (i = 0; pred->clause[i]; i++)
608 {
609 if (i)
610 fprintf (f, " && ");
611 dump_clause (f, conds, pred->clause[i]);
612 }
613 fprintf (f, "\n");
614 }
615
616
617 /* Dump inline hints. */
618 void
619 dump_inline_hints (FILE *f, inline_hints hints)
620 {
621 if (!hints)
622 return;
623 fprintf (f, "inline hints:");
624 if (hints & INLINE_HINT_indirect_call)
625 {
626 hints &= ~INLINE_HINT_indirect_call;
627 fprintf (f, " indirect_call");
628 }
629 if (hints & INLINE_HINT_loop_iterations)
630 {
631 hints &= ~INLINE_HINT_loop_iterations;
632 fprintf (f, " loop_iterations");
633 }
634 if (hints & INLINE_HINT_loop_stride)
635 {
636 hints &= ~INLINE_HINT_loop_stride;
637 fprintf (f, " loop_stride");
638 }
639 if (hints & INLINE_HINT_same_scc)
640 {
641 hints &= ~INLINE_HINT_same_scc;
642 fprintf (f, " same_scc");
643 }
644 if (hints & INLINE_HINT_in_scc)
645 {
646 hints &= ~INLINE_HINT_in_scc;
647 fprintf (f, " in_scc");
648 }
649 if (hints & INLINE_HINT_cross_module)
650 {
651 hints &= ~INLINE_HINT_cross_module;
652 fprintf (f, " cross_module");
653 }
654 if (hints & INLINE_HINT_declared_inline)
655 {
656 hints &= ~INLINE_HINT_declared_inline;
657 fprintf (f, " declared_inline");
658 }
659 if (hints & INLINE_HINT_array_index)
660 {
661 hints &= ~INLINE_HINT_array_index;
662 fprintf (f, " array_index");
663 }
664 if (hints & INLINE_HINT_known_hot)
665 {
666 hints &= ~INLINE_HINT_known_hot;
667 fprintf (f, " known_hot");
668 }
669 gcc_assert (!hints);
670 }
671
672
673 /* Record SIZE and TIME under condition PRED into the inline summary. */
674
675 static void
676 account_size_time (struct inline_summary *summary, int size, int time,
677 struct predicate *pred)
678 {
679 size_time_entry *e;
680 bool found = false;
681 int i;
682
683 if (false_predicate_p (pred))
684 return;
685
686 /* We need to create initial empty unconitional clause, but otherwie
687 we don't need to account empty times and sizes. */
688 if (!size && !time && summary->entry)
689 return;
690
691 /* Watch overflow that might result from insane profiles. */
692 if (time > MAX_TIME * INLINE_TIME_SCALE)
693 time = MAX_TIME * INLINE_TIME_SCALE;
694 gcc_assert (time >= 0);
695
696 for (i = 0; vec_safe_iterate (summary->entry, i, &e); i++)
697 if (predicates_equal_p (&e->predicate, pred))
698 {
699 found = true;
700 break;
701 }
702 if (i == 256)
703 {
704 i = 0;
705 found = true;
706 e = &(*summary->entry)[0];
707 gcc_assert (!e->predicate.clause[0]);
708 if (dump_file && (dump_flags & TDF_DETAILS))
709 fprintf (dump_file,
710 "\t\tReached limit on number of entries, "
711 "ignoring the predicate.");
712 }
713 if (dump_file && (dump_flags & TDF_DETAILS) && (time || size))
714 {
715 fprintf (dump_file,
716 "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate:",
717 ((double) size) / INLINE_SIZE_SCALE,
718 ((double) time) / INLINE_TIME_SCALE, found ? "" : "new ");
719 dump_predicate (dump_file, summary->conds, pred);
720 }
721 if (!found)
722 {
723 struct size_time_entry new_entry;
724 new_entry.size = size;
725 new_entry.time = time;
726 new_entry.predicate = *pred;
727 vec_safe_push (summary->entry, new_entry);
728 }
729 else
730 {
731 e->size += size;
732 e->time += time;
733 if (e->time > MAX_TIME * INLINE_TIME_SCALE)
734 e->time = MAX_TIME * INLINE_TIME_SCALE;
735 }
736 }
737
/* We proved E to be unreachable, redirect it to __builtin_unreachable.  */

static struct cgraph_edge *
redirect_to_unreachable (struct cgraph_edge *e)
{
  /* Remember the callee only if E was already inlined (inline_failed is
     clear); it is removed together with its inline clones below.  */
  struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL;
  struct cgraph_node *target = cgraph_node::get_create
		  (builtin_decl_implicit (BUILT_IN_UNREACHABLE));

  /* Speculative, indirect and direct edges each need a different
     redirection primitive.  */
  if (e->speculative)
    e = e->resolve_speculation (target->decl);
  else if (!e->callee)
    e->make_direct (target);
  else
    e->redirect_callee (target);
  struct inline_edge_summary *es = inline_edge_summary (e);
  /* The edge can never be executed; zero out its profile and cost.  */
  e->inline_failed = CIF_UNREACHABLE;
  e->frequency = 0;
  e->count = 0;
  es->call_stmt_size = 0;
  es->call_stmt_time = 0;
  if (callee)
    callee->remove_symbol_and_inline_clones ();
  return e;
}
763
/* Set predicate for edge E.  A NULL or true PREDICATE clears the stored
   predicate; a false one redirects the edge away entirely.  */

static void
edge_set_predicate (struct cgraph_edge *e, struct predicate *predicate)
{
  /* If the edge is determined to be never executed, redirect it
     to BUILTIN_UNREACHABLE to save inliner from inlining into it.  */
  if (predicate && false_predicate_p (predicate)
      /* When handling speculative edges, we need to do the redirection
	 just once.  Do it always on the direct edge, so we do not
	 attempt to resolve speculation while duplicating the edge.  */
      && (!e->speculative || e->callee))
    e = redirect_to_unreachable (e);

  /* Note that redirect_to_unreachable above may have replaced E, so fetch
     the summary only now.  */
  struct inline_edge_summary *es = inline_edge_summary (e);
  if (predicate && !true_predicate_p (predicate))
    {
      if (!es->predicate)
	es->predicate = edge_predicate_pool.allocate ();
      *es->predicate = *predicate;
    }
  else
    {
      /* A true (or absent) predicate is represented by a NULL slot.  */
      if (es->predicate)
	edge_predicate_pool.remove (es->predicate);
      es->predicate = NULL;
    }
}
792
793 /* Set predicate for hint *P. */
794
795 static void
796 set_hint_predicate (struct predicate **p, struct predicate new_predicate)
797 {
798 if (false_predicate_p (&new_predicate) || true_predicate_p (&new_predicate))
799 {
800 if (*p)
801 edge_predicate_pool.remove (*p);
802 *p = NULL;
803 }
804 else
805 {
806 if (!*p)
807 *p = edge_predicate_pool.allocate ();
808 **p = new_predicate;
809 }
810 }
811
812
/* KNOWN_VALS is partial mapping of parameters of NODE to constant values.
   KNOWN_AGGS is a vector of aggregate jump functions for each parameter.
   Return clause of possible truths.  When INLINE_P is true, assume that we are
   inlining.

   An ERROR_MARK value stands for a compile time invariant (not constant,
   but unchanged) argument.  */

static clause_t
evaluate_conditions_for_known_args (struct cgraph_node *node,
				    bool inline_p,
				    vec<tree> known_vals,
				    vec<ipa_agg_jump_function_p>
				    known_aggs)
{
  clause_t clause = inline_p ? 0 : 1 << predicate_not_inlined_condition;
  struct inline_summary *info = inline_summaries->get (node);
  int i;
  struct condition *c;

  for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
    {
      tree val;
      tree res;

      /* We allow call stmt to have fewer arguments than the callee function
         (especially for K&R style programs).  So bound check here (we assume
         known_aggs vector, if non-NULL, has the same length as
         known_vals).  */
      gcc_checking_assert (!known_aggs.exists ()
			   || (known_vals.length () == known_aggs.length ()));
      if (c->operand_num >= (int) known_vals.length ())
	{
	  /* Nothing is known about the argument; the condition may hold.  */
	  clause |= 1 << (i + predicate_first_dynamic_condition);
	  continue;
	}

      if (c->agg_contents)
	{
	  struct ipa_agg_jump_function *agg;

	  /* For a by-value aggregate, an ERROR_MARK (unchanged) scalar
	     means the CHANGED condition is known false; leave its bit
	     unset.  */
	  if (c->code == CHANGED
	      && !c->by_ref
	      && (known_vals[c->operand_num] == error_mark_node))
	    continue;

	  if (known_aggs.exists ())
	    {
	      agg = known_aggs[c->operand_num];
	      val = ipa_find_agg_cst_for_param (agg, c->offset, c->by_ref);
	    }
	  else
	    val = NULL_TREE;
	}
      else
	{
	  val = known_vals[c->operand_num];
	  if (val == error_mark_node && c->code != CHANGED)
	    val = NULL_TREE;
	}

      if (!val)
	{
	  /* Unknown value: conservatively assume the condition may be
	     true.  */
	  clause |= 1 << (i + predicate_first_dynamic_condition);
	  continue;
	}
      /* A known value disproves IS_NOT_CONSTANT and CHANGED outright.  */
      if (c->code == IS_NOT_CONSTANT || c->code == CHANGED)
	continue;

      if (operand_equal_p (TYPE_SIZE (TREE_TYPE (c->val)),
			   TYPE_SIZE (TREE_TYPE (val)), 0))
	{
	  /* Try to fold the comparison; only a provably-false result lets
	     us drop the condition from the clause.  */
	  val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (c->val), val);

	  res = val
	    ? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
	    : NULL;

	  if (res && integer_zerop (res))
	    continue;
	}
      clause |= 1 << (i + predicate_first_dynamic_condition);
    }
  return clause;
}
897
898
/* Work out what conditions might be true at invocation of E.
   Store the clause of possible truths in *CLAUSE_PTR and, for each non-NULL
   output pointer, the known constant argument values, known polymorphic
   call contexts and known aggregate jump functions.  Ownership of the
   created vectors passes to the caller.  */

static void
evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
			      clause_t *clause_ptr,
			      vec<tree> *known_vals_ptr,
			      vec<ipa_polymorphic_call_context>
			      *known_contexts_ptr,
			      vec<ipa_agg_jump_function_p> *known_aggs_ptr)
{
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();
  struct inline_summary *info = inline_summaries->get (callee);
  vec<tree> known_vals = vNULL;
  vec<ipa_agg_jump_function_p> known_aggs = vNULL;

  if (clause_ptr)
    *clause_ptr = inline_p ? 0 : 1 << predicate_not_inlined_condition;
  if (known_vals_ptr)
    known_vals_ptr->create (0);
  if (known_contexts_ptr)
    known_contexts_ptr->create (0);

  /* Preferred path: derive argument values from IPA jump functions.  */
  if (ipa_node_params_sum
      && !e->call_stmt_cannot_inline_p
      && ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr))
    {
      struct ipa_node_params *parms_info;
      struct ipa_edge_args *args = IPA_EDGE_REF (e);
      struct inline_edge_summary *es = inline_edge_summary (e);
      int i, count = ipa_get_cs_argument_count (args);

      /* Jump functions are interpreted relative to the root of the
	 inlined-to tree.  */
      if (e->caller->global.inlined_to)
	parms_info = IPA_NODE_REF (e->caller->global.inlined_to);
      else
	parms_info = IPA_NODE_REF (e->caller);

      if (count && (info->conds || known_vals_ptr))
	known_vals.safe_grow_cleared (count);
      if (count && (info->conds || known_aggs_ptr))
	known_aggs.safe_grow_cleared (count);
      if (count && known_contexts_ptr)
	known_contexts_ptr->safe_grow_cleared (count);

      for (i = 0; i < count; i++)
	{
	  struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
	  tree cst = ipa_value_from_jfunc (parms_info, jf);

	  /* Fall back to reading the constant off the call statement.  */
	  if (!cst && e->call_stmt
	      && i < (int)gimple_call_num_args (e->call_stmt))
	    {
	      cst = gimple_call_arg (e->call_stmt, i);
	      if (!is_gimple_min_invariant (cst))
		cst = NULL;
	    }
	  if (cst)
	    {
	      gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
	      if (known_vals.exists ())
		known_vals[i] = cst;
	    }
	  else if (inline_p && !es->param[i].change_prob)
	    /* Not a constant, but provably never changes; mark it with the
	       special ERROR_MARK value.  */
	    known_vals[i] = error_mark_node;

	  if (known_contexts_ptr)
	    (*known_contexts_ptr)[i] = ipa_context_from_jfunc (parms_info, e,
							       i, jf);
	  /* TODO: When IPA-CP starts propagating and merging aggregate jump
	     functions, use its knowledge of the caller too, just like the
	     scalar case above.  */
	  known_aggs[i] = &jf->agg;
	}
    }
  /* Without IPA info, constants can still be read directly from the call
     statement.  */
  else if (e->call_stmt && !e->call_stmt_cannot_inline_p
	   && ((clause_ptr && info->conds) || known_vals_ptr))
    {
      int i, count = (int)gimple_call_num_args (e->call_stmt);

      if (count && (info->conds || known_vals_ptr))
	known_vals.safe_grow_cleared (count);
      for (i = 0; i < count; i++)
	{
	  tree cst = gimple_call_arg (e->call_stmt, i);
	  if (!is_gimple_min_invariant (cst))
	    cst = NULL;
	  if (cst)
	    known_vals[i] = cst;
	}
    }

  if (clause_ptr)
    *clause_ptr = evaluate_conditions_for_known_args (callee, inline_p,
						      known_vals, known_aggs);

  /* Hand the vectors to the caller, or release them if unwanted.  */
  if (known_vals_ptr)
    *known_vals_ptr = known_vals;
  else
    known_vals.release ();

  if (known_aggs_ptr)
    *known_aggs_ptr = known_aggs;
  else
    known_aggs.release ();
}
1003
1004
1005 /* Allocate the inline summary vector or resize it to cover all cgraph nodes. */
1006
1007 static void
1008 inline_summary_alloc (void)
1009 {
1010 if (!edge_removal_hook_holder)
1011 edge_removal_hook_holder =
1012 symtab->add_edge_removal_hook (&inline_edge_removal_hook, NULL);
1013 if (!edge_duplication_hook_holder)
1014 edge_duplication_hook_holder =
1015 symtab->add_edge_duplication_hook (&inline_edge_duplication_hook, NULL);
1016
1017 if (!inline_summaries)
1018 inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);
1019
1020 if (inline_edge_summary_vec.length () <= (unsigned) symtab->edges_max_uid)
1021 inline_edge_summary_vec.safe_grow_cleared (symtab->edges_max_uid + 1);
1022 }
1023
1024 /* We are called multiple time for given function; clear
1025 data from previous run so they are not cumulated. */
1026
1027 static void
1028 reset_inline_edge_summary (struct cgraph_edge *e)
1029 {
1030 if (e->uid < (int) inline_edge_summary_vec.length ())
1031 {
1032 struct inline_edge_summary *es = inline_edge_summary (e);
1033
1034 es->call_stmt_size = es->call_stmt_time = 0;
1035 if (es->predicate)
1036 edge_predicate_pool.remove (es->predicate);
1037 es->predicate = NULL;
1038 es->param.release ();
1039 }
1040 }
1041
1042 /* We are called multiple time for given function; clear
1043 data from previous run so they are not cumulated. */
1044
1045 static void
1046 reset_inline_summary (struct cgraph_node *node,
1047 inline_summary *info)
1048 {
1049 struct cgraph_edge *e;
1050
1051 info->self_size = info->self_time = 0;
1052 info->estimated_stack_size = 0;
1053 info->estimated_self_stack_size = 0;
1054 info->stack_frame_offset = 0;
1055 info->size = 0;
1056 info->time = 0;
1057 info->growth = 0;
1058 info->scc_no = 0;
1059 if (info->loop_iterations)
1060 {
1061 edge_predicate_pool.remove (info->loop_iterations);
1062 info->loop_iterations = NULL;
1063 }
1064 if (info->loop_stride)
1065 {
1066 edge_predicate_pool.remove (info->loop_stride);
1067 info->loop_stride = NULL;
1068 }
1069 if (info->array_index)
1070 {
1071 edge_predicate_pool.remove (info->array_index);
1072 info->array_index = NULL;
1073 }
1074 vec_free (info->conds);
1075 vec_free (info->entry);
1076 for (e = node->callees; e; e = e->next_callee)
1077 reset_inline_edge_summary (e);
1078 for (e = node->indirect_calls; e; e = e->next_callee)
1079 reset_inline_edge_summary (e);
1080 }
1081
/* Hook that is called by cgraph.c when a node is removed.  Release all data
   owned by NODE's summary INFO.  */

void
inline_summary_t::remove (cgraph_node *node, inline_summary *info)
{
  reset_inline_summary (node, info);
}
1089
1090 /* Remap predicate P of former function to be predicate of duplicated function.
1091 POSSIBLE_TRUTHS is clause of possible truths in the duplicated node,
1092 INFO is inline summary of the duplicated node. */
1093
1094 static struct predicate
1095 remap_predicate_after_duplication (struct predicate *p,
1096 clause_t possible_truths,
1097 struct inline_summary *info)
1098 {
1099 struct predicate new_predicate = true_predicate ();
1100 int j;
1101 for (j = 0; p->clause[j]; j++)
1102 if (!(possible_truths & p->clause[j]))
1103 {
1104 new_predicate = false_predicate ();
1105 break;
1106 }
1107 else
1108 add_clause (info->conds, &new_predicate,
1109 possible_truths & p->clause[j]);
1110 return new_predicate;
1111 }
1112
1113 /* Same as remap_predicate_after_duplication but handle hint predicate *P.
1114 Additionally care about allocating new memory slot for updated predicate
1115 and set it to NULL when it becomes true or false (and thus uninteresting).
1116 */
1117
1118 static void
1119 remap_hint_predicate_after_duplication (struct predicate **p,
1120 clause_t possible_truths,
1121 struct inline_summary *info)
1122 {
1123 struct predicate new_predicate;
1124
1125 if (!*p)
1126 return;
1127
1128 new_predicate = remap_predicate_after_duplication (*p,
1129 possible_truths, info);
1130 /* We do not want to free previous predicate; it is used by node origin. */
1131 *p = NULL;
1132 set_hint_predicate (p, new_predicate);
1133 }
1134
1135
/* Hook that is called by cgraph.c when a node is duplicated.  Copy the
   inline summary of SRC into INFO (the summary of DST), remapping all
   predicates to account for parameter replacements made in the clone.  */
void
inline_summary_t::duplicate (cgraph_node *src,
			     cgraph_node *dst,
			     inline_summary *,
			     inline_summary *info)
{
  inline_summary_alloc ();
  memcpy (info, inline_summaries->get (src), sizeof (inline_summary));
  /* TODO: as an optimization, we may avoid copying conditions
     that are known to be false or true.  */
  info->conds = vec_safe_copy (info->conds);

  /* When there are any replacements in the function body, see if we can figure
     out that something was optimized out.  */
  if (ipa_node_params_sum && dst->clone.tree_map)
    {
      /* Keep the old size/time vector; a remapped one is rebuilt below.  */
      vec<size_time_entry, va_gc> *entry = info->entry;
      /* Use SRC parm info since it may not be copied yet.  */
      struct ipa_node_params *parms_info = IPA_NODE_REF (src);
      vec<tree> known_vals = vNULL;
      int count = ipa_get_param_count (parms_info);
      int i, j;
      clause_t possible_truths;
      struct predicate true_pred = true_predicate ();
      size_time_entry *e;
      int optimized_out_size = 0;
      bool inlined_to_p = false;
      struct cgraph_edge *edge, *next;

      info->entry = 0;
      /* Collect known constant values of parameters from the clone's
	 replacement map.  */
      known_vals.safe_grow_cleared (count);
      for (i = 0; i < count; i++)
	{
	  struct ipa_replace_map *r;

	  for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
	    {
	      if (((!r->old_tree && r->parm_num == i)
		   || (r->old_tree && r->old_tree == ipa_get_param (parms_info, i)))
		  && r->replace_p && !r->ref_p)
		{
		  known_vals[i] = r->new_tree;
		  break;
		}
	    }
	}
      possible_truths = evaluate_conditions_for_known_args (dst, false,
							    known_vals,
							    vNULL);
      known_vals.release ();

      /* Start the new size/time vector with an always-true entry.  */
      account_size_time (info, 0, 0, &true_pred);

      /* Remap size_time vectors.
	 Simplify the predicate by pruning out alternatives that are known
	 to be false.
	 TODO: as an optimization, we can also eliminate conditions known
	 to be true.  */
      for (i = 0; vec_safe_iterate (entry, i, &e); i++)
	{
	  struct predicate new_predicate;
	  new_predicate = remap_predicate_after_duplication (&e->predicate,
							     possible_truths,
							     info);
	  if (false_predicate_p (&new_predicate))
	    optimized_out_size += e->size;
	  else
	    account_size_time (info, e->size, e->time, &new_predicate);
	}

      /* Remap edge predicates with the same simplification as above.
	 Also copy constantness arrays.  */
      for (edge = dst->callees; edge; edge = next)
	{
	  struct predicate new_predicate;
	  struct inline_edge_summary *es = inline_edge_summary (edge);
	  /* Fetch NEXT now; edge_set_predicate may remove EDGE when its
	     predicate becomes false.  */
	  next = edge->next_callee;

	  if (!edge->inline_failed)
	    inlined_to_p = true;
	  if (!es->predicate)
	    continue;
	  new_predicate = remap_predicate_after_duplication (es->predicate,
							     possible_truths,
							     info);
	  if (false_predicate_p (&new_predicate)
	      && !false_predicate_p (es->predicate))
	    optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
	  edge_set_predicate (edge, &new_predicate);
	}

      /* Remap indirect edge predicates with the same simplification as above.
	 Also copy constantness arrays.  */
      for (edge = dst->indirect_calls; edge; edge = next)
	{
	  struct predicate new_predicate;
	  struct inline_edge_summary *es = inline_edge_summary (edge);
	  next = edge->next_callee;

	  /* Indirect edges are never inlined.  */
	  gcc_checking_assert (edge->inline_failed);
	  if (!es->predicate)
	    continue;
	  new_predicate = remap_predicate_after_duplication (es->predicate,
							     possible_truths,
							     info);
	  if (false_predicate_p (&new_predicate)
	      && !false_predicate_p (es->predicate))
	    optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
	  edge_set_predicate (edge, &new_predicate);
	}
      remap_hint_predicate_after_duplication (&info->loop_iterations,
					      possible_truths, info);
      remap_hint_predicate_after_duplication (&info->loop_stride,
					      possible_truths, info);
      remap_hint_predicate_after_duplication (&info->array_index,
					      possible_truths, info);

      /* If inliner or someone after inliner will ever start producing
	 non-trivial clones, we will get trouble with lack of information
	 about updating self sizes, because size vectors already contains
	 sizes of the callees.  */
      gcc_assert (!inlined_to_p || !optimized_out_size);
    }
  else
    {
      /* No replacements: plain copy, but hint predicates must get their
	 own pool slots instead of sharing them with the origin.  */
      info->entry = vec_safe_copy (info->entry);
      if (info->loop_iterations)
	{
	  predicate p = *info->loop_iterations;
	  info->loop_iterations = NULL;
	  set_hint_predicate (&info->loop_iterations, p);
	}
      if (info->loop_stride)
	{
	  predicate p = *info->loop_stride;
	  info->loop_stride = NULL;
	  set_hint_predicate (&info->loop_stride, p);
	}
      if (info->array_index)
	{
	  predicate p = *info->array_index;
	  info->array_index = NULL;
	  set_hint_predicate (&info->array_index, p);
	}
    }
  if (!dst->global.inlined_to)
    inline_update_overall_summary (dst);
}
1285
1286
/* Hook that is called by cgraph.c when an edge is duplicated.  */

static void
inline_edge_duplication_hook (struct cgraph_edge *src,
			      struct cgraph_edge *dst,
			      ATTRIBUTE_UNUSED void *data)
{
  struct inline_edge_summary *info;
  struct inline_edge_summary *srcinfo;
  inline_summary_alloc ();
  info = inline_edge_summary (dst);
  srcinfo = inline_edge_summary (src);
  memcpy (info, srcinfo, sizeof (struct inline_edge_summary));
  /* The raw memcpy above would share the predicate pointer with SRC;
     clear it and let edge_set_predicate give DST its own copy.  */
  info->predicate = NULL;
  edge_set_predicate (dst, srcinfo->predicate);
  info->param = srcinfo->param.copy ();
  /* When an indirect edge became a direct call, drop the extra cost of
     the indirect call from the statement size/time estimates.  */
  if (!dst->indirect_unknown_callee && src->indirect_unknown_callee)
    {
      info->call_stmt_size -= (eni_size_weights.indirect_call_cost
			       - eni_size_weights.call_cost);
      info->call_stmt_time -= (eni_time_weights.indirect_call_cost
			       - eni_time_weights.call_cost);
    }
}
1311
1312
/* Keep edge cache consistent across edge removal.  Drop any cached
   growth estimate and the summary associated with EDGE.  */

static void
inline_edge_removal_hook (struct cgraph_edge *edge,
			  void *data ATTRIBUTE_UNUSED)
{
  if (edge_growth_cache.exists ())
    reset_edge_growth_cache (edge);
  reset_inline_edge_summary (edge);
}
1323
1324
/* Initialize growth caches.  Size the edge growth cache to cover all
   edge uids currently in the symbol table.  */

void
initialize_growth_caches (void)
{
  if (symtab->edges_max_uid)
    edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
}
1333
1334
/* Free growth caches.  */

void
free_growth_caches (void)
{
  edge_growth_cache.release ();
}
1342
1343
/* Dump edge summaries associated to NODE and recursively to all clones.
   Indent by INDENT.  INFO supplies the condition vector used when
   printing predicates.  */

static void
dump_inline_edge_summary (FILE *f, int indent, struct cgraph_node *node,
			  struct inline_summary *info)
{
  struct cgraph_edge *edge;
  /* Direct calls first.  */
  for (edge = node->callees; edge; edge = edge->next_callee)
    {
      struct inline_edge_summary *es = inline_edge_summary (edge);
      struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
      int i;

      fprintf (f,
	       "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i"
	       " time: %2i callee size:%2i stack:%2i",
	       indent, "", callee->name (), callee->order,
	       !edge->inline_failed
	       ? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
	       indent, "", es->loop_depth, edge->frequency,
	       es->call_stmt_size, es->call_stmt_time,
	       (int) inline_summaries->get (callee)->size / INLINE_SIZE_SCALE,
	       (int) inline_summaries->get (callee)->estimated_stack_size);

      if (es->predicate)
	{
	  fprintf (f, " predicate: ");
	  dump_predicate (f, info->conds, es->predicate);
	}
      else
	fprintf (f, "\n");
      /* Per-parameter change probabilities, when recorded.  */
      if (es->param.exists ())
	for (i = 0; i < (int) es->param.length (); i++)
	  {
	    int prob = es->param[i].change_prob;

	    if (!prob)
	      fprintf (f, "%*s op%i is compile time invariant\n",
		       indent + 2, "", i);
	    else if (prob != REG_BR_PROB_BASE)
	      fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
		       prob * 100.0 / REG_BR_PROB_BASE);
	  }
      /* For inlined edges, recurse into the callee body.  */
      if (!edge->inline_failed)
	{
	  fprintf (f, "%*sStack frame offset %i, callee self size %i,"
		   " callee size %i\n",
		   indent + 2, "",
		   (int) inline_summaries->get (callee)->stack_frame_offset,
		   (int) inline_summaries->get (callee)->estimated_self_stack_size,
		   (int) inline_summaries->get (callee)->estimated_stack_size);
	  dump_inline_edge_summary (f, indent + 2, callee, info);
	}
    }
  /* Then indirect calls, which are never inlined.  */
  for (edge = node->indirect_calls; edge; edge = edge->next_callee)
    {
      struct inline_edge_summary *es = inline_edge_summary (edge);
      fprintf (f, "%*sindirect call loop depth:%2i freq:%4i size:%2i"
	       " time: %2i",
	       indent, "",
	       es->loop_depth,
	       edge->frequency, es->call_stmt_size, es->call_stmt_time);
      if (es->predicate)
	{
	  fprintf (f, "predicate: ");
	  dump_predicate (f, info->conds, es->predicate);
	}
      else
	fprintf (f, "\n");
    }
}
1416
1417
/* Dump inline summary of NODE, including its edge summaries, to F.  */

void
dump_inline_summary (FILE *f, struct cgraph_node *node)
{
  if (node->definition)
    {
      struct inline_summary *s = inline_summaries->get (node);
      size_time_entry *e;
      int i;
      fprintf (f, "Inline summary for %s/%i", node->name (),
	       node->order);
      if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
	fprintf (f, " always_inline");
      if (s->inlinable)
	fprintf (f, " inlinable");
      if (s->contains_cilk_spawn)
	fprintf (f, " contains_cilk_spawn");
      fprintf (f, "\n self time: %i\n", s->self_time);
      fprintf (f, " global time: %i\n", s->time);
      fprintf (f, " self size: %i\n", s->self_size);
      fprintf (f, " global size: %i\n", s->size);
      fprintf (f, " min size: %i\n", s->min_size);
      fprintf (f, " self stack: %i\n",
	       (int) s->estimated_self_stack_size);
      fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size);
      if (s->growth)
	fprintf (f, " estimated growth:%i\n", (int) s->growth);
      if (s->scc_no)
	fprintf (f, " In SCC: %i\n", (int) s->scc_no);
      /* Size/time entries are scaled; print them as fractions.  */
      for (i = 0; vec_safe_iterate (s->entry, i, &e); i++)
	{
	  fprintf (f, " size:%f, time:%f, predicate:",
		   (double) e->size / INLINE_SIZE_SCALE,
		   (double) e->time / INLINE_TIME_SCALE);
	  dump_predicate (f, s->conds, &e->predicate);
	}
      if (s->loop_iterations)
	{
	  fprintf (f, " loop iterations:");
	  dump_predicate (f, s->conds, s->loop_iterations);
	}
      if (s->loop_stride)
	{
	  fprintf (f, " loop stride:");
	  dump_predicate (f, s->conds, s->loop_stride);
	}
      if (s->array_index)
	{
	  fprintf (f, " array index:");
	  dump_predicate (f, s->conds, s->array_index);
	}
      fprintf (f, " calls:\n");
      dump_inline_edge_summary (f, 4, node, s);
      fprintf (f, "\n");
    }
}
1473
/* Dump inline summary of NODE to stderr; convenience wrapper for use
   from the debugger.  */

DEBUG_FUNCTION void
debug_inline_summary (struct cgraph_node *node)
{
  dump_inline_summary (stderr, node);
}
1479
/* Dump inline summaries of all defined functions to F.  Inline clones
   are covered recursively via their inline-to root.  */

void
dump_inline_summaries (FILE *f)
{
  struct cgraph_node *node;

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to)
      dump_inline_summary (f, node);
}
1489
/* Give initial reasons why inlining would fail on EDGE.  This gets either
   nullified or usually overwritten by more precise reasons later.  */

void
initialize_inline_failed (struct cgraph_edge *e)
{
  struct cgraph_node *callee = e->callee;

  /* Reasons are checked in rough order of severity.  */
  if (e->indirect_unknown_callee)
    e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
  else if (!callee->definition)
    e->inline_failed = CIF_BODY_NOT_AVAILABLE;
  else if (callee->local.redefined_extern_inline)
    e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
  else if (e->call_stmt_cannot_inline_p)
    e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
  else if (cfun && fn_contains_cilk_spawn_p (cfun))
    /* We can't inline if the function is spawning a function.  */
    e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
  else
    e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
}
1512
/* Callback of walk_aliased_vdefs.  Flags that it has been invoked to the
   boolean variable pointed to by DATA.  Returning true terminates the
   walk after the first hit.  */

static bool
mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
	       void *data)
{
  bool *b = (bool *) data;
  *b = true;
  return true;
}
1524
/* If OP refers to value of function parameter, return the corresponding
   parameter.  STMT is the statement using OP; its virtual operand anchors
   the alias walk for non-SSA parameters.  */

static tree
unmodified_parm_1 (gimple stmt, tree op)
{
  /* SSA_NAME referring to parm default def?  */
  if (TREE_CODE (op) == SSA_NAME
      && SSA_NAME_IS_DEFAULT_DEF (op)
      && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
    return SSA_NAME_VAR (op);
  /* Non-SSA parm reference?  */
  if (TREE_CODE (op) == PARM_DECL)
    {
      bool modified = false;

      /* Walk preceding virtual definitions to verify the parameter is
	 not written to before STMT.  NOTE(review): this walk is unbounded
	 here — confirm whether a walk limit should apply.  */
      ao_ref refd;
      ao_ref_init (&refd, op);
      walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, &modified,
			  NULL);
      if (!modified)
	return op;
    }
  return NULL_TREE;
}
1550
1551 /* If OP refers to value of function parameter, return the corresponding
1552 parameter. Also traverse chains of SSA register assignments. */
1553
1554 static tree
1555 unmodified_parm (gimple stmt, tree op)
1556 {
1557 tree res = unmodified_parm_1 (stmt, op);
1558 if (res)
1559 return res;
1560
1561 if (TREE_CODE (op) == SSA_NAME
1562 && !SSA_NAME_IS_DEFAULT_DEF (op)
1563 && gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1564 return unmodified_parm (SSA_NAME_DEF_STMT (op),
1565 gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)));
1566 return NULL_TREE;
1567 }
1568
/* If OP refers to a value of a function parameter or value loaded from an
   aggregate passed to a parameter (either by value or reference), return TRUE
   and store the number of the parameter to *INDEX_P and information whether
   and how it has been loaded from an aggregate into *AGGPOS.  INFO describes
   the function parameters, STMT is the statement in which OP is used or
   loaded.  */

static bool
unmodified_parm_or_parm_agg_item (struct func_body_info *fbi,
				  gimple stmt, tree op, int *index_p,
				  struct agg_position_info *aggpos)
{
  tree res = unmodified_parm_1 (stmt, op);

  gcc_checking_assert (aggpos);
  /* Direct (non-aggregate) parameter use.  */
  if (res)
    {
      *index_p = ipa_get_param_decl_index (fbi->info, res);
      if (*index_p < 0)
	return false;
      aggpos->agg_contents = false;
      aggpos->by_ref = false;
      return true;
    }

  /* Look through a chain of SSA copies to find the underlying load.  */
  if (TREE_CODE (op) == SSA_NAME)
    {
      if (SSA_NAME_IS_DEFAULT_DEF (op)
	  || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
	return false;
      stmt = SSA_NAME_DEF_STMT (op);
      op = gimple_assign_rhs1 (stmt);
      if (!REFERENCE_CLASS_P (op))
	return unmodified_parm_or_parm_agg_item (fbi, stmt, op, index_p,
						 aggpos);
    }

  /* Otherwise OP must be a load from an aggregate passed to a parameter.  */
  aggpos->agg_contents = true;
  return ipa_load_from_parm_agg (fbi, fbi->info->descriptors,
				 stmt, op, index_p, &aggpos->offset,
				 NULL, &aggpos->by_ref);
}
1611
/* See if statement might disappear after inlining.
   0 - means not eliminated
   1 - half of statements goes away
   2 - for sure it is eliminated.
   We are not terribly sophisticated, basically looking for simple abstraction
   penalty wrappers.  */

static int
eliminated_by_inlining_prob (gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);
  enum tree_code rhs_code;

  if (!optimize)
    return 0;

  switch (code)
    {
    case GIMPLE_RETURN:
      return 2;
    case GIMPLE_ASSIGN:
      if (gimple_num_ops (stmt) != 2)
	return 0;

      rhs_code = gimple_assign_rhs_code (stmt);

      /* Casts of parameters, loads from parameters passed by reference
	 and stores to return value or parameters are often free after
	 inlining due to SRA and further combining.
	 Assume that half of statements goes away.  */
      if (CONVERT_EXPR_CODE_P (rhs_code)
	  || rhs_code == VIEW_CONVERT_EXPR
	  || rhs_code == ADDR_EXPR
	  || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
	{
	  tree rhs = gimple_assign_rhs1 (stmt);
	  tree lhs = gimple_assign_lhs (stmt);
	  tree inner_rhs = get_base_address (rhs);
	  tree inner_lhs = get_base_address (lhs);
	  bool rhs_free = false;
	  bool lhs_free = false;

	  /* get_base_address returns NULL for non-reference trees;
	     fall back to the operand itself.  */
	  if (!inner_rhs)
	    inner_rhs = rhs;
	  if (!inner_lhs)
	    inner_lhs = lhs;

	  /* Reads of parameter are expected to be free.  */
	  if (unmodified_parm (stmt, inner_rhs))
	    rhs_free = true;
	  /* Match expressions of form &this->field.  Those will most likely
	     combine with something upstream after inlining.  */
	  else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
	    {
	      tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
	      if (TREE_CODE (op) == PARM_DECL)
		rhs_free = true;
	      else if (TREE_CODE (op) == MEM_REF
		       && unmodified_parm (stmt, TREE_OPERAND (op, 0)))
		rhs_free = true;
	    }

	  /* When parameter is not SSA register because its address is taken
	     and it is just copied into one, the statement will be completely
	     free after inlining (we will copy propagate backward).  */
	  if (rhs_free && is_gimple_reg (lhs))
	    return 2;

	  /* Reads of parameters passed by reference
	     expected to be free (i.e. optimized out after inlining).  */
	  if (TREE_CODE (inner_rhs) == MEM_REF
	      && unmodified_parm (stmt, TREE_OPERAND (inner_rhs, 0)))
	    rhs_free = true;

	  /* Copying parameter passed by reference into gimple register is
	     probably also going to copy propagate, but we can't be quite
	     sure.  */
	  if (rhs_free && is_gimple_reg (lhs))
	    lhs_free = true;

	  /* Writes to parameters, parameters passed by value and return value
	     (either directly or passed via invisible reference) are free.

	     TODO: We ought to handle testcase like
	     struct a {int a,b;};
	     struct a
	     returnstruct (void)
	     {
	       struct a a = {1,2};
	       return a;
	     }

	     This translate into:

	     returnstruct ()
	     {
	       int a$b;
	       int a$a;
	       struct a a;
	       struct a D.2739;

	     <bb 2>:
	       D.2739.a = 1;
	       D.2739.b = 2;
	       return D.2739;

	     }
	     For that we either need to copy ipa-split logic detecting writes
	     to return value.  */
	  if (TREE_CODE (inner_lhs) == PARM_DECL
	      || TREE_CODE (inner_lhs) == RESULT_DECL
	      || (TREE_CODE (inner_lhs) == MEM_REF
		  && (unmodified_parm (stmt, TREE_OPERAND (inner_lhs, 0))
		      || (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
			  && SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
			  && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
						      (inner_lhs,
						       0))) == RESULT_DECL))))
	    lhs_free = true;
	  if (lhs_free
	      && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
	    rhs_free = true;
	  if (lhs_free && rhs_free)
	    return 1;
	}
      return 0;
    default:
      return 0;
    }
}
1742
1743
/* If BB ends by a conditional we can turn into predicates, attach
   corresponding predicates to the CFG edges.  SUMMARY collects the
   conditions used by the predicates.  */

static void
set_cond_stmt_execution_predicate (struct func_body_info *fbi,
				   struct inline_summary *summary,
				   basic_block bb)
{
  gimple last;
  tree op;
  int index;
  struct agg_position_info aggpos;
  enum tree_code code, inverted_code;
  edge e;
  edge_iterator ei;
  gimple set_stmt;
  tree op2;

  last = last_stmt (bb);
  if (!last || gimple_code (last) != GIMPLE_COND)
    return;
  /* Only comparisons against interprocedural invariants can become
     predicate conditions.  */
  if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
    return;
  op = gimple_cond_lhs (last);
  /* TODO: handle conditionals like
     var = op0 < 4;
     if (var != 0).  */
  if (unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &aggpos))
    {
      code = gimple_cond_code (last);
      inverted_code = invert_tree_comparison (code, HONOR_NANS (op));

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
				      ? code : inverted_code);
	  /* invert_tree_comparison will return ERROR_MARK on FP
	     comparisons that are not EQ/NE instead of returning proper
	     unordered one.  Be sure it is not confused with NON_CONSTANT.  */
	  if (this_code != ERROR_MARK)
	    {
	      struct predicate p = add_condition (summary, index, &aggpos,
						  this_code,
						  gimple_cond_rhs (last));
	      e->aux = edge_predicate_pool.allocate ();
	      *(struct predicate *) e->aux = p;
	    }
	}
    }

  if (TREE_CODE (op) != SSA_NAME)
    return;
  /* Special case
     if (builtin_constant_p (op))
       constant_code
     else
       nonconstant_code.
     Here we can predicate nonconstant_code.  We can't
     really handle constant_code since we have no predicate
     for this and also the constant code is not known to be
     optimized away when inliner doesn't see operand is constant.
     Other optimizers might think otherwise.  */
  if (gimple_cond_code (last) != NE_EXPR
      || !integer_zerop (gimple_cond_rhs (last)))
    return;
  set_stmt = SSA_NAME_DEF_STMT (op);
  if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
      || gimple_call_num_args (set_stmt) != 1)
    return;
  op2 = gimple_call_arg (set_stmt, 0);
  if (!unmodified_parm_or_parm_agg_item (fbi, set_stmt, op2, &index, &aggpos))
    return;
  /* Only the FALSE edge (the nonconstant path) gets a predicate.  */
  FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE)
    {
      struct predicate p = add_condition (summary, index, &aggpos,
					  IS_NOT_CONSTANT, NULL_TREE);
      e->aux = edge_predicate_pool.allocate ();
      *(struct predicate *) e->aux = p;
    }
}
1824
1825
/* If BB ends by a switch we can turn into predicates, attach corresponding
   predicates to the CFG edges.  */

static void
set_switch_stmt_execution_predicate (struct func_body_info *fbi,
				     struct inline_summary *summary,
				     basic_block bb)
{
  gimple lastg;
  tree op;
  int index;
  struct agg_position_info aggpos;
  edge e;
  edge_iterator ei;
  size_t n;
  size_t case_idx;

  lastg = last_stmt (bb);
  if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
    return;
  gswitch *last = as_a <gswitch *> (lastg);
  op = gimple_switch_index (last);
  if (!unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &aggpos))
    return;

  /* Start every outgoing edge at false; case labels OR their conditions
     in below.  Several cases may share an edge.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      e->aux = edge_predicate_pool.allocate ();
      *(struct predicate *) e->aux = false_predicate ();
    }
  n = gimple_switch_num_labels (last);
  for (case_idx = 0; case_idx < n; ++case_idx)
    {
      tree cl = gimple_switch_label (last, case_idx);
      tree min, max;
      struct predicate p;

      e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
      min = CASE_LOW (cl);
      max = CASE_HIGH (cl);

      /* For default we might want to construct predicate that none
	 of cases is met, but it is bit hard to do not having negations
	 of conditionals handy.  */
      if (!min && !max)
	p = true_predicate ();
      else if (!max)
	/* Single-value case label.  */
	p = add_condition (summary, index, &aggpos, EQ_EXPR, min);
      else
	{
	  /* Case range: MIN <= op AND op <= MAX.  */
	  struct predicate p1, p2;
	  p1 = add_condition (summary, index, &aggpos, GE_EXPR, min);
	  p2 = add_condition (summary, index, &aggpos, LE_EXPR, max);
	  p = and_predicates (summary->conds, &p1, &p2);
	}
      *(struct predicate *) e->aux
	= or_predicates (summary->conds, &p, (struct predicate *) e->aux);
    }
}
1885
1886
/* For each BB in NODE attach to its AUX pointer predicate under
   which it is executable.  */

static void
compute_bb_predicates (struct func_body_info *fbi,
		       struct cgraph_node *node,
		       struct inline_summary *summary)
{
  struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
  bool done = false;
  basic_block bb;

  /* First attach predicates to the outgoing edges of conditionals and
     switches.  */
  FOR_EACH_BB_FN (bb, my_function)
    {
      set_cond_stmt_execution_predicate (fbi, summary, bb);
      set_switch_stmt_execution_predicate (fbi, summary, bb);
    }

  /* Entry block is always executable.  */
  ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
    = edge_predicate_pool.allocate ();
  *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
    = true_predicate ();

  /* A simple dataflow propagation of predicates forward in the CFG.
     TODO: work in reverse postorder.  */
  while (!done)
    {
      done = true;
      FOR_EACH_BB_FN (bb, my_function)
	{
	  /* BB is executable when some predecessor is executable and the
	     predicate on the connecting edge holds: OR over predecessors
	     of (predecessor predicate AND edge predicate).  */
	  struct predicate p = false_predicate ();
	  edge e;
	  edge_iterator ei;
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      if (e->src->aux)
		{
		  struct predicate this_bb_predicate
		    = *(struct predicate *) e->src->aux;
		  if (e->aux)
		    this_bb_predicate
		      = and_predicates (summary->conds, &this_bb_predicate,
					(struct predicate *) e->aux);
		  p = or_predicates (summary->conds, &p, &this_bb_predicate);
		  /* Once the predicate is true it can only stay true.  */
		  if (true_predicate_p (&p))
		    break;
		}
	    }
	  if (false_predicate_p (&p))
	    gcc_assert (!bb->aux);
	  else
	    {
	      if (!bb->aux)
		{
		  done = false;
		  bb->aux = edge_predicate_pool.allocate ();
		  *((struct predicate *) bb->aux) = p;
		}
	      else if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
		{
		  /* This OR operation is needed to ensure monotonous data flow
		     in the case we hit the limit on number of clauses and the
		     and/or operations above give approximate answers.  */
		  p = or_predicates (summary->conds, &p, (struct predicate *)bb->aux);
		  if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
		    {
		      done = false;
		      *((struct predicate *) bb->aux) = p;
		    }
		}
	    }
	}
    }
}
1962
1963
/* We keep info about constantness of SSA names.  */

typedef struct predicate predicate_t;
/* Return predicate specifying when EXPR might have result that is not
   a compile time constant.  NONCONSTANT_NAMES is indexed by SSA name
   version and gives the already-computed predicate for each name.  */

static struct predicate
will_be_nonconstant_expr_predicate (struct ipa_node_params *info,
				    struct inline_summary *summary,
				    tree expr,
				    vec<predicate_t> nonconstant_names)
{
  tree parm;
  int index;

  /* Conversions do not affect constantness; look through them.  */
  while (UNARY_CLASS_P (expr))
    expr = TREE_OPERAND (expr, 0);

  parm = unmodified_parm (NULL, expr);
  if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
    return add_condition (summary, index, NULL, CHANGED, NULL_TREE);
  if (is_gimple_min_invariant (expr))
    return false_predicate ();
  if (TREE_CODE (expr) == SSA_NAME)
    return nonconstant_names[SSA_NAME_VERSION (expr)];
  if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
    {
      /* The result is nonconstant when either operand is.  */
      struct predicate p1 = will_be_nonconstant_expr_predicate
	(info, summary, TREE_OPERAND (expr, 0),
	 nonconstant_names);
      struct predicate p2;
      if (true_predicate_p (&p1))
	return p1;
      p2 = will_be_nonconstant_expr_predicate (info, summary,
					       TREE_OPERAND (expr, 1),
					       nonconstant_names);
      return or_predicates (summary->conds, &p1, &p2);
    }
  else if (TREE_CODE (expr) == COND_EXPR)
    {
      /* OR over all three operands, with early outs once the combined
	 predicate is already true.  */
      struct predicate p1 = will_be_nonconstant_expr_predicate
	(info, summary, TREE_OPERAND (expr, 0),
	 nonconstant_names);
      struct predicate p2;
      if (true_predicate_p (&p1))
	return p1;
      p2 = will_be_nonconstant_expr_predicate (info, summary,
					       TREE_OPERAND (expr, 1),
					       nonconstant_names);
      if (true_predicate_p (&p2))
	return p2;
      p1 = or_predicates (summary->conds, &p1, &p2);
      p2 = will_be_nonconstant_expr_predicate (info, summary,
					       TREE_OPERAND (expr, 2),
					       nonconstant_names);
      return or_predicates (summary->conds, &p1, &p2);
    }
  else
    {
      debug_tree (expr);
      gcc_unreachable ();
    }
  return false_predicate ();
}
2028
2029
/* Return predicate specifying when the STMT might have result that is not
   a compile time constant.  */

static struct predicate
will_be_nonconstant_predicate (struct func_body_info *fbi,
			       struct inline_summary *summary,
			       gimple stmt,
			       vec<predicate_t> nonconstant_names)
{
  struct predicate p = true_predicate ();
  ssa_op_iter iter;
  tree use;
  struct predicate op_non_const;
  bool is_load;
  int base_index;
  struct agg_position_info aggpos;

  /* What statements might be optimized away
     when their arguments are constant.  */
  if (gimple_code (stmt) != GIMPLE_ASSIGN
      && gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_SWITCH
      && (gimple_code (stmt) != GIMPLE_CALL
	  || !(gimple_call_flags (stmt) & ECF_CONST)))
    return p;

  /* Stores will stay anyway.  */
  if (gimple_store_p (stmt))
    return p;

  is_load = gimple_assign_load_p (stmt);

  /* Loads can be optimized when the value is known.  */
  if (is_load)
    {
      tree op;
      gcc_assert (gimple_assign_single_p (stmt));
      op = gimple_assign_rhs1 (stmt);
      if (!unmodified_parm_or_parm_agg_item (fbi, stmt, op, &base_index,
					     &aggpos))
	return p;
    }
  else
    base_index = -1;

  /* See if we understand all operands before we start
     adding conditionals.  */
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      tree parm = unmodified_parm (stmt, use);
      /* For arguments we can build a condition.  */
      if (parm && ipa_get_param_decl_index (fbi->info, parm) >= 0)
	continue;
      if (TREE_CODE (use) != SSA_NAME)
	return p;
      /* If we know when operand is constant,
	 we still can say something useful.  */
      if (!true_predicate_p (&nonconstant_names[SSA_NAME_VERSION (use)]))
	continue;
      return p;
    }

  /* OR together the nonconstantness predicates of all operands.  */
  if (is_load)
    op_non_const =
      add_condition (summary, base_index, &aggpos, CHANGED, NULL);
  else
    op_non_const = false_predicate ();
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      tree parm = unmodified_parm (stmt, use);
      int index;

      if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0)
	{
	  if (index != base_index)
	    p = add_condition (summary, index, NULL, CHANGED, NULL_TREE);
	  else
	    continue;
	}
      else
	p = nonconstant_names[SSA_NAME_VERSION (use)];
      op_non_const = or_predicates (summary->conds, &p, &op_non_const);
    }
  /* Record the result for the SSA name defined by STMT so later statements
     can reuse it.  */
  if ((gimple_code (stmt) == GIMPLE_ASSIGN || gimple_code (stmt) == GIMPLE_CALL)
      && gimple_op (stmt, 0)
      && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
    nonconstant_names[SSA_NAME_VERSION (gimple_op (stmt, 0))]
      = op_non_const;
  return op_non_const;
}
2120
/* Bookkeeping for walk_aliased_vdefs when computing param_change_prob:
   collects the set of basic blocks in which the examined memory may be
   modified.  */

struct record_modified_bb_info
{
  /* Indices of basic blocks containing a potentially modifying statement;
     the entry block index stands for a default definition (i.e. "modified
     before the function started").  */
  bitmap bb_set;
  /* The call statement being analyzed; its own vdef is not recorded.  */
  gimple stmt;
};
2126
2127 /* Callback of walk_aliased_vdefs. Records basic blocks where the value may be
2128 set except for info->stmt. */
2129
2130 static bool
2131 record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
2132 {
2133 struct record_modified_bb_info *info =
2134 (struct record_modified_bb_info *) data;
2135 if (SSA_NAME_DEF_STMT (vdef) == info->stmt)
2136 return false;
2137 bitmap_set_bit (info->bb_set,
2138 SSA_NAME_IS_DEFAULT_DEF (vdef)
2139 ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
2140 : gimple_bb (SSA_NAME_DEF_STMT (vdef))->index);
2141 return false;
2142 }
2143
/* Return probability (based on REG_BR_PROB_BASE) that I-th parameter of STMT
   will change since last invocation of STMT.

   Value 0 is reserved for compile time invariants.
   For common parameters it is REG_BR_PROB_BASE.  For loop invariants it
   ought to be REG_BR_PROB_BASE / estimated_iters.  */

static int
param_change_prob (gimple stmt, int i)
{
  tree op = gimple_call_arg (stmt, i);
  basic_block bb = gimple_bb (stmt);
  tree base;

  /* Global invariants never change.  */
  if (is_gimple_min_invariant (op))
    return 0;
  /* We would have to do non-trivial analysis to really work out what
     is the probability of value to change (i.e. when init statement
     is in a sibling loop of the call).

     We do a conservative estimate: when call is executed N times more often
     than the statement defining value, we take the frequency 1/N.  */
  if (TREE_CODE (op) == SSA_NAME)
    {
      int init_freq;

      /* Frequency 0 means the profile gives us no information; be
	 conservative.  */
      if (!bb->frequency)
	return REG_BR_PROB_BASE;

      if (SSA_NAME_IS_DEFAULT_DEF (op))
	init_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
      else
	init_freq = gimple_bb (SSA_NAME_DEF_STMT (op))->frequency;

      if (!init_freq)
	init_freq = 1;
      if (init_freq < bb->frequency)
	return MAX (GCOV_COMPUTE_SCALE (init_freq, bb->frequency), 1);
      else
	return REG_BR_PROB_BASE;
    }

  base = get_base_address (op);
  if (base)
    {
      ao_ref refd;
      int max;
      struct record_modified_bb_info info;
      bitmap_iterator bi;
      unsigned index;
      tree init = ctor_for_folding (base);

      /* A readable constant initializer means the memory never changes.  */
      if (init != error_mark_node)
	return 0;
      if (!bb->frequency)
	return REG_BR_PROB_BASE;
      ao_ref_init (&refd, op);
      info.stmt = stmt;
      info.bb_set = BITMAP_ALLOC (NULL);
      /* NOTE(review): this alias-oracle walk is unbounded and may be costly
	 on very large functions — consider capping the number of visited
	 vdefs; confirm against walk_aliased_vdefs' current API.  */
      walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info,
			  NULL);
      /* If the value may change within BB itself, assume it changes on
	 every invocation.  */
      if (bitmap_bit_p (info.bb_set, bb->index))
	{
	  BITMAP_FREE (info.bb_set);
	  return REG_BR_PROB_BASE;
	}

      /* Assume that every memory is initialized at entry.
	 TODO: Can we easily determine if value is always defined
	 and thus we may skip entry block?  */
      if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
	max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
      else
	max = 1;

      /* Take the least frequent modifying block as the estimate of how
	 often the value actually changes.  */
      EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
	max = MIN (max, BASIC_BLOCK_FOR_FN (cfun, index)->frequency);

      BITMAP_FREE (info.bb_set);
      if (max < bb->frequency)
	return MAX (GCOV_COMPUTE_SCALE (max, bb->frequency), 1);
      else
	return REG_BR_PROB_BASE;
    }
  return REG_BR_PROB_BASE;
}
2231
/* Find whether a basic block BB is the final block of a (half) diamond CFG
   sub-graph and if the predicate the condition depends on is known.  If so,
   return true and store the predicate in *P.  */

static bool
phi_result_unknown_predicate (struct ipa_node_params *info,
			      inline_summary *summary, basic_block bb,
			      struct predicate *p,
			      vec<predicate_t> nonconstant_names)
{
  edge e;
  edge_iterator ei;
  basic_block first_bb = NULL;
  gimple stmt;

  /* A single predecessor means no control-flow merge: any PHI here is
     trivially determined, so its "unknown" predicate is false.  */
  if (single_pred_p (bb))
    {
      *p = false_predicate ();
      return true;
    }

  /* Every predecessor must be either the conditional block itself or a
     forwarder block hanging directly off one common conditional block
     FIRST_BB — i.e. BB closes a (half) diamond.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (single_succ_p (e->src))
	{
	  if (!single_pred_p (e->src))
	    return false;
	  if (!first_bb)
	    first_bb = single_pred (e->src);
	  else if (single_pred (e->src) != first_bb)
	    return false;
	}
      else
	{
	  if (!first_bb)
	    first_bb = e->src;
	  else if (e->src != first_bb)
	    return false;
	}
    }

  if (!first_bb)
    return false;

  /* The diamond must be controlled by a GIMPLE_COND whose right operand is
     an interprocedural invariant.  */
  stmt = last_stmt (first_bb);
  if (!stmt
      || gimple_code (stmt) != GIMPLE_COND
      || !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
    return false;

  *p = will_be_nonconstant_expr_predicate (info, summary,
					   gimple_cond_lhs (stmt),
					   nonconstant_names);
  /* An always-true predicate carries no information.  */
  if (true_predicate_p (p))
    return false;
  else
    return true;
}
2290
2291 /* Given a PHI statement in a function described by inline properties SUMMARY
2292 and *P being the predicate describing whether the selected PHI argument is
2293 known, store a predicate for the result of the PHI statement into
2294 NONCONSTANT_NAMES, if possible. */
2295
2296 static void
2297 predicate_for_phi_result (struct inline_summary *summary, gphi *phi,
2298 struct predicate *p,
2299 vec<predicate_t> nonconstant_names)
2300 {
2301 unsigned i;
2302
2303 for (i = 0; i < gimple_phi_num_args (phi); i++)
2304 {
2305 tree arg = gimple_phi_arg (phi, i)->def;
2306 if (!is_gimple_min_invariant (arg))
2307 {
2308 gcc_assert (TREE_CODE (arg) == SSA_NAME);
2309 *p = or_predicates (summary->conds, p,
2310 &nonconstant_names[SSA_NAME_VERSION (arg)]);
2311 if (true_predicate_p (p))
2312 return;
2313 }
2314 }
2315
2316 if (dump_file && (dump_flags & TDF_DETAILS))
2317 {
2318 fprintf (dump_file, "\t\tphi predicate: ");
2319 dump_predicate (dump_file, summary->conds, p);
2320 }
2321 nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
2322 }
2323
2324 /* Return predicate specifying when array index in access OP becomes non-constant. */
2325
2326 static struct predicate
2327 array_index_predicate (inline_summary *info,
2328 vec< predicate_t> nonconstant_names, tree op)
2329 {
2330 struct predicate p = false_predicate ();
2331 while (handled_component_p (op))
2332 {
2333 if (TREE_CODE (op) == ARRAY_REF || TREE_CODE (op) == ARRAY_RANGE_REF)
2334 {
2335 if (TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
2336 p = or_predicates (info->conds, &p,
2337 &nonconstant_names[SSA_NAME_VERSION
2338 (TREE_OPERAND (op, 1))]);
2339 }
2340 op = TREE_OPERAND (op, 0);
2341 }
2342 return p;
2343 }
2344
/* For a typical usage of __builtin_expect (a<b, 1), we
   may introduce an extra relation stmt:
   With the builtin, we have
     t1 = a <= b;
     t2 = (long int) t1;
     t3 = __builtin_expect (t2, 1);
     if (t3 != 0)
       goto ...
   Without the builtin, we have
     if (a<=b)
       goto...
   This affects the size/time estimation and may have
   an impact on the earlier inlining.
   Here find this pattern and fix it up later.  */

static gimple
find_foldable_builtin_expect (basic_block bb)
{
  gimple_stmt_iterator bsi;

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    {
      gimple stmt = gsi_stmt (bsi);
      /* Recognize both the library builtin and its internal-function
	 form.  */
      if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT)
	  || (is_gimple_call (stmt)
	      && gimple_call_internal_p (stmt)
	      && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT))
	{
	  tree var = gimple_call_lhs (stmt);
	  tree arg = gimple_call_arg (stmt, 0);
	  use_operand_p use_p;
	  gimple use_stmt;
	  bool match = false;
	  bool done = false;

	  if (!var || !arg)
	    continue;
	  gcc_assert (TREE_CODE (var) == SSA_NAME);

	  /* Walk the def chain of the builtin's argument backwards through
	     conversions, looking for the comparison that feeds it.  */
	  while (TREE_CODE (arg) == SSA_NAME)
	    {
	      gimple stmt_tmp = SSA_NAME_DEF_STMT (arg);
	      if (!is_gimple_assign (stmt_tmp))
		break;
	      switch (gimple_assign_rhs_code (stmt_tmp))
		{
		case LT_EXPR:
		case LE_EXPR:
		case GT_EXPR:
		case GE_EXPR:
		case EQ_EXPR:
		case NE_EXPR:
		  /* Found the comparison; stop the walk.  */
		  match = true;
		  done = true;
		  break;
		CASE_CONVERT:
		  /* Conversions are transparent; keep walking.  */
		  break;
		default:
		  done = true;
		  break;
		}
	      if (done)
		break;
	      arg = gimple_assign_rhs1 (stmt_tmp);
	    }

	  /* Only the pattern where the builtin result's single use is the
	     controlling GIMPLE_COND qualifies for the cost fixup.  */
	  if (match && single_imm_use (var, &use_p, &use_stmt)
	      && gimple_code (use_stmt) == GIMPLE_COND)
	    return use_stmt;
	}
    }
  return NULL;
}
2418
/* Return true when the basic blocks contains only clobbers followed by RESX.
   Such BBs are kept around to make removal of dead stores possible with
   presence of EH and will be optimized out by optimize_clobbers later in the
   game.

   NEED_EH is used to recurse in case the clobber has non-EH predecessors
   that can be clobber only, too.  When it is false, the RESX is not necessary
   on the end of basic block.  */

static bool
clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  edge_iterator ei;
  edge e;

  if (need_eh)
    {
      /* The block must end with a RESX; start scanning just before it.  */
      if (gsi_end_p (gsi))
	return false;
      if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
	return false;
      gsi_prev (&gsi);
    }
  else if (!single_succ_p (bb))
    return false;

  /* Scan backwards: only debug statements, clobbers and a leading label
     are allowed; anything else disqualifies the block.  */
  for (; !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_clobber_p (stmt))
	continue;
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      return false;
    }

  /* See if all predecessors are either throws or clobber only BBs.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (!(e->flags & EDGE_EH)
	&& !clobber_only_eh_bb_p (e->src, false))
      return false;

  return true;
}
2466
/* Compute function body size parameters for NODE.
   When EARLY is true, we compute only simple summaries without
   non-trivial predicates to drive the early inliner.  */

static void
estimate_function_body_sizes (struct cgraph_node *node, bool early)
{
  gcov_type time = 0;
  /* Estimate static overhead for function prologue/epilogue and alignment. */
  int size = 2;
  /* Benefits are scaled by probability of elimination that is in range
     <0,2>.  */
  basic_block bb;
  struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
  int freq;
  struct inline_summary *info = inline_summaries->get (node);
  struct predicate bb_predicate;
  struct func_body_info fbi;
  vec<predicate_t> nonconstant_names = vNULL;
  int nblocks, n;
  int *order;
  predicate array_index = true_predicate ();
  gimple fix_builtin_expect_stmt;

  gcc_assert (my_function && my_function->cfg);
  gcc_assert (cfun == my_function);

  memset(&fbi, 0, sizeof(fbi));
  info->conds = NULL;
  info->entry = NULL;

  /* When optimizing and analyzing for IPA inliner, initialize loop optimizer
     so we can produce proper inline hints.

     When optimizing and analyzing for early inliner, initialize node params
     so we can produce correct BB predicates.  */

  if (opt_for_fn (node->decl, optimize))
    {
      calculate_dominance_info (CDI_DOMINATORS);
      if (!early)
        loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
      else
	{
	  ipa_check_create_node_params ();
	  ipa_initialize_node_params (node);
	}

      if (ipa_node_params_sum)
	{
	  fbi.node = node;
	  fbi.info = IPA_NODE_REF (node);
	  fbi.bb_infos = vNULL;
	  fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
	  fbi.param_count = count_formal_params(node->decl);
	  nonconstant_names.safe_grow_cleared
	    (SSANAMES (my_function)->length ());
	}
    }

  if (dump_file)
    fprintf (dump_file, "\nAnalyzing function body size: %s\n",
	     node->name ());

  /* When we run into maximal number of entries, we assign everything to the
     constant truth case.  Be sure to have it in list. */
  bb_predicate = true_predicate ();
  account_size_time (info, 0, 0, &bb_predicate);

  bb_predicate = not_inlined_predicate ();
  account_size_time (info, 2 * INLINE_SIZE_SCALE, 0, &bb_predicate);

  if (fbi.info)
    compute_bb_predicates (&fbi, node, info);
  /* Walk blocks in reverse post-order so predecessors' predicates are
     computed before their successors are visited.  */
  order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
  nblocks = pre_and_rev_post_order_compute (NULL, order, false);
  for (n = 0; n < nblocks; n++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
      freq = compute_call_stmt_bb_frequency (node->decl, bb);
      if (clobber_only_eh_bb_p (bb))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "\n Ignoring BB %i;"
		     " it will be optimized away by cleanup_clobbers\n",
		     bb->index);
	  continue;
	}

      /* TODO: Obviously predicates can be propagated down across CFG.  */
      if (fbi.info)
	{
	  if (bb->aux)
	    bb_predicate = *(struct predicate *) bb->aux;
	  else
	    bb_predicate = false_predicate ();
	}
      else
	bb_predicate = true_predicate ();

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "\n BB %i predicate:", bb->index);
	  dump_predicate (dump_file, info->conds, &bb_predicate);
	}

      if (fbi.info && nonconstant_names.exists ())
	{
	  struct predicate phi_predicate;
	  bool first_phi = true;

	  for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
	       gsi_next (&bsi))
	    {
	      /* The diamond-shape check is per block; do it only once and
		 reuse the predicate for every PHI in the block.  */
	      if (first_phi
		  && !phi_result_unknown_predicate (fbi.info, info, bb,
						    &phi_predicate,
						    nonconstant_names))
		break;
	      first_phi = false;
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "  ");
		  print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0);
		}
	      predicate_for_phi_result (info, bsi.phi (), &phi_predicate,
					nonconstant_names);
	    }
	}

      fix_builtin_expect_stmt = find_foldable_builtin_expect (bb);

      for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
	   gsi_next (&bsi))
	{
	  gimple stmt = gsi_stmt (bsi);
	  int this_size = estimate_num_insns (stmt, &eni_size_weights);
	  int this_time = estimate_num_insns (stmt, &eni_time_weights);
	  int prob;
	  struct predicate will_be_nonconstant;

	  /* This relation stmt should be folded after we remove
	     builtin_expect call. Adjust the cost here.  */
	  if (stmt == fix_builtin_expect_stmt)
	    {
	      this_size--;
	      this_time--;
	    }

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "  ");
	      print_gimple_stmt (dump_file, stmt, 0, 0);
	      fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n",
		       ((double) freq) / CGRAPH_FREQ_BASE, this_size,
		       this_time);
	    }

	  /* Track array indexing on both loads and stores for the
	     array_index inline hint.  */
	  if (gimple_assign_load_p (stmt) && nonconstant_names.exists ())
	    {
	      struct predicate this_array_index;
	      this_array_index =
		array_index_predicate (info, nonconstant_names,
				       gimple_assign_rhs1 (stmt));
	      if (!false_predicate_p (&this_array_index))
		array_index =
		  and_predicates (info->conds, &array_index,
				  &this_array_index);
	    }
	  if (gimple_store_p (stmt) && nonconstant_names.exists ())
	    {
	      struct predicate this_array_index;
	      this_array_index =
		array_index_predicate (info, nonconstant_names,
				       gimple_get_lhs (stmt));
	      if (!false_predicate_p (&this_array_index))
		array_index =
		  and_predicates (info->conds, &array_index,
				  &this_array_index);
	    }


	  if (is_gimple_call (stmt)
	      && !gimple_call_internal_p (stmt))
	    {
	      struct cgraph_edge *edge = node->get_edge (stmt);
	      struct inline_edge_summary *es = inline_edge_summary (edge);

	      /* Special case: results of BUILT_IN_CONSTANT_P will be always
	         resolved as constant.  We however don't want to optimize
	         out the cgraph edges.  */
	      if (nonconstant_names.exists ()
		  && gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
		  && gimple_call_lhs (stmt)
		  && TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
		{
		  struct predicate false_p = false_predicate ();
		  nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
		    = false_p;
		}
	      if (ipa_node_params_sum)
		{
		  /* Record the change probability of each call argument for
		     later use when estimating call contexts.  */
		  int count = gimple_call_num_args (stmt);
		  int i;

		  if (count)
		    es->param.safe_grow_cleared (count);
		  for (i = 0; i < count; i++)
		    {
		      int prob = param_change_prob (stmt, i);
		      gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
		      es->param[i].change_prob = prob;
		    }
		}

	      es->call_stmt_size = this_size;
	      es->call_stmt_time = this_time;
	      es->loop_depth = bb_loop_depth (bb);
	      edge_set_predicate (edge, &bb_predicate);
	    }

	  /* TODO: When conditional jump or switch is known to be constant, but
	     we did not translate it into the predicates, we really can account
	     just maximum of the possible paths.  */
	  if (fbi.info)
	    will_be_nonconstant
	      = will_be_nonconstant_predicate (&fbi, info,
					       stmt, nonconstant_names);
	  if (this_time || this_size)
	    {
	      struct predicate p;

	      this_time *= freq;

	      prob = eliminated_by_inlining_prob (stmt);
	      if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 "\t\t50%% will be eliminated by inlining\n");
	      if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "\t\tWill be eliminated by inlining\n");

	      if (fbi.info)
		p = and_predicates (info->conds, &bb_predicate,
				    &will_be_nonconstant);
	      else
		p = true_predicate ();

	      if (!false_predicate_p (&p)
		  || (is_gimple_call (stmt)
		      && !false_predicate_p (&bb_predicate)))
		{
		  time += this_time;
		  size += this_size;
		  if (time > MAX_TIME * INLINE_TIME_SCALE)
		    time = MAX_TIME * INLINE_TIME_SCALE;
		}

	      /* We account everything but the calls.  Calls have their own
	         size/time info attached to cgraph edges.  This is necessary
	         in order to make the cost disappear after inlining.  */
	      if (!is_gimple_call (stmt))
		{
		  if (prob)
		    {
		      struct predicate ip = not_inlined_predicate ();
		      ip = and_predicates (info->conds, &ip, &p);
		      account_size_time (info, this_size * prob,
					 this_time * prob, &ip);
		    }
		  if (prob != 2)
		    account_size_time (info, this_size * (2 - prob),
				       this_time * (2 - prob), &p);
		}

	      gcc_assert (time >= 0);
	      gcc_assert (size >= 0);
	    }
	}
    }
  set_hint_predicate (&inline_summaries->get (node)->array_index, array_index);
  time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
  if (time > MAX_TIME)
    time = MAX_TIME;
  free (order);

  /* For the IPA pass (not early), additionally compute loop-iteration and
     loop-stride hint predicates using SCEV.  */
  if (nonconstant_names.exists () && !early)
    {
      struct loop *loop;
      predicate loop_iterations = true_predicate ();
      predicate loop_stride = true_predicate ();

      if (dump_file && (dump_flags & TDF_DETAILS))
	flow_loops_dump (dump_file, NULL, 0);
      scev_initialize ();
      FOR_EACH_LOOP (loop, 0)
	{
	  vec<edge> exits;
	  edge ex;
	  unsigned int j, i;
	  struct tree_niter_desc niter_desc;
	  basic_block *body = get_loop_body (loop);
	  bb_predicate = *(struct predicate *) loop->header->aux;

	  exits = get_loop_exit_edges (loop);
	  FOR_EACH_VEC_ELT (exits, j, ex)
	    if (number_of_iterations_exit (loop, ex, &niter_desc, false)
		&& !is_gimple_min_invariant (niter_desc.niter))
	      {
		predicate will_be_nonconstant
		  = will_be_nonconstant_expr_predicate (fbi.info, info,
							niter_desc.niter,
							nonconstant_names);
		if (!true_predicate_p (&will_be_nonconstant))
		  will_be_nonconstant = and_predicates (info->conds,
							&bb_predicate,
							&will_be_nonconstant);
		if (!true_predicate_p (&will_be_nonconstant)
		    && !false_predicate_p (&will_be_nonconstant))
		  /* This is slightly imprecise.  We may want to represent each
		     loop with independent predicate.  */
		  loop_iterations =
		    and_predicates (info->conds, &loop_iterations,
				    &will_be_nonconstant);
	      }
	  exits.release ();

	  for (i = 0; i < loop->num_nodes; i++)
	    {
	      gimple_stmt_iterator gsi;
	      bb_predicate = *(struct predicate *) body[i]->aux;
	      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi);
		   gsi_next (&gsi))
		{
		  gimple stmt = gsi_stmt (gsi);
		  affine_iv iv;
		  ssa_op_iter iter;
		  tree use;

		  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
		    {
		      predicate will_be_nonconstant;

		      if (!simple_iv
			  (loop, loop_containing_stmt (stmt), use, &iv, true)
			  || is_gimple_min_invariant (iv.step))
			continue;
		      will_be_nonconstant
			= will_be_nonconstant_expr_predicate (fbi.info, info,
							      iv.step,
							      nonconstant_names);
		      if (!true_predicate_p (&will_be_nonconstant))
			will_be_nonconstant
			  = and_predicates (info->conds,
					    &bb_predicate,
					    &will_be_nonconstant);
		      if (!true_predicate_p (&will_be_nonconstant)
			  && !false_predicate_p (&will_be_nonconstant))
			/* This is slightly imprecise.  We may want to represent
			   each loop with independent predicate.  */
			loop_stride =
			  and_predicates (info->conds, &loop_stride,
					  &will_be_nonconstant);
		    }
		}
	    }
	  free (body);
	}
      set_hint_predicate (&inline_summaries->get (node)->loop_iterations,
			  loop_iterations);
      set_hint_predicate (&inline_summaries->get (node)->loop_stride, loop_stride);
      scev_finalize ();
    }
  /* Release per-BB and per-edge predicate storage.  */
  FOR_ALL_BB_FN (bb, my_function)
    {
      edge e;
      edge_iterator ei;

      if (bb->aux)
	edge_predicate_pool.remove ((predicate *)bb->aux);
      bb->aux = NULL;
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (e->aux)
	    edge_predicate_pool.remove ((predicate *) e->aux);
	  e->aux = NULL;
	}
    }
  inline_summaries->get (node)->self_time = time;
  inline_summaries->get (node)->self_size = size;
  nonconstant_names.release ();
  if (opt_for_fn (node->decl, optimize))
    {
      if (!early)
	loop_optimizer_finalize ();
      else if (!ipa_edge_args_vector)
	ipa_free_all_node_params ();
      free_dominance_info (CDI_DOMINATORS);
    }
  if (dump_file)
    {
      fprintf (dump_file, "\n");
      dump_inline_summary (dump_file, node);
    }
}
2871
2872
/* Compute parameters of functions used by inliner.
   EARLY is true when we compute parameters for the early inliner  */

void
compute_inline_parameters (struct cgraph_node *node, bool early)
{
  HOST_WIDE_INT self_stack_size;
  struct cgraph_edge *e;
  struct inline_summary *info;

  gcc_assert (!node->global.inlined_to);

  inline_summary_alloc ();

  info = inline_summaries->get (node);
  reset_inline_summary (node, info);

  /* FIXME: Thunks are inlinable, but tree-inline don't know how to do that.
     Once this happens, we will need to predict call statement size more
     carefully.  */
  if (node->thunk.thunk_p)
    {
      struct inline_edge_summary *es = inline_edge_summary (node->callees);
      struct predicate t = true_predicate ();

      info->inlinable = 0;
      node->callees->call_stmt_cannot_inline_p = true;
      node->local.can_change_signature = false;
      es->call_stmt_time = 1;
      es->call_stmt_size = 1;
      account_size_time (info, 0, 0, &t);
      return;
    }

  /* Even is_gimple_min_invariant rely on current_function_decl.  */
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));

  /* Estimate the stack size for the function if we're optimizing.  */
  self_stack_size = optimize ? estimated_stack_frame_size (node) : 0;
  info->estimated_self_stack_size = self_stack_size;
  info->estimated_stack_size = self_stack_size;
  info->stack_frame_offset = 0;

  /* Can this function be inlined at all?  */
  if (!opt_for_fn (node->decl, optimize)
      && !lookup_attribute ("always_inline",
			    DECL_ATTRIBUTES (node->decl)))
    info->inlinable = false;
  else
    info->inlinable = tree_inlinable_function_p (node->decl);

  info->contains_cilk_spawn = fn_contains_cilk_spawn_p (cfun);

  /* Type attributes can use parameter indices to describe them.  */
  if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl)))
    node->local.can_change_signature = false;
  else
    {
      /* Otherwise, inlinable functions always can change signature.  */
      if (info->inlinable)
	node->local.can_change_signature = true;
      else
	{
	  /* Functions calling builtin_apply can not change signature.  */
	  for (e = node->callees; e; e = e->next_callee)
	    {
	      tree cdecl = e->callee->decl;
	      if (DECL_BUILT_IN (cdecl)
		  && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL
		  && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS
		      || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START))
		break;
	    }
	  /* E is non-NULL iff the loop above found such a callee.  */
	  node->local.can_change_signature = !e;
	}
    }
  estimate_function_body_sizes (node, early);

  for (e = node->callees; e; e = e->next_callee)
    if (e->callee->comdat_local_p ())
      break;
  node->calls_comdat_local = (e != NULL);

  /* Inlining characteristics are maintained by the cgraph_mark_inline.  */
  info->time = info->self_time;
  info->size = info->self_size;
  info->stack_frame_offset = 0;
  info->estimated_stack_size = info->estimated_self_stack_size;
#ifdef ENABLE_CHECKING
  inline_update_overall_summary (node);
  gcc_assert (info->time == info->self_time && info->size == info->self_size);
#endif

  pop_cfun ();
}
2968
2969
2970 /* Compute parameters of functions used by inliner using
2971 current_function_decl. */
2972
2973 static unsigned int
2974 compute_inline_parameters_for_current (void)
2975 {
2976 compute_inline_parameters (cgraph_node::get (current_function_decl), true);
2977 return 0;
2978 }
2979
namespace {

const pass_data pass_data_inline_parameters =
{
  GIMPLE_PASS, /* type */
  "inline_param", /* name */
  OPTGROUP_INLINE, /* optinfo_flags */
  TV_INLINE_PARAMETERS, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* GIMPLE pass computing inline summaries (size/time estimates) for the
   current function; see compute_inline_parameters.  */

class pass_inline_parameters : public gimple_opt_pass
{
public:
  pass_inline_parameters (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_inline_parameters, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_inline_parameters (m_ctxt); }
  virtual unsigned int execute (function *)
  {
    return compute_inline_parameters_for_current ();
  }

}; // class pass_inline_parameters

} // anon namespace
3012
3013 gimple_opt_pass *
3014 make_pass_inline_parameters (gcc::context *ctxt)
3015 {
3016 return new pass_inline_parameters (ctxt);
3017 }
3018
3019
/* Estimate benefit devirtualizing indirect edge IE, provided KNOWN_VALS,
   KNOWN_CONTEXTS and KNOWN_AGGS.  Return true when the call can be resolved
   to a known, available and inlinable target; in that case *SIZE and *TIME
   are reduced by the indirect-vs-direct call cost difference.  */

static bool
estimate_edge_devirt_benefit (struct cgraph_edge *ie,
			      int *size, int *time,
			      vec<tree> known_vals,
			      vec<ipa_polymorphic_call_context> known_contexts,
			      vec<ipa_agg_jump_function_p> known_aggs)
{
  tree target;
  struct cgraph_node *callee;
  struct inline_summary *isummary;
  enum availability avail;
  bool speculative;

  /* Without any context information there is nothing to resolve.  */
  if (!known_vals.exists () && !known_contexts.exists ())
    return false;
  if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
    return false;

  target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts,
					 known_aggs, &speculative);
  if (!target || speculative)
    return false;

  /* Account for difference in cost between indirect and direct calls.  */
  *size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost);
  *time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost);
  gcc_checking_assert (*time >= 0);
  gcc_checking_assert (*size >= 0);

  callee = cgraph_node::get (target);
  if (!callee || !callee->definition)
    return false;
  callee = callee->function_symbol (&avail);
  if (avail < AVAIL_AVAILABLE)
    return false;
  isummary = inline_summaries->get (callee);
  return isummary->inlinable;
}
3061
/* Increase SIZE, MIN_SIZE (if non-NULL) and TIME for size and time needed to
   handle edge E with probability PROB.
   Set HINTS if edge may be devirtualized.
   KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe context of the call
   site.  */

static inline void
estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
			     int *time,
			     int prob,
			     vec<tree> known_vals,
			     vec<ipa_polymorphic_call_context> known_contexts,
			     vec<ipa_agg_jump_function_p> known_aggs,
			     inline_hints *hints)
{
  struct inline_edge_summary *es = inline_edge_summary (e);
  int call_size = es->call_stmt_size;
  int call_time = es->call_stmt_time;
  int cur_size;
  /* For indirect edges, see whether the call may be devirtualized; if so,
     CALL_SIZE/CALL_TIME are adjusted for a direct call and the hot edge
     is flagged as an inlining hint.  */
  if (!e->callee
      && estimate_edge_devirt_benefit (e, &call_size, &call_time,
				       known_vals, known_contexts, known_aggs)
      && hints && e->maybe_hot_p ())
    *hints |= INLINE_HINT_indirect_call;
  cur_size = call_size * INLINE_SIZE_SCALE;
  *size += cur_size;
  if (min_size)
    *min_size += cur_size;
  /* Scale time by PROB and the edge's execution frequency; cap at
     MAX_TIME.  */
  *time += apply_probability ((gcov_type) call_time, prob)
    * e->frequency * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE);
  if (*time > MAX_TIME * INLINE_TIME_SCALE)
    *time = MAX_TIME * INLINE_TIME_SCALE;
}
3095
3096
3097
/* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
   calls in NODE.  POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
   describe context of the call site.  */

static void
estimate_calls_size_and_time (struct cgraph_node *node, int *size,
			      int *min_size, int *time,
			      inline_hints *hints,
			      clause_t possible_truths,
			      vec<tree> known_vals,
			      vec<ipa_polymorphic_call_context> known_contexts,
			      vec<ipa_agg_jump_function_p> known_aggs)
{
  struct cgraph_edge *e;
  for (e = node->callees; e; e = e->next_callee)
    {
      struct inline_edge_summary *es = inline_edge_summary (e);

      /* Do not care about zero sized builtins.  */
      if (e->inline_failed && !es->call_stmt_size)
	{
	  gcc_checking_assert (!es->call_stmt_time);
	  continue;
	}
      /* Count the edge only when its predicate may hold in this context.  */
      if (!es->predicate
	  || evaluate_predicate (es->predicate, possible_truths))
	{
	  if (e->inline_failed)
	    {
	      /* Predicates of calls shall not use NOT_CHANGED codes,
	         so we do not need to compute probabilities.  */
	      estimate_edge_size_and_time (e, size,
					   es->predicate ? NULL : min_size,
					   time, REG_BR_PROB_BASE,
					   known_vals, known_contexts,
					   known_aggs, hints);
	    }
	  else
	    /* Already-inlined edge: recurse into the inlined body's
	       calls.  */
	    estimate_calls_size_and_time (e->callee, size, min_size, time,
					  hints,
					  possible_truths,
					  known_vals, known_contexts,
					  known_aggs);
	}
    }
  for (e = node->indirect_calls; e; e = e->next_callee)
    {
      struct inline_edge_summary *es = inline_edge_summary (e);
      if (!es->predicate
	  || evaluate_predicate (es->predicate, possible_truths))
	estimate_edge_size_and_time (e, size,
				     es->predicate ? NULL : min_size,
				     time, REG_BR_PROB_BASE,
				     known_vals, known_contexts, known_aggs,
				     hints);
    }
}
3155
3156
3157 /* Estimate size and time needed to execute NODE assuming
3158 POSSIBLE_TRUTHS clause, and KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3159 information about NODE's arguments. If non-NULL use also probability
3160 information present in INLINE_PARAM_SUMMARY vector.
   Additionally determine hints determined by the context.  Finally compute
   minimal size needed for the call that is independent of the call context and
3163 can be used for fast estimates. Return the values in RET_SIZE,
3164 RET_MIN_SIZE, RET_TIME and RET_HINTS. */
3165
static void
estimate_node_size_and_time (struct cgraph_node *node,
			     clause_t possible_truths,
			     vec<tree> known_vals,
			     vec<ipa_polymorphic_call_context> known_contexts,
			     vec<ipa_agg_jump_function_p> known_aggs,
			     int *ret_size, int *ret_min_size, int *ret_time,
			     inline_hints *ret_hints,
			     vec<inline_param_summary>
			     inline_param_summary)
{
  struct inline_summary *info = inline_summaries->get (node);
  size_time_entry *e;
  int size = 0;
  int time = 0;
  int min_size = 0;
  inline_hints hints = 0;
  int i;

  /* Dump the conditions known to be false in this context.  */
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      bool found = false;
      fprintf (dump_file, "   Estimating body: %s/%i\n"
	       "   Known to be false: ", node->name (),
	       node->order);

      for (i = predicate_not_inlined_condition;
	   i < (predicate_first_dynamic_condition
		+ (int) vec_safe_length (info->conds)); i++)
	if (!(possible_truths & (1 << i)))
	  {
	    if (found)
	      fprintf (dump_file, ", ");
	    found = true;
	    dump_condition (dump_file, info->conds, i);
	  }
    }

  /* Sum size and time of the entries whose predicate cannot be
     disproved under POSSIBLE_TRUTHS.  */
  for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
    if (evaluate_predicate (&e->predicate, possible_truths))
      {
	size += e->size;
	gcc_checking_assert (e->time >= 0);
	gcc_checking_assert (time >= 0);
	if (!inline_param_summary.exists ())
	  time += e->time;
	else
	  {
	    /* With per-parameter change probabilities available, scale
	       the entry's time by the chance its predicate holds.  */
	    int prob = predicate_probability (info->conds,
					      &e->predicate,
					      possible_truths,
					      inline_param_summary);
	    gcc_checking_assert (prob >= 0);
	    gcc_checking_assert (prob <= REG_BR_PROB_BASE);
	    time += apply_probability ((gcov_type) e->time, prob);
	  }
	/* Saturate to keep the accumulator in range.  */
	if (time > MAX_TIME * INLINE_TIME_SCALE)
	  time = MAX_TIME * INLINE_TIME_SCALE;
	gcc_checking_assert (time >= 0);

      }
  /* Entry 0 always has a true predicate (asserted below); its size is
     the context-independent minimum size of the body.  */
  gcc_checking_assert (true_predicate_p (&(*info->entry)[0].predicate));
  min_size = (*info->entry)[0].size;
  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* A hint fires when its predicate is not already known true, i.e.
     inlining into this context may make it true.  */
  if (info->loop_iterations
      && !evaluate_predicate (info->loop_iterations, possible_truths))
    hints |= INLINE_HINT_loop_iterations;
  if (info->loop_stride
      && !evaluate_predicate (info->loop_stride, possible_truths))
    hints |= INLINE_HINT_loop_stride;
  if (info->array_index
      && !evaluate_predicate (info->array_index, possible_truths))
    hints |= INLINE_HINT_array_index;
  if (info->scc_no)
    hints |= INLINE_HINT_in_scc;
  if (DECL_DECLARED_INLINE_P (node->decl))
    hints |= INLINE_HINT_declared_inline;

  /* Add costs of all calls made by the body in this context.  */
  estimate_calls_size_and_time (node, &size, &min_size, &time, &hints, possible_truths,
				known_vals, known_contexts, known_aggs);
  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);
  /* Internal computations use scaled units; convert back.  */
  time = RDIV (time, INLINE_TIME_SCALE);
  size = RDIV (size, INLINE_SIZE_SCALE);
  min_size = RDIV (min_size, INLINE_SIZE_SCALE);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n   size:%i time:%i\n", (int) size, (int) time);
  if (ret_time)
    *ret_time = time;
  if (ret_size)
    *ret_size = size;
  if (ret_min_size)
    *ret_min_size = min_size;
  if (ret_hints)
    *ret_hints = hints;
  return;
}
3266
3267
3268 /* Estimate size and time needed to execute callee of EDGE assuming that
3269 parameters known to be constant at caller of EDGE are propagated.
3270 KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
3271 and types for parameters. */
3272
3273 void
3274 estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
3275 vec<tree> known_vals,
3276 vec<ipa_polymorphic_call_context>
3277 known_contexts,
3278 vec<ipa_agg_jump_function_p> known_aggs,
3279 int *ret_size, int *ret_time,
3280 inline_hints *hints)
3281 {
3282 clause_t clause;
3283
3284 clause = evaluate_conditions_for_known_args (node, false, known_vals,
3285 known_aggs);
3286 estimate_node_size_and_time (node, clause, known_vals, known_contexts,
3287 known_aggs, ret_size, NULL, ret_time, hints, vNULL);
3288 }
3289
3290 /* Translate all conditions from callee representation into caller
3291 representation and symbolically evaluate predicate P into new predicate.
3292
3293 INFO is inline_summary of function we are adding predicate into, CALLEE_INFO
3294 is summary of function predicate P is from. OPERAND_MAP is array giving
   callee formal IDs the caller formal IDs.  POSSIBLE_TRUTHS is a clause of all
   callee conditions that may be true in caller context.  TOPLEV_PREDICATE is
   predicate under which callee is executed.  OFFSET_MAP is an array of
3298 offsets that need to be added to conditions, negative offset means that
3299 conditions relying on values passed by reference have to be discarded
3300 because they might not be preserved (and should be considered offset zero
3301 for other purposes). */
3302
static struct predicate
remap_predicate (struct inline_summary *info,
		 struct inline_summary *callee_info,
		 struct predicate *p,
		 vec<int> operand_map,
		 vec<int> offset_map,
		 clause_t possible_truths, struct predicate *toplev_predicate)
{
  int i;
  struct predicate out = true_predicate ();

  /* True predicate is easy.  */
  if (true_predicate_p (p))
    return *toplev_predicate;
  /* P is a conjunction of clauses; remap each clause by OR-ing the
     remapped conditions it contains, then AND the results together.  */
  for (i = 0; p->clause[i]; i++)
    {
      clause_t clause = p->clause[i];
      int cond;
      struct predicate clause_predicate = false_predicate ();

      gcc_assert (i < MAX_CLAUSES);

      for (cond = 0; cond < NUM_CONDITIONS; cond++)
	/* Do we have condition we can't disprove?  */
	if (clause & possible_truths & (1 << cond))
	  {
	    struct predicate cond_predicate;
	    /* Work out if the condition can translate to predicate in the
	       inlined function.  */
	    if (cond >= predicate_first_dynamic_condition)
	      {
		struct condition *c;

		c = &(*callee_info->conds)[cond
					   -
					   predicate_first_dynamic_condition];
		/* See if we can remap condition operand to caller's operand.
		   Otherwise give up.  */
		if (!operand_map.exists ()
		    || (int) operand_map.length () <= c->operand_num
		    || operand_map[c->operand_num] == -1
		    /* TODO: For non-aggregate conditions, adding an offset is
		       basically an arithmetic jump function processing which
		       we should support in future.  */
		    || ((!c->agg_contents || !c->by_ref)
			&& offset_map[c->operand_num] > 0)
		    || (c->agg_contents && c->by_ref
			&& offset_map[c->operand_num] < 0))
		  /* An unmappable condition must be assumed true, which
		     conservatively weakens the whole clause.  */
		  cond_predicate = true_predicate ();
		else
		  {
		    struct agg_position_info ap;
		    HOST_WIDE_INT offset_delta = offset_map[c->operand_num];
		    /* Negative delta means "offset not preserved"; that is
		       only legal for non-by-ref conditions, where it is
		       treated as offset zero.  */
		    if (offset_delta < 0)
		      {
			gcc_checking_assert (!c->agg_contents || !c->by_ref);
			offset_delta = 0;
		      }
		    gcc_assert (!c->agg_contents
				|| c->by_ref || offset_delta == 0);
		    ap.offset = c->offset + offset_delta;
		    ap.agg_contents = c->agg_contents;
		    ap.by_ref = c->by_ref;
		    cond_predicate = add_condition (info,
						    operand_map[c->operand_num],
						    &ap, c->code, c->val);
		  }
	      }
	    /* Fixed conditions remains same, construct single
	       condition predicate.  */
	    else
	      {
		cond_predicate.clause[0] = 1 << cond;
		cond_predicate.clause[1] = 0;
	      }
	    clause_predicate = or_predicates (info->conds, &clause_predicate,
					      &cond_predicate);
	  }
      out = and_predicates (info->conds, &out, &clause_predicate);
    }
  /* Everything in the callee executes only under the call-site predicate.  */
  return and_predicates (info->conds, &out, toplev_predicate);
}
3385
3386
3387 /* Update summary information of inline clones after inlining.
3388 Compute peak stack usage. */
3389
static void
inline_update_callee_summaries (struct cgraph_node *node, int depth)
{
  struct cgraph_edge *e;
  struct inline_summary *callee_info = inline_summaries->get (node);
  struct inline_summary *caller_info = inline_summaries->get (node->callers->caller);
  HOST_WIDE_INT peak;

  /* The inlined body's frame is placed right after the caller's own
     frame in the combined stack layout.  */
  callee_info->stack_frame_offset
    = caller_info->stack_frame_offset
    + caller_info->estimated_self_stack_size;
  peak = callee_info->stack_frame_offset
    + callee_info->estimated_self_stack_size;
  /* Record the peak on the function everything is ultimately inlined to.  */
  if (inline_summaries->get (node->global.inlined_to)->estimated_stack_size < peak)
    inline_summaries->get (node->global.inlined_to)->estimated_stack_size = peak;
  ipa_propagate_frequency (node);
  /* Recurse into callees that are themselves inlined, and shift the loop
     depth of every outgoing edge by the depth of the inlined call site.  */
  for (e = node->callees; e; e = e->next_callee)
    {
      if (!e->inline_failed)
	inline_update_callee_summaries (e->callee, depth);
      inline_edge_summary (e)->loop_depth += depth;
    }
  for (e = node->indirect_calls; e; e = e->next_callee)
    inline_edge_summary (e)->loop_depth += depth;
}
3415
/* Update change_prob of EDGE after INLINED_EDGE has been inlined.
   When function A is inlined in B and A calls C with parameter that
   changes with probability PROB1 and C is known to be passthrough
   of argument of B that changes with probability PROB2, the probability
   of change is now PROB1*PROB2.  */
3421
static void
remap_edge_change_prob (struct cgraph_edge *inlined_edge,
			struct cgraph_edge *edge)
{
  /* Without IPA parameter analysis there are no jump functions and
     nothing to remap.  */
  if (ipa_node_params_sum)
    {
      int i;
      struct ipa_edge_args *args = IPA_EDGE_REF (edge);
      struct inline_edge_summary *es = inline_edge_summary (edge);
      struct inline_edge_summary *inlined_es
	= inline_edge_summary (inlined_edge);

      for (i = 0; i < ipa_get_cs_argument_count (args); i++)
	{
	  struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
	  /* Only plain pass-through arguments with a known formal in the
	     inlined edge's summary can be combined.  */
	  if (jfunc->type == IPA_JF_PASS_THROUGH
	      && (ipa_get_jf_pass_through_formal_id (jfunc)
		  < (int) inlined_es->param.length ()))
	    {
	      int jf_formal_id = ipa_get_jf_pass_through_formal_id (jfunc);
	      int prob1 = es->param[i].change_prob;
	      int prob2 = inlined_es->param[jf_formal_id].change_prob;
	      int prob = combine_probabilities (prob1, prob2);

	      /* Do not let rounding turn a nonzero probability into
		 "never changes".  */
	      if (prob1 && prob2 && !prob)
		prob = 1;

	      es->param[i].change_prob = prob;
	    }
	}
    }
}
3454
3455 /* Update edge summaries of NODE after INLINED_EDGE has been inlined.
3456
3457 Remap predicates of callees of NODE. Rest of arguments match
3458 remap_predicate.
3459
3460 Also update change probabilities. */
3461
static void
remap_edge_summaries (struct cgraph_edge *inlined_edge,
		      struct cgraph_node *node,
		      struct inline_summary *info,
		      struct inline_summary *callee_info,
		      vec<int> operand_map,
		      vec<int> offset_map,
		      clause_t possible_truths,
		      struct predicate *toplev_predicate)
{
  struct cgraph_edge *e, *next;
  for (e = node->callees; e; e = next)
    {
      struct inline_edge_summary *es = inline_edge_summary (e);
      struct predicate p;
      /* NOTE(review): NEXT is cached before updating the edge --
	 presumably edge_set_predicate can alter the callee list; confirm.  */
      next = e->next_callee;

      if (e->inline_failed)
	{
	  remap_edge_change_prob (inlined_edge, e);

	  if (es->predicate)
	    {
	      p = remap_predicate (info, callee_info,
				   es->predicate, operand_map, offset_map,
				   possible_truths, toplev_predicate);
	      edge_set_predicate (e, &p);
	    }
	  else
	    /* An edge with no own predicate executes exactly when the
	       call site it was inlined through executes.  */
	    edge_set_predicate (e, toplev_predicate);
	}
      else
	/* Already-inlined edge: recurse into the inlined body.  */
	remap_edge_summaries (inlined_edge, e->callee, info, callee_info,
			      operand_map, offset_map, possible_truths,
			      toplev_predicate);
    }
  /* Indirect edges are remapped the same way but never recursed into.  */
  for (e = node->indirect_calls; e; e = next)
    {
      struct inline_edge_summary *es = inline_edge_summary (e);
      struct predicate p;
      next = e->next_callee;

      remap_edge_change_prob (inlined_edge, e);
      if (es->predicate)
	{
	  p = remap_predicate (info, callee_info,
			       es->predicate, operand_map, offset_map,
			       possible_truths, toplev_predicate);
	  edge_set_predicate (e, &p);
	}
      else
	edge_set_predicate (e, toplev_predicate);
    }
}
3516
3517 /* Same as remap_predicate, but set result into hint *HINT. */
3518
3519 static void
3520 remap_hint_predicate (struct inline_summary *info,
3521 struct inline_summary *callee_info,
3522 struct predicate **hint,
3523 vec<int> operand_map,
3524 vec<int> offset_map,
3525 clause_t possible_truths,
3526 struct predicate *toplev_predicate)
3527 {
3528 predicate p;
3529
3530 if (!*hint)
3531 return;
3532 p = remap_predicate (info, callee_info,
3533 *hint,
3534 operand_map, offset_map,
3535 possible_truths, toplev_predicate);
3536 if (!false_predicate_p (&p) && !true_predicate_p (&p))
3537 {
3538 if (!*hint)
3539 set_hint_predicate (hint, p);
3540 else
3541 **hint = and_predicates (info->conds, *hint, &p);
3542 }
3543 }
3544
3545 /* We inlined EDGE. Update summary of the function we inlined into. */
3546
void
inline_merge_summary (struct cgraph_edge *edge)
{
  struct inline_summary *callee_info = inline_summaries->get (edge->callee);
  struct cgraph_node *to = (edge->caller->global.inlined_to
			    ? edge->caller->global.inlined_to : edge->caller);
  struct inline_summary *info = inline_summaries->get (to);
  clause_t clause = 0;		/* not_inline is known to be false.  */
  size_time_entry *e;
  vec<int> operand_map = vNULL;
  vec<int> offset_map = vNULL;
  int i;
  struct predicate toplev_predicate;
  struct predicate true_p = true_predicate ();
  struct inline_edge_summary *es = inline_edge_summary (edge);

  /* Everything merged from the callee executes only when the inlined call
     site itself executes.  */
  if (es->predicate)
    toplev_predicate = *es->predicate;
  else
    toplev_predicate = true_predicate ();

  if (callee_info->conds)
    evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL);
  /* Build maps translating callee formal parameter indices (and aggregate
     offsets) into the caller's formals, using the edge's jump functions.  */
  if (ipa_node_params_sum && callee_info->conds)
    {
      struct ipa_edge_args *args = IPA_EDGE_REF (edge);
      int count = ipa_get_cs_argument_count (args);
      int i;

      if (count)
	{
	  operand_map.safe_grow_cleared (count);
	  offset_map.safe_grow_cleared (count);
	}
      for (i = 0; i < count; i++)
	{
	  struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
	  int map = -1;

	  /* TODO: handle non-NOPs when merging.  */
	  if (jfunc->type == IPA_JF_PASS_THROUGH)
	    {
	      if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
		map = ipa_get_jf_pass_through_formal_id (jfunc);
	      /* A negative offset marks by-ref aggregate conditions that
		 must be discarded because contents are not preserved.  */
	      if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
		offset_map[i] = -1;
	    }
	  else if (jfunc->type == IPA_JF_ANCESTOR)
	    {
	      HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc);
	      if (offset >= 0 && offset < INT_MAX)
		{
		  map = ipa_get_jf_ancestor_formal_id (jfunc);
		  if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
		    offset = -1;
		  offset_map[i] = offset;
		}
	    }
	  operand_map[i] = map;
	  gcc_assert (map < ipa_get_param_count (IPA_NODE_REF (to)));
	}
    }
  /* Fold each of the callee's size/time entries into the caller's
     summary, remapping its predicate and scaling time by call frequency
     and by the probability the predicate holds.  */
  for (i = 0; vec_safe_iterate (callee_info->entry, i, &e); i++)
    {
      struct predicate p = remap_predicate (info, callee_info,
					    &e->predicate, operand_map,
					    offset_map, clause,
					    &toplev_predicate);
      if (!false_predicate_p (&p))
	{
	  gcov_type add_time = ((gcov_type) e->time * edge->frequency
				+ CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
	  int prob = predicate_probability (callee_info->conds,
					    &e->predicate,
					    clause, es->param);
	  add_time = apply_probability ((gcov_type) add_time, prob);
	  if (add_time > MAX_TIME * INLINE_TIME_SCALE)
	    add_time = MAX_TIME * INLINE_TIME_SCALE;
	  if (prob != REG_BR_PROB_BASE
	      && dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t\tScaling time by probability:%f\n",
		       (double) prob / REG_BR_PROB_BASE);
	    }
	  account_size_time (info, e->size, add_time, &p);
	}
    }
  /* Remap predicates on the callee's own call edges and hints.  */
  remap_edge_summaries (edge, edge->callee, info, callee_info, operand_map,
			offset_map, clause, &toplev_predicate);
  remap_hint_predicate (info, callee_info,
			&callee_info->loop_iterations,
			operand_map, offset_map, clause, &toplev_predicate);
  remap_hint_predicate (info, callee_info,
			&callee_info->loop_stride,
			operand_map, offset_map, clause, &toplev_predicate);
  remap_hint_predicate (info, callee_info,
			&callee_info->array_index,
			operand_map, offset_map, clause, &toplev_predicate);

  inline_update_callee_summaries (edge->callee,
				  inline_edge_summary (edge)->loop_depth);

  /* We do not maintain predicates of inlined edges, free it.  */
  edge_set_predicate (edge, &true_p);
  /* Similarly remove param summaries.  */
  es->param.release ();
  operand_map.release ();
  offset_map.release ();
}
3656
3657 /* For performance reasons inline_merge_summary is not updating overall size
3658 and time. Recompute it. */
3659
3660 void
3661 inline_update_overall_summary (struct cgraph_node *node)
3662 {
3663 struct inline_summary *info = inline_summaries->get (node);
3664 size_time_entry *e;
3665 int i;
3666
3667 info->size = 0;
3668 info->time = 0;
3669 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3670 {
3671 info->size += e->size, info->time += e->time;
3672 if (info->time > MAX_TIME * INLINE_TIME_SCALE)
3673 info->time = MAX_TIME * INLINE_TIME_SCALE;
3674 }
3675 estimate_calls_size_and_time (node, &info->size, &info->min_size,
3676 &info->time, NULL,
3677 ~(clause_t) (1 << predicate_false_condition),
3678 vNULL, vNULL, vNULL);
3679 info->time = (info->time + INLINE_TIME_SCALE / 2) / INLINE_TIME_SCALE;
3680 info->size = (info->size + INLINE_SIZE_SCALE / 2) / INLINE_SIZE_SCALE;
3681 }
3682
/* Return hints derived from EDGE.  */
3684 int
3685 simple_edge_hints (struct cgraph_edge *edge)
3686 {
3687 int hints = 0;
3688 struct cgraph_node *to = (edge->caller->global.inlined_to
3689 ? edge->caller->global.inlined_to : edge->caller);
3690 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
3691 if (inline_summaries->get (to)->scc_no
3692 && inline_summaries->get (to)->scc_no
3693 == inline_summaries->get (callee)->scc_no
3694 && !edge->recursive_p ())
3695 hints |= INLINE_HINT_same_scc;
3696
3697 if (callee->lto_file_data && edge->caller->lto_file_data
3698 && edge->caller->lto_file_data != callee->lto_file_data
3699 && !callee->merged)
3700 hints |= INLINE_HINT_cross_module;
3701
3702 return hints;
3703 }
3704
3705 /* Estimate the time cost for the caller when inlining EDGE.
3706 Only to be called via estimate_edge_time, that handles the
3707 caching mechanism.
3708
3709 When caching, also update the cache entry. Compute both time and
3710 size, since we always need both metrics eventually. */
3711
int
do_estimate_edge_time (struct cgraph_edge *edge)
{
  int time;
  int size;
  inline_hints hints;
  struct cgraph_node *callee;
  clause_t clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;
  struct inline_edge_summary *es = inline_edge_summary (edge);
  int min_size;

  callee = edge->callee->ultimate_alias_target ();

  gcc_checking_assert (edge->inline_failed);
  /* Gather what is known about the arguments at this call site and
     estimate the callee body in that context.  */
  evaluate_properties_for_edge (edge, true,
				&clause, &known_vals, &known_contexts,
				&known_aggs);
  estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
			       known_aggs, &size, &min_size, &time, &hints, es->param);

  /* When we have profile feedback, we can quite safely identify hot
     edges and for those we disable size limits.  Don't do that when
     probability that caller will call the callee is low however, since it
     may hurt optimization of the caller's hot path.  */
  if (edge->count && edge->maybe_hot_p ()
      && (edge->count * 2
	  > (edge->caller->global.inlined_to
	     ? edge->caller->global.inlined_to->count : edge->caller->count)))
    hints |= INLINE_HINT_known_hot;

  known_vals.release ();
  known_contexts.release ();
  known_aggs.release ();
  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* When caching, update the cache entry.  Values are stored biased by
     one (time and size are asserted non-negative above) so that a zero
     slot means "not computed yet".  */
  if (edge_growth_cache.exists ())
    {
      inline_summaries->get (edge->callee)->min_size = min_size;
      if ((int) edge_growth_cache.length () <= edge->uid)
	edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
      edge_growth_cache[edge->uid].time = time + (time >= 0);

      edge_growth_cache[edge->uid].size = size + (size >= 0);
      hints |= simple_edge_hints (edge);
      edge_growth_cache[edge->uid].hints = hints + 1;
    }
  return time;
}
3765
3766
3767 /* Return estimated callee growth after inlining EDGE.
3768 Only to be called via estimate_edge_size. */
3769
int
do_estimate_edge_size (struct cgraph_edge *edge)
{
  int size;
  struct cgraph_node *callee;
  clause_t clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache.exists ())
    {
      do_estimate_edge_time (edge);
      size = edge_growth_cache[edge->uid].size;
      gcc_checking_assert (size);
      /* Cached values are stored biased by one; undo the bias.  */
      return size - (size > 0);
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &known_vals, &known_contexts,
				&known_aggs);
  /* Only the size output is requested; min size, time and hints are not
     needed here.  */
  estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
			       known_aggs, &size, NULL, NULL, NULL, vNULL);
  known_vals.release ();
  known_contexts.release ();
  known_aggs.release ();
  return size;
}
3804
3805
/* Estimate the hints for the caller when inlining EDGE.
   Only to be called via estimate_edge_hints.  */
3808
inline_hints
do_estimate_edge_hints (struct cgraph_edge *edge)
{
  inline_hints hints;
  struct cgraph_node *callee;
  clause_t clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache.exists ())
    {
      do_estimate_edge_time (edge);
      hints = edge_growth_cache[edge->uid].hints;
      gcc_checking_assert (hints);
      /* Cached hints are stored biased by one; undo the bias.  */
      return hints - 1;
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &known_vals, &known_contexts,
				&known_aggs);
  /* Only the hints output is requested here.  */
  estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
			       known_aggs, NULL, NULL, NULL, &hints, vNULL);
  known_vals.release ();
  known_contexts.release ();
  known_aggs.release ();
  hints |= simple_edge_hints (edge);
  return hints;
}
3844
3845
3846 /* Estimate self time of the function NODE after inlining EDGE. */
3847
3848 int
3849 estimate_time_after_inlining (struct cgraph_node *node,
3850 struct cgraph_edge *edge)
3851 {
3852 struct inline_edge_summary *es = inline_edge_summary (edge);
3853 if (!es->predicate || !false_predicate_p (es->predicate))
3854 {
3855 gcov_type time =
3856 inline_summaries->get (node)->time + estimate_edge_time (edge);
3857 if (time < 0)
3858 time = 0;
3859 if (time > MAX_TIME)
3860 time = MAX_TIME;
3861 return time;
3862 }
3863 return inline_summaries->get (node)->time;
3864 }
3865
3866
3867 /* Estimate the size of NODE after inlining EDGE which should be an
3868 edge to either NODE or a call inlined into NODE. */
3869
3870 int
3871 estimate_size_after_inlining (struct cgraph_node *node,
3872 struct cgraph_edge *edge)
3873 {
3874 struct inline_edge_summary *es = inline_edge_summary (edge);
3875 if (!es->predicate || !false_predicate_p (es->predicate))
3876 {
3877 int size = inline_summaries->get (node)->size + estimate_edge_growth (edge);
3878 gcc_assert (size >= 0);
3879 return size;
3880 }
3881 return inline_summaries->get (node)->size;
3882 }
3883
3884
/* Data accumulated by do_estimate_growth_1 across all callers of NODE.  */
struct growth_data
{
  /* Node whose estimated inlining growth is being computed.  */
  struct cgraph_node *node;
  /* Set when some caller edge is recursive.  */
  bool self_recursive;
  /* Set when some caller can never inline NODE (CIF_FINAL_ERROR).  */
  bool uninlinable;
  /* Accumulated estimated growth over inlinable caller edges.  */
  int growth;
};
3892
3893
3894 /* Worker for do_estimate_growth. Collect growth for all callers. */
3895
3896 static bool
3897 do_estimate_growth_1 (struct cgraph_node *node, void *data)
3898 {
3899 struct cgraph_edge *e;
3900 struct growth_data *d = (struct growth_data *) data;
3901
3902 for (e = node->callers; e; e = e->next_caller)
3903 {
3904 gcc_checking_assert (e->inline_failed);
3905
3906 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
3907 {
3908 d->uninlinable = true;
3909 continue;
3910 }
3911
3912 if (e->recursive_p ())
3913 {
3914 d->self_recursive = true;
3915 continue;
3916 }
3917 d->growth += estimate_edge_growth (e);
3918 }
3919 return false;
3920 }
3921
3922
3923 /* Estimate the growth caused by inlining NODE into all callees. */
3924
int
estimate_growth (struct cgraph_node *node)
{
  struct growth_data d = { node, false, false, 0 };
  struct inline_summary *info = inline_summaries->get (node);

  /* Accumulate growth over all callers of NODE and its aliases.  */
  node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);

  /* For self recursive functions the growth estimation really should be
     infinity.  We don't want to return very large values because the growth
     plays various roles in badness computation fractions.  Be sure to not
     return zero or negative growths.  */
  if (d.self_recursive)
    d.growth = d.growth < info->size ? info->size : d.growth;
  else if (DECL_EXTERNAL (node->decl) || d.uninlinable)
    ;
  else
    {
      /* If the body disappears once all calls are inlined, its size is a
	 pure saving.  */
      if (node->will_be_removed_from_program_if_no_direct_calls_p ())
	d.growth -= info->size;
      /* COMDAT functions are very often not shared across multiple units
	 since they come from various template instantiations.
	 Take this into account.  */
      else if (DECL_COMDAT (node->decl)
	       && node->can_remove_if_no_direct_calls_p ())
	d.growth -= (info->size
		     * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
		     + 50) / 100;
    }

  return d.growth;
}
3957
3958 /* Verify if there are fewer than MAX_CALLERS. */
3959
3960 static bool
3961 check_callers (cgraph_node *node, int *max_callers)
3962 {
3963 ipa_ref *ref;
3964
3965 if (!node->can_remove_if_no_direct_calls_and_refs_p ())
3966 return true;
3967
3968 for (cgraph_edge *e = node->callers; e; e = e->next_caller)
3969 {
3970 (*max_callers)--;
3971 if (!*max_callers
3972 || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
3973 return true;
3974 }
3975
3976 FOR_EACH_ALIAS (node, ref)
3977 if (check_callers (dyn_cast <cgraph_node *> (ref->referring), max_callers))
3978 return true;
3979
3980 return false;
3981 }
3982
3983
3984 /* Make cheap estimation if growth of NODE is likely positive knowing
3985 EDGE_GROWTH of one particular edge.
3986 We assume that most of other edges will have similar growth
3987 and skip computation if there are too many callers. */
3988
bool
growth_likely_positive (struct cgraph_node *node,
			int edge_growth)
{
  int max_callers;
  struct cgraph_edge *e;
  gcc_checking_assert (edge_growth > 0);

  /* First quickly check if NODE is removable at all.  */
  if (DECL_EXTERNAL (node->decl))
    return true;
  if (!node->can_remove_if_no_direct_calls_and_refs_p ()
      || node->address_taken)
    return true;

  /* Bound the caller walk: once duplicating the body into this many
     callers must outgrow the body itself, give up early.  */
  max_callers = inline_summaries->get (node)->size * 4 / edge_growth + 2;

  for (e = node->callers; e; e = e->next_caller)
    {
      max_callers--;
      if (!max_callers
	  || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
    }

  /* Callers of aliases of NODE count against the same budget.  */
  ipa_ref *ref;
  FOR_EACH_ALIAS (node, ref)
    if (check_callers (dyn_cast <cgraph_node *> (ref->referring), &max_callers))
      return true;

  /* Unlike for functions called once, we play unsafe with
     COMDATs.  We can allow that since we know functions
     in consideration are small (and thus risk is small) and
     moreover grow estimates already accounts that COMDAT
     functions may or may not disappear when eliminated from
     current unit.  With good probability making aggressive
     choice in all units is going to make overall program
     smaller.  */
  if (DECL_COMDAT (node->decl))
    {
      if (!node->can_remove_if_no_direct_calls_p ())
	return true;
    }
  else if (!node->will_be_removed_from_program_if_no_direct_calls_p ())
    return true;

  /* Fall back to the precise (and more expensive) estimate.  */
  return estimate_growth (node) > 0;
}
4037
4038
4039 /* This function performs intraprocedural analysis in NODE that is required to
4040 inline indirect calls. */
4041
4042 static void
4043 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
4044 {
4045 ipa_analyze_node (node);
4046 if (dump_file && (dump_flags & TDF_DETAILS))
4047 {
4048 ipa_print_node_params (dump_file, node);
4049 ipa_print_node_jump_functions (dump_file, node);
4050 }
4051 }
4052
4053
4054 /* Note function body size. */
4055
void
inline_analyze_function (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));

  if (dump_file)
    fprintf (dump_file, "\nAnalyzing function: %s/%u\n",
	     node->name (), node->order);
  if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
    inline_indirect_intraprocedural_analysis (node);
  compute_inline_parameters (node, false);
  /* NOTE(review): the guard above uses per-function opt_for_fn while the
     one below tests the global `optimize' flag; looks inconsistent when
     functions carry their own optimize attributes -- confirm intent.  */
  if (!optimize)
    {
      struct cgraph_edge *e;
      /* Without optimization, forbid inlining on every call site; the
	 inliner then only performs the required edge redirections.  */
      for (e = node->callees; e; e = e->next_callee)
	{
	  if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
	    e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
	  e->call_stmt_cannot_inline_p = true;
	}
      for (e = node->indirect_calls; e; e = e->next_callee)
	{
	  if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
	    e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
	  e->call_stmt_cannot_inline_p = true;
	}
    }

  pop_cfun ();
}
4086
4087
4088 /* Called when new function is inserted to callgraph late. */
4089
void
inline_summary_t::insert (struct cgraph_node *node, inline_summary *)
{
  /* A function inserted late gets its summary computed from scratch;
     the summary argument itself is not touched here.  */
  inline_analyze_function (node);
}
4095
4096 /* Note function body size. */
4097
void
inline_generate_summary (void)
{
  struct cgraph_node *node;

  /* When not optimizing, do not bother to analyze.  Inlining is still done
     because edge redirection needs to happen there.  */
  if (!optimize && !flag_generate_lto && !flag_generate_offload && !flag_wpa)
    return;

  if (!inline_summaries)
    inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);

  /* Functions inserted later are analyzed via inline_summary_t::insert.  */
  inline_summaries->enable_insertion_hook ();

  ipa_register_cgraph_hooks ();
  /* Drop any stale summaries before recomputing.  */
  inline_free_summary ();

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->alias)
      inline_analyze_function (node);
}
4120
4121
4122 /* Read predicate from IB. */
4123
4124 static struct predicate
4125 read_predicate (struct lto_input_block *ib)
4126 {
4127 struct predicate out;
4128 clause_t clause;
4129 int k = 0;
4130
4131 do
4132 {
4133 gcc_assert (k <= MAX_CLAUSES);
4134 clause = out.clause[k++] = streamer_read_uhwi (ib);
4135 }
4136 while (clause);
4137
4138 /* Zero-initialize the remaining clauses in OUT. */
4139 while (k <= MAX_CLAUSES)
4140 out.clause[k++] = 0;
4141
4142 return out;
4143 }
4144
4145
/* Read inline edge summary for edge E from IB.  */
4147
static void
read_inline_edge_summary (struct lto_input_block *ib, struct cgraph_edge *e)
{
  struct inline_edge_summary *es = inline_edge_summary (e);
  struct predicate p;
  int length, i;

  /* Fields are read in the order the writer streamed them.  */
  es->call_stmt_size = streamer_read_uhwi (ib);
  es->call_stmt_time = streamer_read_uhwi (ib);
  es->loop_depth = streamer_read_uhwi (ib);
  p = read_predicate (ib);
  edge_set_predicate (e, &p);
  /* Per-parameter change probabilities; a zero length means none were
     streamed for this edge.  */
  length = streamer_read_uhwi (ib);
  if (length)
    {
      es->param.safe_grow_cleared (length);
      for (i = 0; i < length; i++)
	es->param[i].change_prob = streamer_read_uhwi (ib);
    }
}
4168
4169
4170 /* Stream in inline summaries from the section. */
4171
static void
inline_read_section (struct lto_file_decl_data *file_data, const char *data,
		     size_t len)
{
  /* The section begins with an lto_function_header giving the sizes of
     the CFG part, the main stream and the string table that follow.  */
  const struct lto_function_header *header =
    (const struct lto_function_header *) data;
  const int cfg_offset = sizeof (struct lto_function_header);
  const int main_offset = cfg_offset + header->cfg_size;
  const int string_offset = main_offset + header->main_size;
  struct data_in *data_in;
  unsigned int i, count2, j;
  unsigned int f_count;

  lto_input_block ib ((const char *) data + main_offset, header->main_size,
		      file_data->mode_table);

  data_in =
    lto_data_in_create (file_data, (const char *) data + string_offset,
			header->string_size, vNULL);
  /* Number of function records in this section.  */
  f_count = streamer_read_uhwi (&ib);
  for (i = 0; i < f_count; i++)
    {
      unsigned int index;
      struct cgraph_node *node;
      struct inline_summary *info;
      lto_symtab_encoder_t encoder;
      struct bitpack_d bp;
      struct cgraph_edge *e;
      predicate p;

      /* Map the streamed node reference back to a cgraph node.  */
      index = streamer_read_uhwi (&ib);
      encoder = file_data->symtab_node_encoder;
      node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
								index));
      info = inline_summaries->get (node);

      /* Scalar fields.  The global values start out equal to the self
	 values; they diverge only once inline decisions are applied
	 (see the file-header comment on local vs. global parameters).  */
      info->estimated_stack_size
	= info->estimated_self_stack_size = streamer_read_uhwi (&ib);
      info->size = info->self_size = streamer_read_uhwi (&ib);
      info->time = info->self_time = streamer_read_uhwi (&ib);

      bp = streamer_read_bitpack (&ib);
      info->inlinable = bp_unpack_value (&bp, 1);
      info->contains_cilk_spawn = bp_unpack_value (&bp, 1);

      /* Conditions referenced by the predicates streamed below.  */
      count2 = streamer_read_uhwi (&ib);
      gcc_assert (!info->conds);
      for (j = 0; j < count2; j++)
	{
	  struct condition c;
	  c.operand_num = streamer_read_uhwi (&ib);
	  c.code = (enum tree_code) streamer_read_uhwi (&ib);
	  c.val = stream_read_tree (&ib, data_in);
	  bp = streamer_read_bitpack (&ib);
	  c.agg_contents = bp_unpack_value (&bp, 1);
	  c.by_ref = bp_unpack_value (&bp, 1);
	  /* The offset was streamed only for aggregate conditions;
	     this mirrors the test in inline_write_summary.  */
	  if (c.agg_contents)
	    c.offset = streamer_read_uhwi (&ib);
	  vec_safe_push (info->conds, c);
	}
      /* Predicated size/time entries.  */
      count2 = streamer_read_uhwi (&ib);
      gcc_assert (!info->entry);
      for (j = 0; j < count2; j++)
	{
	  struct size_time_entry e;

	  e.size = streamer_read_uhwi (&ib);
	  e.time = streamer_read_uhwi (&ib);
	  e.predicate = read_predicate (&ib);

	  vec_safe_push (info->entry, e);
	}

      /* Inline-hint predicates.  */
      p = read_predicate (&ib);
      set_hint_predicate (&info->loop_iterations, p);
      p = read_predicate (&ib);
      set_hint_predicate (&info->loop_stride, p);
      p = read_predicate (&ib);
      set_hint_predicate (&info->array_index, p);
      /* Edge summaries follow: direct callees first, then indirect
	 calls, matching the order used by inline_write_summary.  */
      for (e = node->callees; e; e = e->next_callee)
	read_inline_edge_summary (&ib, e);
      for (e = node->indirect_calls; e; e = e->next_callee)
	read_inline_edge_summary (&ib, e);
    }

  lto_free_section_data (file_data, LTO_section_inline_summary, NULL, data,
			 len);
  lto_data_in_delete (data_in);
}
4261
4262
/* Read inline summary.  Jump functions are shared among ipa-cp
   and the inliner, so when ipa-cp is active they have already been
   read and need not be read again here.  */
4266
4267 void
4268 inline_read_summary (void)
4269 {
4270 struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
4271 struct lto_file_decl_data *file_data;
4272 unsigned int j = 0;
4273
4274 inline_summary_alloc ();
4275
4276 while ((file_data = file_data_vec[j++]))
4277 {
4278 size_t len;
4279 const char *data = lto_get_section_data (file_data,
4280 LTO_section_inline_summary,
4281 NULL, &len);
4282 if (data)
4283 inline_read_section (file_data, data, len);
4284 else
4285 /* Fatal error here. We do not want to support compiling ltrans units
4286 with different version of compiler or different flags than the WPA
4287 unit, so this should never happen. */
4288 fatal_error (input_location,
4289 "ipa inline summary is missing in input file");
4290 }
4291 if (optimize)
4292 {
4293 ipa_register_cgraph_hooks ();
4294 if (!flag_ipa_cp)
4295 ipa_prop_read_jump_functions ();
4296 }
4297
4298 gcc_assert (inline_summaries);
4299 inline_summaries->enable_insertion_hook ();
4300 }
4301
4302
4303 /* Write predicate P to OB. */
4304
4305 static void
4306 write_predicate (struct output_block *ob, struct predicate *p)
4307 {
4308 int j;
4309 if (p)
4310 for (j = 0; p->clause[j]; j++)
4311 {
4312 gcc_assert (j < MAX_CLAUSES);
4313 streamer_write_uhwi (ob, p->clause[j]);
4314 }
4315 streamer_write_uhwi (ob, 0);
4316 }
4317
4318
4319 /* Write inline summary for edge E to OB. */
4320
4321 static void
4322 write_inline_edge_summary (struct output_block *ob, struct cgraph_edge *e)
4323 {
4324 struct inline_edge_summary *es = inline_edge_summary (e);
4325 int i;
4326
4327 streamer_write_uhwi (ob, es->call_stmt_size);
4328 streamer_write_uhwi (ob, es->call_stmt_time);
4329 streamer_write_uhwi (ob, es->loop_depth);
4330 write_predicate (ob, es->predicate);
4331 streamer_write_uhwi (ob, es->param.length ());
4332 for (i = 0; i < (int) es->param.length (); i++)
4333 streamer_write_uhwi (ob, es->param[i].change_prob);
4334 }
4335
4336
/* Write inline summaries for all nodes in the symbol-table encoder.
   Jump functions are shared among ipa-cp and the inliner, so when ipa-cp
   is active, we don't need to write them twice.  */
4340
4341 void
4342 inline_write_summary (void)
4343 {
4344 struct cgraph_node *node;
4345 struct output_block *ob = create_output_block (LTO_section_inline_summary);
4346 lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
4347 unsigned int count = 0;
4348 int i;
4349
4350 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4351 {
4352 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4353 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4354 if (cnode && cnode->definition && !cnode->alias)
4355 count++;
4356 }
4357 streamer_write_uhwi (ob, count);
4358
4359 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4360 {
4361 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4362 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4363 if (cnode && (node = cnode)->definition && !node->alias)
4364 {
4365 struct inline_summary *info = inline_summaries->get (node);
4366 struct bitpack_d bp;
4367 struct cgraph_edge *edge;
4368 int i;
4369 size_time_entry *e;
4370 struct condition *c;
4371
4372 streamer_write_uhwi (ob,
4373 lto_symtab_encoder_encode (encoder,
4374
4375 node));
4376 streamer_write_hwi (ob, info->estimated_self_stack_size);
4377 streamer_write_hwi (ob, info->self_size);
4378 streamer_write_hwi (ob, info->self_time);
4379 bp = bitpack_create (ob->main_stream);
4380 bp_pack_value (&bp, info->inlinable, 1);
4381 bp_pack_value (&bp, info->contains_cilk_spawn, 1);
4382 streamer_write_bitpack (&bp);
4383 streamer_write_uhwi (ob, vec_safe_length (info->conds));
4384 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
4385 {
4386 streamer_write_uhwi (ob, c->operand_num);
4387 streamer_write_uhwi (ob, c->code);
4388 stream_write_tree (ob, c->val, true);
4389 bp = bitpack_create (ob->main_stream);
4390 bp_pack_value (&bp, c->agg_contents, 1);
4391 bp_pack_value (&bp, c->by_ref, 1);
4392 streamer_write_bitpack (&bp);
4393 if (c->agg_contents)
4394 streamer_write_uhwi (ob, c->offset);
4395 }
4396 streamer_write_uhwi (ob, vec_safe_length (info->entry));
4397 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
4398 {
4399 streamer_write_uhwi (ob, e->size);
4400 streamer_write_uhwi (ob, e->time);
4401 write_predicate (ob, &e->predicate);
4402 }
4403 write_predicate (ob, info->loop_iterations);
4404 write_predicate (ob, info->loop_stride);
4405 write_predicate (ob, info->array_index);
4406 for (edge = node->callees; edge; edge = edge->next_callee)
4407 write_inline_edge_summary (ob, edge);
4408 for (edge = node->indirect_calls; edge; edge = edge->next_callee)
4409 write_inline_edge_summary (ob, edge);
4410 }
4411 }
4412 streamer_write_char_stream (ob->main_stream, 0);
4413 produce_asm (ob, NULL);
4414 destroy_output_block (ob);
4415
4416 if (optimize && !flag_ipa_cp)
4417 ipa_prop_write_jump_functions ();
4418 }
4419
4420
4421 /* Release inline summary. */
4422
4423 void
4424 inline_free_summary (void)
4425 {
4426 struct cgraph_node *node;
4427 if (edge_removal_hook_holder)
4428 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
4429 edge_removal_hook_holder = NULL;
4430 if (edge_duplication_hook_holder)
4431 symtab->remove_edge_duplication_hook (edge_duplication_hook_holder);
4432 edge_duplication_hook_holder = NULL;
4433 if (!inline_edge_summary_vec.exists ())
4434 return;
4435 FOR_EACH_DEFINED_FUNCTION (node)
4436 if (!node->alias)
4437 reset_inline_summary (node, inline_summaries->get (node));
4438 inline_summaries->release ();
4439 inline_summaries = NULL;
4440 inline_edge_summary_vec.release ();
4441 edge_predicate_pool.release ();
4442 }
This page took 0.241527 seconds and 5 git commands to generate.