/* Conditional constant propagation pass for the GNU compiler.
   Copyright (C) 2000-2015 Free Software Foundation, Inc.
   Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
   Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Conditional constant propagation (CCP) is based on the SSA
   propagation engine (tree-ssa-propagate.c).  Constant assignments of
   the form VAR = CST are propagated from the assignments into uses of
   VAR, which in turn may generate new constants.  The simulation uses
   a four level lattice to keep track of constant values associated
   with SSA names.  Given an SSA name V_i, it may take one of the
   following values:

	UNINITIALIZED  ->  the initial state of the value.  This value
			   is replaced with a correct initial value
			   the first time the value is used, so the
			   rest of the pass does not need to care about
			   it.  Using this value simplifies initialization
			   of the pass, and prevents us from needlessly
			   scanning statements that are never reached.

	UNDEFINED      ->  V_i is a local variable whose definition
			   has not been processed yet.  Therefore we
			   don't yet know if its value is a constant
			   or not.

	CONSTANT       ->  V_i has been found to hold a constant
			   value C.

	VARYING        ->  V_i cannot take a constant value, or if it
			   does, it is not possible to determine it
			   at compile time.

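   As an illustrative sketch (hypothetical GIMPLE, not taken from any
   particular test case), simulating

	x_1 = 4;
	y_2 = x_1 + 1;
	z_3 = foo ();

   leaves x_1 and y_2 CONSTANT (4 and 5 respectively), while z_3,
   whose value cannot be known at compile time, ends up VARYING.
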
   The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:

   1- In ccp_visit_stmt, we are interested in assignments whose RHS
      evaluates into a constant and conditional jumps whose predicate
      evaluates into a boolean true or false.  When an assignment of
      the form V_i = CONST is found, V_i's lattice value is set to
      CONSTANT and CONST is associated with it.  This causes the
      propagation engine to add all the SSA edges coming out of the
      assignment into the worklists, so that statements that use V_i
      can be visited.

      If the statement is a conditional with a constant predicate, we
      mark the outgoing edges as executable or not executable
      depending on the predicate's value.  This is then used when
      visiting PHI nodes to know when a PHI argument can be ignored.


   2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
      same constant C, then the LHS of the PHI is set to C.  This
      evaluation is known as the "meet operation".  Since one of the
      goals of this evaluation is to optimistically return constant
      values as often as possible, it uses two main short cuts:

      - If an argument is flowing in through a non-executable edge, it
	is ignored.  This is useful in cases like this:

			if (PRED)
			  a_9 = 3;
			else
			  a_10 = 100;
			a_11 = PHI (a_9, a_10)

	If PRED is known to always evaluate to false, then we can
	assume that a_11 will always take its value from a_10, meaning
	that instead of considering it VARYING (a_9 and a_10 have
	different values), we can consider it CONSTANT 100.

      - If an argument has an UNDEFINED value, then it does not affect
	the outcome of the meet operation.  If a variable V_i has an
	UNDEFINED value, it means that either its defining statement
	hasn't been visited yet or V_i has no defining statement, in
	which case the original symbol 'V' is being used
	uninitialized.  Since 'V' is a local variable, the compiler
	may assume any initial value for it.


   After propagation, every variable V_i that ends up with a lattice
   value of CONSTANT will have the associated constant value in the
   array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
   final substitution and folding.

   This algorithm uses wide-ints at the max precision of the target.
   This means that, with one uninteresting exception, variables with
   UNSIGNED types never go to VARYING because the bits above the
   precision of the type of the variable are always zero.  The
   uninteresting case is a variable of UNSIGNED type that has the
   maximum precision of the target.  Such variables can go to VARYING,
   but this causes no loss of information since these variables will
   never be extended.

   References:

     Constant propagation with conditional branches,
     Wegman and Zadeck, ACM TOPLAS 13(2):181-210.

     Building an Optimizing Compiler,
     Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.

     Advanced Compiler Design and Implementation,
     Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "hash-table.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "value-prof.h"
#include "langhooks.h"
#include "target.h"
#include "diagnostic-core.h"
#include "dbgcnt.h"
#include "params.h"
#include "wide-int-print.h"
#include "builtins.h"
#include "tree-chkp.h"


/* Possible lattice values.  */
typedef enum
{
  UNINITIALIZED,
  UNDEFINED,
  CONSTANT,
  VARYING
} ccp_lattice_t;

struct ccp_prop_value_t {
    /* Lattice value.  */
    ccp_lattice_t lattice_val;

    /* Propagated value.  */
    tree value;

    /* Mask that applies to the propagated value during CCP.  For X
       with a CONSTANT lattice value X & ~mask == value & ~mask.  The
       zero bits in the mask cover constant values.  The ones mean no
       information.  */
    widest_int mask;
};
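
/* An illustrative example (hypothetical values): value == 0x8 with
   mask == 0x3 describes any X with X & ~0x3 == 0x8, i.e. the set
   { 0x8, 0x9, 0xa, 0xb }: bit 3 is known to be set, bits 0-1 are
   unknown and all remaining bits are known to be zero.  */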

/* Array of propagated constant values.  After propagation,
   CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
   the constant is held in an SSA name representing a memory store
   (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
   memory reference used to store (i.e., the LHS of the assignment
   doing the store).  */
static ccp_prop_value_t *const_val;
static unsigned n_const_val;

static void canonicalize_value (ccp_prop_value_t *);
static bool ccp_fold_stmt (gimple_stmt_iterator *);

/* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */

static void
dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
{
  switch (val.lattice_val)
    {
    case UNINITIALIZED:
      fprintf (outf, "%sUNINITIALIZED", prefix);
      break;
    case UNDEFINED:
      fprintf (outf, "%sUNDEFINED", prefix);
      break;
    case VARYING:
      fprintf (outf, "%sVARYING", prefix);
      break;
    case CONSTANT:
      if (TREE_CODE (val.value) != INTEGER_CST
	  || val.mask == 0)
	{
	  fprintf (outf, "%sCONSTANT ", prefix);
	  print_generic_expr (outf, val.value, dump_flags);
	}
      else
	{
	  widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
					     val.mask);
	  fprintf (outf, "%sCONSTANT ", prefix);
	  print_hex (cval, outf);
	  fprintf (outf, " (");
	  print_hex (val.mask, outf);
	  fprintf (outf, ")");
	}
      break;
    default:
      gcc_unreachable ();
    }
}
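
/* For illustration: with the hypothetical partially constant value
   from above (value 0x8, mask 0x3), the CONSTANT arm prints the
   known bits followed by the mask, roughly "CONSTANT 0x8 (0x3)".  */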

/* Print lattice value VAL to stderr.  */

void debug_lattice_value (ccp_prop_value_t val);

DEBUG_FUNCTION void
debug_lattice_value (ccp_prop_value_t val)
{
  dump_lattice_value (stderr, "", val);
  fprintf (stderr, "\n");
}

/* Extend NONZERO_BITS to a full mask, with the upper bits being set.  */

static widest_int
extend_mask (const wide_int &nonzero_bits)
{
  return (wi::mask <widest_int> (wi::get_precision (nonzero_bits), true)
	  | widest_int::from (nonzero_bits, UNSIGNED));
}

/* Compute a default value for variable VAR and store it in the
   CONST_VAL array.  The following rules are used to get default
   values:

   1- Global and static variables that are declared constant are
      considered CONSTANT.

   2- Any other value is considered UNDEFINED.  This is useful when
      considering PHI nodes.  PHI arguments that are undefined do not
      change the constant value of the PHI node, which allows for more
      constants to be propagated.

   3- Variables defined by statements other than assignments and PHI
      nodes are considered VARYING.

   4- Initial values of variables that are not GIMPLE registers are
      considered VARYING.  */
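
/* As a sketch of rule 1 (hypothetical source, not from the
   testsuite): given "static const int n = 42;", an SSA name defined
   by the load "x_1 = n" starts out CONSTANT 42, because
   get_symbol_constant_value can resolve the read-only decl.  */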

static ccp_prop_value_t
get_default_value (tree var)
{
  ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
  gimple stmt;

  stmt = SSA_NAME_DEF_STMT (var);

  if (gimple_nop_p (stmt))
    {
      /* Variables defined by an empty statement are those used
	 before being initialized.  If VAR is a local variable, we
	 can assume initially that it is UNDEFINED, otherwise we must
	 consider it VARYING.  */
      if (!virtual_operand_p (var)
	  && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
	val.lattice_val = UNDEFINED;
      else
	{
	  val.lattice_val = VARYING;
	  val.mask = -1;
	  if (flag_tree_bit_ccp)
	    {
	      wide_int nonzero_bits = get_nonzero_bits (var);
	      if (nonzero_bits != -1)
		{
		  val.lattice_val = CONSTANT;
		  val.value = build_zero_cst (TREE_TYPE (var));
		  val.mask = extend_mask (nonzero_bits);
		}
	    }
	}
    }
  else if (is_gimple_assign (stmt))
    {
      tree cst;
      if (gimple_assign_single_p (stmt)
	  && DECL_P (gimple_assign_rhs1 (stmt))
	  && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
	{
	  val.lattice_val = CONSTANT;
	  val.value = cst;
	}
      else
	{
	  /* Any other variable defined by an assignment is considered
	     UNDEFINED.  */
	  val.lattice_val = UNDEFINED;
	}
    }
  else if ((is_gimple_call (stmt)
	    && gimple_call_lhs (stmt) != NULL_TREE)
	   || gimple_code (stmt) == GIMPLE_PHI)
    {
      /* A variable defined by a call or a PHI node is considered
	 UNDEFINED.  */
      val.lattice_val = UNDEFINED;
    }
  else
    {
      /* Otherwise, VAR will never take on a constant value.  */
      val.lattice_val = VARYING;
      val.mask = -1;
    }

  return val;
}


/* Get the constant value associated with variable VAR.  */

static inline ccp_prop_value_t *
get_value (tree var)
{
  ccp_prop_value_t *val;

  if (const_val == NULL
      || SSA_NAME_VERSION (var) >= n_const_val)
    return NULL;

  val = &const_val[SSA_NAME_VERSION (var)];
  if (val->lattice_val == UNINITIALIZED)
    *val = get_default_value (var);

  canonicalize_value (val);

  return val;
}

/* Return the constant tree value associated with VAR.  */

static inline tree
get_constant_value (tree var)
{
  ccp_prop_value_t *val;
  if (TREE_CODE (var) != SSA_NAME)
    {
      if (is_gimple_min_invariant (var))
	return var;
      return NULL_TREE;
    }
  val = get_value (var);
  if (val
      && val->lattice_val == CONSTANT
      && (TREE_CODE (val->value) != INTEGER_CST
	  || val->mask == 0))
    return val->value;
  return NULL_TREE;
}

/* Sets the value associated with VAR to VARYING.  */

static inline void
set_value_varying (tree var)
{
  ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];

  val->lattice_val = VARYING;
  val->value = NULL_TREE;
  val->mask = -1;
}

/* For integer constants, make sure to drop TREE_OVERFLOW.  */

static void
canonicalize_value (ccp_prop_value_t *val)
{
  if (val->lattice_val != CONSTANT)
    return;

  if (TREE_OVERFLOW_P (val->value))
    val->value = drop_tree_overflow (val->value);
}

/* Return whether the lattice transition is valid.  */

static bool
valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
{
  /* Lattice transitions must always be monotonically increasing in
     value.  */
  if (old_val.lattice_val < new_val.lattice_val)
    return true;

  if (old_val.lattice_val != new_val.lattice_val)
    return false;

  if (!old_val.value && !new_val.value)
    return true;

  /* Now both lattice values are CONSTANT.  */

  /* Allow transitioning from PHI <&x, not executable> == &x
     to PHI <&x, &y> == common alignment.  */
  if (TREE_CODE (old_val.value) != INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return true;

  /* Bit-lattices have to agree in the still valid bits.  */
  if (TREE_CODE (old_val.value) == INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
	    == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));

  /* Otherwise constant values have to agree.  */
  if (operand_equal_p (old_val.value, new_val.value, 0))
    return true;

  /* At least the kinds and types should agree now.  */
  if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
      || !types_compatible_p (TREE_TYPE (old_val.value),
			      TREE_TYPE (new_val.value)))
    return false;

  /* For floats and !HONOR_NANS allow transitions from (partial) NaN
     to non-NaN.  */
  tree type = TREE_TYPE (new_val.value);
  if (SCALAR_FLOAT_TYPE_P (type)
      && !HONOR_NANS (type))
    {
      if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
	return true;
    }
  else if (VECTOR_FLOAT_TYPE_P (type)
	   && !HONOR_NANS (type))
    {
      for (unsigned i = 0; i < VECTOR_CST_NELTS (old_val.value); ++i)
	if (!REAL_VALUE_ISNAN
	       (TREE_REAL_CST (VECTOR_CST_ELT (old_val.value, i)))
	    && !operand_equal_p (VECTOR_CST_ELT (old_val.value, i),
				 VECTOR_CST_ELT (new_val.value, i), 0))
	  return false;
      return true;
    }
  else if (COMPLEX_FLOAT_TYPE_P (type)
	   && !HONOR_NANS (type))
    {
      if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
	  && !operand_equal_p (TREE_REALPART (old_val.value),
			       TREE_REALPART (new_val.value), 0))
	return false;
      if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
	  && !operand_equal_p (TREE_IMAGPART (old_val.value),
			       TREE_IMAGPART (new_val.value), 0))
	return false;
      return true;
    }
  return false;
}

/* Set the value for variable VAR to NEW_VAL.  Return true if the new
   value is different from VAR's previous value.  */

static bool
set_lattice_value (tree var, ccp_prop_value_t new_val)
{
  /* We can deal with old UNINITIALIZED values just fine here.  */
  ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];

  canonicalize_value (&new_val);

  /* We have to be careful to not go up the bitwise lattice
     represented by the mask.
     ??? This doesn't seem to be the best place to enforce this.  */
  if (new_val.lattice_val == CONSTANT
      && old_val->lattice_val == CONSTANT
      && TREE_CODE (new_val.value) == INTEGER_CST
      && TREE_CODE (old_val->value) == INTEGER_CST)
    {
      widest_int diff = (wi::to_widest (new_val.value)
			 ^ wi::to_widest (old_val->value));
      new_val.mask = new_val.mask | old_val->mask | diff;
    }
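
  /* Illustrative sketch (hypothetical values): going from CONSTANT 4
     to CONSTANT 6 sets diff to 4 ^ 6 == 2, so bit 1 is added to the
     mask and the value stays partially constant instead of invalidly
     moving sideways in the lattice.  */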

  gcc_checking_assert (valid_lattice_transition (*old_val, new_val));

  /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
     caller that this was a non-transition.  */
  if (old_val->lattice_val != new_val.lattice_val
      || (new_val.lattice_val == CONSTANT
	  && TREE_CODE (new_val.value) == INTEGER_CST
	  && (TREE_CODE (old_val->value) != INTEGER_CST
	      || new_val.mask != old_val->mask)))
    {
      /* ??? We would like to delay creation of INTEGER_CSTs from
	 partially constants here.  */

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  dump_lattice_value (dump_file, "Lattice value changed to ", new_val);
	  fprintf (dump_file, ".  Adding SSA edges to worklist.\n");
	}

      *old_val = new_val;

      gcc_assert (new_val.lattice_val != UNINITIALIZED);
      return true;
    }

  return false;
}

static ccp_prop_value_t get_value_for_expr (tree, bool);
static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
static void bit_value_binop_1 (enum tree_code, tree, widest_int *, widest_int *,
			       tree, const widest_int &, const widest_int &,
			       tree, const widest_int &, const widest_int &);

/* Return a widest_int that can be used for bitwise simplifications
   from VAL.  */

static widest_int
value_to_wide_int (ccp_prop_value_t val)
{
  if (val.value
      && TREE_CODE (val.value) == INTEGER_CST)
    return wi::to_widest (val.value);

  return 0;
}

/* Return the value for the address expression EXPR based on alignment
   information.  */

static ccp_prop_value_t
get_value_from_alignment (tree expr)
{
  tree type = TREE_TYPE (expr);
  ccp_prop_value_t val;
  unsigned HOST_WIDE_INT bitpos;
  unsigned int align;

  gcc_assert (TREE_CODE (expr) == ADDR_EXPR);

  get_pointer_alignment_1 (expr, &align, &bitpos);
  val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
	      ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
	      : -1).and_not (align / BITS_PER_UNIT - 1);
  val.lattice_val = val.mask == -1 ? VARYING : CONSTANT;
  if (val.lattice_val == CONSTANT)
    val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
  else
    val.value = NULL_TREE;

  return val;
}
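
/* For illustration (hypothetical decl): for EXPR == &a where 'a' is
   known to be 16-byte aligned with no misalignment, the low four mask
   bits come out clear (those bits are known) while bits 4 up to the
   pointer precision stay unknown, and the value carries the byte
   misalignment, here 0.  The pointer is thus known to be a multiple
   of 16.  */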

/* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
   return constant bits extracted from alignment information for
   invariant addresses.  */

static ccp_prop_value_t
get_value_for_expr (tree expr, bool for_bits_p)
{
  ccp_prop_value_t val;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      val = *get_value (expr);
      if (for_bits_p
	  && val.lattice_val == CONSTANT
	  && TREE_CODE (val.value) == ADDR_EXPR)
	val = get_value_from_alignment (val.value);
    }
  else if (is_gimple_min_invariant (expr)
	   && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
    {
      val.lattice_val = CONSTANT;
      val.value = expr;
      val.mask = 0;
      canonicalize_value (&val);
    }
  else if (TREE_CODE (expr) == ADDR_EXPR)
    val = get_value_from_alignment (expr);
  else
    {
      val.lattice_val = VARYING;
      val.mask = -1;
      val.value = NULL_TREE;
    }
  return val;
}

/* Return the likely CCP lattice value for STMT.

   If STMT has no operands, then return CONSTANT.

   Else if undefinedness of operands of STMT cause its value to be
   undefined, then return UNDEFINED.

   Else if any operands of STMT are constants, then return CONSTANT.

   Else return VARYING.  */

static ccp_lattice_t
likely_value (gimple stmt)
{
  bool has_constant_operand, has_undefined_operand, all_undefined_operands;
  tree use;
  ssa_op_iter iter;
  unsigned i;

  enum gimple_code code = gimple_code (stmt);

  /* This function appears to be called only for assignments, calls,
     conditionals, and switches, due to the logic in visit_stmt.  */
  gcc_assert (code == GIMPLE_ASSIGN
	      || code == GIMPLE_CALL
	      || code == GIMPLE_COND
	      || code == GIMPLE_SWITCH);

  /* If the statement has volatile operands, it won't fold to a
     constant value.  */
  if (gimple_has_volatile_ops (stmt))
    return VARYING;

  /* Arrive here for more complex cases.  */
  has_constant_operand = false;
  has_undefined_operand = false;
  all_undefined_operands = true;
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      ccp_prop_value_t *val = get_value (use);

      if (val->lattice_val == UNDEFINED)
	has_undefined_operand = true;
      else
	all_undefined_operands = false;

      if (val->lattice_val == CONSTANT)
	has_constant_operand = true;
    }

  /* There may be constants in regular rhs operands.  For calls we
     have to ignore lhs, fndecl and static chain, otherwise only
     the lhs.  */
  for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
       i < gimple_num_ops (stmt); ++i)
    {
      tree op = gimple_op (stmt, i);
      if (!op || TREE_CODE (op) == SSA_NAME)
	continue;
      if (is_gimple_min_invariant (op))
	has_constant_operand = true;
    }

  if (has_constant_operand)
    all_undefined_operands = false;

  if (has_undefined_operand
      && code == GIMPLE_CALL
      && gimple_call_internal_p (stmt))
    switch (gimple_call_internal_fn (stmt))
      {
      /* These 3 builtins use the first argument just as a magic
	 way to find out a decl uid.  */
      case IFN_GOMP_SIMD_LANE:
      case IFN_GOMP_SIMD_VF:
      case IFN_GOMP_SIMD_LAST_LANE:
	has_undefined_operand = false;
	break;
      default:
	break;
      }

  /* If the operation combines operands like COMPLEX_EXPR make sure to
     not mark the result UNDEFINED if only one part of the result is
     undefined.  */
  if (has_undefined_operand && all_undefined_operands)
    return UNDEFINED;
  else if (code == GIMPLE_ASSIGN && has_undefined_operand)
    {
      switch (gimple_assign_rhs_code (stmt))
	{
	/* Unary operators are handled with all_undefined_operands.  */
	case PLUS_EXPR:
	case MINUS_EXPR:
	case POINTER_PLUS_EXPR:
	  /* Not MIN_EXPR, MAX_EXPR.  One VARYING operand may be selected.
	     Not bitwise operators, one VARYING operand may specify the
	     result completely.  Not logical operators for the same reason.
	     Not COMPLEX_EXPR as one VARYING operand makes the result partly
	     not UNDEFINED.  Not *DIV_EXPR, comparisons and shifts because
	     the undefined operand may be promoted.  */
	  return UNDEFINED;

	case ADDR_EXPR:
	  /* If any part of an address is UNDEFINED, like the index
	     of an ARRAY_EXPR, then treat the result as UNDEFINED.  */
	  return UNDEFINED;

	default:
	  ;
	}
    }
  /* If there was an UNDEFINED operand but the result may be not UNDEFINED
     fall back to CONSTANT.  During iteration UNDEFINED may still drop
     to CONSTANT.  */
  if (has_undefined_operand)
    return CONSTANT;

  /* We do not consider virtual operands here -- load from read-only
     memory may have only VARYING virtual operands, but still be
     constant.  */
  if (has_constant_operand
      || gimple_references_memory_p (stmt))
    return CONSTANT;

  return VARYING;
}

/* Returns true if STMT cannot be constant.  */

static bool
surely_varying_stmt_p (gimple stmt)
{
  /* If the statement has operands that we cannot handle, it cannot be
     constant.  */
  if (gimple_has_volatile_ops (stmt))
    return true;

  /* If it is a call and does not return a value or is not a
     builtin and not an indirect call or a call to function with
     assume_aligned/alloc_align attribute, it is varying.  */
  if (is_gimple_call (stmt))
    {
      tree fndecl, fntype = gimple_call_fntype (stmt);
      if (!gimple_call_lhs (stmt)
	  || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
	      && !DECL_BUILT_IN (fndecl)
	      && !lookup_attribute ("assume_aligned",
				    TYPE_ATTRIBUTES (fntype))
	      && !lookup_attribute ("alloc_align",
				    TYPE_ATTRIBUTES (fntype))))
	return true;
    }

  /* Any other store operation is not interesting.  */
  else if (gimple_vdef (stmt))
    return true;

  /* Anything other than assignments and conditional jumps are not
     interesting for CCP.  */
  if (gimple_code (stmt) != GIMPLE_ASSIGN
      && gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_SWITCH
      && gimple_code (stmt) != GIMPLE_CALL)
    return true;

  return false;
}

/* Initialize local data structures for CCP.  */

static void
ccp_initialize (void)
{
  basic_block bb;

  n_const_val = num_ssa_names;
  const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);

  /* Initialize simulation flags for PHI nodes and statements.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
        {
	  gimple stmt = gsi_stmt (i);
	  bool is_varying;
	  /* If the statement is a control insn, then we always
	     want to simulate it at least once.  Failure to do so
	     means that those edges will never get added.  */
	  if (stmt_ends_bb_p (stmt))
	    is_varying = false;
	  else
	    is_varying = surely_varying_stmt_p (stmt);

	  if (is_varying)
	    {
	      tree def;
	      ssa_op_iter iter;

	      /* If the statement will not produce a constant, mark
		 all its outputs VARYING.  */
	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
		set_value_varying (def);
	    }
	  prop_set_simulate_again (stmt, !is_varying);
	}
    }

  /* Now process PHI nodes.  We never clear the simulate_again flag on
     phi nodes, since we do not know which edges are executable yet,
     except for phi nodes for virtual operands when we do not do store ccp.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gphi_iterator i;

      for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
        {
	  gphi *phi = i.phi ();

	  if (virtual_operand_p (gimple_phi_result (phi)))
	    prop_set_simulate_again (phi, false);
	  else
	    prop_set_simulate_again (phi, true);
	}
    }
}

/* Debug count support.  Reset the values of SSA names to VARYING
   when the total number of SSA names analyzed is beyond the debug
   count specified.  */

static void
do_dbg_cnt (void)
{
  unsigned i;
  for (i = 0; i < num_ssa_names; i++)
    {
      if (!dbg_cnt (ccp))
	{
	  const_val[i].lattice_val = VARYING;
	  const_val[i].mask = -1;
	  const_val[i].value = NULL_TREE;
	}
    }
}


/* Do final substitution of propagated values, cleanup the flowgraph and
   free allocated storage.

   Return TRUE when something was optimized.  */

static bool
ccp_finalize (void)
{
  bool something_changed;
  unsigned i;

  do_dbg_cnt ();

  /* Derive alignment and misalignment information from partially
     constant pointers in the lattice or nonzero bits from partially
     constant integers.  */
  for (i = 1; i < num_ssa_names; ++i)
    {
      tree name = ssa_name (i);
      ccp_prop_value_t *val;
      unsigned int tem, align;

      if (!name
	  || (!POINTER_TYPE_P (TREE_TYPE (name))
	      && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
		  /* Don't record nonzero bits before IPA to avoid
		     using too much memory.  */
		  || first_pass_instance)))
	continue;

      val = get_value (name);
      if (val->lattice_val != CONSTANT
	  || TREE_CODE (val->value) != INTEGER_CST)
	continue;

      if (POINTER_TYPE_P (TREE_TYPE (name)))
	{
	  /* Trailing mask bits specify the alignment, trailing value
	     bits the misalignment.  */
	  tem = val->mask.to_uhwi ();
	  align = (tem & -tem);
	  if (align > 1)
	    set_ptr_info_alignment (get_ptr_info (name), align,
				    (TREE_INT_CST_LOW (val->value)
				     & (align - 1)));
	}
      else
	{
	  unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
	  wide_int nonzero_bits = wide_int::from (val->mask, precision,
						  UNSIGNED) | val->value;
	  nonzero_bits &= get_nonzero_bits (name);
	  set_nonzero_bits (name, nonzero_bits);
	}
    }

  /* Perform substitutions based on the known constant values.  */
  something_changed = substitute_and_fold (get_constant_value,
					   ccp_fold_stmt, true);

  free (const_val);
  const_val = NULL;
  return something_changed;
}


/* Compute the meet operator between *VAL1 and *VAL2.  Store the result
   in VAL1.

		any  M UNDEFINED   = any
		any  M VARYING     = VARYING
		Ci   M Cj	   = Ci		if (i == j)
		Ci   M Cj	   = VARYING	if (i != j)
   */

static void
ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
{
  if (val1->lattice_val == UNDEFINED)
    {
      /* UNDEFINED M any = any   */
      *val1 = *val2;
    }
  else if (val2->lattice_val == UNDEFINED)
    {
      /* any M UNDEFINED = any
	 Nothing to do.  VAL1 already contains the value we want.  */
      ;
    }
  else if (val1->lattice_val == VARYING
	   || val2->lattice_val == VARYING)
    {
      /* any M VARYING = VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = -1;
      val1->value = NULL_TREE;
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && TREE_CODE (val1->value) == INTEGER_CST
	   && TREE_CODE (val2->value) == INTEGER_CST)
    {
      /* Ci M Cj = Ci		if (i == j)
	 Ci M Cj = VARYING	if (i != j)

	 For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
	 drop to varying.  */
      val1->mask = (val1->mask | val2->mask
		    | (wi::to_widest (val1->value)
		       ^ wi::to_widest (val2->value)));
      if (val1->mask == -1)
	{
	  val1->lattice_val = VARYING;
	  val1->value = NULL_TREE;
	}
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && simple_cst_equal (val1->value, val2->value) == 1)
    {
      /* Ci M Cj = Ci		if (i == j)
	 Ci M Cj = VARYING	if (i != j)

	 VAL1 already contains the value we want for equivalent values.  */
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && (TREE_CODE (val1->value) == ADDR_EXPR
	       || TREE_CODE (val2->value) == ADDR_EXPR))
    {
      /* When not equal addresses are involved try meeting for
	 alignment.  */
      ccp_prop_value_t tem = *val2;
      if (TREE_CODE (val1->value) == ADDR_EXPR)
	*val1 = get_value_for_expr (val1->value, true);
      if (TREE_CODE (val2->value) == ADDR_EXPR)
	tem = get_value_for_expr (val2->value, true);
      ccp_lattice_meet (val1, &tem);
    }
  else
    {
      /* Any other combination is VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = -1;
      val1->value = NULL_TREE;
    }
}
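
/* An illustrative example (hypothetical values): meeting CONSTANT 4
   with CONSTANT 6 ORs the differing bits into the mask (4 ^ 6 == 2),
   yielding the partially constant result value 4 with mask 2, i.e.
   "some value whose bits other than bit 1 agree with 4".  */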


/* Loop through the PHI_NODE's parameters for BLOCK and compare their
   lattice values to determine PHI_NODE's lattice value.  The value of a
   PHI node is determined by calling ccp_lattice_meet with all the arguments
   of the PHI node that are incoming via executable edges.  */

static enum ssa_prop_result
ccp_visit_phi_node (gphi *phi)
{
  unsigned i;
  ccp_prop_value_t *old_val, new_val;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  old_val = get_value (gimple_phi_result (phi));
  switch (old_val->lattice_val)
    {
    case VARYING:
      return SSA_PROP_VARYING;

    case CONSTANT:
      new_val = *old_val;
      break;

    case UNDEFINED:
      new_val.lattice_val = UNDEFINED;
      new_val.value = NULL_TREE;
      break;

    default:
      gcc_unreachable ();
    }

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      /* Compute the meet operator over all the PHI arguments flowing
	 through executable edges.  */
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "\n    Argument #%d (%d -> %d %sexecutable)\n",
		   i, e->src->index, e->dest->index,
		   (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
	}

      /* If the incoming edge is executable, compute the meet operator for
	 the existing value of the PHI node and the current PHI argument.  */
      if (e->flags & EDGE_EXECUTABLE)
	{
	  tree arg = gimple_phi_arg (phi, i)->def;
	  ccp_prop_value_t arg_val = get_value_for_expr (arg, false);

	  ccp_lattice_meet (&new_val, &arg_val);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t");
	      print_generic_expr (dump_file, arg, dump_flags);
	      dump_lattice_value (dump_file, "\tValue: ", arg_val);
	      fprintf (dump_file, "\n");
	    }

	  if (new_val.lattice_val == VARYING)
	    break;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      dump_lattice_value (dump_file, "\n    PHI node value: ", new_val);
      fprintf (dump_file, "\n\n");
    }

  /* Make the transition to the new value.  */
  if (set_lattice_value (gimple_phi_result (phi), new_val))
    {
      if (new_val.lattice_val == VARYING)
	return SSA_PROP_VARYING;
      else
	return SSA_PROP_INTERESTING;
    }
  else
    return SSA_PROP_NOT_INTERESTING;
}

/* Return the constant value for OP or OP otherwise.  */

static tree
valueize_op (tree op)
{
  if (TREE_CODE (op) == SSA_NAME)
    {
      tree tem = get_constant_value (op);
      if (tem)
	return tem;
    }
  return op;
}

/* Return the constant value for OP, but signal to not follow SSA
   edges if the definition may be simulated again.  */

static tree
valueize_op_1 (tree op)
{
  if (TREE_CODE (op) == SSA_NAME)
    {
      tree tem = get_constant_value (op);
      if (tem)
	return tem;
      /* If the definition may be simulated again we cannot follow
	 this SSA edge as the SSA propagator does not necessarily
	 re-visit the use.  */
      gimple def_stmt = SSA_NAME_DEF_STMT (op);
      if (prop_simulate_again_p (def_stmt))
	return NULL_TREE;
    }
  return op;
}

/* CCP specific front-end to the non-destructive constant folding
   routines.

   Attempt to simplify the RHS of STMT knowing that one or more
   operands are constants.

   If simplification is possible, return the simplified RHS,
   otherwise return the original RHS or NULL_TREE.  */

static tree
ccp_fold (gimple stmt)
{
  location_t loc = gimple_location (stmt);
  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	/* Handle comparison operators that can appear in GIMPLE form.  */
	tree op0 = valueize_op (gimple_cond_lhs (stmt));
	tree op1 = valueize_op (gimple_cond_rhs (stmt));
	enum tree_code code = gimple_cond_code (stmt);
	return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
      }

    case GIMPLE_SWITCH:
      {
	/* Return the constant switch index.  */
	return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
      }

    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      return gimple_fold_stmt_to_constant_1 (stmt,
					     valueize_op, valueize_op_1);

    default:
      gcc_unreachable ();
    }
}

/* Apply the operation CODE in type TYPE to the value, mask pair
   RVAL and RMASK representing a value of type RTYPE and set
   the value, mask pair *VAL and *MASK to the result.  */

static void
bit_value_unop_1 (enum tree_code code, tree type,
		  widest_int *val, widest_int *mask,
		  tree rtype, const widest_int &rval, const widest_int &rmask)
{
  switch (code)
    {
    case BIT_NOT_EXPR:
      *mask = rmask;
      *val = ~rval;
      break;

    case NEGATE_EXPR:
      {
	widest_int temv, temm;
	/* Return ~rval + 1.  */
	bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
	bit_value_binop_1 (PLUS_EXPR, type, val, mask,
			   type, temv, temm, type, 1, 0);
	break;
      }

    CASE_CONVERT:
      {
	signop sgn;

	/* First extend mask and value according to the original type.  */
	sgn = TYPE_SIGN (rtype);
	*mask = wi::ext (rmask, TYPE_PRECISION (rtype), sgn);
	*val = wi::ext (rval, TYPE_PRECISION (rtype), sgn);

	/* Then extend mask and value according to the target type.  */
	sgn = TYPE_SIGN (type);
	*mask = wi::ext (*mask, TYPE_PRECISION (type), sgn);
	*val = wi::ext (*val, TYPE_PRECISION (type), sgn);
	break;
      }

    default:
      *mask = -1;
      break;
    }
}

/* Apply the operation CODE in type TYPE to the value, mask pairs
   R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
   and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */

static void
bit_value_binop_1 (enum tree_code code, tree type,
		   widest_int *val, widest_int *mask,
		   tree r1type, const widest_int &r1val,
		   const widest_int &r1mask, tree r2type,
		   const widest_int &r2val, const widest_int &r2mask)
{
  signop sgn = TYPE_SIGN (type);
  int width = TYPE_PRECISION (type);
  bool swap_p = false;

  /* Assume we'll get a constant result.  Use an initial non varying
     value, we fall back to varying in the end if necessary.  */
  *mask = -1;

  switch (code)
    {
    case BIT_AND_EXPR:
      /* The mask is constant where there is a known not
	 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2))  */
      *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
      *val = r1val & r2val;
      break;

    case BIT_IOR_EXPR:
      /* The mask is constant where there is a known
	 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
      *mask = (r1mask | r2mask)
	      .and_not (r1val.and_not (r1mask) | r2val.and_not (r2mask));
      *val = r1val | r2val;
      break;

    case BIT_XOR_EXPR:
      /* m1 | m2  */
      *mask = r1mask | r2mask;
      *val = r1val ^ r2val;
      break;

    case LROTATE_EXPR:
    case RROTATE_EXPR:
      if (r2mask == 0)
	{
	  widest_int shift = r2val;
	  if (shift == 0)
	    {
	      *mask = r1mask;
	      *val = r1val;
	    }
	  else
	    {
	      if (wi::neg_p (shift))
		{
		  shift = -shift;
		  if (code == RROTATE_EXPR)
		    code = LROTATE_EXPR;
		  else
		    code = RROTATE_EXPR;
		}
	      if (code == RROTATE_EXPR)
		{
		  *mask = wi::rrotate (r1mask, shift, width);
		  *val = wi::rrotate (r1val, shift, width);
		}
	      else
		{
		  *mask = wi::lrotate (r1mask, shift, width);
		  *val = wi::lrotate (r1val, shift, width);
		}
	    }
	}
      break;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
      /* ??? We can handle partially known shift counts if we know
	 their sign.  That way we can tell that (x << (y | 8)) & 255
	 is zero.  */
      if (r2mask == 0)
	{
	  widest_int shift = r2val;
	  if (shift == 0)
	    {
	      *mask = r1mask;
	      *val = r1val;
	    }
	  else
	    {
	      if (wi::neg_p (shift))
		{
		  shift = -shift;
		  if (code == RSHIFT_EXPR)
		    code = LSHIFT_EXPR;
		  else
		    code = RSHIFT_EXPR;
		}
	      if (code == RSHIFT_EXPR)
		{
		  *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
		  *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
		}
	      else
		{
		  *mask = wi::ext (wi::lshift (r1mask, shift), width, sgn);
		  *val = wi::ext (wi::lshift (r1val, shift), width, sgn);
		}
	    }
	}
      break;

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      {
	/* Do the addition with unknown bits set to zero, to give carry-ins of
	   zero wherever possible.  */
	widest_int lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
	lo = wi::ext (lo, width, sgn);
	/* Do the addition with unknown bits set to one, to give carry-ins of
	   one wherever possible.  */
	widest_int hi = (r1val | r1mask) + (r2val | r2mask);
	hi = wi::ext (hi, width, sgn);
	/* Each bit in the result is known if (a) the corresponding bits in
	   both inputs are known, and (b) the carry-in to that bit position
	   is known.  We can check condition (b) by seeing if we got the same
	   result with minimised carries as with maximised carries.  */
	*mask = r1mask | r2mask | (lo ^ hi);
	*mask = wi::ext (*mask, width, sgn);
	/* It shouldn't matter whether we choose lo or hi here.  */
	*val = lo;
	break;
      }
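
      /* Illustrative sketch (hypothetical 4-bit inputs): with
	 r1 = 01?0 (r1val 0100, r1mask 0010) and r2 = 0001, we get
	 lo = 0100 + 0001 = 0101 and hi = 0110 + 0001 = 0111, so
	 lo ^ hi = 0010 and only bit 1 of the sum is unknown.  */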

    case MINUS_EXPR:
      {
	widest_int temv, temm;
	bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
			  r2type, r2val, r2mask);
	bit_value_binop_1 (PLUS_EXPR, type, val, mask,
			   r1type, r1val, r1mask,
			   r2type, temv, temm);
	break;
      }

    case MULT_EXPR:
      {
	/* Just track trailing zeros in both operands and transfer
	   them to the other.  */
	int r1tz = wi::ctz (r1val | r1mask);
	int r2tz = wi::ctz (r2val | r2mask);
	if (r1tz + r2tz >= width)
	  {
	    *mask = 0;
	    *val = 0;
	  }
	else if (r1tz + r2tz > 0)
	  {
	    *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
			     width, sgn);
	    *val = 0;
	  }
	break;
      }
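
      /* For illustration (hypothetical inputs): if one operand is
	 known to be a multiple of 4 (two trailing zero bits) and the
	 other a multiple of 2 (one trailing zero bit), the product is
	 known to be a multiple of 8; the low three bits become known
	 zeros while all higher bits stay unknown.  */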

    case EQ_EXPR:
    case NE_EXPR:
      {
	widest_int m = r1mask | r2mask;
	if (r1val.and_not (m) != r2val.and_not (m))
	  {
	    *mask = 0;
	    *val = ((code == EQ_EXPR) ? 0 : 1);
	  }
	else
	  {
	    /* We know the result of a comparison is always one or zero.  */
	    *mask = 1;
	    *val = 0;
	  }
	break;
      }

    case GE_EXPR:
    case GT_EXPR:
      swap_p = true;
      code = swap_tree_comparison (code);
      /* Fall through.  */
    case LT_EXPR:
    case LE_EXPR:
      {
	int minmax, maxmin;

	const widest_int &o1val = swap_p ? r2val : r1val;
	const widest_int &o1mask = swap_p ? r2mask : r1mask;
	const widest_int &o2val = swap_p ? r1val : r2val;
	const widest_int &o2mask = swap_p ? r1mask : r2mask;

	/* If the most significant bits are not known we know nothing.  */
	if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
	  break;

	/* For comparisons the signedness is in the comparison operands.  */
	sgn = TYPE_SIGN (r1type);

	/* If we know the most significant bits, we know the value
	   ranges by means of treating varying bits as zero
	   or one.  Do a cross comparison of the max/min pairs.  */
	maxmin = wi::cmp (o1val | o1mask, o2val.and_not (o2mask), sgn);
	minmax = wi::cmp (o1val.and_not (o1mask), o2val | o2mask, sgn);
	if (maxmin < 0)  /* o1 is less than o2.  */
	  {
	    *mask = 0;
	    *val = 1;
	  }
	else if (minmax > 0)  /* o1 is not less or equal to o2.  */
	  {
	    *mask = 0;
	    *val = 0;
	  }
	else if (maxmin == minmax)  /* o1 and o2 are equal.  */
	  {
	    /* This probably should never happen as we'd have
	       folded the thing during fully constant value folding.  */
	    *mask = 0;
	    *val = (code == LE_EXPR ? 1 : 0);
	  }
	else
	  {
	    /* We know the result of a comparison is always one or zero.  */
	    *mask = 1;
	    *val = 0;
	  }
	break;
      }

    default:;
    }
}

/* Return the propagation value when applying the operation CODE to
   the value RHS yielding type TYPE.  */

static ccp_prop_value_t
bit_value_unop (enum tree_code code, tree type, tree rhs)
{
  ccp_prop_value_t rval = get_value_for_expr (rhs, true);
  widest_int value, mask;
  ccp_prop_value_t val;

  if (rval.lattice_val == UNDEFINED)
    return rval;

  gcc_assert ((rval.lattice_val == CONSTANT
	       && TREE_CODE (rval.value) == INTEGER_CST)
	      || rval.mask == -1);
  bit_value_unop_1 (code, type, &value, &mask,
		    TREE_TYPE (rhs), value_to_wide_int (rval), rval.mask);
  if (mask != -1)
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ??? Delay building trees here.  */
      val.value = wide_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
    }
  return val;
}

/* Return the propagation value when applying the operation CODE to
   the values RHS1 and RHS2 yielding type TYPE.  */

static ccp_prop_value_t
bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
{
  ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
  ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
  widest_int value, mask;
  ccp_prop_value_t val;

  if (r1val.lattice_val == UNDEFINED
      || r2val.lattice_val == UNDEFINED)
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
      return val;
    }

  gcc_assert ((r1val.lattice_val == CONSTANT
	       && TREE_CODE (r1val.value) == INTEGER_CST)
	      || r1val.mask == -1);
  gcc_assert ((r2val.lattice_val == CONSTANT
	       && TREE_CODE (r2val.value) == INTEGER_CST)
	      || r2val.mask == -1);
  bit_value_binop_1 (code, type, &value, &mask,
		     TREE_TYPE (rhs1), value_to_wide_int (r1val), r1val.mask,
		     TREE_TYPE (rhs2), value_to_wide_int (r2val), r2val.mask);
  if (mask != -1)
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ??? Delay building trees here.  */
      val.value = wide_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
    }
  return val;
}

/* Return the propagation value for __builtin_assume_aligned
   and functions with assume_aligned or alloc_aligned attribute.
   For __builtin_assume_aligned, ATTR is NULL_TREE,
   for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
   is false, for alloc_aligned attribute ATTR is non-NULL and
   ALLOC_ALIGNED is true.  */

static ccp_prop_value_t
bit_value_assume_aligned (gimple stmt, tree attr, ccp_prop_value_t ptrval,
			  bool alloc_aligned)
{
  tree align, misalign = NULL_TREE, type;
  unsigned HOST_WIDE_INT aligni, misaligni = 0;
  ccp_prop_value_t alignval;
  widest_int value, mask;
  ccp_prop_value_t val;

  if (attr == NULL_TREE)
    {
      tree ptr = gimple_call_arg (stmt, 0);
      type = TREE_TYPE (ptr);
      ptrval = get_value_for_expr (ptr, true);
    }
  else
    {
      tree lhs = gimple_call_lhs (stmt);
      type = TREE_TYPE (lhs);
    }

  if (ptrval.lattice_val == UNDEFINED)
    return ptrval;
  gcc_assert ((ptrval.lattice_val == CONSTANT
	       && TREE_CODE (ptrval.value) == INTEGER_CST)
	      || ptrval.mask == -1);
  if (attr == NULL_TREE)
    {
      /* Get aligni and misaligni from __builtin_assume_aligned.  */
      align = gimple_call_arg (stmt, 1);
      if (!tree_fits_uhwi_p (align))
	return ptrval;
      aligni = tree_to_uhwi (align);
      if (gimple_call_num_args (stmt) > 2)
	{
	  misalign = gimple_call_arg (stmt, 2);
	  if (!tree_fits_uhwi_p (misalign))
	    return ptrval;
	  misaligni = tree_to_uhwi (misalign);
	}
    }
  else
    {
      /* Get aligni and misaligni from assume_aligned or
	 alloc_align attributes.  */
      if (TREE_VALUE (attr) == NULL_TREE)
	return ptrval;
      attr = TREE_VALUE (attr);
      align = TREE_VALUE (attr);
      if (!tree_fits_uhwi_p (align))
	return ptrval;
      aligni = tree_to_uhwi (align);
      if (alloc_aligned)
	{
	  if (aligni == 0 || aligni > gimple_call_num_args (stmt))
	    return ptrval;
	  align = gimple_call_arg (stmt, aligni - 1);
	  if (!tree_fits_uhwi_p (align))
	    return ptrval;
	  aligni = tree_to_uhwi (align);
	}
      else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
	{
	  misalign = TREE_VALUE (TREE_CHAIN (attr));
	  if (!tree_fits_uhwi_p (misalign))
	    return ptrval;
	  misaligni = tree_to_uhwi (misalign);
	}
    }
  if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
    return ptrval;

  align = build_int_cst_type (type, -aligni);
  alignval = get_value_for_expr (align, true);
  bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
		     type, value_to_wide_int (ptrval), ptrval.mask,
		     type, value_to_wide_int (alignval), alignval.mask);
  if (mask != -1)
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
      gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
      value |= misaligni;
      /* ??? Delay building trees here.  */
      val.value = wide_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
    }
  return val;
}
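
/* Illustrative sketch (hypothetical call): for
   p2 = __builtin_assume_aligned (p1, 16, 4) with nothing known about
   p1, ANDing p1's lattice value with -16 clears the low four mask
   bits, and ORing in the misalignment makes the low value bits 0x4,
   so p2 is known to be congruent to 4 modulo 16.  */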
1661
1662 /* Evaluate statement STMT.
1663 Valid only for assignments, calls, conditionals, and switches. */
1664
1665 static ccp_prop_value_t
1666 evaluate_stmt (gimple stmt)
1667 {
1668 ccp_prop_value_t val;
1669 tree simplified = NULL_TREE;
1670 ccp_lattice_t likelyvalue = likely_value (stmt);
1671 bool is_constant = false;
1672 unsigned int align;
1673
1674 if (dump_file && (dump_flags & TDF_DETAILS))
1675 {
1676 fprintf (dump_file, "which is likely ");
1677 switch (likelyvalue)
1678 {
1679 case CONSTANT:
1680 fprintf (dump_file, "CONSTANT");
1681 break;
1682 case UNDEFINED:
1683 fprintf (dump_file, "UNDEFINED");
1684 break;
1685 case VARYING:
1686 fprintf (dump_file, "VARYING");
1687 break;
1688 default:;
1689 }
1690 fprintf (dump_file, "\n");
1691 }
1692
1693 /* If the statement is likely to have a CONSTANT result, then try
1694 to fold the statement to determine the constant value. */
1695 /* FIXME. This is the only place that we call ccp_fold.
1696 Since likely_value never returns CONSTANT for calls, we will
1697 not attempt to fold them, including builtins that may profit. */
1698 if (likelyvalue == CONSTANT)
1699 {
1700 fold_defer_overflow_warnings ();
1701 simplified = ccp_fold (stmt);
1702 is_constant = simplified && is_gimple_min_invariant (simplified);
1703 fold_undefer_overflow_warnings (is_constant, stmt, 0);
1704 if (is_constant)
1705 {
1706 /* The statement produced a constant value. */
1707 val.lattice_val = CONSTANT;
1708 val.value = simplified;
1709 val.mask = 0;
1710 }
1711 }
1712 /* If the statement is likely to have a VARYING result, then do not
1713 bother folding the statement. */
1714 else if (likelyvalue == VARYING)
1715 {
1716 enum gimple_code code = gimple_code (stmt);
1717 if (code == GIMPLE_ASSIGN)
1718 {
1719 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1720
1721 /* Other cases cannot satisfy is_gimple_min_invariant
1722 without folding. */
1723 if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
1724 simplified = gimple_assign_rhs1 (stmt);
1725 }
1726 else if (code == GIMPLE_SWITCH)
1727 simplified = gimple_switch_index (as_a <gswitch *> (stmt));
1728 else
1729 /* These cannot satisfy is_gimple_min_invariant without folding. */
1730 gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
1731 is_constant = simplified && is_gimple_min_invariant (simplified);
1732 if (is_constant)
1733 {
1734 /* The statement produced a constant value. */
1735 val.lattice_val = CONSTANT;
1736 val.value = simplified;
1737 val.mask = 0;
1738 }
1739 }
1740
1741 /* Resort to simplification for bitwise tracking. */
1742 if (flag_tree_bit_ccp
1743 && (likelyvalue == CONSTANT || is_gimple_call (stmt))
1744 && !is_constant)
1745 {
1746 enum gimple_code code = gimple_code (stmt);
1747 val.lattice_val = VARYING;
1748 val.value = NULL_TREE;
1749 val.mask = -1;
1750 if (code == GIMPLE_ASSIGN)
1751 {
1752 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1753 tree rhs1 = gimple_assign_rhs1 (stmt);
1754 switch (get_gimple_rhs_class (subcode))
1755 {
1756 case GIMPLE_SINGLE_RHS:
1757 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1758 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1759 val = get_value_for_expr (rhs1, true);
1760 break;
1761
1762 case GIMPLE_UNARY_RHS:
1763 if ((INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1764 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1765 && (INTEGRAL_TYPE_P (gimple_expr_type (stmt))
1766 || POINTER_TYPE_P (gimple_expr_type (stmt))))
1767 val = bit_value_unop (subcode, gimple_expr_type (stmt), rhs1);
1768 break;
1769
1770 case GIMPLE_BINARY_RHS:
1771 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1772 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1773 {
1774 tree lhs = gimple_assign_lhs (stmt);
1775 tree rhs2 = gimple_assign_rhs2 (stmt);
1776 val = bit_value_binop (subcode,
1777 TREE_TYPE (lhs), rhs1, rhs2);
1778 }
1779 break;
1780
1781 default:;
1782 }
1783 }
1784 else if (code == GIMPLE_COND)
1785 {
1786 enum tree_code code = gimple_cond_code (stmt);
1787 tree rhs1 = gimple_cond_lhs (stmt);
1788 tree rhs2 = gimple_cond_rhs (stmt);
1789 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1790 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1791 val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
1792 }
1793 else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
1794 {
1795 tree fndecl = gimple_call_fndecl (stmt);
1796 switch (DECL_FUNCTION_CODE (fndecl))
1797 {
1798 case BUILT_IN_MALLOC:
1799 case BUILT_IN_REALLOC:
1800 case BUILT_IN_CALLOC:
1801 case BUILT_IN_STRDUP:
1802 case BUILT_IN_STRNDUP:
1803 val.lattice_val = CONSTANT;
1804 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1805 val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
1806 / BITS_PER_UNIT - 1);
1807 break;
1808
1809 case BUILT_IN_ALLOCA:
1810 case BUILT_IN_ALLOCA_WITH_ALIGN:
1811 align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
1812 ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
1813 : BIGGEST_ALIGNMENT);
1814 val.lattice_val = CONSTANT;
1815 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1816 val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
1817 break;
1818
1819 /* These builtins return their first argument, unmodified. */
1820 case BUILT_IN_MEMCPY:
1821 case BUILT_IN_MEMMOVE:
1822 case BUILT_IN_MEMSET:
1823 case BUILT_IN_STRCPY:
1824 case BUILT_IN_STRNCPY:
1825 case BUILT_IN_MEMCPY_CHK:
1826 case BUILT_IN_MEMMOVE_CHK:
1827 case BUILT_IN_MEMSET_CHK:
1828 case BUILT_IN_STRCPY_CHK:
1829 case BUILT_IN_STRNCPY_CHK:
1830 val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
1831 break;
1832
1833 case BUILT_IN_ASSUME_ALIGNED:
1834 val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
1835 break;
1836
1837 case BUILT_IN_ALIGNED_ALLOC:
1838 {
1839 tree align = get_constant_value (gimple_call_arg (stmt, 0));
1840 if (align
1841 && tree_fits_uhwi_p (align))
1842 {
1843 unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
1844 if (aligni > 1
1845 /* align must be power-of-two */
1846 && (aligni & (aligni - 1)) == 0)
1847 {
1848 val.lattice_val = CONSTANT;
1849 val.value = build_int_cst (ptr_type_node, 0);
1850 val.mask = -aligni;
1851 }
1852 }
1853 break;
1854 }
1855
1856 default:;
1857 }
1858 }
1859 if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
1860 {
1861 tree fntype = gimple_call_fntype (stmt);
1862 if (fntype)
1863 {
1864 tree attrs = lookup_attribute ("assume_aligned",
1865 TYPE_ATTRIBUTES (fntype));
1866 if (attrs)
1867 val = bit_value_assume_aligned (stmt, attrs, val, false);
1868 attrs = lookup_attribute ("alloc_align",
1869 TYPE_ATTRIBUTES (fntype));
1870 if (attrs)
1871 val = bit_value_assume_aligned (stmt, attrs, val, true);
1872 }
1873 }
1874 is_constant = (val.lattice_val == CONSTANT);
1875 }
1876
1877 if (flag_tree_bit_ccp
1878 && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
1879 || (!is_constant && likelyvalue != UNDEFINED))
1880 && gimple_get_lhs (stmt)
1881 && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
1882 {
1883 tree lhs = gimple_get_lhs (stmt);
1884 wide_int nonzero_bits = get_nonzero_bits (lhs);
1885 if (nonzero_bits != -1)
1886 {
1887 if (!is_constant)
1888 {
1889 val.lattice_val = CONSTANT;
1890 val.value = build_zero_cst (TREE_TYPE (lhs));
1891 val.mask = extend_mask (nonzero_bits);
1892 is_constant = true;
1893 }
1894 else
1895 {
1896 if (wi::bit_and_not (val.value, nonzero_bits) != 0)
1897 val.value = wide_int_to_tree (TREE_TYPE (lhs),
1898 nonzero_bits & val.value);
1899 if (nonzero_bits == 0)
1900 val.mask = 0;
1901 else
1902 val.mask = val.mask & extend_mask (nonzero_bits);
1903 }
1904 }
1905 }
1906
1907 if (!is_constant)
1908 {
1909 /* The statement produced a nonconstant value. If the statement
1910 had UNDEFINED operands, then the result of the statement
1911 should be UNDEFINED. Otherwise, the statement is VARYING. */
1912 if (likelyvalue == UNDEFINED)
1913 {
1914 val.lattice_val = likelyvalue;
1915 val.mask = 0;
1916 }
1917 else
1918 {
1919 val.lattice_val = VARYING;
1920 val.mask = -1;
1921 }
1922
1923 val.value = NULL_TREE;
1924 }
1925
1926 return val;
1927 }
1928
1929 typedef hash_table<pointer_hash<gimple_statement_base> > gimple_htab;
1930
1931 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
1932 each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */
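/* For illustration (SSA names invented):

     saved_1 = __builtin_stack_save ();
     ...
     var = {v} {CLOBBER};   <-- clobber inserted by this function
     __builtin_stack_restore (saved_1);

   SAVED_VAL may also reach a restore through PHI nodes or SSA name
   copies, hence the recursive walk below.  */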
1933
1934 static void
1935 insert_clobber_before_stack_restore (tree saved_val, tree var,
1936 gimple_htab **visited)
1937 {
1938 gimple stmt;
1939 gassign *clobber_stmt;
1940 tree clobber;
1941 imm_use_iterator iter;
1942 gimple_stmt_iterator i;
1943 gimple *slot;
1944
1945 FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
1946 if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
1947 {
1948 clobber = build_constructor (TREE_TYPE (var),
1949 NULL);
1950 TREE_THIS_VOLATILE (clobber) = 1;
1951 clobber_stmt = gimple_build_assign (var, clobber);
1952
1953 i = gsi_for_stmt (stmt);
1954 gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
1955 }
1956 else if (gimple_code (stmt) == GIMPLE_PHI)
1957 {
1958 if (!*visited)
1959 *visited = new gimple_htab (10);
1960
1961 slot = (*visited)->find_slot (stmt, INSERT);
1962 if (*slot != NULL)
1963 continue;
1964
1965 *slot = stmt;
1966 insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
1967 visited);
1968 }
1969 else if (gimple_assign_ssa_name_copy_p (stmt))
1970 insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
1971 visited);
1972 else if (chkp_gimple_call_builtin_p (stmt, BUILT_IN_CHKP_BNDRET))
1973 continue;
1974 else
1975 gcc_assert (is_gimple_debug (stmt));
1976 }
1977
1978 /* Advance the iterator to the previous non-debug gimple statement in the same
1979 or dominating basic block. */
1980
1981 static inline void
1982 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
1983 {
1984 basic_block dom;
1985
1986 gsi_prev_nondebug (i);
1987 while (gsi_end_p (*i))
1988 {
1989 dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
1990 if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1991 return;
1992
1993 *i = gsi_last_bb (dom);
1994 }
1995 }
1996
1997 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
1998 a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
1999
2000 It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when a
2001 previous pass (such as DOM) duplicated it along multiple paths to a BB. In
2002 that case the function gives up without inserting the clobbers. */
2003
2004 static void
2005 insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
2006 {
2007 gimple stmt;
2008 tree saved_val;
2009 gimple_htab *visited = NULL;
2010
2011 for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
2012 {
2013 stmt = gsi_stmt (i);
2014
2015 if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
2016 continue;
2017
2018 saved_val = gimple_call_lhs (stmt);
2019 if (saved_val == NULL_TREE)
2020 continue;
2021
2022 insert_clobber_before_stack_restore (saved_val, var, &visited);
2023 break;
2024 }
2025
2026 delete visited;
2027 }
2028
2029 /* Detect a __builtin_alloca_with_align with a constant size argument.  If
2030 found, declare a fixed-size array in its place and return the array's
2031 address; otherwise return NULL_TREE.  */
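/* As a sketch (declaration name invented), assuming the size heuristic
   below is satisfied,

     p_1 = __builtin_alloca_with_align (16, 64);

   is folded to the address of a fresh local array:

     unsigned char D.1234[16];   (DECL_ALIGN of 64 bits)
     p_1 = &D.1234;  */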
2032
2033 static tree
2034 fold_builtin_alloca_with_align (gimple stmt)
2035 {
2036 unsigned HOST_WIDE_INT size, threshold, n_elem;
2037 tree lhs, arg, block, var, elem_type, array_type;
2038
2039 /* Get lhs. */
2040 lhs = gimple_call_lhs (stmt);
2041 if (lhs == NULL_TREE)
2042 return NULL_TREE;
2043
2044 /* Detect constant argument. */
2045 arg = get_constant_value (gimple_call_arg (stmt, 0));
2046 if (arg == NULL_TREE
2047 || TREE_CODE (arg) != INTEGER_CST
2048 || !tree_fits_uhwi_p (arg))
2049 return NULL_TREE;
2050
2051 size = tree_to_uhwi (arg);
2052
2053 /* Heuristic: don't fold large allocas. */
2054 threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
2055 /* In case the alloca is located at function entry, it has the same lifetime
2056 as a declared array, so we allow a larger size. */
2057 block = gimple_block (stmt);
2058 if (!(cfun->after_inlining
2059 && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
2060 threshold /= 10;
2061 if (size > threshold)
2062 return NULL_TREE;
2063
2064 /* Declare array. */
2065 elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
2066 n_elem = size * 8 / BITS_PER_UNIT;
2067 array_type = build_array_type_nelts (elem_type, n_elem);
2068 var = create_tmp_var (array_type);
2069 DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1));
2070 {
2071 struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
2072 if (pi != NULL && !pi->pt.anything)
2073 {
2074 bool singleton_p;
2075 unsigned uid;
2076 singleton_p = pt_solution_singleton_p (&pi->pt, &uid);
2077 gcc_assert (singleton_p);
2078 SET_DECL_PT_UID (var, uid);
2079 }
2080 }
2081
2082 /* Fold alloca to the address of the array. */
2083 return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
2084 }
2085
2086 /* Fold the stmt at *GSI with CCP specific information that propagating
2087 and regular folding does not catch. */
2088
2089 static bool
2090 ccp_fold_stmt (gimple_stmt_iterator *gsi)
2091 {
2092 gimple stmt = gsi_stmt (*gsi);
2093
2094 switch (gimple_code (stmt))
2095 {
2096 case GIMPLE_COND:
2097 {
2098 gcond *cond_stmt = as_a <gcond *> (stmt);
2099 ccp_prop_value_t val;
2100 /* Statement evaluation will handle type mismatches in constants
2101 more gracefully than the final propagation. This allows us to
2102 fold more conditionals here. */
2103 val = evaluate_stmt (stmt);
2104 if (val.lattice_val != CONSTANT
2105 || val.mask != 0)
2106 return false;
2107
2108 if (dump_file)
2109 {
2110 fprintf (dump_file, "Folding predicate ");
2111 print_gimple_expr (dump_file, stmt, 0, 0);
2112 fprintf (dump_file, " to ");
2113 print_generic_expr (dump_file, val.value, 0);
2114 fprintf (dump_file, "\n");
2115 }
2116
2117 if (integer_zerop (val.value))
2118 gimple_cond_make_false (cond_stmt);
2119 else
2120 gimple_cond_make_true (cond_stmt);
2121
2122 return true;
2123 }
2124
2125 case GIMPLE_CALL:
2126 {
2127 tree lhs = gimple_call_lhs (stmt);
2128 int flags = gimple_call_flags (stmt);
2129 tree val;
2130 tree argt;
2131 bool changed = false;
2132 unsigned i;
2133
2134 /* If the call was folded into a constant make sure it goes
2135 away even if we cannot propagate into all uses because of
2136 type issues. */
2137 if (lhs
2138 && TREE_CODE (lhs) == SSA_NAME
2139 && (val = get_constant_value (lhs))
2140 /* Don't optimize away calls that have side-effects. */
2141 && (flags & (ECF_CONST|ECF_PURE)) != 0
2142 && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
2143 {
2144 tree new_rhs = unshare_expr (val);
2145 bool res;
2146 if (!useless_type_conversion_p (TREE_TYPE (lhs),
2147 TREE_TYPE (new_rhs)))
2148 new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
2149 res = update_call_from_tree (gsi, new_rhs);
2150 gcc_assert (res);
2151 return true;
2152 }
2153
2154 /* Internal calls provide no argument types, so the extra laxity
2155 for normal calls does not apply. */
2156 if (gimple_call_internal_p (stmt))
2157 return false;
2158
2159 /* The heuristic of fold_builtin_alloca_with_align differs before and
2160 after inlining, so we don't require the arg to have changed into a
2161 constant since the last fold; it just has to be constant.  */
2162 if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
2163 {
2164 tree new_rhs = fold_builtin_alloca_with_align (stmt);
2165 if (new_rhs)
2166 {
2167 bool res = update_call_from_tree (gsi, new_rhs);
2168 tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0), 0);
2169 gcc_assert (res);
2170 insert_clobbers_for_var (*gsi, var);
2171 return true;
2172 }
2173 }
2174
2175 /* Propagate into the call arguments. Compared to replace_uses_in
2176 this can use the argument slot types for type verification
2177 instead of the current argument type. We also can safely
2178 drop qualifiers here as we are dealing with constants anyway. */
2179 argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
2180 for (i = 0; i < gimple_call_num_args (stmt) && argt;
2181 ++i, argt = TREE_CHAIN (argt))
2182 {
2183 tree arg = gimple_call_arg (stmt, i);
2184 if (TREE_CODE (arg) == SSA_NAME
2185 && (val = get_constant_value (arg))
2186 && useless_type_conversion_p
2187 (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
2188 TYPE_MAIN_VARIANT (TREE_TYPE (val))))
2189 {
2190 gimple_call_set_arg (stmt, i, unshare_expr (val));
2191 changed = true;
2192 }
2193 }
2194
2195 return changed;
2196 }
2197
2198 case GIMPLE_ASSIGN:
2199 {
2200 tree lhs = gimple_assign_lhs (stmt);
2201 tree val;
2202
2203 /* If we have a load that turned out to be constant replace it
2204 as we cannot propagate into all uses in all cases. */
2205 if (gimple_assign_single_p (stmt)
2206 && TREE_CODE (lhs) == SSA_NAME
2207 && (val = get_constant_value (lhs)))
2208 {
2209 tree rhs = unshare_expr (val);
2210 if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
2211 rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
2212 gimple_assign_set_rhs_from_tree (gsi, rhs);
2213 return true;
2214 }
2215
2216 return false;
2217 }
2218
2219 default:
2220 return false;
2221 }
2222 }
2223
2224 /* Visit the assignment statement STMT. Set the value of its LHS to the
2225 value computed by the RHS and store LHS in *OUTPUT_P. If STMT
2226 creates virtual definitions, set the value of each new name to that
2227 of the RHS (if we can derive a constant out of the RHS).
2228 Value-returning call statements also perform an assignment, and
2229 are handled here. */
2230
2231 static enum ssa_prop_result
2232 visit_assignment (gimple stmt, tree *output_p)
2233 {
2234 ccp_prop_value_t val;
2235 enum ssa_prop_result retval;
2236
2237 tree lhs = gimple_get_lhs (stmt);
2238
2239 gcc_assert (gimple_code (stmt) != GIMPLE_CALL
2240 || gimple_call_lhs (stmt) != NULL_TREE);
2241
2242 if (gimple_assign_single_p (stmt)
2243 && gimple_assign_rhs_code (stmt) == SSA_NAME)
2244 /* For a simple copy operation, we copy the lattice values. */
2245 val = *get_value (gimple_assign_rhs1 (stmt));
2246 else
2247 /* Evaluate the statement, which could be
2248 either a GIMPLE_ASSIGN or a GIMPLE_CALL. */
2249 val = evaluate_stmt (stmt);
2250
2251 retval = SSA_PROP_NOT_INTERESTING;
2252
2253 /* Set the lattice value of the statement's output. */
2254 if (TREE_CODE (lhs) == SSA_NAME)
2255 {
2256 /* If STMT is an assignment to an SSA_NAME, we only have one
2257 value to set. */
2258 if (set_lattice_value (lhs, val))
2259 {
2260 *output_p = lhs;
2261 if (val.lattice_val == VARYING)
2262 retval = SSA_PROP_VARYING;
2263 else
2264 retval = SSA_PROP_INTERESTING;
2265 }
2266 }
2267
2268 return retval;
2269 }
2270
2271
2272 /* Visit the conditional statement STMT. Return SSA_PROP_INTERESTING
2273 if it can determine which edge will be taken. Otherwise, return
2274 SSA_PROP_VARYING. */
2275
2276 static enum ssa_prop_result
2277 visit_cond_stmt (gimple stmt, edge *taken_edge_p)
2278 {
2279 ccp_prop_value_t val;
2280 basic_block block;
2281
2282 block = gimple_bb (stmt);
2283 val = evaluate_stmt (stmt);
2284 if (val.lattice_val != CONSTANT
2285 || val.mask != 0)
2286 return SSA_PROP_VARYING;
2287
2288 /* Find which edge out of the conditional block will be taken and add it
2289 to the worklist. If no single edge can be determined statically,
2290 return SSA_PROP_VARYING to feed all the outgoing edges to the
2291 propagation engine. */
2292 *taken_edge_p = find_taken_edge (block, val.value);
2293 if (*taken_edge_p)
2294 return SSA_PROP_INTERESTING;
2295 else
2296 return SSA_PROP_VARYING;
2297 }
2298
2299
2300 /* Evaluate statement STMT. If the statement produces an output value and
2301 its evaluation changes the lattice value of its output, return
2302 SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2303 output value.
2304
2305 If STMT is a conditional branch and we can determine its truth
2306 value, set *TAKEN_EDGE_P accordingly. If STMT produces a varying
2307 value, return SSA_PROP_VARYING. */
2308
2309 static enum ssa_prop_result
2310 ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
2311 {
2312 tree def;
2313 ssa_op_iter iter;
2314
2315 if (dump_file && (dump_flags & TDF_DETAILS))
2316 {
2317 fprintf (dump_file, "\nVisiting statement:\n");
2318 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2319 }
2320
2321 switch (gimple_code (stmt))
2322 {
2323 case GIMPLE_ASSIGN:
2324 /* If the statement is an assignment that produces a single
2325 output value, evaluate its RHS to see if the lattice value of
2326 its output has changed. */
2327 return visit_assignment (stmt, output_p);
2328
2329 case GIMPLE_CALL:
2330 /* A value-returning call also performs an assignment. */
2331 if (gimple_call_lhs (stmt) != NULL_TREE)
2332 return visit_assignment (stmt, output_p);
2333 break;
2334
2335 case GIMPLE_COND:
2336 case GIMPLE_SWITCH:
2337 /* If STMT is a conditional branch, see if we can determine
2338 which branch will be taken. */
2339 /* FIXME. It appears that we should be able to optimize
2340 computed GOTOs here as well. */
2341 return visit_cond_stmt (stmt, taken_edge_p);
2342
2343 default:
2344 break;
2345 }
2346
2347 /* Any other kind of statement is not interesting for constant
2348 propagation and, therefore, not worth simulating. */
2349 if (dump_file && (dump_flags & TDF_DETAILS))
2350 fprintf (dump_file, "No interesting values produced. Marked VARYING.\n");
2351
2352 /* Definitions made by statements other than assignments to
2353 SSA_NAMEs represent unknown modifications to their outputs.
2354 Mark them VARYING. */
2355 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2356 {
2357 ccp_prop_value_t v = { VARYING, NULL_TREE, -1 };
2358 set_lattice_value (def, v);
2359 }
2360
2361 return SSA_PROP_VARYING;
2362 }
2363
2364
2365 /* Main entry point for SSA Conditional Constant Propagation. */
2366
2367 static unsigned int
2368 do_ssa_ccp (void)
2369 {
2370 unsigned int todo = 0;
2371 calculate_dominance_info (CDI_DOMINATORS);
2372 ccp_initialize ();
2373 ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
2374 if (ccp_finalize ())
2375 todo = (TODO_cleanup_cfg | TODO_update_ssa);
2376 free_dominance_info (CDI_DOMINATORS);
2377 return todo;
2378 }
2379
2380
2381 namespace {
2382
2383 const pass_data pass_data_ccp =
2384 {
2385 GIMPLE_PASS, /* type */
2386 "ccp", /* name */
2387 OPTGROUP_NONE, /* optinfo_flags */
2388 TV_TREE_CCP, /* tv_id */
2389 ( PROP_cfg | PROP_ssa ), /* properties_required */
2390 0, /* properties_provided */
2391 0, /* properties_destroyed */
2392 0, /* todo_flags_start */
2393 TODO_update_address_taken, /* todo_flags_finish */
2394 };
2395
2396 class pass_ccp : public gimple_opt_pass
2397 {
2398 public:
2399 pass_ccp (gcc::context *ctxt)
2400 : gimple_opt_pass (pass_data_ccp, ctxt)
2401 {}
2402
2403 /* opt_pass methods: */
2404 opt_pass * clone () { return new pass_ccp (m_ctxt); }
2405 virtual bool gate (function *) { return flag_tree_ccp != 0; }
2406 virtual unsigned int execute (function *) { return do_ssa_ccp (); }
2407
2408 }; // class pass_ccp
2409
2410 } // anon namespace
2411
2412 gimple_opt_pass *
2413 make_pass_ccp (gcc::context *ctxt)
2414 {
2415 return new pass_ccp (ctxt);
2416 }
2417
2418
2419
2420 /* Try to optimize out __builtin_stack_restore. Optimize it out
2421 if there is another __builtin_stack_restore in the same basic
2422 block and no calls or ASM_EXPRs are in between, or if this block's
2423 only outgoing edge is to EXIT_BLOCK and there are no calls or
2424 ASM_EXPRs after this __builtin_stack_restore. */
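/* A sketch of the first case (SSA names invented):

     __builtin_stack_restore (saved_1);   <-- removable
     x_2 = y_3 + 1;                       (no calls or ASM_EXPRs between)
     __builtin_stack_restore (saved_4);

   The later restore subsumes the earlier one, so the earlier call can
   be replaced by a no-op.  */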
2425
2426 static tree
2427 optimize_stack_restore (gimple_stmt_iterator i)
2428 {
2429 tree callee;
2430 gimple stmt;
2431
2432 basic_block bb = gsi_bb (i);
2433 gimple call = gsi_stmt (i);
2434
2435 if (gimple_code (call) != GIMPLE_CALL
2436 || gimple_call_num_args (call) != 1
2437 || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2438 || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2439 return NULL_TREE;
2440
2441 for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2442 {
2443 stmt = gsi_stmt (i);
2444 if (gimple_code (stmt) == GIMPLE_ASM)
2445 return NULL_TREE;
2446 if (gimple_code (stmt) != GIMPLE_CALL)
2447 continue;
2448
2449 callee = gimple_call_fndecl (stmt);
2450 if (!callee
2451 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2452 /* All regular builtins are ok, just obviously not alloca. */
2453 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
2454 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
2455 return NULL_TREE;
2456
2457 if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
2458 goto second_stack_restore;
2459 }
2460
2461 if (!gsi_end_p (i))
2462 return NULL_TREE;
2463
2464 /* Allow a single successor that is the exit block, or no successors at all.  */
2465 switch (EDGE_COUNT (bb->succs))
2466 {
2467 case 0:
2468 break;
2469 case 1:
2470 if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2471 return NULL_TREE;
2472 break;
2473 default:
2474 return NULL_TREE;
2475 }
2476 second_stack_restore:
2477
2478 /* If there's exactly one use, then zap the call to __builtin_stack_save.
2479 If there are multiple uses, then the last one should remove the call.
2480 In any case, whether the call to __builtin_stack_save can be removed
2481 or not is irrelevant to removing the call to __builtin_stack_restore. */
2482 if (has_single_use (gimple_call_arg (call, 0)))
2483 {
2484 gimple stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2485 if (is_gimple_call (stack_save))
2486 {
2487 callee = gimple_call_fndecl (stack_save);
2488 if (callee
2489 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2490 && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
2491 {
2492 gimple_stmt_iterator stack_save_gsi;
2493 tree rhs;
2494
2495 stack_save_gsi = gsi_for_stmt (stack_save);
2496 rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2497 update_call_from_tree (&stack_save_gsi, rhs);
2498 }
2499 }
2500 }
2501
2502 /* No effect, so the statement will be deleted. */
2503 return integer_zero_node;
2504 }
2505
2506 /* If the va_list type is a simple pointer and nothing special is needed,
2507 optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2508 optimize __builtin_va_end (&ap) out as a no-op, and turn __builtin_va_copy
2509 into a simple pointer assignment.  */
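/* For example, on such a target

     va_list ap;
     __builtin_va_start (&ap, 0);

   reduces to the plain pointer assignment

     ap = __builtin_next_arg (0);

   subject to the target hook checks below.  */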
2510
2511 static tree
2512 optimize_stdarg_builtin (gimple call)
2513 {
2514 tree callee, lhs, rhs, cfun_va_list;
2515 bool va_list_simple_ptr;
2516 location_t loc = gimple_location (call);
2517
2518 if (gimple_code (call) != GIMPLE_CALL)
2519 return NULL_TREE;
2520
2521 callee = gimple_call_fndecl (call);
2522
2523 cfun_va_list = targetm.fn_abi_va_list (callee);
2524 va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2525 && (TREE_TYPE (cfun_va_list) == void_type_node
2526 || TREE_TYPE (cfun_va_list) == char_type_node);
2527
2528 switch (DECL_FUNCTION_CODE (callee))
2529 {
2530 case BUILT_IN_VA_START:
2531 if (!va_list_simple_ptr
2532 || targetm.expand_builtin_va_start != NULL
2533 || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2534 return NULL_TREE;
2535
2536 if (gimple_call_num_args (call) != 2)
2537 return NULL_TREE;
2538
2539 lhs = gimple_call_arg (call, 0);
2540 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2541 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2542 != TYPE_MAIN_VARIANT (cfun_va_list))
2543 return NULL_TREE;
2544
2545 lhs = build_fold_indirect_ref_loc (loc, lhs);
2546 rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2547 1, integer_zero_node);
2548 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2549 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2550
2551 case BUILT_IN_VA_COPY:
2552 if (!va_list_simple_ptr)
2553 return NULL_TREE;
2554
2555 if (gimple_call_num_args (call) != 2)
2556 return NULL_TREE;
2557
2558 lhs = gimple_call_arg (call, 0);
2559 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2560 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2561 != TYPE_MAIN_VARIANT (cfun_va_list))
2562 return NULL_TREE;
2563
2564 lhs = build_fold_indirect_ref_loc (loc, lhs);
2565 rhs = gimple_call_arg (call, 1);
2566 if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2567 != TYPE_MAIN_VARIANT (cfun_va_list))
2568 return NULL_TREE;
2569
2570 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2571 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2572
2573 case BUILT_IN_VA_END:
2574 /* No effect, so the statement will be deleted. */
2575 return integer_zero_node;
2576
2577 default:
2578 gcc_unreachable ();
2579 }
2580 }
2581
2582 /* Attempt to make the block containing the __builtin_unreachable call at I
2583 unreachable by changing the incoming jumps.  Return true if at least one jump was changed.  */
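/* E.g. given (a sketch)

     if (x_1 > 0)
       goto <bb 3>;   <-- bb 3 starts with __builtin_unreachable ()
     else
       goto <bb 4>;

   the condition is rewritten to be always false, so the edge into the
   unreachable block goes away and CFG cleanup can delete the block.  */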
2584
2585 static bool
2586 optimize_unreachable (gimple_stmt_iterator i)
2587 {
2588 basic_block bb = gsi_bb (i);
2589 gimple_stmt_iterator gsi;
2590 gimple stmt;
2591 edge_iterator ei;
2592 edge e;
2593 bool ret;
2594
2595 if (flag_sanitize & SANITIZE_UNREACHABLE)
2596 return false;
2597
2598 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2599 {
2600 stmt = gsi_stmt (gsi);
2601
2602 if (is_gimple_debug (stmt))
2603 continue;
2604
2605 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2606 {
2607 /* Verify we do not need to preserve the label. */
2608 if (FORCED_LABEL (gimple_label_label (label_stmt)))
2609 return false;
2610
2611 continue;
2612 }
2613
2614 /* Only handle the case that __builtin_unreachable is the first statement
2615 in the block. We rely on DCE to remove stmts without side-effects
2616 before __builtin_unreachable. */
2617 if (gsi_stmt (gsi) != gsi_stmt (i))
2618 return false;
2619 }
2620
2621 ret = false;
2622 FOR_EACH_EDGE (e, ei, bb->preds)
2623 {
2624 gsi = gsi_last_bb (e->src);
2625 if (gsi_end_p (gsi))
2626 continue;
2627
2628 stmt = gsi_stmt (gsi);
2629 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
2630 {
2631 if (e->flags & EDGE_TRUE_VALUE)
2632 gimple_cond_make_false (cond_stmt);
2633 else if (e->flags & EDGE_FALSE_VALUE)
2634 gimple_cond_make_true (cond_stmt);
2635 else
2636 gcc_unreachable ();
2637 update_stmt (cond_stmt);
2638 }
2639 else
2640 {
2641 /* TODO: handle other cases, e.g. a switch statement.  */
2642 continue;
2643 }
2644
2645 ret = true;
2646 }
2647
2648 return ret;
2649 }
2650
2651 /* A simple pass that attempts to fold all builtin functions. This pass
2652 is run after we've propagated as many constants as we can. */
2653
2654 namespace {
2655
2656 const pass_data pass_data_fold_builtins =
2657 {
2658 GIMPLE_PASS, /* type */
2659 "fab", /* name */
2660 OPTGROUP_NONE, /* optinfo_flags */
2661 TV_NONE, /* tv_id */
2662 ( PROP_cfg | PROP_ssa ), /* properties_required */
2663 0, /* properties_provided */
2664 0, /* properties_destroyed */
2665 0, /* todo_flags_start */
2666 TODO_update_ssa, /* todo_flags_finish */
2667 };
2668
2669 class pass_fold_builtins : public gimple_opt_pass
2670 {
2671 public:
2672 pass_fold_builtins (gcc::context *ctxt)
2673 : gimple_opt_pass (pass_data_fold_builtins, ctxt)
2674 {}
2675
2676 /* opt_pass methods: */
2677 opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
2678 virtual unsigned int execute (function *);
2679
2680 }; // class pass_fold_builtins
2681
2682 unsigned int
2683 pass_fold_builtins::execute (function *fun)
2684 {
2685 bool cfg_changed = false;
2686 basic_block bb;
2687 unsigned int todoflags = 0;
2688
2689 FOR_EACH_BB_FN (bb, fun)
2690 {
2691 gimple_stmt_iterator i;
2692 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
2693 {
2694 gimple stmt, old_stmt;
2695 tree callee;
2696 enum built_in_function fcode;
2697
2698 stmt = gsi_stmt (i);
2699
2700 if (gimple_code (stmt) != GIMPLE_CALL)
2701 {
2702 /* Remove all *ssaname_N ={v} {CLOBBER}; stmts; after
2703 the last GIMPLE DSE they aren't needed and might
2704 unnecessarily keep the SSA_NAMEs live.  */
2705 if (gimple_clobber_p (stmt))
2706 {
2707 tree lhs = gimple_assign_lhs (stmt);
2708 if (TREE_CODE (lhs) == MEM_REF
2709 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
2710 {
2711 unlink_stmt_vdef (stmt);
2712 gsi_remove (&i, true);
2713 release_defs (stmt);
2714 continue;
2715 }
2716 }
2717 gsi_next (&i);
2718 continue;
2719 }
2720
2721 callee = gimple_call_fndecl (stmt);
2722 if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
2723 {
2724 gsi_next (&i);
2725 continue;
2726 }
2727
2728 fcode = DECL_FUNCTION_CODE (callee);
2729 if (fold_stmt (&i))
2730 ;
2731 else
2732 {
2733 tree result = NULL_TREE;
2734 switch (DECL_FUNCTION_CODE (callee))
2735 {
2736 case BUILT_IN_CONSTANT_P:
2737 /* Resolve __builtin_constant_p. If it hasn't been
2738 folded to integer_one_node by now, it's fairly
2739 certain that the value simply isn't constant. */
2740 result = integer_zero_node;
2741 break;
2742
2743 case BUILT_IN_ASSUME_ALIGNED:
2744 /* Remove __builtin_assume_aligned. */
2745 result = gimple_call_arg (stmt, 0);
2746 break;
2747
2748 case BUILT_IN_STACK_RESTORE:
2749 result = optimize_stack_restore (i);
2750 if (result)
2751 break;
2752 gsi_next (&i);
2753 continue;
2754
2755 case BUILT_IN_UNREACHABLE:
2756 if (optimize_unreachable (i))
2757 cfg_changed = true;
2758 break;
2759
2760 case BUILT_IN_VA_START:
2761 case BUILT_IN_VA_END:
2762 case BUILT_IN_VA_COPY:
2763 /* These shouldn't be folded before pass_stdarg. */
2764 result = optimize_stdarg_builtin (stmt);
2765 if (result)
2766 break;
2767 /* FALLTHRU */
2768
2769 default:;
2770 }
2771
2772 if (!result)
2773 {
2774 gsi_next (&i);
2775 continue;
2776 }
2777
2778 if (!update_call_from_tree (&i, result))
2779 gimplify_and_update_call_from_tree (&i, result);
2780 }
2781
2782 todoflags |= TODO_update_address_taken;
2783
2784 if (dump_file && (dump_flags & TDF_DETAILS))
2785 {
2786 fprintf (dump_file, "Simplified\n ");
2787 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2788 }
2789
2790 old_stmt = stmt;
2791 stmt = gsi_stmt (i);
2792 update_stmt (stmt);
2793
2794 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
2795 && gimple_purge_dead_eh_edges (bb))
2796 cfg_changed = true;
2797
2798 if (dump_file && (dump_flags & TDF_DETAILS))
2799 {
2800 fprintf (dump_file, "to\n ");
2801 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2802 fprintf (dump_file, "\n");
2803 }
2804
2805 /* Retry the same statement if it changed into another
2806 builtin, there might be new opportunities now. */
2807 if (gimple_code (stmt) != GIMPLE_CALL)
2808 {
2809 gsi_next (&i);
2810 continue;
2811 }
2812 callee = gimple_call_fndecl (stmt);
2813 if (!callee
2814 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2815 || DECL_FUNCTION_CODE (callee) == fcode)
2816 gsi_next (&i);
2817 }
2818 }
2819
2820 /* Delete unreachable blocks. */
2821 if (cfg_changed)
2822 todoflags |= TODO_cleanup_cfg;
2823
2824 return todoflags;
2825 }
2826
2827 } // anon namespace
2828
2829 gimple_opt_pass *
2830 make_pass_fold_builtins (gcc::context *ctxt)
2831 {
2832 return new pass_fold_builtins (ctxt);
2833 }