/* Functions to determine/estimate number of iterations of a loop.
   Copyright (C) 2004-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "pointer-set.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-data-ref.h"
#include "diagnostic-core.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "wide-int-print.h"
#define SWAP(X, Y) do { affine_iv *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)

/* The maximum number of dominator BBs we search for conditions
   of loop header copies we use for simplifying a conditional
   expression.  */
#define MAX_DOMINATORS_TO_WALK 8

/*

   Analysis of number of iterations of an affine exit test.

*/

/* Bounds on some value, BELOW <= X <= UP.  */

typedef struct
{
  mpz_t below, up;
} bounds;
/* Splits expression EXPR to a variable part VAR and constant OFFSET.  */

static void
split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
{
  tree type = TREE_TYPE (expr);
  tree op0, op1;
  bool negate = false;

  *var = expr;
  mpz_set_ui (offset, 0);

  switch (TREE_CODE (expr))
    {
    case MINUS_EXPR:
      negate = true;
      /* Fallthru.  */

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      if (TREE_CODE (op1) != INTEGER_CST)
        break;

      *var = op0;
      /* Always sign extend the offset.  */
      wi::to_mpz (op1, offset, SIGNED);
      if (negate)
        mpz_neg (offset, offset);
      break;

    case INTEGER_CST:
      *var = build_int_cst_type (type, 0);
      wi::to_mpz (expr, offset, TYPE_SIGN (type));
      break;

    default:
      break;
    }
}
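/* For example, EXPR = a_5 + 7 splits into *VAR = a_5 and OFFSET = 7,
   EXPR = b_3 - 2 splits into *VAR = b_3 and OFFSET = -2, and a plain
   constant 13 splits into *VAR = 0 and OFFSET = 13.  (The SSA names in
   this example are made up for illustration.)  */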
/* Stores estimate on the minimum/maximum value of the expression VAR + OFF
   in TYPE to MIN and MAX.  */

static void
determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
                       mpz_t min, mpz_t max)
{
  wide_int minv, maxv;
  enum value_range_type rtype = VR_VARYING;

  /* If the expression is a constant, we know its value exactly.  */
  if (integer_zerop (var))
    {
      mpz_set (min, off);
      mpz_set (max, off);
      return;
    }

  get_type_static_bounds (type, min, max);

  /* See if we have some range info from VRP.  */
  if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
    {
      edge e = loop_preheader_edge (loop);
      signop sgn = TYPE_SIGN (type);
      gimple_stmt_iterator gsi;

      /* Either for VAR itself...  */
      rtype = get_range_info (var, &minv, &maxv);
      /* Or for PHI results in loop->header where VAR is used as
         PHI argument from the loop preheader edge.  */
      for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple phi = gsi_stmt (gsi);
          wide_int minc, maxc;

          if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
              && (get_range_info (gimple_phi_result (phi), &minc, &maxc)
                  == VR_RANGE))
            {
              if (rtype != VR_RANGE)
                {
                  rtype = VR_RANGE;
                  minv = minc;
                  maxv = maxc;
                }
              else
                {
                  minv = wi::max (minv, minc, sgn);
                  maxv = wi::min (maxv, maxc, sgn);
                  /* If the PHI result range are inconsistent with
                     the VAR range, give up on looking at the PHI
                     results.  This can happen if VR_UNDEFINED is
                     involved.  */
                  if (wi::gt_p (minv, maxv, sgn))
                    {
                      rtype = get_range_info (var, &minv, &maxv);
                      break;
                    }
                }
            }
        }

      if (rtype == VR_RANGE)
        {
          mpz_t minm, maxm;
          gcc_assert (wi::le_p (minv, maxv, sgn));
          mpz_init (minm);
          mpz_init (maxm);
          wi::to_mpz (minv, minm, sgn);
          wi::to_mpz (maxv, maxm, sgn);
          mpz_add (minm, minm, off);
          mpz_add (maxm, maxm, off);
          /* If the computation may not wrap or off is zero, then this
             is always fine.  If off is negative and minv + off isn't
             smaller than type's minimum, or off is positive and
             maxv + off isn't bigger than type's maximum, use the more
             precise range too.  */
          if (nowrap_type_p (type)
              || mpz_sgn (off) == 0
              || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
              || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
            {
              mpz_set (min, minm);
              mpz_set (max, maxm);
              mpz_clear (minm);
              mpz_clear (maxm);
              return;
            }
          mpz_clear (minm);
          mpz_clear (maxm);
        }
    }

  /* If the computation may wrap, we know nothing about the value, except for
     the range of the type.  */
  if (!nowrap_type_p (type))
    return;

  /* Since the addition of OFF does not wrap, if OFF is positive, then we may
     add it to MIN, otherwise to MAX.  */
  if (mpz_sgn (off) < 0)
    mpz_add (max, max, off);
  else
    mpz_add (min, min, off);
}
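/* For instance, if VRP recorded VAR in [3, 10] and OFF is 5 in a type
   that cannot wrap for these values, the resulting range is [8, 15];
   if the addition might wrap, only the static bounds of the type are
   kept.  (Illustrative values only.)  */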
/* Stores the bounds on the difference of the values of the expressions
   (var + X) and (var + Y), computed in TYPE, to BNDS.  */

static void
bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
                                    bounds *bnds)
{
  int rel = mpz_cmp (x, y);
  bool may_wrap = !nowrap_type_p (type);
  mpz_t m;

  /* If X == Y, then the expressions are always equal.
     If X > Y, there are the following possibilities:
       a) neither of var + X and var + Y overflow or underflow, or both of
          them do.  Then their difference is X - Y.
       b) var + X overflows, and var + Y does not.  Then the values of the
          expressions are var + X - M and var + Y, where M is the range of
          the type, and their difference is X - Y - M.
       c) var + Y underflows and var + X does not.  Their difference again
          is M - X + Y.
     Therefore, if the arithmetics in type does not overflow, then the
     bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y)
     Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
     (X - Y, X - Y + M).  */

  if (rel == 0)
    {
      mpz_set_ui (bnds->below, 0);
      mpz_set_ui (bnds->up, 0);
      return;
    }

  mpz_init (m);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
  mpz_add_ui (m, m, 1);
  mpz_sub (bnds->up, x, y);
  mpz_set (bnds->below, bnds->up);

  if (may_wrap)
    {
      if (rel > 0)
        mpz_sub (bnds->below, bnds->below, m);
      else
        mpz_add (bnds->up, bnds->up, m);
    }

  mpz_clear (m);
}
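/* Example: in an 8-bit unsigned type (M = 256) with X = 10 and Y = 3,
   the difference (var + 10) - (var + 3) is 7 when neither or both
   additions wrap, but 7 - 256 = -249 when only var + 10 wraps; hence
   the bounds are [-249, 7] if the type may wrap and [7, 7] otherwise.
   (Illustrative numbers only.)  */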
/* From condition C0 CMP C1 derives information regarding the
   difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
   and stores it to BNDS.  */

static void
refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
                           tree vary, mpz_t offy,
                           tree c0, enum tree_code cmp, tree c1,
                           bounds *bnds)
{
  tree varc0, varc1, tmp, ctype;
  mpz_t offc0, offc1, loffx, loffy, bnd;
  bool lbound = false;
  bool no_wrap = nowrap_type_p (type);
  bool x_ok, y_ok;

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
        return;
      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however, such
         a guard is unlikely to appear, so we do not bother with handling
         it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much of useful information, except for
         special case of comparing with the bounds of the type.  */
      if (TREE_CODE (c1) != INTEGER_CST
          || !INTEGRAL_TYPE_P (type))
        return;

      /* Ensure that the condition speaks about an expression in the same type
         as X and Y.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
        return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (TYPE_MIN_VALUE (type)
          && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
        {
          cmp = GT_EXPR;
          break;
        }
      if (TYPE_MAX_VALUE (type)
          && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
        {
          cmp = LT_EXPR;
          break;
        }
      return;

    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VARX and
     VARY.  TODO -- we might also be able to derive some bounds from
     expressions containing just one of the variables.  */

  if (operand_equal_p (varx, varc1, 0))
    {
      tmp = varc0; varc0 = varc1; varc1 = tmp;
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }

  if (!operand_equal_p (varx, varc0, 0)
      || !operand_equal_p (vary, varc1, 0))
    goto end;

  mpz_init_set (loffx, offx);
  mpz_init_set (loffy, offy);

  if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      tmp = varx; varx = vary; vary = tmp;
      mpz_swap (offc0, offc1);
      mpz_swap (loffx, loffy);
      cmp = swap_tree_comparison (cmp);
      lbound = true;
    }

  /* If there is no overflow, the condition implies that

     (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).

     The overflows and underflows may complicate things a bit; each
     overflow decreases the appropriate offset by M, and underflow
     increases it by M.  The above inequality would not necessarily be
     true if

     -- VARX + OFFX underflows and VARX + OFFC0 does not, or
        VARX + OFFC0 overflows, but VARX + OFFX does not.
        This may only happen if OFFX < OFFC0.
     -- VARY + OFFY overflows and VARY + OFFC1 does not, or
        VARY + OFFC1 underflows and VARY + OFFY does not.
        This may only happen if OFFY > OFFC1.  */

  if (no_wrap)
    {
      x_ok = true;
      y_ok = true;
    }
  else
    {
      x_ok = (integer_zerop (varx)
              || mpz_cmp (loffx, offc0) >= 0);
      y_ok = (integer_zerop (vary)
              || mpz_cmp (loffy, offc1) <= 0);
    }

  if (x_ok && y_ok)
    {
      mpz_init (bnd);
      mpz_sub (bnd, loffx, loffy);
      mpz_add (bnd, bnd, offc1);
      mpz_sub (bnd, bnd, offc0);

      if (cmp == LT_EXPR)
        mpz_sub_ui (bnd, bnd, 1);

      if (lbound)
        {
          mpz_neg (bnd, bnd);
          if (mpz_cmp (bnds->below, bnd) < 0)
            mpz_set (bnds->below, bnd);
        }
      else
        {
          if (mpz_cmp (bnd, bnds->up) < 0)
            mpz_set (bnds->up, bnd);
        }
      mpz_clear (bnd);
    }

  mpz_clear (loffx);
  mpz_clear (loffy);

end:
  mpz_clear (offc0);
  mpz_clear (offc1);
}
/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
   The subtraction is considered to be performed in arbitrary precision,
   without overflows.

   We do not attempt to be too clever regarding the value ranges of X and
   Y; most of the time, they are just integers or ssa names offsetted by
   integer.  However, we try to use the information contained in the
   comparisons before the loop (usually created by loop header copying).  */

static void
bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
{
  tree type = TREE_TYPE (x);
  tree varx, vary;
  mpz_t offx, offy;
  mpz_t minx, maxx, miny, maxy;
  int cnt = 0;
  edge e;
  basic_block bb;
  tree c0, c1;
  gimple cond;
  enum tree_code cmp;

  /* Get rid of unnecessary casts, but preserve the value of
     the expressions.  */
  STRIP_SIGN_NOPS (x);
  STRIP_SIGN_NOPS (y);

  mpz_init (bnds->below);
  mpz_init (bnds->up);
  mpz_init (offx);
  mpz_init (offy);
  split_to_var_and_offset (x, &varx, offx);
  split_to_var_and_offset (y, &vary, offy);

  if (!integer_zerop (varx)
      && operand_equal_p (varx, vary, 0))
    {
      /* Special case VARX == VARY -- we just need to compare the
         offsets.  The matters are a bit more complicated in the
         case addition of offsets may wrap.  */
      bound_difference_of_offsetted_base (type, offx, offy, bnds);
    }
  else
    {
      /* Otherwise, use the value ranges to determine the initial
         estimates on below and up.  */
      mpz_init (minx);
      mpz_init (maxx);
      mpz_init (miny);
      mpz_init (maxy);
      determine_value_range (loop, type, varx, offx, minx, maxx);
      determine_value_range (loop, type, vary, offy, miny, maxy);

      mpz_sub (bnds->below, minx, maxy);
      mpz_sub (bnds->up, maxx, miny);
      mpz_clear (minx);
      mpz_clear (maxx);
      mpz_clear (miny);
      mpz_clear (maxy);
    }

  /* If both X and Y are constants, we cannot get any more precise.  */
  if (integer_zerop (varx) && integer_zerop (vary))
    goto end;

  /* Now walk the dominators of the loop header and use the entry
     guards to refine the estimates.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
        continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
        continue;

      cond = last_stmt (e->src);
      c0 = gimple_cond_lhs (cond);
      cmp = gimple_cond_code (cond);
      c1 = gimple_cond_rhs (cond);

      if (e->flags & EDGE_FALSE_VALUE)
        cmp = invert_tree_comparison (cmp, false);

      refine_bounds_using_guard (type, varx, offx, vary, offy,
                                 c0, cmp, c1, bnds);
      ++cnt;
    }

end:
  mpz_clear (offx);
  mpz_clear (offy);
}
/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of X + DELTA.  X can be obtained as a
   difference of two values in TYPE.  */

static void
bounds_add (bounds *bnds, const widest_int &delta, tree type)
{
  mpz_t mdelta, max;

  mpz_init (mdelta);
  wi::to_mpz (delta, mdelta, SIGNED);

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);

  mpz_add (bnds->up, bnds->up, mdelta);
  mpz_add (bnds->below, bnds->below, mdelta);

  if (mpz_cmp (bnds->up, max) > 0)
    mpz_set (bnds->up, max);

  mpz_neg (max, max);
  if (mpz_cmp (bnds->below, max) < 0)
    mpz_set (bnds->below, max);

  mpz_clear (mdelta);
  mpz_clear (max);
}
/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of -X.  */

static void
bounds_negate (bounds *bnds)
{
  mpz_t tmp;

  mpz_init_set (tmp, bnds->up);
  mpz_neg (bnds->up, bnds->below);
  mpz_neg (bnds->below, tmp);
  mpz_clear (tmp);
}
/* Returns inverse of X modulo 2^s, where MASK = 2^s-1.  */

static tree
inverse (tree x, tree mask)
{
  tree type = TREE_TYPE (x);
  tree rslt;
  unsigned ctr = tree_floor_log2 (mask);

  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT ix;
      unsigned HOST_WIDE_INT imask;
      unsigned HOST_WIDE_INT irslt = 1;

      gcc_assert (cst_and_fits_in_hwi (x));
      gcc_assert (cst_and_fits_in_hwi (mask));

      ix = int_cst_value (x);
      imask = int_cst_value (mask);

      for (; ctr; ctr--)
        {
          irslt *= ix;
          ix *= ix;
        }
      irslt &= imask;

      rslt = build_int_cst_type (type, irslt);
    }
  else
    {
      rslt = build_int_cst (type, 1);
      for (; ctr; ctr--)
        {
          rslt = int_const_binop (MULT_EXPR, rslt, x);
          x = int_const_binop (MULT_EXPR, x, x);
        }
      rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
    }

  return rslt;
}
/* Derives the upper bound BND on the number of executions of loop with exit
   condition S * i <> C.  If NO_OVERFLOW is true, then the control variable of
   the loop does not overflow.  EXIT_MUST_BE_TAKEN is true if we are guaranteed
   that the loop ends through this exit, i.e., the induction variable ever
   reaches the value of C.

   The value C is equal to final - base, where final and base are the final and
   initial value of the actual induction variable in the analysed loop.  BNDS
   bounds the value of this difference when computed in signed type with
   unbounded range, while the computation of C is performed in an unsigned
   type with the range matching the range of the type of the induction variable.
   In particular, BNDS.up contains an upper bound on C in the following cases:
   -- if the iv must reach its final value without overflow, i.e., if
      NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
   -- if final >= base, which we know to hold when BNDS.below >= 0.  */

static void
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
                             bounds *bnds, bool exit_must_be_taken)
{
  widest_int max;
  mpz_t d;
  tree type = TREE_TYPE (c);
  bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
                       || mpz_sgn (bnds->below) >= 0);

  if (integer_onep (s)
      || (TREE_CODE (c) == INTEGER_CST
          && TREE_CODE (s) == INTEGER_CST
          && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0)
      || (TYPE_OVERFLOW_UNDEFINED (type)
          && multiple_of_p (type, c, s)))
    {
      /* If C is an exact multiple of S, then its value will be reached before
         the induction variable overflows (unless the loop is exited in some
         other way before).  Note that the actual induction variable in the
         loop (which ranges from base to final instead of from 0 to C) may
         overflow, in which case BNDS.up will not be giving a correct upper
         bound on C; thus, BNDS_U_VALID had to be computed in advance.  */
      no_overflow = true;
      exit_must_be_taken = true;
    }

  /* If the induction variable can overflow, the number of iterations is at
     most the period of the control variable (or infinite, but in that case
     the whole # of iterations analysis will fail).  */
  if (!no_overflow)
    {
      max = wi::mask <widest_int> (TYPE_PRECISION (type) - wi::ctz (s), false);
      wi::to_mpz (max, bnd, UNSIGNED);
      return;
    }

  /* Now we know that the induction variable does not overflow, so the loop
     iterates at most (range of type / S) times.  */
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);

  /* If the induction variable is guaranteed to reach the value of C before
     overflow, ... */
  if (exit_must_be_taken)
    {
      /* ... then we can strengthen this to C / S, and possibly we can use
         the upper bound on C given by BNDS.  */
      if (TREE_CODE (c) == INTEGER_CST)
        wi::to_mpz (c, bnd, UNSIGNED);
      else if (bnds_u_valid)
        mpz_set (bnd, bnds->up);
    }

  mpz_init (d);
  wi::to_mpz (s, d, UNSIGNED);
  mpz_fdiv_q (bnd, bnd, d);
  mpz_clear (d);
}
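/* For instance, for an 8-bit unsigned control variable and S = 4, the
   period of S * i gives the bound 2^(8 - ctz (4)) - 1 = 63 when overflow
   is possible; when overflow is excluded the bound starts from
   (2^8 - 1) / 4 = 63 and usually tightens further to C / S.  (The
   numbers here are only an illustration.)  */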
/* Determines number of iterations of loop whose ending condition
   is IV <> FINAL.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that the exit must be taken eventually, i.e., that the IV
   ever reaches the value FINAL (we derived this earlier, and possibly set
   NITER->assumptions to make sure this is the case).  BNDS contains the
   bounds on the difference FINAL - IV->base.  */

static bool
number_of_iterations_ne (tree type, affine_iv *iv, tree final,
                         struct tree_niter_desc *niter, bool exit_must_be_taken,
                         bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree s, c, d, bits, assumption, tmp, bound;
  mpz_t max;

  niter->control = *iv;
  niter->bound = final;
  niter->cmp = NE_EXPR;

  /* Rearrange the terms so that we get inequality S * i <> C, with S
     positive.  Also cast everything to the unsigned type.  If IV does
     not overflow, BNDS bounds the value of C.  Also, this is the
     case if the computation |FINAL - IV->base| does not overflow, i.e.,
     if BNDS->below in the result is nonnegative.  */
  if (tree_int_cst_sign_bit (iv->step))
    {
      s = fold_convert (niter_type,
                        fold_build1 (NEGATE_EXPR, type, iv->step));
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, iv->base),
                       fold_convert (niter_type, final));
      bounds_negate (bnds);
    }
  else
    {
      s = fold_convert (niter_type, iv->step);
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, final),
                       fold_convert (niter_type, iv->base));
    }

  mpz_init (max);
  number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
                               exit_must_be_taken);
  niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
                                 TYPE_SIGN (niter_type));
  mpz_clear (max);

  /* First the trivial cases -- when the step is 1.  */
  if (integer_onep (s))
    {
      niter->niter = c;
      return true;
    }

  /* Let nsd (step, size of mode) = d.  If d does not divide c, the loop
     is infinite.  Otherwise, the number of iterations is
     (inverse(s/d) * (c/d)) mod (size of mode/d).  */
  bits = num_ending_zeros (s);
  bound = build_low_bits_mask (niter_type,
                               (TYPE_PRECISION (niter_type)
                                - tree_to_uhwi (bits)));

  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
                               build_int_cst (niter_type, 1), bits);
  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);

  if (!exit_must_be_taken)
    {
      /* If we cannot assume that the exit is taken eventually, record the
         assumptions for divisibility of c.  */
      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
                                assumption, build_int_cst (niter_type, 0));
      if (!integer_nonzerop (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);
    }

  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
  tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
  niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
  return true;
}
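/* Example: for the exit test i != 100 with i starting at 4 and stepping
   by 8, we get S = 8 and C = 96, so D = 8 and the computed number of
   iterations is 96 / 8 = 12; if C were not divisible by D the loop
   would, under the recorded divisibility assumption, be infinite.
   (The concrete numbers are illustrative only.)  */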
787 /* Checks whether we can determine the final value of the control variable
788 of the loop with ending condition IV0 < IV1 (computed in TYPE).
789 DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
790 of the step. The assumptions necessary to ensure that the computation
791 of the final value does not overflow are recorded in NITER. If we
792 find the final value, we adjust DELTA and return TRUE. Otherwise
793 we return false. BNDS bounds the value of IV1->base - IV0->base,
794 and will be updated by the same amount as DELTA. EXIT_MUST_BE_TAKEN is
795 true if we know that the exit must be taken eventually. */
798 number_of_iterations_lt_to_ne (tree type
, affine_iv
*iv0
, affine_iv
*iv1
,
799 struct tree_niter_desc
*niter
,
800 tree
*delta
, tree step
,
801 bool exit_must_be_taken
, bounds
*bnds
)
803 tree niter_type
= TREE_TYPE (step
);
804 tree mod
= fold_build2 (FLOOR_MOD_EXPR
, niter_type
, *delta
, step
);
807 tree assumption
= boolean_true_node
, bound
, noloop
;
808 bool ret
= false, fv_comp_no_overflow
;
810 if (POINTER_TYPE_P (type
))
813 if (TREE_CODE (mod
) != INTEGER_CST
)
815 if (integer_nonzerop (mod
))
816 mod
= fold_build2 (MINUS_EXPR
, niter_type
, step
, mod
);
817 tmod
= fold_convert (type1
, mod
);
820 wi::to_mpz (mod
, mmod
, UNSIGNED
);
821 mpz_neg (mmod
, mmod
);
823 /* If the induction variable does not overflow and the exit is taken,
824 then the computation of the final value does not overflow. This is
825 also obviously the case if the new final value is equal to the
826 current one. Finally, we postulate this for pointer type variables,
827 as the code cannot rely on the object to that the pointer points being
828 placed at the end of the address space (and more pragmatically,
829 TYPE_{MIN,MAX}_VALUE is not defined for pointers). */
830 if (integer_zerop (mod
) || POINTER_TYPE_P (type
))
831 fv_comp_no_overflow
= true;
832 else if (!exit_must_be_taken
)
833 fv_comp_no_overflow
= false;
835 fv_comp_no_overflow
=
836 (iv0
->no_overflow
&& integer_nonzerop (iv0
->step
))
837 || (iv1
->no_overflow
&& integer_nonzerop (iv1
->step
));
839 if (integer_nonzerop (iv0
->step
))
841 /* The final value of the iv is iv1->base + MOD, assuming that this
842 computation does not overflow, and that
843 iv0->base <= iv1->base + MOD. */
844 if (!fv_comp_no_overflow
)
846 bound
= fold_build2 (MINUS_EXPR
, type1
,
847 TYPE_MAX_VALUE (type1
), tmod
);
848 assumption
= fold_build2 (LE_EXPR
, boolean_type_node
,
850 if (integer_zerop (assumption
))
853 if (mpz_cmp (mmod
, bnds
->below
) < 0)
854 noloop
= boolean_false_node
;
855 else if (POINTER_TYPE_P (type
))
856 noloop
= fold_build2 (GT_EXPR
, boolean_type_node
,
858 fold_build_pointer_plus (iv1
->base
, tmod
));
860 noloop
= fold_build2 (GT_EXPR
, boolean_type_node
,
862 fold_build2 (PLUS_EXPR
, type1
,
867 /* The final value of the iv is iv0->base - MOD, assuming that this
868 computation does not overflow, and that
869 iv0->base - MOD <= iv1->base. */
870 if (!fv_comp_no_overflow
)
872 bound
= fold_build2 (PLUS_EXPR
, type1
,
873 TYPE_MIN_VALUE (type1
), tmod
);
874 assumption
= fold_build2 (GE_EXPR
, boolean_type_node
,
876 if (integer_zerop (assumption
))
879 if (mpz_cmp (mmod
, bnds
->below
) < 0)
880 noloop
= boolean_false_node
;
881 else if (POINTER_TYPE_P (type
))
882 noloop
= fold_build2 (GT_EXPR
, boolean_type_node
,
883 fold_build_pointer_plus (iv0
->base
,
884 fold_build1 (NEGATE_EXPR
,
888 noloop
= fold_build2 (GT_EXPR
, boolean_type_node
,
889 fold_build2 (MINUS_EXPR
, type1
,
894 if (!integer_nonzerop (assumption
))
895 niter
->assumptions
= fold_build2 (TRUTH_AND_EXPR
, boolean_type_node
,
898 if (!integer_zerop (noloop
))
899 niter
->may_be_zero
= fold_build2 (TRUTH_OR_EXPR
, boolean_type_node
,
902 bounds_add (bnds
, wi::to_widest (mod
), type
);
903 *delta
= fold_build2 (PLUS_EXPR
, niter_type
, *delta
, mod
);
911 /* Add assertions to NITER that ensure that the control variable of the loop
912 with ending condition IV0 < IV1 does not overflow. Types of IV0 and IV1
913 are TYPE. Returns false if we can prove that there is an overflow, true
914 otherwise. STEP is the absolute value of the step. */
917 assert_no_overflow_lt (tree type
, affine_iv
*iv0
, affine_iv
*iv1
,
918 struct tree_niter_desc
*niter
, tree step
)
920 tree bound
, d
, assumption
, diff
;
921 tree niter_type
= TREE_TYPE (step
);
923 if (integer_nonzerop (iv0
->step
))
925 /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
926 if (iv0
->no_overflow
)
929 /* If iv0->base is a constant, we can determine the last value before
930 overflow precisely; otherwise we conservatively assume
933 if (TREE_CODE (iv0
->base
) == INTEGER_CST
)
935 d
= fold_build2 (MINUS_EXPR
, niter_type
,
936 fold_convert (niter_type
, TYPE_MAX_VALUE (type
)),
937 fold_convert (niter_type
, iv0
->base
));
938 diff
= fold_build2 (FLOOR_MOD_EXPR
, niter_type
, d
, step
);
941 diff
= fold_build2 (MINUS_EXPR
, niter_type
, step
,
942 build_int_cst (niter_type
, 1));
943 bound
= fold_build2 (MINUS_EXPR
, type
,
944 TYPE_MAX_VALUE (type
), fold_convert (type
, diff
));
945 assumption
= fold_build2 (LE_EXPR
, boolean_type_node
,
950 /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
951 if (iv1
->no_overflow
)
954 if (TREE_CODE (iv1
->base
) == INTEGER_CST
)
956 d
= fold_build2 (MINUS_EXPR
, niter_type
,
957 fold_convert (niter_type
, iv1
->base
),
958 fold_convert (niter_type
, TYPE_MIN_VALUE (type
)));
959 diff
= fold_build2 (FLOOR_MOD_EXPR
, niter_type
, d
, step
);
962 diff
= fold_build2 (MINUS_EXPR
, niter_type
, step
,
963 build_int_cst (niter_type
, 1));
964 bound
= fold_build2 (PLUS_EXPR
, type
,
965 TYPE_MIN_VALUE (type
), fold_convert (type
, diff
));
966 assumption
= fold_build2 (GE_EXPR
, boolean_type_node
,
970 if (integer_zerop (assumption
))
972 if (!integer_nonzerop (assumption
))
973 niter
->assumptions
= fold_build2 (TRUTH_AND_EXPR
, boolean_type_node
,
974 niter
->assumptions
, assumption
);
976 iv0
->no_overflow
= true;
977 iv1
->no_overflow
= true;
981 /* Add an assumption to NITER that a loop whose ending condition
982 is IV0 < IV1 rolls. TYPE is the type of the control iv. BNDS
983 bounds the value of IV1->base - IV0->base. */
986 assert_loop_rolls_lt (tree type
, affine_iv
*iv0
, affine_iv
*iv1
,
987 struct tree_niter_desc
*niter
, bounds
*bnds
)
989 tree assumption
= boolean_true_node
, bound
, diff
;
990 tree mbz
, mbzl
, mbzr
, type1
;
991 bool rolls_p
, no_overflow_p
;
995 /* We are going to compute the number of iterations as
996 (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
997 variant of TYPE. This formula only works if
999 -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1
1001 (where MAX is the maximum value of the unsigned variant of TYPE, and
1002 the computations in this formula are performed in full precision,
1003 i.e., without overflows).
1005 Usually, for loops with exit condition iv0->base + step * i < iv1->base,
1006 we have a condition of the form iv0->base - step < iv1->base before the loop,
1007 and for loops iv0->base < iv1->base - step * i the condition
1008 iv0->base < iv1->base + step, due to loop header copying, which enable us
1009 to prove the lower bound.
1011 The upper bound is more complicated. Unless the expressions for initial
1012 and final value themselves contain enough information, we usually cannot
1013 derive it from the context. */
1015 /* First check whether the answer does not follow from the bounds we gathered
1017 if (integer_nonzerop (iv0
->step
))
1018 dstep
= wi::to_widest (iv0
->step
);
1021 dstep
= wi::sext (wi::to_widest (iv1
->step
), TYPE_PRECISION (type
));
1026 wi::to_mpz (dstep
, mstep
, UNSIGNED
);
1027 mpz_neg (mstep
, mstep
);
1028 mpz_add_ui (mstep
, mstep
, 1);
1030 rolls_p
= mpz_cmp (mstep
, bnds
->below
) <= 0;
1033 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type
)), max
, UNSIGNED
);
1034 mpz_add (max
, max
, mstep
);
1035 no_overflow_p
= (mpz_cmp (bnds
->up
, max
) <= 0
1036 /* For pointers, only values lying inside a single object
1037 can be compared or manipulated by pointer arithmetics.
1038 Gcc in general does not allow or handle objects larger
1039 than half of the address space, hence the upper bound
1040 is satisfied for pointers. */
1041 || POINTER_TYPE_P (type
));
1045 if (rolls_p
&& no_overflow_p
)
1049 if (POINTER_TYPE_P (type
))
1052 /* Now the hard part; we must formulate the assumption(s) as expressions, and
1053 we must be careful not to introduce overflow. */
1055 if (integer_nonzerop (iv0
->step
))
1057 diff
= fold_build2 (MINUS_EXPR
, type1
,
1058 iv0
->step
, build_int_cst (type1
, 1));
1060 /* We need to know that iv0->base >= MIN + iv0->step - 1. Since
1061 0 address never belongs to any object, we can assume this for
1063 if (!POINTER_TYPE_P (type
))
1065 bound
= fold_build2 (PLUS_EXPR
, type1
,
1066 TYPE_MIN_VALUE (type
), diff
);
1067 assumption
= fold_build2 (GE_EXPR
, boolean_type_node
,
1071 /* And then we can compute iv0->base - diff, and compare it with
1073 mbzl
= fold_build2 (MINUS_EXPR
, type1
,
1074 fold_convert (type1
, iv0
->base
), diff
);
1075 mbzr
= fold_convert (type1
, iv1
->base
);
1079 diff
= fold_build2 (PLUS_EXPR
, type1
,
1080 iv1
->step
, build_int_cst (type1
, 1));
1082 if (!POINTER_TYPE_P (type
))
1084 bound
= fold_build2 (PLUS_EXPR
, type1
,
1085 TYPE_MAX_VALUE (type
), diff
);
1086 assumption
= fold_build2 (LE_EXPR
, boolean_type_node
,
1090 mbzl
= fold_convert (type1
, iv0
->base
);
1091 mbzr
= fold_build2 (MINUS_EXPR
, type1
,
1092 fold_convert (type1
, iv1
->base
), diff
);
1095 if (!integer_nonzerop (assumption
))
1096 niter
->assumptions
= fold_build2 (TRUTH_AND_EXPR
, boolean_type_node
,
1097 niter
->assumptions
, assumption
);
1100 mbz
= fold_build2 (GT_EXPR
, boolean_type_node
, mbzl
, mbzr
);
1101 niter
->may_be_zero
= fold_build2 (TRUTH_OR_EXPR
, boolean_type_node
,
1102 niter
->may_be_zero
, mbz
);
1106 /* Determines number of iterations of loop whose ending condition
1107 is IV0 < IV1. TYPE is the type of the iv. The number of
1108 iterations is stored to NITER. BNDS bounds the difference
1109 IV1->base - IV0->base. EXIT_MUST_BE_TAKEN is true if we know
1110 that the exit must be taken eventually. */
1113 number_of_iterations_lt (tree type
, affine_iv
*iv0
, affine_iv
*iv1
,
1114 struct tree_niter_desc
*niter
,
1115 bool exit_must_be_taken
, bounds
*bnds
)
1117 tree niter_type
= unsigned_type_for (type
);
1118 tree delta
, step
, s
;
1121 if (integer_nonzerop (iv0
->step
))
1123 niter
->control
= *iv0
;
1124 niter
->cmp
= LT_EXPR
;
1125 niter
->bound
= iv1
->base
;
1129 niter
->control
= *iv1
;
1130 niter
->cmp
= GT_EXPR
;
1131 niter
->bound
= iv0
->base
;
1134 delta
= fold_build2 (MINUS_EXPR
, niter_type
,
1135 fold_convert (niter_type
, iv1
->base
),
1136 fold_convert (niter_type
, iv0
->base
));
1138 /* First handle the special case that the step is +-1. */
1139 if ((integer_onep (iv0
->step
) && integer_zerop (iv1
->step
))
1140 || (integer_all_onesp (iv1
->step
) && integer_zerop (iv0
->step
)))
1142 /* for (i = iv0->base; i < iv1->base; i++)
1146 for (i = iv1->base; i > iv0->base; i--).
1148 In both cases # of iterations is iv1->base - iv0->base, assuming that
1149 iv1->base >= iv0->base.
1151 First try to derive a lower bound on the value of
1152 iv1->base - iv0->base, computed in full precision. If the difference
1153 is nonnegative, we are done, otherwise we must record the
1156 if (mpz_sgn (bnds
->below
) < 0)
1157 niter
->may_be_zero
= fold_build2 (LT_EXPR
, boolean_type_node
,
1158 iv1
->base
, iv0
->base
);
1159 niter
->niter
= delta
;
1160 niter
->max
= widest_int::from (wi::from_mpz (niter_type
, bnds
->up
, false),
1161 TYPE_SIGN (niter_type
));
1165 if (integer_nonzerop (iv0
->step
))
1166 step
= fold_convert (niter_type
, iv0
->step
);
1168 step
= fold_convert (niter_type
,
1169 fold_build1 (NEGATE_EXPR
, type
, iv1
->step
));
1171 /* If we can determine the final value of the control iv exactly, we can
1172 transform the condition to != comparison. In particular, this will be
1173 the case if DELTA is constant. */
1174 if (number_of_iterations_lt_to_ne (type
, iv0
, iv1
, niter
, &delta
, step
,
1175 exit_must_be_taken
, bnds
))
1179 zps
.base
= build_int_cst (niter_type
, 0);
1181 /* number_of_iterations_lt_to_ne will add assumptions that ensure that
1182 zps does not overflow. */
1183 zps
.no_overflow
= true;
1185 return number_of_iterations_ne (type
, &zps
, delta
, niter
, true, bnds
);
1188 /* Make sure that the control iv does not overflow. */
1189 if (!assert_no_overflow_lt (type
, iv0
, iv1
, niter
, step
))
1192 /* We determine the number of iterations as (delta + step - 1) / step. For
1193 this to work, we must know that iv1->base >= iv0->base - step + 1,
1194 otherwise the loop does not roll. */
1195 assert_loop_rolls_lt (type
, iv0
, iv1
, niter
, bnds
);
1197 s
= fold_build2 (MINUS_EXPR
, niter_type
,
1198 step
, build_int_cst (niter_type
, 1));
1199 delta
= fold_build2 (PLUS_EXPR
, niter_type
, delta
, s
);
1200 niter
->niter
= fold_build2 (FLOOR_DIV_EXPR
, niter_type
, delta
, step
);
1204 wi::to_mpz (step
, mstep
, UNSIGNED
);
1205 mpz_add (tmp
, bnds
->up
, mstep
);
1206 mpz_sub_ui (tmp
, tmp
, 1);
1207 mpz_fdiv_q (tmp
, tmp
, mstep
);
1208 niter
->max
= widest_int::from (wi::from_mpz (niter_type
, tmp
, false),
1209 TYPE_SIGN (niter_type
));
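/* Example: for a loop with exit test i < n and step 4, the computation
   above yields niter = (n - base + 3) / 4 in the unsigned variant of the
   type, with niter->may_be_zero guarding the case in which the loop does
   not roll at all.  (Sketch of the intent; names refer to the
   description above.)  */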
1216 /* Determines number of iterations of loop whose ending condition
1217 is IV0 <= IV1. TYPE is the type of the iv. The number of
1218 iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
1219 we know that this condition must eventually become false (we derived this
1220 earlier, and possibly set NITER->assumptions to make sure this
1221 is the case). BNDS bounds the difference IV1->base - IV0->base. */
1224 number_of_iterations_le (tree type
, affine_iv
*iv0
, affine_iv
*iv1
,
1225 struct tree_niter_desc
*niter
, bool exit_must_be_taken
,
1230 if (POINTER_TYPE_P (type
))
1233 /* Say that IV0 is the control variable. Then IV0 <= IV1 iff
1234 IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
1235 value of the type. This we must know anyway, since if it is
1236 equal to this value, the loop rolls forever. We do not check
1237 this condition for pointer type ivs, as the code cannot rely on
1238 the object to that the pointer points being placed at the end of
1239 the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
1240 not defined for pointers). */
1242 if (!exit_must_be_taken
&& !POINTER_TYPE_P (type
))
1244 if (integer_nonzerop (iv0
->step
))
1245 assumption
= fold_build2 (NE_EXPR
, boolean_type_node
,
1246 iv1
->base
, TYPE_MAX_VALUE (type
));
1248 assumption
= fold_build2 (NE_EXPR
, boolean_type_node
,
1249 iv0
->base
, TYPE_MIN_VALUE (type
));
1251 if (integer_zerop (assumption
))
1253 if (!integer_nonzerop (assumption
))
1254 niter
->assumptions
= fold_build2 (TRUTH_AND_EXPR
, boolean_type_node
,
1255 niter
->assumptions
, assumption
);
1258 if (integer_nonzerop (iv0
->step
))
1260 if (POINTER_TYPE_P (type
))
1261 iv1
->base
= fold_build_pointer_plus_hwi (iv1
->base
, 1);
1263 iv1
->base
= fold_build2 (PLUS_EXPR
, type1
, iv1
->base
,
1264 build_int_cst (type1
, 1));
1266 else if (POINTER_TYPE_P (type
))
1267 iv0
->base
= fold_build_pointer_plus_hwi (iv0
->base
, -1);
1269 iv0
->base
= fold_build2 (MINUS_EXPR
, type1
,
1270 iv0
->base
, build_int_cst (type1
, 1));
1272 bounds_add (bnds
, 1, type1
);
1274 return number_of_iterations_lt (type
, iv0
, iv1
, niter
, exit_must_be_taken
,
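/* Example: an exit test i <= n with a positive step is handled by
   rewriting it as i < n + 1 and recording the assumption n != TYPE_MAX,
   since for n == TYPE_MAX the loop would not terminate through this
   exit.  */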
/* Dumps description of affine induction variable IV to FILE.  */

static void
dump_affine_iv (FILE *file, affine_iv *iv)
{
  if (!integer_zerop (iv->step))
    fprintf (file, "[");

  print_generic_expr (dump_file, iv->base, TDF_SLIM);

  if (!integer_zerop (iv->step))
    {
      fprintf (file, ", + , ");
      print_generic_expr (dump_file, iv->step, TDF_SLIM);
      fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
    }
}
1296 /* Determine the number of iterations according to condition (for staying
1297 inside loop) which compares two induction variables using comparison
1298 operator CODE. The induction variable on left side of the comparison
1299 is IV0, the right-hand side is IV1. Both induction variables must have
1300 type TYPE, which must be an integer or pointer type. The steps of the
1301 ivs must be constants (or NULL_TREE, which is interpreted as constant zero).
1303 LOOP is the loop whose number of iterations we are determining.
1305 ONLY_EXIT is true if we are sure this is the only way the loop could be
1306 exited (including possibly non-returning function calls, exceptions, etc.)
1307 -- in this case we can use the information whether the control induction
1308 variables can overflow or not in a more efficient way.
1310 if EVERY_ITERATION is true, we know the test is executed on every iteration.
1312 The results (number of iterations and assumptions as described in
1313 comments at struct tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
1314 Returns false if it fails to determine number of iterations, true if it
1315 was determined (possibly with some assumptions). */
1318 number_of_iterations_cond (struct loop
*loop
,
1319 tree type
, affine_iv
*iv0
, enum tree_code code
,
1320 affine_iv
*iv1
, struct tree_niter_desc
*niter
,
1321 bool only_exit
, bool every_iteration
)
1323 bool exit_must_be_taken
= false, ret
;
1326 /* If the test is not executed every iteration, wrapping may make the test
1328 TODO: the overflow case can be still used as unreliable estimate of upper
1329 bound. But we have no API to pass it down to number of iterations code
1330 and, at present, it will not use it anyway. */
1331 if (!every_iteration
1332 && (!iv0
->no_overflow
|| !iv1
->no_overflow
1333 || code
== NE_EXPR
|| code
== EQ_EXPR
))
1336 /* The meaning of these assumptions is this:
1338 then the rest of information does not have to be valid
1339 if may_be_zero then the loop does not roll, even if
1341 niter
->assumptions
= boolean_true_node
;
1342 niter
->may_be_zero
= boolean_false_node
;
1343 niter
->niter
= NULL_TREE
;
1345 niter
->bound
= NULL_TREE
;
1346 niter
->cmp
= ERROR_MARK
;
1348 /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
1349 the control variable is on lhs. */
1350 if (code
== GE_EXPR
|| code
== GT_EXPR
1351 || (code
== NE_EXPR
&& integer_zerop (iv0
->step
)))
1354 code
= swap_tree_comparison (code
);
1357 if (POINTER_TYPE_P (type
))
1359 /* Comparison of pointers is undefined unless both iv0 and iv1 point
1360 to the same object. If they do, the control variable cannot wrap
1361 (as wrap around the bounds of memory will never return a pointer
1362 that would be guaranteed to point to the same object, even if we
1363 avoid undefined behavior by casting to size_t and back). */
1364 iv0
->no_overflow
= true;
1365 iv1
->no_overflow
= true;
1368 /* If the control induction variable does not overflow and the only exit
1369 from the loop is the one that we analyze, we know it must be taken
1373 if (!integer_zerop (iv0
->step
) && iv0
->no_overflow
)
1374 exit_must_be_taken
= true;
1375 else if (!integer_zerop (iv1
->step
) && iv1
->no_overflow
)
1376 exit_must_be_taken
= true;
1379 /* We can handle the case when neither of the sides of the comparison is
1380 invariant, provided that the test is NE_EXPR. This rarely occurs in
1381 practice, but it is simple enough to manage. */
1382 if (!integer_zerop (iv0
->step
) && !integer_zerop (iv1
->step
))
1384 tree step_type
= POINTER_TYPE_P (type
) ? sizetype
: type
;
1385 if (code
!= NE_EXPR
)
1388 iv0
->step
= fold_binary_to_constant (MINUS_EXPR
, step_type
,
1389 iv0
->step
, iv1
->step
);
1390 iv0
->no_overflow
= false;
1391 iv1
->step
= build_int_cst (step_type
, 0);
1392 iv1
->no_overflow
= true;
1395 /* If the result of the comparison is a constant, the loop is weird. More
1396 precise handling would be possible, but the situation is not common enough
1397 to waste time on it. */
1398 if (integer_zerop (iv0
->step
) && integer_zerop (iv1
->step
))
1401 /* Ignore loops of while (i-- < 10) type. */
1402 if (code
!= NE_EXPR
)
1404 if (iv0
->step
&& tree_int_cst_sign_bit (iv0
->step
))
1407 if (!integer_zerop (iv1
->step
) && !tree_int_cst_sign_bit (iv1
->step
))
1411 /* If the loop exits immediately, there is nothing to do. */
1412 tree tem
= fold_binary (code
, boolean_type_node
, iv0
->base
, iv1
->base
);
1413 if (tem
&& integer_zerop (tem
))
1415 niter
->niter
= build_int_cst (unsigned_type_for (type
), 0);
1420 /* OK, now we know we have a senseful loop. Handle several cases, depending
1421 on what comparison operator is used. */
1422 bound_difference (loop
, iv1
->base
, iv0
->base
, &bnds
);
1424 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1427 "Analyzing # of iterations of loop %d\n", loop
->num
);
1429 fprintf (dump_file
, " exit condition ");
1430 dump_affine_iv (dump_file
, iv0
);
1431 fprintf (dump_file
, " %s ",
1432 code
== NE_EXPR
? "!="
1433 : code
== LT_EXPR
? "<"
1435 dump_affine_iv (dump_file
, iv1
);
1436 fprintf (dump_file
, "\n");
1438 fprintf (dump_file
, " bounds on difference of bases: ");
1439 mpz_out_str (dump_file
, 10, bnds
.below
);
1440 fprintf (dump_file
, " ... ");
1441 mpz_out_str (dump_file
, 10, bnds
.up
);
1442 fprintf (dump_file
, "\n");
1448 gcc_assert (integer_zerop (iv1
->step
));
1449 ret
= number_of_iterations_ne (type
, iv0
, iv1
->base
, niter
,
1450 exit_must_be_taken
, &bnds
);
1454 ret
= number_of_iterations_lt (type
, iv0
, iv1
, niter
, exit_must_be_taken
,
1459 ret
= number_of_iterations_le (type
, iv0
, iv1
, niter
, exit_must_be_taken
,
1467 mpz_clear (bnds
.up
);
1468 mpz_clear (bnds
.below
);
1470 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1474 fprintf (dump_file
, " result:\n");
1475 if (!integer_nonzerop (niter
->assumptions
))
1477 fprintf (dump_file
, " under assumptions ");
1478 print_generic_expr (dump_file
, niter
->assumptions
, TDF_SLIM
);
1479 fprintf (dump_file
, "\n");
1482 if (!integer_zerop (niter
->may_be_zero
))
1484 fprintf (dump_file
, " zero if ");
1485 print_generic_expr (dump_file
, niter
->may_be_zero
, TDF_SLIM
);
1486 fprintf (dump_file
, "\n");
1489 fprintf (dump_file
, " # of iterations ");
1490 print_generic_expr (dump_file
, niter
->niter
, TDF_SLIM
);
1491 fprintf (dump_file
, ", bounded by ");
1492 print_decu (niter
->max
, dump_file
);
1493 fprintf (dump_file
, "\n");
1496 fprintf (dump_file
, " failed\n\n");
1501 /* Substitute NEW for OLD in EXPR and fold the result. */
1504 simplify_replace_tree (tree expr
, tree old
, tree new_tree
)
1507 tree ret
= NULL_TREE
, e
, se
;
1512 /* Do not bother to replace constants. */
1513 if (CONSTANT_CLASS_P (old
))
1517 || operand_equal_p (expr
, old
, 0))
1518 return unshare_expr (new_tree
);
1523 n
= TREE_OPERAND_LENGTH (expr
);
1524 for (i
= 0; i
< n
; i
++)
1526 e
= TREE_OPERAND (expr
, i
);
1527 se
= simplify_replace_tree (e
, old
, new_tree
);
1532 ret
= copy_node (expr
);
1534 TREE_OPERAND (ret
, i
) = se
;
1537 return (ret
? fold (ret
) : expr
);
1540 /* Expand definitions of ssa names in EXPR as long as they are simple
1541 enough, and return the new expression. */
1544 expand_simple_operations (tree expr
)
1547 tree ret
= NULL_TREE
, e
, ee
, e1
;
1548 enum tree_code code
;
1551 if (expr
== NULL_TREE
)
1554 if (is_gimple_min_invariant (expr
))
1557 code
= TREE_CODE (expr
);
1558 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code
)))
1560 n
= TREE_OPERAND_LENGTH (expr
);
1561 for (i
= 0; i
< n
; i
++)
1563 e
= TREE_OPERAND (expr
, i
);
1564 ee
= expand_simple_operations (e
);
1569 ret
= copy_node (expr
);
1571 TREE_OPERAND (ret
, i
) = ee
;
1577 fold_defer_overflow_warnings ();
1579 fold_undefer_and_ignore_overflow_warnings ();
1583 if (TREE_CODE (expr
) != SSA_NAME
)
1586 stmt
= SSA_NAME_DEF_STMT (expr
);
1587 if (gimple_code (stmt
) == GIMPLE_PHI
)
1589 basic_block src
, dest
;
1591 if (gimple_phi_num_args (stmt
) != 1)
1593 e
= PHI_ARG_DEF (stmt
, 0);
1595 /* Avoid propagating through loop exit phi nodes, which
1596 could break loop-closed SSA form restrictions. */
1597 dest
= gimple_bb (stmt
);
1598 src
= single_pred (dest
);
1599 if (TREE_CODE (e
) == SSA_NAME
1600 && src
->loop_father
!= dest
->loop_father
)
1603 return expand_simple_operations (e
);
1605 if (gimple_code (stmt
) != GIMPLE_ASSIGN
)
1608 /* Avoid expanding to expressions that contain SSA names that need
1609 to take part in abnormal coalescing. */
1611 FOR_EACH_SSA_TREE_OPERAND (e
, stmt
, iter
, SSA_OP_USE
)
1612 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e
))
1615 e
= gimple_assign_rhs1 (stmt
);
1616 code
= gimple_assign_rhs_code (stmt
);
1617 if (get_gimple_rhs_class (code
) == GIMPLE_SINGLE_RHS
)
1619 if (is_gimple_min_invariant (e
))
1622 if (code
== SSA_NAME
)
1623 return expand_simple_operations (e
);
1631 /* Casts are simple. */
1632 ee
= expand_simple_operations (e
);
1633 return fold_build1 (code
, TREE_TYPE (expr
), ee
);
1637 case POINTER_PLUS_EXPR
:
1638 /* And increments and decrements by a constant are simple. */
1639 e1
= gimple_assign_rhs2 (stmt
);
1640 if (!is_gimple_min_invariant (e1
))
1643 ee
= expand_simple_operations (e
);
1644 return fold_build2 (code
, TREE_TYPE (expr
), ee
, e1
);
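/* For example, given the definitions

     b_2 = a_1 + 4;
     c_3 = (int) b_2;

   expanding c_3 yields (int) (a_1 + 4), which lets callers such as
   tree_simplify_using_condition look through the copy, the cast and the
   constant increment.  (The SSA names are made up for illustration.)  */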
1651 /* Tries to simplify EXPR using the condition COND. Returns the simplified
1652 expression (or EXPR unchanged, if no simplification was possible). */
1655 tree_simplify_using_condition_1 (tree cond
, tree expr
)
1658 tree e
, te
, e0
, e1
, e2
, notcond
;
1659 enum tree_code code
= TREE_CODE (expr
);
1661 if (code
== INTEGER_CST
)
1664 if (code
== TRUTH_OR_EXPR
1665 || code
== TRUTH_AND_EXPR
1666 || code
== COND_EXPR
)
1670 e0
= tree_simplify_using_condition_1 (cond
, TREE_OPERAND (expr
, 0));
1671 if (TREE_OPERAND (expr
, 0) != e0
)
1674 e1
= tree_simplify_using_condition_1 (cond
, TREE_OPERAND (expr
, 1));
1675 if (TREE_OPERAND (expr
, 1) != e1
)
1678 if (code
== COND_EXPR
)
1680 e2
= tree_simplify_using_condition_1 (cond
, TREE_OPERAND (expr
, 2));
1681 if (TREE_OPERAND (expr
, 2) != e2
)
1689 if (code
== COND_EXPR
)
1690 expr
= fold_build3 (code
, boolean_type_node
, e0
, e1
, e2
);
1692 expr
= fold_build2 (code
, boolean_type_node
, e0
, e1
);
1698 /* In case COND is equality, we may be able to simplify EXPR by copy/constant
1699 propagation, and vice versa. Fold does not handle this, since it is
1700 considered too expensive. */
1701 if (TREE_CODE (cond
) == EQ_EXPR
)
1703 e0
= TREE_OPERAND (cond
, 0);
1704 e1
= TREE_OPERAND (cond
, 1);
1706 /* We know that e0 == e1. Check whether we cannot simplify expr
1708 e
= simplify_replace_tree (expr
, e0
, e1
);
1709 if (integer_zerop (e
) || integer_nonzerop (e
))
1712 e
= simplify_replace_tree (expr
, e1
, e0
);
1713 if (integer_zerop (e
) || integer_nonzerop (e
))
1716 if (TREE_CODE (expr
) == EQ_EXPR
)
1718 e0
= TREE_OPERAND (expr
, 0);
1719 e1
= TREE_OPERAND (expr
, 1);
1721 /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
1722 e
= simplify_replace_tree (cond
, e0
, e1
);
1723 if (integer_zerop (e
))
1725 e
= simplify_replace_tree (cond
, e1
, e0
);
1726 if (integer_zerop (e
))
1729 if (TREE_CODE (expr
) == NE_EXPR
)
1731 e0
= TREE_OPERAND (expr
, 0);
1732 e1
= TREE_OPERAND (expr
, 1);
1734 /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
1735 e
= simplify_replace_tree (cond
, e0
, e1
);
1736 if (integer_zerop (e
))
1737 return boolean_true_node
;
1738 e
= simplify_replace_tree (cond
, e1
, e0
);
1739 if (integer_zerop (e
))
1740 return boolean_true_node
;
1743 te
= expand_simple_operations (expr
);
1745 /* Check whether COND ==> EXPR. */
1746 notcond
= invert_truthvalue (cond
);
1747 e
= fold_binary (TRUTH_OR_EXPR
, boolean_type_node
, notcond
, te
);
1748 if (e
&& integer_nonzerop (e
))
1751 /* Check whether COND ==> not EXPR. */
1752 e
= fold_binary (TRUTH_AND_EXPR
, boolean_type_node
, cond
, te
);
1753 if (e
&& integer_zerop (e
))
/* Tries to simplify EXPR using the condition COND.  Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible).
   Wrapper around tree_simplify_using_condition_1 that ensures that chains
   of simple operations in definitions of ssa names in COND are expanded,
   so that things like casts or incrementing the value of the bound before
   the loop do not cause us to fail.  */

static tree
tree_simplify_using_condition (tree cond, tree expr)
{
  cond = expand_simple_operations (cond);

  return tree_simplify_using_condition_1 (cond, expr);
}
1774 /* Tries to simplify EXPR using the conditions on entry to LOOP.
1775 Returns the simplified expression (or EXPR unchanged, if no
1776 simplification was possible).*/
1779 simplify_using_initial_conditions (struct loop
*loop
, tree expr
)
1787 if (TREE_CODE (expr
) == INTEGER_CST
)
1790 /* Limit walking the dominators to avoid quadraticness in
1791 the number of BBs times the number of loops in degenerate
1793 for (bb
= loop
->header
;
1794 bb
!= ENTRY_BLOCK_PTR_FOR_FN (cfun
) && cnt
< MAX_DOMINATORS_TO_WALK
;
1795 bb
= get_immediate_dominator (CDI_DOMINATORS
, bb
))
1797 if (!single_pred_p (bb
))
1799 e
= single_pred_edge (bb
);
1801 if (!(e
->flags
& (EDGE_TRUE_VALUE
| EDGE_FALSE_VALUE
)))
1804 stmt
= last_stmt (e
->src
);
1805 cond
= fold_build2 (gimple_cond_code (stmt
),
1807 gimple_cond_lhs (stmt
),
1808 gimple_cond_rhs (stmt
));
1809 if (e
->flags
& EDGE_FALSE_VALUE
)
1810 cond
= invert_truthvalue (cond
);
1811 expr
= tree_simplify_using_condition (cond
, expr
);
1818 /* Tries to simplify EXPR using the evolutions of the loop invariants
1819 in the superloops of LOOP. Returns the simplified expression
1820 (or EXPR unchanged, if no simplification was possible). */
1823 simplify_using_outer_evolutions (struct loop
*loop
, tree expr
)
1825 enum tree_code code
= TREE_CODE (expr
);
1829 if (is_gimple_min_invariant (expr
))
1832 if (code
== TRUTH_OR_EXPR
1833 || code
== TRUTH_AND_EXPR
1834 || code
== COND_EXPR
)
1838 e0
= simplify_using_outer_evolutions (loop
, TREE_OPERAND (expr
, 0));
1839 if (TREE_OPERAND (expr
, 0) != e0
)
1842 e1
= simplify_using_outer_evolutions (loop
, TREE_OPERAND (expr
, 1));
1843 if (TREE_OPERAND (expr
, 1) != e1
)
1846 if (code
== COND_EXPR
)
1848 e2
= simplify_using_outer_evolutions (loop
, TREE_OPERAND (expr
, 2));
1849 if (TREE_OPERAND (expr
, 2) != e2
)
1857 if (code
== COND_EXPR
)
1858 expr
= fold_build3 (code
, boolean_type_node
, e0
, e1
, e2
);
1860 expr
= fold_build2 (code
, boolean_type_node
, e0
, e1
);
1866 e
= instantiate_parameters (loop
, expr
);
1867 if (is_gimple_min_invariant (e
))
/* Returns true if EXIT is the only possible exit from LOOP.  */

static bool
loop_only_exit_p (const struct loop *loop, const_edge exit)
{
  basic_block *body;
  gimple_stmt_iterator bsi;
  unsigned i;
  gimple call;

  if (exit != single_exit (loop))
    return false;

  body = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          call = gsi_stmt (bsi);
          if (gimple_code (call) != GIMPLE_CALL)
            continue;

          if (gimple_has_side_effects (call))
            {
              free (body);
              return false;
            }
        }
    }

  free (body);
  return true;
}
1907 /* Stores description of number of iterations of LOOP derived from
1908 EXIT (an exit edge of the LOOP) in NITER. Returns true if some
1909 useful information could be derived (and fields of NITER has
1910 meaning described in comments at struct tree_niter_desc
1911 declaration), false otherwise. If WARN is true and
1912 -Wunsafe-loop-optimizations was given, warn if the optimizer is going to use
1913 potentially unsafe assumptions.
1914 When EVERY_ITERATION is true, only tests that are known to be executed
1915 every iteration are considered (i.e. only test that alone bounds the loop).
1919 number_of_iterations_exit (struct loop
*loop
, edge exit
,
1920 struct tree_niter_desc
*niter
,
1921 bool warn
, bool every_iteration
)
1926 enum tree_code code
;
1930 safe
= dominated_by_p (CDI_DOMINATORS
, loop
->latch
, exit
->src
);
1932 if (every_iteration
&& !safe
)
1935 niter
->assumptions
= boolean_false_node
;
1936 stmt
= last_stmt (exit
->src
);
1937 if (!stmt
|| gimple_code (stmt
) != GIMPLE_COND
)
1940 /* We want the condition for staying inside loop. */
1941 code
= gimple_cond_code (stmt
);
1942 if (exit
->flags
& EDGE_TRUE_VALUE
)
1943 code
= invert_tree_comparison (code
, false);
1958 op0
= gimple_cond_lhs (stmt
);
1959 op1
= gimple_cond_rhs (stmt
);
1960 type
= TREE_TYPE (op0
);
1962 if (TREE_CODE (type
) != INTEGER_TYPE
1963 && !POINTER_TYPE_P (type
))
1966 if (!simple_iv (loop
, loop_containing_stmt (stmt
), op0
, &iv0
, false))
1968 if (!simple_iv (loop
, loop_containing_stmt (stmt
), op1
, &iv1
, false))
1971 /* We don't want to see undefined signed overflow warnings while
1972 computing the number of iterations. */
1973 fold_defer_overflow_warnings ();
1975 iv0
.base
= expand_simple_operations (iv0
.base
);
1976 iv1
.base
= expand_simple_operations (iv1
.base
);
1977 if (!number_of_iterations_cond (loop
, type
, &iv0
, code
, &iv1
, niter
,
1978 loop_only_exit_p (loop
, exit
), safe
))
1980 fold_undefer_and_ignore_overflow_warnings ();
1986 niter
->assumptions
= simplify_using_outer_evolutions (loop
,
1987 niter
->assumptions
);
1988 niter
->may_be_zero
= simplify_using_outer_evolutions (loop
,
1989 niter
->may_be_zero
);
1990 niter
->niter
= simplify_using_outer_evolutions (loop
, niter
->niter
);
1994 = simplify_using_initial_conditions (loop
,
1995 niter
->assumptions
);
1997 = simplify_using_initial_conditions (loop
,
1998 niter
->may_be_zero
);
2000 fold_undefer_and_ignore_overflow_warnings ();
2002 /* If NITER has simplified into a constant, update MAX. */
2003 if (TREE_CODE (niter
->niter
) == INTEGER_CST
)
2004 niter
->max
= wi::to_widest (niter
->niter
);
2006 if (integer_onep (niter
->assumptions
))
2009 /* With -funsafe-loop-optimizations we assume that nothing bad can happen.
2010 But if we can prove that there is overflow or some other source of weird
2011 behavior, ignore the loop even with -funsafe-loop-optimizations. */
2012 if (integer_zerop (niter
->assumptions
) || !single_exit (loop
))
2015 if (flag_unsafe_loop_optimizations
)
2016 niter
->assumptions
= boolean_true_node
;
2020 const char *wording
;
2021 location_t loc
= gimple_location (stmt
);
2023 /* We can provide a more specific warning if one of the operator is
2024 constant and the other advances by +1 or -1. */
2025 if (!integer_zerop (iv1
.step
)
2026 ? (integer_zerop (iv0
.step
)
2027 && (integer_onep (iv1
.step
) || integer_all_onesp (iv1
.step
)))
2028 : (integer_onep (iv0
.step
) || integer_all_onesp (iv0
.step
)))
2030 flag_unsafe_loop_optimizations
2031 ? N_("assuming that the loop is not infinite")
2032 : N_("cannot optimize possibly infinite loops");
2035 flag_unsafe_loop_optimizations
2036 ? N_("assuming that the loop counter does not overflow")
2037 : N_("cannot optimize loop, the loop counter may overflow");
2039 warning_at ((LOCATION_LINE (loc
) > 0) ? loc
: input_location
,
2040 OPT_Wunsafe_loop_optimizations
, "%s", gettext (wording
));
2043 return flag_unsafe_loop_optimizations
;
2046 /* Try to determine the number of iterations of LOOP. If we succeed,
2047 expression giving number of iterations is returned and *EXIT is
2048 set to the edge from that the information is obtained. Otherwise
2049 chrec_dont_know is returned. */
2052 find_loop_niter (struct loop
*loop
, edge
*exit
)
2055 vec
<edge
> exits
= get_loop_exit_edges (loop
);
2057 tree niter
= NULL_TREE
, aniter
;
2058 struct tree_niter_desc desc
;
2061 FOR_EACH_VEC_ELT (exits
, i
, ex
)
2063 if (!number_of_iterations_exit (loop
, ex
, &desc
, false))
2066 if (integer_nonzerop (desc
.may_be_zero
))
2068 /* We exit in the first iteration through this exit.
2069 We won't find anything better. */
2070 niter
= build_int_cst (unsigned_type_node
, 0);
2075 if (!integer_zerop (desc
.may_be_zero
))
2078 aniter
= desc
.niter
;
2082 /* Nothing recorded yet. */
2088 /* Prefer constants, the lower the better. */
2089 if (TREE_CODE (aniter
) != INTEGER_CST
)
2092 if (TREE_CODE (niter
) != INTEGER_CST
)
2099 if (tree_int_cst_lt (aniter
, niter
))
2108 return niter
? niter
: chrec_dont_know
;
2111 /* Return true if loop is known to have bounded number of iterations. */
2114 finite_loop_p (struct loop
*loop
)
2119 if (flag_unsafe_loop_optimizations
)
2121 flags
= flags_from_decl_or_type (current_function_decl
);
2122 if ((flags
& (ECF_CONST
|ECF_PURE
)) && !(flags
& ECF_LOOPING_CONST_OR_PURE
))
2124 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2125 fprintf (dump_file
, "Found loop %i to be finite: it is within pure or const function.\n",
2130 if (loop
->any_upper_bound
2131 || max_loop_iterations (loop
, &nit
))
2133 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2134 fprintf (dump_file
, "Found loop %i to be finite: upper bound found.\n",
2143 Analysis of a number of iterations of a loop by a brute-force evaluation.
2147 /* Bound on the number of iterations we try to evaluate. */
2149 #define MAX_ITERATIONS_TO_TRACK \
2150 ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
2152 /* Returns the loop phi node of LOOP such that ssa name X is derived from its
2153 result by a chain of operations such that all but exactly one of their
2154 operands are constants. */
static gimple
chain_of_csts_start (struct loop *loop, tree x)
{
  gimple stmt = SSA_NAME_DEF_STMT (x);
  tree use;
  basic_block bb = gimple_bb (stmt);
  enum tree_code code;

  if (!bb
      || !flow_bb_inside_loop_p (loop, bb))
    return NULL;

  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      if (bb == loop->header)
        return stmt;

      return NULL;
    }

  if (gimple_code (stmt) != GIMPLE_ASSIGN
      || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_references_memory_p (stmt)
      || TREE_CODE_CLASS (code) == tcc_reference
      || (code == ADDR_EXPR
          && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
    return NULL;

  use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
  if (use == NULL_TREE)
    return NULL;

  return chain_of_csts_start (loop, use);
}
/* Determines whether the expression X is derived from a result of a phi node
   in header of LOOP such that

   * the derivation of X consists only from operations with constants
   * the initial value of the phi node is constant
   * the value of the phi node in the next iteration can be derived from the
     value in the current iteration by a chain of operations with constants.

   If such phi node exists, it is returned, otherwise NULL is returned.  */
static gimple
get_base_for (struct loop *loop, tree x)
{
  gimple phi;
  tree init, next;

  if (is_gimple_min_invariant (x))
    return NULL;

  phi = chain_of_csts_start (loop, x);
  if (!phi)
    return NULL;

  init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
  next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));

  if (TREE_CODE (next) != SSA_NAME)
    return NULL;

  if (!is_gimple_min_invariant (init))
    return NULL;

  if (chain_of_csts_start (loop, next) != phi)
    return NULL;

  return phi;
}
/* Given an expression X, then

   * if X is NULL_TREE, we return the constant BASE.
   * otherwise X is a SSA name, whose value in the considered loop is derived
     by a chain of operations with constant from a result of a phi node in
     the header of the loop.  Then we return value of X when the value of the
     result of this phi node is given by the constant BASE.  */
static tree
get_val_for (tree x, tree base)
{
  gimple stmt;

  gcc_checking_assert (is_gimple_min_invariant (base));

  if (!x)
    return base;

  stmt = SSA_NAME_DEF_STMT (x);
  if (gimple_code (stmt) == GIMPLE_PHI)
    return base;

  gcc_checking_assert (is_gimple_assign (stmt));

  /* STMT must be either an assignment of a single SSA name or an
     expression involving an SSA name and a constant.  Try to fold that
     expression using the value for the SSA name.  */
  if (gimple_assign_ssa_name_copy_p (stmt))
    return get_val_for (gimple_assign_rhs1 (stmt), base);
  else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
           && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
    return fold_build1 (gimple_assign_rhs_code (stmt),
                        gimple_expr_type (stmt),
                        get_val_for (gimple_assign_rhs1 (stmt), base));
  else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
    {
      tree rhs1 = gimple_assign_rhs1 (stmt);
      tree rhs2 = gimple_assign_rhs2 (stmt);
      if (TREE_CODE (rhs1) == SSA_NAME)
        rhs1 = get_val_for (rhs1, base);
      else if (TREE_CODE (rhs2) == SSA_NAME)
        rhs2 = get_val_for (rhs2, base);
      else
        gcc_unreachable ();
      return fold_build2 (gimple_assign_rhs_code (stmt),
                          gimple_expr_type (stmt), rhs1, rhs2);
    }
  else
    gcc_unreachable ();
}
/* Tries to count the number of iterations of LOOP till it exits by EXIT
   by brute force -- i.e. by determining the value of the operands of the
   condition at EXIT in first few iterations of the loop (assuming that
   these values are constant) and determining the first one in which the
   condition is not satisfied.  Returns the constant giving the number
   of the iterations of LOOP if successful, chrec_dont_know otherwise.  */
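/* Illustrative example (not part of the original source): for

     unsigned i = 64;
     do
       i /= 2;			(the other operand, 2, is a constant)
     while (i != 1);

   the values of i in successive iterations can be recomputed directly from
   the constant initial value, so evaluating the exit condition for the first
   few iterations (at most MAX_ITERATIONS_TO_TRACK of them) is enough to
   determine the iteration count.  */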
tree
loop_niter_by_eval (struct loop *loop, edge exit)
{
  tree acnd;
  tree op[2], val[2], next[2], aval[2];
  gimple phi, cond;
  unsigned i, j;
  enum tree_code cmp;

  cond = last_stmt (exit->src);
  if (!cond || gimple_code (cond) != GIMPLE_COND)
    return chrec_dont_know;

  cmp = gimple_cond_code (cond);
  if (exit->flags & EDGE_TRUE_VALUE)
    cmp = invert_tree_comparison (cmp, false);

  switch (cmp)
    {
    case EQ_EXPR:
    case NE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
      op[0] = gimple_cond_lhs (cond);
      op[1] = gimple_cond_rhs (cond);
      break;

    default:
      return chrec_dont_know;
    }

  for (j = 0; j < 2; j++)
    {
      if (is_gimple_min_invariant (op[j]))
        {
          val[j] = op[j];
          next[j] = NULL_TREE;
          op[j] = NULL_TREE;
        }
      else
        {
          phi = get_base_for (loop, op[j]);
          if (!phi)
            return chrec_dont_know;
          val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
          next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
        }
    }

  /* Don't issue signed overflow warnings.  */
  fold_defer_overflow_warnings ();

  for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
    {
      for (j = 0; j < 2; j++)
        aval[j] = get_val_for (op[j], val[j]);

      acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
      if (acnd && integer_zerop (acnd))
        {
          fold_undefer_and_ignore_overflow_warnings ();
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file,
                     "Proved that loop %d iterates %d times using brute force.\n",
                     loop->num, i);
          return build_int_cst (unsigned_type_node, i);
        }

      for (j = 0; j < 2; j++)
        {
          val[j] = get_val_for (next[j], val[j]);
          if (!is_gimple_min_invariant (val[j]))
            {
              fold_undefer_and_ignore_overflow_warnings ();
              return chrec_dont_know;
            }
        }
    }

  fold_undefer_and_ignore_overflow_warnings ();

  return chrec_dont_know;
}
/* Finds the exit of the LOOP through which the loop exits after a constant
   number of iterations and stores the exit edge to *EXIT.  The constant
   giving the number of iterations of LOOP is returned.  The number of
   iterations is determined using loop_niter_by_eval (i.e. by brute force
   evaluation).  If we are unable to find the exit for which
   loop_niter_by_eval determines the number of iterations, chrec_dont_know
   is returned.  */
static tree
find_loop_niter_by_eval (struct loop *loop, edge *exit)
{
  unsigned i;
  vec<edge> exits = get_loop_exit_edges (loop);
  edge ex;
  tree niter = NULL_TREE, aniter;

  *exit = NULL;

  /* Loops with multiple exits are expensive to handle and less important.  */
  if (!flag_expensive_optimizations
      && exits.length () > 1)
    {
      exits.release ();
      return chrec_dont_know;
    }

  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (!just_once_each_iteration_p (loop, ex->src))
        continue;

      aniter = loop_niter_by_eval (loop, ex);
      if (chrec_contains_undetermined (aniter))
        continue;

      if (niter
          && !tree_int_cst_lt (aniter, niter))
        continue;

      niter = aniter;
      *exit = ex;
    }
  exits.release ();

  return niter ? niter : chrec_dont_know;
}
/*

   Analysis of upper bounds on number of iterations of a loop.

*/

static widest_int derive_constant_upper_bound_ops (tree, tree,
                                                   enum tree_code, tree);
/* Returns a constant upper bound on the value of the right-hand side of
   an assignment statement STMT.  */

static widest_int
derive_constant_upper_bound_assign (gimple stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);

  return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
                                          op0, code, op1);
}
/* Returns a constant upper bound on the value of expression VAL.  VAL
   is considered to be unsigned.  If its type is signed, its value must
   be nonnegative.  */

static widest_int
derive_constant_upper_bound (tree val)
{
  enum tree_code code;
  tree op0, op1;

  extract_ops_from_tree (val, &code, &op0, &op1);
  return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
}
/* Returns a constant upper bound on the value of expression OP0 CODE OP1,
   whose type is TYPE.  The expression is considered to be unsigned.  If
   its type is signed, its value must be nonnegative.  */
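/* Illustrative example (not part of the original source): for an expression
   x / 4 where x has type unsigned char, the maximum value of the type gives
   x <= 255, and dividing by the positive constant 4 yields the bound
   255 / 4 = 63 for the whole expression.  */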
static widest_int
derive_constant_upper_bound_ops (tree type, tree op0,
                                 enum tree_code code, tree op1)
{
  tree subtype, maxt;
  widest_int bnd, max, mmax, cst;
  gimple stmt;

  if (INTEGRAL_TYPE_P (type))
    maxt = TYPE_MAX_VALUE (type);
  else
    maxt = upper_bound_in_type (type, type);

  max = wi::to_widest (maxt);

  switch (code)
    {
    case INTEGER_CST:
      return wi::to_widest (op0);

    CASE_CONVERT:
      subtype = TREE_TYPE (op0);
      if (!TYPE_UNSIGNED (subtype)
          /* If TYPE is also signed, the fact that VAL is nonnegative implies
             that OP0 is nonnegative.  */
          && TYPE_UNSIGNED (type)
          && !tree_expr_nonnegative_p (op0))
        {
          /* If we cannot prove that the casted expression is nonnegative,
             we cannot establish more useful upper bound than the precision
             of the type gives us.  */
          return max;
        }

      /* We now know that op0 is an nonnegative value.  Try deriving an upper
         bound for it.  */
      bnd = derive_constant_upper_bound (op0);

      /* If the bound does not fit in TYPE, max. value of TYPE could be
         attained.  */
      if (wi::ltu_p (max, bnd))
        return max;

      return bnd;

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
    case MINUS_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST
          || !tree_expr_nonnegative_p (op0))
        return max;

      /* Canonicalize to OP0 - CST.  Consider CST to be signed, in order to
         choose the most logical way how to treat this constant regardless
         of the signedness of the type.  */
      cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
      if (code != MINUS_EXPR)
        cst = -cst;

      bnd = derive_constant_upper_bound (op0);

      if (wi::neg_p (cst))
        {
          cst = -cst;
          /* Avoid CST == 0x80000...  */
          if (wi::neg_p (cst))
            return max;
          /* OP0 + CST.  We need to check that
             BND <= MAX (type) - CST.  */

          mmax = max - cst;
          if (wi::ltu_p (bnd, mmax))
            return bnd + cst;

          return max;
        }
      else
        {
          /* OP0 - CST, where CST >= 0.

             If TYPE is signed, we have already verified that OP0 >= 0, and we
             know that the result is nonnegative.  This implies that
             VAL <= BND - CST.

             If TYPE is unsigned, we must additionally know that OP0 >= CST,
             otherwise the operation underflows.
           */

          /* This should only happen if the type is unsigned; however, for
             buggy programs that use overflowing signed arithmetics even with
             -fno-wrapv, this condition may also be true for signed values.  */
          if (wi::ltu_p (bnd, cst))
            return max;

          if (TYPE_UNSIGNED (type))
            {
              tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
                                      wide_int_to_tree (type, cst));
              if (!tem || integer_nonzerop (tem))
                return max;
            }

          bnd -= cst;
        }

      return bnd;

    case FLOOR_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST
          || tree_int_cst_sign_bit (op1))
        return max;

      bnd = derive_constant_upper_bound (op0);
      return wi::udiv_floor (bnd, wi::to_widest (op1));

    case BIT_AND_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST
          || tree_int_cst_sign_bit (op1))
        return max;
      return wi::to_widest (op1);

    case SSA_NAME:
      stmt = SSA_NAME_DEF_STMT (op0);
      if (gimple_code (stmt) != GIMPLE_ASSIGN
          || gimple_assign_lhs (stmt) != op0)
        return max;
      return derive_constant_upper_bound_assign (stmt);

    default:
      return max;
    }
}
/* Emit a -Waggressive-loop-optimizations warning if needed.  */
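/* Illustrative example (not part of the original source):

     int a[4];
     for (int i = 0; i <= 4; i++)
       a[i] = i;

   The access a[4] in the final iteration invokes undefined behavior, which
   yields a smaller iteration bound than the constant trip count implied by
   the exit test; that is the situation the warning below reports.  */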
static void
do_warn_aggressive_loop_optimizations (struct loop *loop,
                                       widest_int i_bound, gimple stmt)
{
  /* Don't warn if the loop doesn't have known constant bound.  */
  if (!loop->nb_iterations
      || TREE_CODE (loop->nb_iterations) != INTEGER_CST
      || !warn_aggressive_loop_optimizations
      /* To avoid warning multiple times for the same loop,
         only start warning when we preserve loops.  */
      || (cfun->curr_properties & PROP_loops) == 0
      /* Only warn once per loop.  */
      || loop->warned_aggressive_loop_optimizations
      /* Only warn if undefined behavior gives us lower estimate than the
         known constant bound.  */
      || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
      /* And undefined behavior happens unconditionally.  */
      || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
    return;

  edge e = single_exit (loop);
  if (e == NULL)
    return;

  gimple estmt = last_stmt (e->src);
  if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
                  "iteration %E invokes undefined behavior",
                  wide_int_to_tree (TREE_TYPE (loop->nb_iterations),
                                    i_bound)))
    inform (gimple_location (estmt), "containing loop");
  loop->warned_aggressive_loop_optimizations = true;
}
/* Records that AT_STMT is executed at most BOUND + 1 times in LOOP.  IS_EXIT
   is true if the loop is exited immediately after STMT, and this exit
   is taken at last when the STMT is executed BOUND + 1 times.
   REALISTIC is true if BOUND is expected to be close to the real number
   of iterations.  UPPER is true if we are sure the loop iterates at most
   BOUND times.  I_BOUND is a widest_int upper estimate on BOUND.  */
static void
record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
                 gimple at_stmt, bool is_exit, bool realistic, bool upper)
{
  widest_int delta;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
      print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
      fprintf (dump_file, " is %sexecuted at most ",
               upper ? "" : "probably ");
      print_generic_expr (dump_file, bound, TDF_SLIM);
      fprintf (dump_file, " (bounded by ");
      print_decu (i_bound, dump_file);
      fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
    }

  /* If the I_BOUND is just an estimate of BOUND, it rarely is close to the
     real number of iterations.  */
  if (TREE_CODE (bound) != INTEGER_CST)
    realistic = false;
  else
    gcc_checking_assert (i_bound == wi::to_widest (bound));
  if (!upper && !realistic)
    return;

  /* If we have a guaranteed upper bound, record it in the appropriate
     list, unless this is an !is_exit bound (i.e. undefined behavior in
     at_stmt) in a loop with known constant number of iterations.  */
  if (upper
      && (is_exit
          || loop->nb_iterations == NULL_TREE
          || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
    {
      struct nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();

      elt->bound = i_bound;
      elt->stmt = at_stmt;
      elt->is_exit = is_exit;
      elt->next = loop->bounds;
      loop->bounds = elt;
    }

  /* If statement is executed on every path to the loop latch, we can directly
     infer the upper bound on the # of iterations of the loop.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
    return;

  /* Update the number of iteration estimates according to the bound.
     If at_stmt is an exit then the loop latch is executed at most BOUND times,
     otherwise it can be executed BOUND + 1 times.  We will lower the estimate
     later if such statement must be executed on last iteration  */
  if (is_exit)
    delta = 0;
  else
    delta = 1;
  widest_int new_i_bound = i_bound + delta;

  /* If an overflow occurred, ignore the result.  */
  if (wi::ltu_p (new_i_bound, delta))
    return;

  if (upper && !is_exit)
    do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
  record_niter_bound (loop, new_i_bound, realistic, upper);
}
/* Record the estimate on number of iterations of LOOP based on the fact that
   the induction variable BASE + STEP * i evaluated in STMT does not wrap and
   its values belong to the range <LOW, HIGH>.  REALISTIC is true if the
   estimated number of iterations is expected to be close to the real one.
   UPPER is true if we are sure the induction variable does not wrap.  */
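/* Illustrative example (not part of the original source): for an induction
   variable 10 + 4 * i evaluated in STMT whose value is known to stay in the
   range <0, 99>, we get NITER_BOUND = (99 - 10) / 4 = 22, i.e. STMT can be
   executed at most 22 + 1 = 23 times before the value would leave the
   range.  */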
static void
record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt,
                       tree low, tree high, bool realistic, bool upper)
{
  tree niter_bound, extreme, delta;
  tree type = TREE_TYPE (base), unsigned_type;

  if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Induction variable (");
      print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
      fprintf (dump_file, ") ");
      print_generic_expr (dump_file, base, TDF_SLIM);
      fprintf (dump_file, " + ");
      print_generic_expr (dump_file, step, TDF_SLIM);
      fprintf (dump_file, " * iteration does not wrap in statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
      fprintf (dump_file, " in loop %d.\n", loop->num);
    }

  unsigned_type = unsigned_type_for (type);
  base = fold_convert (unsigned_type, base);
  step = fold_convert (unsigned_type, step);

  if (tree_int_cst_sign_bit (step))
    {
      extreme = fold_convert (unsigned_type, low);
      if (TREE_CODE (base) != INTEGER_CST)
        base = fold_convert (unsigned_type, high);
      delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
      step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
    }
  else
    {
      extreme = fold_convert (unsigned_type, high);
      if (TREE_CODE (base) != INTEGER_CST)
        base = fold_convert (unsigned_type, low);
      delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
    }

  /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
     would get out of the range.  */
  niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
  widest_int max = derive_constant_upper_bound (niter_bound);
  record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
}
/* Determine information about number of iterations a LOOP from the index
   IDX of a data reference accessed in STMT.  RELIABLE is true if STMT is
   guaranteed to be executed in every iteration of LOOP.  Callback for
   for_each_index.  */

struct ilb_data
{
  struct loop *loop;
  gimple stmt;
};
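/* Illustrative example (not part of the original source): in

     double a[100];
     for (i = 0; i != n; i++)
       a[i] = 0.0;

   the reference a[i] is only valid for i in <0, 99>, so the store can execute
   at most 100 times, which bounds the number of iterations of the loop even
   though nothing is known about n.  */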
static bool
idx_infer_loop_bounds (tree base, tree *idx, void *dta)
{
  struct ilb_data *data = (struct ilb_data *) dta;
  tree ev, init, step;
  tree low, high, type, next;
  bool sign, upper = true, at_end = false;
  struct loop *loop = data->loop;
  bool reliable = true;

  if (TREE_CODE (base) != ARRAY_REF)
    return true;

  /* For arrays at the end of the structure, we are not guaranteed that they
     do not really extend over their declared size.  However, for arrays of
     size greater than one, this is unlikely to be intended.  */
  if (array_at_struct_end_p (base))
    {
      at_end = true;
      upper = false;
    }

  struct loop *dloop = loop_containing_stmt (data->stmt);
  if (!dloop)
    return true;

  ev = analyze_scalar_evolution (dloop, *idx);
  ev = instantiate_parameters (loop, ev);
  init = initial_condition (ev);
  step = evolution_part_in_loop_num (ev, loop->num);

  if (!init
      || !step
      || TREE_CODE (step) != INTEGER_CST
      || integer_zerop (step)
      || tree_contains_chrecs (init, NULL)
      || chrec_contains_symbols_defined_in_loop (init, loop->num))
    return true;

  low = array_ref_low_bound (base);
  high = array_ref_up_bound (base);

  /* The case of nonconstant bounds could be handled, but it would be
     complicated.  */
  if (TREE_CODE (low) != INTEGER_CST
      || !high
      || TREE_CODE (high) != INTEGER_CST)
    return true;
  sign = tree_int_cst_sign_bit (step);
  type = TREE_TYPE (step);

  /* The array of length 1 at the end of a structure most likely extends
     beyond its bounds.  */
  if (at_end
      && operand_equal_p (low, high, 0))
    return true;

  /* In case the relevant bound of the array does not fit in type, or
     it does, but bound + step (in type) still belongs into the range of the
     array, the index may wrap and still stay within the range of the array
     (consider e.g. if the array is indexed by the full range of
     unsigned char).

     To make things simpler, we require both bounds to fit into type, although
     there are cases where this would not be strictly necessary.  */
  if (!int_fits_type_p (high, type)
      || !int_fits_type_p (low, type))
    return true;
  low = fold_convert (type, low);
  high = fold_convert (type, high);

  if (sign)
    next = fold_binary (PLUS_EXPR, type, low, step);
  else
    next = fold_binary (PLUS_EXPR, type, high, step);

  if (tree_int_cst_compare (low, next) <= 0
      && tree_int_cst_compare (next, high) <= 0)
    return true;

  /* If access is not executed on every iteration, we must ensure that overflow
     may not make the access valid later.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
      && scev_probably_wraps_p (initial_condition_in_loop_num (ev, loop->num),
                                step, data->stmt, loop, true))
    reliable = false;

  record_nonwrapping_iv (loop, init, step, data->stmt, low, high, reliable, upper);
  return true;
}
/* Determine information about number of iterations a LOOP from the bounds
   of arrays in the data reference REF accessed in STMT.  RELIABLE is true if
   STMT is guaranteed to be executed in every iteration of LOOP.  */
static void
infer_loop_bounds_from_ref (struct loop *loop, gimple stmt, tree ref)
{
  struct ilb_data data;

  data.loop = loop;
  data.stmt = stmt;
  for_each_index (&ref, idx_infer_loop_bounds, &data);
}
/* Determine information about number of iterations of a LOOP from the way
   arrays are used in STMT.  RELIABLE is true if STMT is guaranteed to be
   executed in every iteration of LOOP.  */
static void
infer_loop_bounds_from_array (struct loop *loop, gimple stmt)
{
  if (is_gimple_assign (stmt))
    {
      tree op0 = gimple_assign_lhs (stmt);
      tree op1 = gimple_assign_rhs1 (stmt);

      /* For each memory access, analyze its access function
         and record a bound on the loop iteration domain.  */
      if (REFERENCE_CLASS_P (op0))
        infer_loop_bounds_from_ref (loop, stmt, op0);

      if (REFERENCE_CLASS_P (op1))
        infer_loop_bounds_from_ref (loop, stmt, op1);
    }
  else if (is_gimple_call (stmt))
    {
      tree arg, lhs;
      unsigned i, n = gimple_call_num_args (stmt);

      lhs = gimple_call_lhs (stmt);
      if (lhs && REFERENCE_CLASS_P (lhs))
        infer_loop_bounds_from_ref (loop, stmt, lhs);

      for (i = 0; i < n; i++)
        {
          arg = gimple_call_arg (stmt, i);
          if (REFERENCE_CLASS_P (arg))
            infer_loop_bounds_from_ref (loop, stmt, arg);
        }
    }
}
/* Determine information about number of iterations of a LOOP from the fact
   that pointer arithmetics in STMT does not overflow.  */
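/* Illustrative example (not part of the original source): in

     double *p = buf;
     for (i = 0; i != n; i++)
       *p++ = 0.0;

   the increment of p is POINTER_PLUS_EXPR arithmetic that is assumed not to
   wrap around the address space, which yields an upper bound on how many
   times it can be executed; when -fdelete-null-pointer-checks is in effect,
   NULL is additionally excluded from the valid range.  */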
static void
infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple stmt)
{
  tree def, base, step, scev, type, low, high;
  tree var, ptr;

  if (!is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
    return;

  def = gimple_assign_lhs (stmt);
  if (TREE_CODE (def) != SSA_NAME)
    return;

  type = TREE_TYPE (def);
  if (!nowrap_type_p (type))
    return;

  ptr = gimple_assign_rhs1 (stmt);
  if (!expr_invariant_in_loop_p (loop, ptr))
    return;

  var = gimple_assign_rhs2 (stmt);
  if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
    return;

  scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
  if (chrec_contains_undetermined (scev))
    return;

  base = initial_condition_in_loop_num (scev, loop->num);
  step = evolution_part_in_loop_num (scev, loop->num);

  if (!base || !step
      || TREE_CODE (step) != INTEGER_CST
      || tree_contains_chrecs (base, NULL)
      || chrec_contains_symbols_defined_in_loop (base, loop->num))
    return;

  low = lower_bound_in_type (type, type);
  high = upper_bound_in_type (type, type);

  /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
     produce a NULL pointer.  The contrary would mean NULL points to an object,
     while NULL is supposed to compare unequal with the address of all objects.
     Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
     NULL pointer since that would mean wrapping, which we assume here not to
     happen.  So, we can exclude NULL from the valid range of pointer
     arithmetic.  */
  if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
    low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));

  record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
}
/* Determine information about number of iterations of a LOOP from the fact
   that signed arithmetics in STMT does not overflow.  */
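/* Illustrative example (not part of the original source): with int i,

     for (i = 0; i != n; i += 2)
       use (i);

   the increment i += 2 must not overflow (signed overflow is undefined), so
   the values of i are confined to [INT_MIN, INT_MAX] and the loop gets an
   upper bound on its number of iterations even though the exit test alone
   proves nothing.  */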
static void
infer_loop_bounds_from_signedness (struct loop *loop, gimple stmt)
{
  tree def, base, step, scev, type, low, high;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return;

  def = gimple_assign_lhs (stmt);

  if (TREE_CODE (def) != SSA_NAME)
    return;

  type = TREE_TYPE (def);
  if (!INTEGRAL_TYPE_P (type)
      || !TYPE_OVERFLOW_UNDEFINED (type))
    return;

  scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
  if (chrec_contains_undetermined (scev))
    return;

  base = initial_condition_in_loop_num (scev, loop->num);
  step = evolution_part_in_loop_num (scev, loop->num);

  if (!base || !step
      || TREE_CODE (step) != INTEGER_CST
      || tree_contains_chrecs (base, NULL)
      || chrec_contains_symbols_defined_in_loop (base, loop->num))
    return;

  low = lower_bound_in_type (type, type);
  high = upper_bound_in_type (type, type);

  record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
}
/* The following analyzers are extracting information on the bounds
   of LOOP from the following undefined behaviors:

   - data references should not access elements over the statically
     allocated size,

   - signed variables should not overflow when flag_wrapv is not set.
*/
static void
infer_loop_bounds_from_undefined (struct loop *loop)
{
  unsigned i;
  basic_block *bbs;
  gimple_stmt_iterator bsi;
  basic_block bb;
  bool reliable;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      /* If BB is not executed in each iteration of the loop, we cannot
         use the operations in it to infer reliable upper bound on the
         # of iterations of the loop.  However, we can use it as a guess.
         Reliable guesses come only from array bounds.  */
      reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          gimple stmt = gsi_stmt (bsi);

          infer_loop_bounds_from_array (loop, stmt);

          if (reliable)
            {
              infer_loop_bounds_from_signedness (loop, stmt);
              infer_loop_bounds_from_pointer_arith (loop, stmt);
            }
        }
    }

  free (bbs);
}
/* Compare wide ints, callback for qsort.  */

static int
wide_int_cmp (const void *p1, const void *p2)
{
  const widest_int *d1 = (const widest_int *) p1;
  const widest_int *d2 = (const widest_int *) p2;
  return wi::cmpu (*d1, *d2);
}
/* Return index of BOUND in BOUNDS array sorted in increasing order.
   Lookup by binary search.  */
static unsigned
bound_index (vec<widest_int> bounds, const widest_int &bound)
{
  unsigned int end = bounds.length ();
  unsigned int begin = 0;

  /* Find a matching index by means of a binary search.  */
  while (begin != end)
    {
      unsigned int middle = (begin + end) / 2;
      widest_int index = bounds[middle];

      if (index == bound)
        return middle;
      else if (wi::ltu_p (index, bound))
        begin = middle + 1;
      else
        end = middle;
    }
  gcc_unreachable ();
}
/* We recorded loop bounds only for statements dominating loop latch (and thus
   executed each loop iteration).  If there are any bounds on statements not
   dominating the loop latch we can improve the estimate by walking the loop
   body and seeing if every path from loop header to loop latch contains
   some bounded statement.  */
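/* Illustrative example (not part of the original source):

     for (i = 0; i < n; i++)
       {
         if (i & 1)
           a[i] = 0;	(a has 100 elements)
         else
           b[i] = 0;	(b has 200 elements)
       }

   Neither store dominates the latch, so neither bound was recorded as a bound
   on the whole loop; but every path from header to latch executes one of
   them, so walking the body shows the loop cannot iterate more than
   max (100, 200) = 200 times.  */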
static void
discover_iteration_bound_by_body_walk (struct loop *loop)
{
  pointer_map_t *bb_bounds;
  struct nb_iter_bound *elt;
  vec<widest_int> bounds = vNULL;
  vec<vec<basic_block> > queues = vNULL;
  vec<basic_block> queue = vNULL;
  ptrdiff_t queue_index;
  ptrdiff_t latch_index = 0;
  pointer_map_t *block_priority;

  /* Discover what bounds may interest us.  */
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      widest_int bound = elt->bound;

      /* Exit terminates loop at given iteration, while non-exits produce undefined
         effect on the next iteration.  */
      if (!elt->is_exit)
        {
          bound += 1;
          /* If an overflow occurred, ignore the result.  */
          if (bound == 0)
            continue;
        }

      if (!loop->any_upper_bound
          || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
        bounds.safe_push (bound);
    }

  /* Exit early if there is nothing to do.  */
  if (!bounds.exists ())
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");

  /* Sort the bounds in decreasing order.  */
  bounds.qsort (wide_int_cmp);

  /* For every basic block record the lowest bound that is guaranteed to
     terminate the loop.  */

  bb_bounds = pointer_map_create ();
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      widest_int bound = elt->bound;
      if (!elt->is_exit)
        {
          bound += 1;
          /* If an overflow occurred, ignore the result.  */
          if (bound == 0)
            continue;
        }

      if (!loop->any_upper_bound
          || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
        {
          ptrdiff_t index = bound_index (bounds, bound);
          void **entry = pointer_map_contains (bb_bounds,
                                               gimple_bb (elt->stmt));
          if (!entry)
            *pointer_map_insert (bb_bounds,
                                 gimple_bb (elt->stmt)) = (void *)index;
          else if ((ptrdiff_t)*entry > index)
            *entry = (void *)index;
        }
    }

  block_priority = pointer_map_create ();

  /* Perform shortest path discovery loop->header ... loop->latch.

     The "distance" is given by the smallest loop bound of basic block
     present in the path and we look for path with largest smallest bound
     on it.

     To avoid the need for fibonacci heap on double ints we simply compress
     double ints into indexes to BOUNDS array and then represent the queue
     as arrays of queues for every index.
     Index of BOUNDS.length() means that the execution of given BB has
     no bounds determined.

     VISITED is a pointer map translating basic block into smallest index
     it was inserted into the priority queue with.  */
  latch_index = -1;

  /* Start walk in loop header with index set to infinite bound.  */
  queue_index = bounds.length ();
  queues.safe_grow_cleared (queue_index + 1);
  queue.safe_push (loop->header);
  queues[queue_index] = queue;
  *pointer_map_insert (block_priority, loop->header) = (void *)queue_index;

  for (; queue_index >= 0; queue_index--)
    {
      if (latch_index < queue_index)
        {
          while (queues[queue_index].length ())
            {
              basic_block bb;
              ptrdiff_t bound_index = queue_index;
              void **entry;
              edge e;
              edge_iterator ei;

              queue = queues[queue_index];
              bb = queue.pop ();

              /* OK, we later inserted the BB with lower priority, skip it.  */
              if ((ptrdiff_t)*pointer_map_contains (block_priority, bb) > queue_index)
                continue;

              /* See if we can improve the bound.  */
              entry = pointer_map_contains (bb_bounds, bb);
              if (entry && (ptrdiff_t)*entry < bound_index)
                bound_index = (ptrdiff_t)*entry;

              /* Insert successors into the queue, watch for latch edge
                 and record greatest index we saw.  */
              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  bool insert = false;

                  if (loop_exit_edge_p (loop, e))
                    continue;

                  if (e == loop_latch_edge (loop)
                      && latch_index < bound_index)
                    latch_index = bound_index;
                  else if (!(entry = pointer_map_contains (block_priority, e->dest)))
                    {
                      insert = true;
                      *pointer_map_insert (block_priority, e->dest) = (void *)bound_index;
                    }
                  else if ((ptrdiff_t)*entry < bound_index)
                    {
                      insert = true;
                      *entry = (void *)bound_index;
                    }

                  if (insert)
                    queues[bound_index].safe_push (e->dest);
                }
            }
        }
      queues[queue_index].release ();
    }

  gcc_assert (latch_index >= 0);
  if ((unsigned)latch_index < bounds.length ())
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Found better loop bound ");
          print_decu (bounds[latch_index], dump_file);
          fprintf (dump_file, "\n");
        }
      record_niter_bound (loop, bounds[latch_index], false, true);
    }

  queues.release ();
  bounds.release ();
  pointer_map_destroy (bb_bounds);
  pointer_map_destroy (block_priority);
}
/* See if every path across the loop goes through a statement that is known
   to not execute at the last iteration.  In that case we can decrease the
   iteration count by 1.  */
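/* Illustrative example (not part of the original source):

     int a[16];
     for (i = 0; ; i++)
       {
         a[i] = i;		(undefined once i reaches 16)
         if (check (i))
           break;
       }

   Every path through the body executes the store before reaching the exit
   test, so an iteration in which the store would be undefined cannot be
   completed at all and the recorded upper bound can be decreased by one.  */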
static void
maybe_lower_iteration_bound (struct loop *loop)
{
  hash_set<gimple> *not_executed_last_iteration = NULL;
  struct nb_iter_bound *elt;
  bool found_exit = false;
  vec<basic_block> queue = vNULL;
  bitmap visited;

  /* Collect all statements with interesting (i.e. lower than
     nb_iterations_upper_bound) bound on them.

     TODO: Due to the way record_estimate choose estimates to store, the bounds
     will be always nb_iterations_upper_bound-1.  We can change this to record
     also statements not dominating the loop latch and update the walk below
     to the shortest path algorithm.  */
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      if (!elt->is_exit
          && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
        {
          if (!not_executed_last_iteration)
            not_executed_last_iteration = new hash_set<gimple>;
          not_executed_last_iteration->add (elt->stmt);
        }
    }
  if (!not_executed_last_iteration)
    return;

  /* Start DFS walk in the loop header and see if we can reach the
     loop latch or any of the exits (including statements with side
     effects that may terminate the loop otherwise) without visiting
     any of the statements known to have undefined effect on the last
     iteration.  */
  queue.safe_push (loop->header);
  visited = BITMAP_ALLOC (NULL);
  bitmap_set_bit (visited, loop->header->index);

  do
    {
      basic_block bb = queue.pop ();
      gimple_stmt_iterator gsi;
      bool stmt_found = false;

      /* Loop for possible exits and statements bounding the execution.  */
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          if (not_executed_last_iteration->contains (stmt))
            {
              stmt_found = true;
              break;
            }
          if (gimple_has_side_effects (stmt))
            {
              found_exit = true;
              break;
            }
        }
      if (found_exit)
        break;

      /* If no bounding statement is found, continue the walk.  */
      if (!stmt_found)
        {
          edge e;
          edge_iterator ei;

          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              if (loop_exit_edge_p (loop, e)
                  || e == loop_latch_edge (loop))
                {
                  found_exit = true;
                  break;
                }
              if (bitmap_set_bit (visited, e->dest->index))
                queue.safe_push (e->dest);
            }
        }
    }
  while (queue.length () && !found_exit);

  /* If every path through the loop reaches a bounding statement before exit,
     then we know the last iteration of the loop will have undefined effect
     and we can decrease number of iterations.  */

  if (!found_exit)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Reducing loop iteration estimate by 1; "
                 "undefined statement must be executed at the last iteration.\n");
      record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
                          false, true);
    }

  BITMAP_FREE (visited);
  queue.release ();
  delete not_executed_last_iteration;
}
/* Records estimates on numbers of iterations of LOOP.  If USE_UNDEFINED_P
   is true also use estimates derived from undefined behavior.  */
static void
estimate_numbers_of_iterations_loop (struct loop *loop)
{
  vec<edge> exits;
  tree niter, type;
  unsigned i;
  struct tree_niter_desc niter_desc;
  edge ex;
  widest_int bound;
  edge likely_exit;

  /* Give up if we already have tried to compute an estimation.  */
  if (loop->estimate_state != EST_NOT_COMPUTED)
    return;

  loop->estimate_state = EST_AVAILABLE;
  /* Force estimate computation but leave any existing upper bound in place.  */
  loop->any_estimate = false;

  /* Ensure that loop->nb_iterations is computed if possible.  If it turns out
     to be constant, we avoid undefined behavior implied bounds and instead
     diagnose those loops with -Waggressive-loop-optimizations.  */
  number_of_latch_executions (loop);

  exits = get_loop_exit_edges (loop);
  likely_exit = single_likely_exit (loop);
  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (!number_of_iterations_exit (loop, ex, &niter_desc, false, false))
        continue;

      niter = niter_desc.niter;
      type = TREE_TYPE (niter);
      if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
        niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
                        build_int_cst (type, 0),
                        niter);
      record_estimate (loop, niter, niter_desc.max,
                       last_stmt (ex->src),
                       true, ex == likely_exit, true);
    }
  exits.release ();

  if (flag_aggressive_loop_optimizations)
    infer_loop_bounds_from_undefined (loop);

  discover_iteration_bound_by_body_walk (loop);

  maybe_lower_iteration_bound (loop);

  /* If we have a measured profile, use it to estimate the number of
     iterations.  */
  if (loop->header->count != 0)
    {
      gcov_type nit = expected_loop_iterations_unbounded (loop) + 1;
      bound = gcov_type_to_wide_int (nit);
      record_niter_bound (loop, bound, true, false);
    }

  /* If we know the exact number of iterations of this loop, try to
     not break code with undefined behavior by not recording smaller
     maximum number of iterations.  */
  if (loop->nb_iterations
      && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
    {
      loop->any_upper_bound = true;
      loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
    }
}
/* Sets NIT to the estimated number of executions of the latch of the
   LOOP.  If CONSERVATIVE is true, we must be sure that NIT is at least as
   large as the number of iterations.  If we have no reliable estimate,
   the function returns false, otherwise returns true.  */
bool
estimated_loop_iterations (struct loop *loop, widest_int *nit)
{
  /* When SCEV information is available, try to update loop iterations
     estimate.  Otherwise just return whatever we recorded earlier.  */
  if (scev_initialized_p ())
    estimate_numbers_of_iterations_loop (loop);

  return (get_estimated_loop_iterations (loop, nit));
}
/* Similar to estimated_loop_iterations, but returns the estimate only
   if it fits to HOST_WIDE_INT.  If this is not the case, or the estimate
   on the number of iterations of LOOP could not be derived, returns -1.  */
HOST_WIDE_INT
estimated_loop_iterations_int (struct loop *loop)
{
  widest_int nit;
  HOST_WIDE_INT hwi_nit;

  if (!estimated_loop_iterations (loop, &nit))
    return -1;

  if (!wi::fits_shwi_p (nit))
    return -1;
  hwi_nit = nit.to_shwi ();

  return hwi_nit < 0 ? -1 : hwi_nit;
}
/* Sets NIT to an upper bound for the maximum number of executions of the
   latch of the LOOP.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */
bool
max_loop_iterations (struct loop *loop, widest_int *nit)
{
  /* When SCEV information is available, try to update loop iterations
     estimate.  Otherwise just return whatever we recorded earlier.  */
  if (scev_initialized_p ())
    estimate_numbers_of_iterations_loop (loop);

  return get_max_loop_iterations (loop, nit);
}
/* Similar to max_loop_iterations, but returns the estimate only
   if it fits to HOST_WIDE_INT.  If this is not the case, or the estimate
   on the number of iterations of LOOP could not be derived, returns -1.  */
HOST_WIDE_INT
max_loop_iterations_int (struct loop *loop)
{
  widest_int nit;
  HOST_WIDE_INT hwi_nit;

  if (!max_loop_iterations (loop, &nit))
    return -1;

  if (!wi::fits_shwi_p (nit))
    return -1;
  hwi_nit = nit.to_shwi ();

  return hwi_nit < 0 ? -1 : hwi_nit;
}
/* Returns an estimate for the number of executions of statements
   in the LOOP.  For statements before the loop exit, this exceeds
   the number of execution of the latch by one.  */
HOST_WIDE_INT
estimated_stmt_executions_int (struct loop *loop)
{
  HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
  HOST_WIDE_INT snit;

  if (nit == -1)
    return -1;

  snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);

  /* If the computation overflows, return -1.  */
  return snit < 0 ? -1 : snit;
}
/* Sets NIT to the estimated maximum number of executions of the latch of the
   LOOP, plus one.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */
bool
max_stmt_executions (struct loop *loop, widest_int *nit)
{
  widest_int nit_minus_one;

  if (!max_loop_iterations (loop, nit))
    return false;

  nit_minus_one = *nit;

  *nit += 1;

  return wi::gtu_p (*nit, nit_minus_one);
}
/* Sets NIT to the estimated number of executions of the latch of the
   LOOP, plus one.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */
bool
estimated_stmt_executions (struct loop *loop, widest_int *nit)
{
  widest_int nit_minus_one;

  if (!estimated_loop_iterations (loop, nit))
    return false;

  nit_minus_one = *nit;

  *nit += 1;

  return wi::gtu_p (*nit, nit_minus_one);
}
/* Records estimates on numbers of iterations of loops.  */

void
estimate_numbers_of_iterations (void)
{
  struct loop *loop;

  /* We don't want to issue signed overflow warnings while getting
     loop iteration estimates.  */
  fold_defer_overflow_warnings ();

  FOR_EACH_LOOP (loop, 0)
    {
      estimate_numbers_of_iterations_loop (loop);
    }

  fold_undefer_and_ignore_overflow_warnings ();
}
/* Returns true if statement S1 dominates statement S2.  */

bool
stmt_dominates_stmt_p (gimple s1, gimple s2)
{
  basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);

  if (!bb1
      || s1 == s2)
    return true;

  if (bb1 == bb2)
    {
      gimple_stmt_iterator bsi;

      if (gimple_code (s2) == GIMPLE_PHI)
        return false;

      if (gimple_code (s1) == GIMPLE_PHI)
        return true;

      for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
        if (gsi_stmt (bsi) == s1)
          return true;

      return false;
    }

  return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
}
/* Returns true when we can prove that the number of executions of
   STMT in the loop is at most NITER, according to the bound on
   the number of executions of the statement NITER_BOUND->stmt recorded in
   NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.

   ??? This code can become quite a CPU hog - we can have many bounds,
   and large basic blocks forcing stmt_dominates_stmt_p to be queried
   many times on large basic blocks, so the whole thing is O(n^2)
   for scev_probably_wraps_p invocation (that can be done n times).

   It would make more sense (and give better answers) to remember BB
   bounds computed by discover_iteration_bound_by_body_walk.  */
static bool
n_of_executions_at_most (gimple stmt,
                         struct nb_iter_bound *niter_bound,
                         tree niter)
{
  widest_int bound = niter_bound->bound;
  tree nit_type = TREE_TYPE (niter), e;
  enum tree_code cmp;

  gcc_assert (TYPE_UNSIGNED (nit_type));

  /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
     the number of iterations is small.  */
  if (!wi::fits_to_tree_p (bound, nit_type))
    return false;

  /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
     times.  This means that:

     -- if NITER_BOUND->is_exit is true, then everything after
        it at most NITER_BOUND->bound times.

     -- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
        is executed, then NITER_BOUND->stmt is executed as well in the same
        iteration then STMT is executed at most NITER_BOUND->bound + 1 times.

        If we can determine that NITER_BOUND->stmt is always executed
        after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
        We conclude that if both statements belong to the same
        basic block and STMT is before NITER_BOUND->stmt and there are no
        statements with side effects in between.  */

  if (niter_bound->is_exit)
    {
      if (stmt == niter_bound->stmt
          || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
        return false;
      cmp = GE_EXPR;
    }
  else
    {
      if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
        {
          gimple_stmt_iterator bsi;
          if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
              || gimple_code (stmt) == GIMPLE_PHI
              || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
            return false;

          /* By stmt_dominates_stmt_p we already know that STMT appears
             before NITER_BOUND->STMT.  Still need to test that the loop
             can not be terminated by a side effect in between.  */
          for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
               gsi_next (&bsi))
            if (gimple_has_side_effects (gsi_stmt (bsi)))
              return false;
          bound += 1;
          if (bound == 0
              || !wi::fits_to_tree_p (bound, nit_type))
            return false;
        }
      cmp = GT_EXPR;
    }

  e = fold_binary (cmp, boolean_type_node,
                   niter, wide_int_to_tree (nit_type, bound));
  return e && integer_nonzerop (e);
}
/* Returns true if the arithmetics in TYPE can be assumed not to wrap.  */

bool
nowrap_type_p (tree type)
{
  if (INTEGRAL_TYPE_P (type)
      && TYPE_OVERFLOW_UNDEFINED (type))
    return true;

  if (POINTER_TYPE_P (type))
    return true;

  return false;
}
/* Return false only when the induction variable BASE + STEP * I is
   known to not overflow: i.e. when the number of iterations is small
   enough with respect to the step and initial condition in order to
   keep the evolution confined in TYPEs bounds.  Return true when the
   iv is known to overflow or when the property is not computable.

   USE_OVERFLOW_SEMANTICS is true if this function should assume that
   the rules for overflow of the given language apply (e.g., that signed
   arithmetics in C does not overflow).  */
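/* Illustrative example (not part of the original source): for an unsigned
   char iv with base 200 and step 10, the value can increase by at most
   255 - 200 = 55 before leaving the type, i.e. for 55 / 10 = 5 more steps;
   if the loop is known to iterate fewer times than that, the iv provably
   does not wrap, otherwise we conservatively answer true below.  */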
bool
scev_probably_wraps_p (tree base, tree step,
                       gimple at_stmt, struct loop *loop,
                       bool use_overflow_semantics)
{
  tree delta, step_abs;
  tree unsigned_type, valid_niter;
  tree type = TREE_TYPE (step);
  tree e;
  widest_int niter;
  struct nb_iter_bound *bound;

  /* FIXME: We really need something like
     http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.

     We used to test for the following situation that frequently appears
     during address arithmetics:

     D.1621_13 = (long unsigned intD.4) D.1620_12;
     D.1622_14 = D.1621_13 * 8;
     D.1623_15 = (doubleD.29 *) D.1622_14;

     And derived that the sequence corresponding to D_14
     can be proved to not wrap because it is used for computing a
     memory access; however, this is not really the case -- for example,
     if D_12 = (unsigned char) [254,+,1], then D_14 has values
     2032, 2040, 0, 8, ..., but the code is still legal.  */

  if (chrec_contains_undetermined (base)
      || chrec_contains_undetermined (step))
    return true;

  if (integer_zerop (step))
    return false;

  /* If we can use the fact that signed and pointer arithmetics does not
     wrap, we are done.  */
  if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
    return false;

  /* To be able to use estimates on number of iterations of the loop,
     we must have an upper bound on the absolute value of the step.  */
  if (TREE_CODE (step) != INTEGER_CST)
    return true;

  /* Don't issue signed overflow warnings.  */
  fold_defer_overflow_warnings ();

  /* Otherwise, compute the number of iterations before we reach the
     bound of the type, and verify that the loop is exited before this
     occurs.  */
  unsigned_type = unsigned_type_for (type);
  base = fold_convert (unsigned_type, base);

  if (tree_int_cst_sign_bit (step))
    {
      tree extreme = fold_convert (unsigned_type,
                                   lower_bound_in_type (type, type));
      delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
      step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
                              fold_convert (unsigned_type, step));
    }
  else
    {
      tree extreme = fold_convert (unsigned_type,
                                   upper_bound_in_type (type, type));
      delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
      step_abs = fold_convert (unsigned_type, step);
    }

  valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);

  estimate_numbers_of_iterations_loop (loop);

  if (max_loop_iterations (loop, &niter)
      && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
      && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
                           wide_int_to_tree (TREE_TYPE (valid_niter),
                                             niter))) != NULL
      && integer_nonzerop (e))
    {
      fold_undefer_and_ignore_overflow_warnings ();
      return false;
    }

  for (bound = loop->bounds; bound; bound = bound->next)
    {
      if (n_of_executions_at_most (at_stmt, bound, valid_niter))
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }
    }

  fold_undefer_and_ignore_overflow_warnings ();

  /* At this point we still don't have a proof that the iv does not
     overflow: give up.  */
  return true;
}
/* Frees the information on upper bounds on numbers of iterations of LOOP.  */

void
free_numbers_of_iterations_estimates_loop (struct loop *loop)
{
  struct nb_iter_bound *bound, *next;

  loop->nb_iterations = NULL;
  loop->estimate_state = EST_NOT_COMPUTED;
  for (bound = loop->bounds; bound; bound = next)
    {
      next = bound->next;
      ggc_free (bound);
    }

  loop->bounds = NULL;
}
/* Frees the information on upper bounds on numbers of iterations of loops.  */

void
free_numbers_of_iterations_estimates (void)
{
  struct loop *loop;

  FOR_EACH_LOOP (loop, 0)
    {
      free_numbers_of_iterations_estimates_loop (loop);
    }
}
/* Substitute value VAL for ssa name NAME inside expressions held
   at LOOP.  */

void
substitute_in_loop_info (struct loop *loop, tree name, tree val)
{
  loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
}