1 /* Code for range operators.
2 Copyright (C) 2017-2022 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
26 #include "insn-codes.h"
31 #include "tree-pass.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
41 #include "gimple-iterator.h"
42 #include "gimple-fold.h"
44 #include "gimple-walk.h"
47 #include "value-relation.h"
49 #include "tree-ssa-ccp.h"
51 // Convert irange bitmasks into a VALUE MASK pair suitable for calling CCP.
54 irange_to_masked_value (const irange
&r
, widest_int
&value
, widest_int
&mask
)
59 value
= widest_int::from (r
.lower_bound (), TYPE_SIGN (r
.type ()));
63 mask
= widest_int::from (r
.get_nonzero_bits (), TYPE_SIGN (r
.type ()));
68 // Update the known bitmasks in R when applying the operation CODE to
72 update_known_bitmask (irange
&r
, tree_code code
,
73 const irange
&lh
, const irange
&rh
)
75 if (r
.undefined_p () || lh
.undefined_p () || rh
.undefined_p ())
78 widest_int value
, mask
, lh_mask
, rh_mask
, lh_value
, rh_value
;
79 tree type
= r
.type ();
80 signop sign
= TYPE_SIGN (type
);
81 int prec
= TYPE_PRECISION (type
);
82 signop lh_sign
= TYPE_SIGN (lh
.type ());
83 signop rh_sign
= TYPE_SIGN (rh
.type ());
84 int lh_prec
= TYPE_PRECISION (lh
.type ());
85 int rh_prec
= TYPE_PRECISION (rh
.type ());
87 irange_to_masked_value (lh
, lh_value
, lh_mask
);
88 irange_to_masked_value (rh
, rh_value
, rh_mask
);
89 bit_value_binop (code
, sign
, prec
, &value
, &mask
,
90 lh_sign
, lh_prec
, lh_value
, lh_mask
,
91 rh_sign
, rh_prec
, rh_value
, rh_mask
);
93 int_range
<2> tmp (type
);
94 tmp
.set_nonzero_bits (value
| mask
);
98 // Return the upper limit for a type.
100 static inline wide_int
101 max_limit (const_tree type
)
103 return wi::max_value (TYPE_PRECISION (type
) , TYPE_SIGN (type
));
106 // Return the lower limit for a type.
108 static inline wide_int
109 min_limit (const_tree type
)
111 return wi::min_value (TYPE_PRECISION (type
) , TYPE_SIGN (type
));
114 // Return false if shifting by OP is undefined behavior. Otherwise, return
115 // true and the range it is to be shifted by. This allows trimming out of
116 // undefined ranges, leaving only valid ranges if there are any.
119 get_shift_range (irange
&r
, tree type
, const irange
&op
)
121 if (op
.undefined_p ())
124 // Build valid range and intersect it with the shift range.
125 r
= value_range (build_int_cst_type (op
.type (), 0),
126 build_int_cst_type (op
.type (), TYPE_PRECISION (type
) - 1));
129 // If there are no valid ranges in the shift range, returned false.
130 if (r
.undefined_p ())
135 // Return TRUE if 0 is within [WMIN, WMAX].
138 wi_includes_zero_p (tree type
, const wide_int
&wmin
, const wide_int
&wmax
)
140 signop sign
= TYPE_SIGN (type
);
141 return wi::le_p (wmin
, 0, sign
) && wi::ge_p (wmax
, 0, sign
);
144 // Return TRUE if [WMIN, WMAX] is the singleton 0.
147 wi_zero_p (tree type
, const wide_int
&wmin
, const wide_int
&wmax
)
149 unsigned prec
= TYPE_PRECISION (type
);
150 return wmin
== wmax
&& wi::eq_p (wmin
, wi::zero (prec
));
153 // Default wide_int fold operation returns [MIN, MAX].
156 range_operator::wi_fold (irange
&r
, tree type
,
157 const wide_int
&lh_lb ATTRIBUTE_UNUSED
,
158 const wide_int
&lh_ub ATTRIBUTE_UNUSED
,
159 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
160 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
162 gcc_checking_assert (r
.supports_type_p (type
));
163 r
.set_varying (type
);
166 // Call wi_fold, except further split small subranges into constants.
167 // This can provide better precision. For something 8 >> [0,1]
168 // Instead of [8, 16], we will produce [8,8][16,16]
171 range_operator::wi_fold_in_parts (irange
&r
, tree type
,
172 const wide_int
&lh_lb
,
173 const wide_int
&lh_ub
,
174 const wide_int
&rh_lb
,
175 const wide_int
&rh_ub
) const
178 widest_int rh_range
= wi::sub (widest_int::from (rh_ub
, TYPE_SIGN (type
)),
179 widest_int::from (rh_lb
, TYPE_SIGN (type
)));
180 widest_int lh_range
= wi::sub (widest_int::from (lh_ub
, TYPE_SIGN (type
)),
181 widest_int::from (lh_lb
, TYPE_SIGN (type
)));
182 // If there are 2, 3, or 4 values in the RH range, do them separately.
183 // Call wi_fold_in_parts to check the RH side.
184 if (rh_range
> 0 && rh_range
< 4)
186 wi_fold_in_parts (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_lb
);
189 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
+ 1, rh_lb
+ 1);
193 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
+ 2, rh_lb
+ 2);
197 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_ub
, rh_ub
);
200 // Otherise check for 2, 3, or 4 values in the LH range and split them up.
201 // The RH side has been checked, so no recursion needed.
202 else if (lh_range
> 0 && lh_range
< 4)
204 wi_fold (r
, type
, lh_lb
, lh_lb
, rh_lb
, rh_ub
);
207 wi_fold (tmp
, type
, lh_lb
+ 1, lh_lb
+ 1, rh_lb
, rh_ub
);
211 wi_fold (tmp
, type
, lh_lb
+ 2, lh_lb
+ 2, rh_lb
, rh_ub
);
215 wi_fold (tmp
, type
, lh_ub
, lh_ub
, rh_lb
, rh_ub
);
218 // Otherwise just call wi_fold.
220 wi_fold (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
223 // The default for fold is to break all ranges into sub-ranges and
224 // invoke the wi_fold method on each sub-range pair.
227 range_operator::fold_range (irange
&r
, tree type
,
230 relation_trio trio
) const
232 gcc_checking_assert (r
.supports_type_p (type
));
233 if (empty_range_varying (r
, type
, lh
, rh
))
236 relation_kind rel
= trio
.op1_op2 ();
237 unsigned num_lh
= lh
.num_pairs ();
238 unsigned num_rh
= rh
.num_pairs ();
240 // If both ranges are single pairs, fold directly into the result range.
241 // If the number of subranges grows too high, produce a summary result as the
242 // loop becomes exponential with little benefit. See PR 103821.
243 if ((num_lh
== 1 && num_rh
== 1) || num_lh
* num_rh
> 12)
245 wi_fold_in_parts (r
, type
, lh
.lower_bound (), lh
.upper_bound (),
246 rh
.lower_bound (), rh
.upper_bound ());
247 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
253 for (unsigned x
= 0; x
< num_lh
; ++x
)
254 for (unsigned y
= 0; y
< num_rh
; ++y
)
256 wide_int lh_lb
= lh
.lower_bound (x
);
257 wide_int lh_ub
= lh
.upper_bound (x
);
258 wide_int rh_lb
= rh
.lower_bound (y
);
259 wide_int rh_ub
= rh
.upper_bound (y
);
260 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
264 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
268 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
272 // The default for op1_range is to return false.
275 range_operator::op1_range (irange
&r ATTRIBUTE_UNUSED
,
276 tree type ATTRIBUTE_UNUSED
,
277 const irange
&lhs ATTRIBUTE_UNUSED
,
278 const irange
&op2 ATTRIBUTE_UNUSED
,
284 // The default for op2_range is to return false.
287 range_operator::op2_range (irange
&r ATTRIBUTE_UNUSED
,
288 tree type ATTRIBUTE_UNUSED
,
289 const irange
&lhs ATTRIBUTE_UNUSED
,
290 const irange
&op1 ATTRIBUTE_UNUSED
,
296 // The default relation routines return VREL_VARYING.
299 range_operator::lhs_op1_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
300 const irange
&op1 ATTRIBUTE_UNUSED
,
301 const irange
&op2 ATTRIBUTE_UNUSED
,
302 relation_kind rel ATTRIBUTE_UNUSED
) const
308 range_operator::lhs_op2_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
309 const irange
&op1 ATTRIBUTE_UNUSED
,
310 const irange
&op2 ATTRIBUTE_UNUSED
,
311 relation_kind rel ATTRIBUTE_UNUSED
) const
317 range_operator::op1_op2_relation (const irange
&lhs ATTRIBUTE_UNUSED
) const
322 // Default is no relation affects the LHS.
325 range_operator::op1_op2_relation_effect (irange
&lhs_range ATTRIBUTE_UNUSED
,
326 tree type ATTRIBUTE_UNUSED
,
327 const irange
&op1_range ATTRIBUTE_UNUSED
,
328 const irange
&op2_range ATTRIBUTE_UNUSED
,
329 relation_kind rel ATTRIBUTE_UNUSED
) const
334 // Create and return a range from a pair of wide-ints that are known
335 // to have overflowed (or underflowed).
338 value_range_from_overflowed_bounds (irange
&r
, tree type
,
339 const wide_int
&wmin
,
340 const wide_int
&wmax
)
342 const signop sgn
= TYPE_SIGN (type
);
343 const unsigned int prec
= TYPE_PRECISION (type
);
345 wide_int tmin
= wide_int::from (wmin
, prec
, sgn
);
346 wide_int tmax
= wide_int::from (wmax
, prec
, sgn
);
351 if (wi::cmp (tmin
, tmax
, sgn
) < 0)
354 if (wi::cmp (tmax
, tem
, sgn
) > 0)
357 // If the anti-range would cover nothing, drop to varying.
358 // Likewise if the anti-range bounds are outside of the types
360 if (covers
|| wi::cmp (tmin
, tmax
, sgn
) > 0)
361 r
.set_varying (type
);
364 tree tree_min
= wide_int_to_tree (type
, tmin
);
365 tree tree_max
= wide_int_to_tree (type
, tmax
);
366 r
.set (tree_min
, tree_max
, VR_ANTI_RANGE
);
370 // Create and return a range from a pair of wide-ints. MIN_OVF and
371 // MAX_OVF describe any overflow that might have occurred while
372 // calculating WMIN and WMAX respectively.
375 value_range_with_overflow (irange
&r
, tree type
,
376 const wide_int
&wmin
, const wide_int
&wmax
,
377 wi::overflow_type min_ovf
= wi::OVF_NONE
,
378 wi::overflow_type max_ovf
= wi::OVF_NONE
)
380 const signop sgn
= TYPE_SIGN (type
);
381 const unsigned int prec
= TYPE_PRECISION (type
);
382 const bool overflow_wraps
= TYPE_OVERFLOW_WRAPS (type
);
384 // For one bit precision if max != min, then the range covers all
386 if (prec
== 1 && wi::ne_p (wmax
, wmin
))
388 r
.set_varying (type
);
394 // If overflow wraps, truncate the values and adjust the range,
395 // kind, and bounds appropriately.
396 if ((min_ovf
!= wi::OVF_NONE
) == (max_ovf
!= wi::OVF_NONE
))
398 wide_int tmin
= wide_int::from (wmin
, prec
, sgn
);
399 wide_int tmax
= wide_int::from (wmax
, prec
, sgn
);
400 // If the limits are swapped, we wrapped around and cover
402 if (wi::gt_p (tmin
, tmax
, sgn
))
403 r
.set_varying (type
);
405 // No overflow or both overflow or underflow. The range
406 // kind stays normal.
407 r
.set (wide_int_to_tree (type
, tmin
),
408 wide_int_to_tree (type
, tmax
));
412 if ((min_ovf
== wi::OVF_UNDERFLOW
&& max_ovf
== wi::OVF_NONE
)
413 || (max_ovf
== wi::OVF_OVERFLOW
&& min_ovf
== wi::OVF_NONE
))
414 value_range_from_overflowed_bounds (r
, type
, wmin
, wmax
);
416 // Other underflow and/or overflow, drop to VR_VARYING.
417 r
.set_varying (type
);
421 // If both bounds either underflowed or overflowed, then the result
423 if ((min_ovf
== wi::OVF_OVERFLOW
&& max_ovf
== wi::OVF_OVERFLOW
)
424 || (min_ovf
== wi::OVF_UNDERFLOW
&& max_ovf
== wi::OVF_UNDERFLOW
))
430 // If overflow does not wrap, saturate to [MIN, MAX].
431 wide_int new_lb
, new_ub
;
432 if (min_ovf
== wi::OVF_UNDERFLOW
)
433 new_lb
= wi::min_value (prec
, sgn
);
434 else if (min_ovf
== wi::OVF_OVERFLOW
)
435 new_lb
= wi::max_value (prec
, sgn
);
439 if (max_ovf
== wi::OVF_UNDERFLOW
)
440 new_ub
= wi::min_value (prec
, sgn
);
441 else if (max_ovf
== wi::OVF_OVERFLOW
)
442 new_ub
= wi::max_value (prec
, sgn
);
446 r
.set (wide_int_to_tree (type
, new_lb
),
447 wide_int_to_tree (type
, new_ub
));
451 // Create and return a range from a pair of wide-ints. Canonicalize
452 // the case where the bounds are swapped. In which case, we transform
453 // [10,5] into [MIN,5][10,MAX].
456 create_possibly_reversed_range (irange
&r
, tree type
,
457 const wide_int
&new_lb
, const wide_int
&new_ub
)
459 signop s
= TYPE_SIGN (type
);
460 // If the bounds are swapped, treat the result as if an overflow occured.
461 if (wi::gt_p (new_lb
, new_ub
, s
))
462 value_range_from_overflowed_bounds (r
, type
, new_lb
, new_ub
);
464 // Otherwise it's just a normal range.
465 r
.set (wide_int_to_tree (type
, new_lb
), wide_int_to_tree (type
, new_ub
));
468 // Return the summary information about boolean range LHS. If EMPTY/FULL,
469 // return the equivalent range for TYPE in R; if FALSE/TRUE, do nothing.
472 get_bool_state (vrange
&r
, const vrange
&lhs
, tree val_type
)
474 // If there is no result, then this is unexecutable.
475 if (lhs
.undefined_p ())
484 // For TRUE, we can't just test for [1,1] because Ada can have
485 // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
486 if (lhs
.contains_p (build_zero_cst (lhs
.type ())))
488 r
.set_varying (val_type
);
496 class operator_equal
: public range_operator
498 using range_operator::fold_range
;
499 using range_operator::op1_range
;
500 using range_operator::op2_range
;
502 virtual bool fold_range (irange
&r
, tree type
,
505 relation_trio
= TRIO_VARYING
) const;
506 virtual bool op1_range (irange
&r
, tree type
,
509 relation_trio
= TRIO_VARYING
) const;
510 virtual bool op2_range (irange
&r
, tree type
,
513 relation_trio
= TRIO_VARYING
) const;
514 virtual relation_kind
op1_op2_relation (const irange
&lhs
) const;
517 // Check if the LHS range indicates a relation between OP1 and OP2.
520 equal_op1_op2_relation (const irange
&lhs
)
522 if (lhs
.undefined_p ())
523 return VREL_UNDEFINED
;
525 // FALSE = op1 == op2 indicates NE_EXPR.
529 // TRUE = op1 == op2 indicates EQ_EXPR.
530 if (!lhs
.contains_p (build_zero_cst (lhs
.type ())))
536 operator_equal::op1_op2_relation (const irange
&lhs
) const
538 return equal_op1_op2_relation (lhs
);
543 operator_equal::fold_range (irange
&r
, tree type
,
546 relation_trio rel
) const
548 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_EQ
))
551 // We can be sure the values are always equal or not if both ranges
552 // consist of a single value, and then compare them.
553 if (wi::eq_p (op1
.lower_bound (), op1
.upper_bound ())
554 && wi::eq_p (op2
.lower_bound (), op2
.upper_bound ()))
556 if (wi::eq_p (op1
.lower_bound (), op2
.upper_bound()))
557 r
= range_true (type
);
559 r
= range_false (type
);
563 // If ranges do not intersect, we know the range is not equal,
564 // otherwise we don't know anything for sure.
565 int_range_max tmp
= op1
;
567 if (tmp
.undefined_p ())
568 r
= range_false (type
);
570 r
= range_true_and_false (type
);
576 operator_equal::op1_range (irange
&r
, tree type
,
581 switch (get_bool_state (r
, lhs
, type
))
584 // If it's true, the result is the same as OP2.
589 // If the result is false, the only time we know anything is
590 // if OP2 is a constant.
591 if (wi::eq_p (op2
.lower_bound(), op2
.upper_bound()))
597 r
.set_varying (type
);
607 operator_equal::op2_range (irange
&r
, tree type
,
610 relation_trio rel
) const
612 return operator_equal::op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
615 class operator_not_equal
: public range_operator
617 using range_operator::fold_range
;
618 using range_operator::op1_range
;
619 using range_operator::op2_range
;
621 virtual bool fold_range (irange
&r
, tree type
,
624 relation_trio
= TRIO_VARYING
) const;
625 virtual bool op1_range (irange
&r
, tree type
,
628 relation_trio
= TRIO_VARYING
) const;
629 virtual bool op2_range (irange
&r
, tree type
,
632 relation_trio
= TRIO_VARYING
) const;
633 virtual relation_kind
op1_op2_relation (const irange
&lhs
) const;
636 // Check if the LHS range indicates a relation between OP1 and OP2.
639 not_equal_op1_op2_relation (const irange
&lhs
)
641 if (lhs
.undefined_p ())
642 return VREL_UNDEFINED
;
644 // FALSE = op1 != op2 indicates EQ_EXPR.
648 // TRUE = op1 != op2 indicates NE_EXPR.
649 if (!lhs
.contains_p (build_zero_cst (lhs
.type ())))
655 operator_not_equal::op1_op2_relation (const irange
&lhs
) const
657 return not_equal_op1_op2_relation (lhs
);
661 operator_not_equal::fold_range (irange
&r
, tree type
,
664 relation_trio rel
) const
666 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_NE
))
669 // We can be sure the values are always equal or not if both ranges
670 // consist of a single value, and then compare them.
671 if (wi::eq_p (op1
.lower_bound (), op1
.upper_bound ())
672 && wi::eq_p (op2
.lower_bound (), op2
.upper_bound ()))
674 if (wi::ne_p (op1
.lower_bound (), op2
.upper_bound()))
675 r
= range_true (type
);
677 r
= range_false (type
);
681 // If ranges do not intersect, we know the range is not equal,
682 // otherwise we don't know anything for sure.
683 int_range_max tmp
= op1
;
685 if (tmp
.undefined_p ())
686 r
= range_true (type
);
688 r
= range_true_and_false (type
);
694 operator_not_equal::op1_range (irange
&r
, tree type
,
699 switch (get_bool_state (r
, lhs
, type
))
702 // If the result is true, the only time we know anything is if
703 // OP2 is a constant.
704 if (wi::eq_p (op2
.lower_bound(), op2
.upper_bound()))
710 r
.set_varying (type
);
714 // If it's false, the result is the same as OP2.
726 operator_not_equal::op2_range (irange
&r
, tree type
,
729 relation_trio rel
) const
731 return operator_not_equal::op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
734 // (X < VAL) produces the range of [MIN, VAL - 1].
737 build_lt (irange
&r
, tree type
, const wide_int
&val
)
739 wi::overflow_type ov
;
741 signop sgn
= TYPE_SIGN (type
);
743 // Signed 1 bit cannot represent 1 for subtraction.
745 lim
= wi::add (val
, -1, sgn
, &ov
);
747 lim
= wi::sub (val
, 1, sgn
, &ov
);
749 // If val - 1 underflows, check if X < MIN, which is an empty range.
753 r
= int_range
<1> (type
, min_limit (type
), lim
);
756 // (X <= VAL) produces the range of [MIN, VAL].
759 build_le (irange
&r
, tree type
, const wide_int
&val
)
761 r
= int_range
<1> (type
, min_limit (type
), val
);
764 // (X > VAL) produces the range of [VAL + 1, MAX].
767 build_gt (irange
&r
, tree type
, const wide_int
&val
)
769 wi::overflow_type ov
;
771 signop sgn
= TYPE_SIGN (type
);
773 // Signed 1 bit cannot represent 1 for addition.
775 lim
= wi::sub (val
, -1, sgn
, &ov
);
777 lim
= wi::add (val
, 1, sgn
, &ov
);
778 // If val + 1 overflows, check is for X > MAX, which is an empty range.
782 r
= int_range
<1> (type
, lim
, max_limit (type
));
785 // (X >= val) produces the range of [VAL, MAX].
788 build_ge (irange
&r
, tree type
, const wide_int
&val
)
790 r
= int_range
<1> (type
, val
, max_limit (type
));
794 class operator_lt
: public range_operator
796 using range_operator::fold_range
;
797 using range_operator::op1_range
;
798 using range_operator::op2_range
;
800 virtual bool fold_range (irange
&r
, tree type
,
803 relation_trio
= TRIO_VARYING
) const;
804 virtual bool op1_range (irange
&r
, tree type
,
807 relation_trio
= TRIO_VARYING
) const;
808 virtual bool op2_range (irange
&r
, tree type
,
811 relation_trio
= TRIO_VARYING
) const;
812 virtual relation_kind
op1_op2_relation (const irange
&lhs
) const;
815 // Check if the LHS range indicates a relation between OP1 and OP2.
818 lt_op1_op2_relation (const irange
&lhs
)
820 if (lhs
.undefined_p ())
821 return VREL_UNDEFINED
;
823 // FALSE = op1 < op2 indicates GE_EXPR.
827 // TRUE = op1 < op2 indicates LT_EXPR.
828 if (!lhs
.contains_p (build_zero_cst (lhs
.type ())))
834 operator_lt::op1_op2_relation (const irange
&lhs
) const
836 return lt_op1_op2_relation (lhs
);
840 operator_lt::fold_range (irange
&r
, tree type
,
843 relation_trio rel
) const
845 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_LT
))
848 signop sign
= TYPE_SIGN (op1
.type ());
849 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
851 if (wi::lt_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
852 r
= range_true (type
);
853 else if (!wi::lt_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
854 r
= range_false (type
);
855 // Use nonzero bits to determine if < 0 is false.
856 else if (op2
.zero_p () && !wi::neg_p (op1
.get_nonzero_bits (), sign
))
857 r
= range_false (type
);
859 r
= range_true_and_false (type
);
864 operator_lt::op1_range (irange
&r
, tree type
,
869 switch (get_bool_state (r
, lhs
, type
))
872 build_lt (r
, type
, op2
.upper_bound ());
876 build_ge (r
, type
, op2
.lower_bound ());
886 operator_lt::op2_range (irange
&r
, tree type
,
891 switch (get_bool_state (r
, lhs
, type
))
894 build_gt (r
, type
, op1
.lower_bound ());
898 build_le (r
, type
, op1
.upper_bound ());
908 class operator_le
: public range_operator
910 using range_operator::fold_range
;
911 using range_operator::op1_range
;
912 using range_operator::op2_range
;
914 virtual bool fold_range (irange
&r
, tree type
,
917 relation_trio
= TRIO_VARYING
) const;
918 virtual bool op1_range (irange
&r
, tree type
,
921 relation_trio
= TRIO_VARYING
) const;
922 virtual bool op2_range (irange
&r
, tree type
,
925 relation_trio
= TRIO_VARYING
) const;
926 virtual relation_kind
op1_op2_relation (const irange
&lhs
) const;
929 // Check if the LHS range indicates a relation between OP1 and OP2.
932 le_op1_op2_relation (const irange
&lhs
)
934 if (lhs
.undefined_p ())
935 return VREL_UNDEFINED
;
937 // FALSE = op1 <= op2 indicates GT_EXPR.
941 // TRUE = op1 <= op2 indicates LE_EXPR.
942 if (!lhs
.contains_p (build_zero_cst (lhs
.type ())))
948 operator_le::op1_op2_relation (const irange
&lhs
) const
950 return le_op1_op2_relation (lhs
);
954 operator_le::fold_range (irange
&r
, tree type
,
957 relation_trio rel
) const
959 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_LE
))
962 signop sign
= TYPE_SIGN (op1
.type ());
963 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
965 if (wi::le_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
966 r
= range_true (type
);
967 else if (!wi::le_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
968 r
= range_false (type
);
970 r
= range_true_and_false (type
);
975 operator_le::op1_range (irange
&r
, tree type
,
980 switch (get_bool_state (r
, lhs
, type
))
983 build_le (r
, type
, op2
.upper_bound ());
987 build_gt (r
, type
, op2
.lower_bound ());
997 operator_le::op2_range (irange
&r
, tree type
,
1000 relation_trio
) const
1002 switch (get_bool_state (r
, lhs
, type
))
1005 build_ge (r
, type
, op1
.lower_bound ());
1009 build_lt (r
, type
, op1
.upper_bound ());
1019 class operator_gt
: public range_operator
1021 using range_operator::fold_range
;
1022 using range_operator::op1_range
;
1023 using range_operator::op2_range
;
1025 virtual bool fold_range (irange
&r
, tree type
,
1028 relation_trio
= TRIO_VARYING
) const;
1029 virtual bool op1_range (irange
&r
, tree type
,
1032 relation_trio
= TRIO_VARYING
) const;
1033 virtual bool op2_range (irange
&r
, tree type
,
1036 relation_trio
= TRIO_VARYING
) const;
1037 virtual relation_kind
op1_op2_relation (const irange
&lhs
) const;
1040 // Check if the LHS range indicates a relation between OP1 and OP2.
1043 gt_op1_op2_relation (const irange
&lhs
)
1045 if (lhs
.undefined_p ())
1046 return VREL_UNDEFINED
;
1048 // FALSE = op1 > op2 indicates LE_EXPR.
1052 // TRUE = op1 > op2 indicates GT_EXPR.
1053 if (!lhs
.contains_p (build_zero_cst (lhs
.type ())))
1055 return VREL_VARYING
;
1059 operator_gt::op1_op2_relation (const irange
&lhs
) const
1061 return gt_op1_op2_relation (lhs
);
1066 operator_gt::fold_range (irange
&r
, tree type
,
1067 const irange
&op1
, const irange
&op2
,
1068 relation_trio rel
) const
1070 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_GT
))
1073 signop sign
= TYPE_SIGN (op1
.type ());
1074 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1076 if (wi::gt_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1077 r
= range_true (type
);
1078 else if (!wi::gt_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1079 r
= range_false (type
);
1081 r
= range_true_and_false (type
);
1086 operator_gt::op1_range (irange
&r
, tree type
,
1087 const irange
&lhs
, const irange
&op2
,
1088 relation_trio
) const
1090 switch (get_bool_state (r
, lhs
, type
))
1093 build_gt (r
, type
, op2
.lower_bound ());
1097 build_le (r
, type
, op2
.upper_bound ());
1107 operator_gt::op2_range (irange
&r
, tree type
,
1110 relation_trio
) const
1112 switch (get_bool_state (r
, lhs
, type
))
1115 build_lt (r
, type
, op1
.upper_bound ());
1119 build_ge (r
, type
, op1
.lower_bound ());
1129 class operator_ge
: public range_operator
1131 using range_operator::fold_range
;
1132 using range_operator::op1_range
;
1133 using range_operator::op2_range
;
1135 virtual bool fold_range (irange
&r
, tree type
,
1138 relation_trio
= TRIO_VARYING
) const;
1139 virtual bool op1_range (irange
&r
, tree type
,
1142 relation_trio
= TRIO_VARYING
) const;
1143 virtual bool op2_range (irange
&r
, tree type
,
1146 relation_trio
= TRIO_VARYING
) const;
1147 virtual relation_kind
op1_op2_relation (const irange
&lhs
) const;
1150 // Check if the LHS range indicates a relation between OP1 and OP2.
1153 ge_op1_op2_relation (const irange
&lhs
)
1155 if (lhs
.undefined_p ())
1156 return VREL_UNDEFINED
;
1158 // FALSE = op1 >= op2 indicates LT_EXPR.
1162 // TRUE = op1 >= op2 indicates GE_EXPR.
1163 if (!lhs
.contains_p (build_zero_cst (lhs
.type ())))
1165 return VREL_VARYING
;
1169 operator_ge::op1_op2_relation (const irange
&lhs
) const
1171 return ge_op1_op2_relation (lhs
);
1175 operator_ge::fold_range (irange
&r
, tree type
,
1178 relation_trio rel
) const
1180 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_GE
))
1183 signop sign
= TYPE_SIGN (op1
.type ());
1184 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1186 if (wi::ge_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1187 r
= range_true (type
);
1188 else if (!wi::ge_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1189 r
= range_false (type
);
1191 r
= range_true_and_false (type
);
1196 operator_ge::op1_range (irange
&r
, tree type
,
1199 relation_trio
) const
1201 switch (get_bool_state (r
, lhs
, type
))
1204 build_ge (r
, type
, op2
.lower_bound ());
1208 build_lt (r
, type
, op2
.upper_bound ());
1218 operator_ge::op2_range (irange
&r
, tree type
,
1221 relation_trio
) const
1223 switch (get_bool_state (r
, lhs
, type
))
1226 build_le (r
, type
, op1
.upper_bound ());
1230 build_gt (r
, type
, op1
.lower_bound ());
1240 class operator_plus
: public range_operator
1242 using range_operator::op1_range
;
1243 using range_operator::op2_range
;
1244 using range_operator::lhs_op1_relation
;
1245 using range_operator::lhs_op2_relation
;
1247 virtual bool op1_range (irange
&r
, tree type
,
1250 relation_trio
) const;
1251 virtual bool op2_range (irange
&r
, tree type
,
1254 relation_trio
) const;
1255 virtual void wi_fold (irange
&r
, tree type
,
1256 const wide_int
&lh_lb
,
1257 const wide_int
&lh_ub
,
1258 const wide_int
&rh_lb
,
1259 const wide_int
&rh_ub
) const;
1260 virtual relation_kind
lhs_op1_relation (const irange
&lhs
, const irange
&op1
,
1262 relation_kind rel
) const;
1263 virtual relation_kind
lhs_op2_relation (const irange
&lhs
, const irange
&op1
,
1265 relation_kind rel
) const;
1268 // Check to see if the range of OP2 indicates anything about the relation
1269 // between LHS and OP1.
1272 operator_plus::lhs_op1_relation (const irange
&lhs
,
1275 relation_kind
) const
1277 if (lhs
.undefined_p () || op1
.undefined_p () || op2
.undefined_p ())
1278 return VREL_VARYING
;
1280 tree type
= lhs
.type ();
1281 unsigned prec
= TYPE_PRECISION (type
);
1282 wi::overflow_type ovf1
, ovf2
;
1283 signop sign
= TYPE_SIGN (type
);
1285 // LHS = OP1 + 0 indicates LHS == OP1.
1289 if (TYPE_OVERFLOW_WRAPS (type
))
1291 wi::add (op1
.lower_bound (), op2
.lower_bound (), sign
, &ovf1
);
1292 wi::add (op1
.upper_bound (), op2
.upper_bound (), sign
, &ovf2
);
1295 ovf1
= ovf2
= wi::OVF_NONE
;
1297 // Never wrapping additions.
1300 // Positive op2 means lhs > op1.
1301 if (wi::gt_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1303 if (wi::ge_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1306 // Negative op2 means lhs < op1.
1307 if (wi::lt_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1309 if (wi::le_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1312 // Always wrapping additions.
1313 else if (ovf1
&& ovf1
== ovf2
)
1315 // Positive op2 means lhs < op1.
1316 if (wi::gt_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1318 if (wi::ge_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1321 // Negative op2 means lhs > op1.
1322 if (wi::lt_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1324 if (wi::le_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1328 // If op2 does not contain 0, then LHS and OP1 can never be equal.
1329 if (!range_includes_zero_p (&op2
))
1332 return VREL_VARYING
;
1335 // PLUS is symmetrical, so we can simply call lhs_op1_relation with reversed
1339 operator_plus::lhs_op2_relation (const irange
&lhs
, const irange
&op1
,
1340 const irange
&op2
, relation_kind rel
) const
1342 return lhs_op1_relation (lhs
, op2
, op1
, rel
);
1346 operator_plus::wi_fold (irange
&r
, tree type
,
1347 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1348 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1350 wi::overflow_type ov_lb
, ov_ub
;
1351 signop s
= TYPE_SIGN (type
);
1352 wide_int new_lb
= wi::add (lh_lb
, rh_lb
, s
, &ov_lb
);
1353 wide_int new_ub
= wi::add (lh_ub
, rh_ub
, s
, &ov_ub
);
1354 value_range_with_overflow (r
, type
, new_lb
, new_ub
, ov_lb
, ov_ub
);
1357 // Given addition or subtraction, determine the possible NORMAL ranges and
1358 // OVERFLOW ranges given an OFFSET range. ADD_P is true for addition.
1359 // Return the relation that exists between the LHS and OP1 in order for the
1360 // NORMAL range to apply.
1361 // a return value of VREL_VARYING means no ranges were applicable.
1363 static relation_kind
1364 plus_minus_ranges (irange
&r_ov
, irange
&r_normal
, const irange
&offset
,
1367 relation_kind kind
= VREL_VARYING
;
1368 // For now, only deal with constant adds. This could be extended to ranges
1369 // when someone is so motivated.
1370 if (!offset
.singleton_p () || offset
.zero_p ())
1373 // Always work with a positive offset. ie a+ -2 -> a-2 and a- -2 > a+2
1374 wide_int off
= offset
.lower_bound ();
1375 if (wi::neg_p (off
, SIGNED
))
1378 off
= wi::neg (off
);
1381 wi::overflow_type ov
;
1382 tree type
= offset
.type ();
1383 unsigned prec
= TYPE_PRECISION (type
);
1386 // calculate the normal range and relation for the operation.
1390 lb
= wi::zero (prec
);
1391 ub
= wi::sub (wi::to_wide (vrp_val_max (type
)), off
, UNSIGNED
, &ov
);
1398 ub
= wi::to_wide (vrp_val_max (type
));
1401 int_range
<2> normal_range (type
, lb
, ub
);
1402 int_range
<2> ov_range (type
, lb
, ub
, VR_ANTI_RANGE
);
1405 r_normal
= normal_range
;
1409 // Once op1 has been calculated by operator_plus or operator_minus, check
1410 // to see if the relation passed causes any part of the calculation to
1411 // be not possible. ie
1412 // a_2 = b_3 + 1 with a_2 < b_3 can refine the range of b_3 to [INF, INF]
1413 // and that further refines a_2 to [0, 0].
1414 // R is the value of op1, OP2 is the offset being added/subtracted, REL is the
1415 // relation between LHS relatoin OP1 and ADD_P is true for PLUS, false for
1416 // MINUS. IF any adjustment can be made, R will reflect it.
1419 adjust_op1_for_overflow (irange
&r
, const irange
&op2
, relation_kind rel
,
1422 if (r
.undefined_p ())
1424 tree type
= r
.type ();
1425 // Check for unsigned overflow and calculate the overflow part.
1426 signop s
= TYPE_SIGN (type
);
1427 if (!TYPE_OVERFLOW_WRAPS (type
) || s
== SIGNED
)
1430 // Only work with <, <=, >, >= relations.
1431 if (!relation_lt_le_gt_ge_p (rel
))
1434 // Get the ranges for this offset.
1435 int_range_max normal
, overflow
;
1436 relation_kind k
= plus_minus_ranges (overflow
, normal
, op2
, add_p
);
1438 // VREL_VARYING means there are no adjustments.
1439 if (k
== VREL_VARYING
)
1442 // If the relations match use the normal range, otherwise use overflow range.
1443 if (relation_intersect (k
, rel
) == k
)
1444 r
.intersect (normal
);
1446 r
.intersect (overflow
);
1451 operator_plus::op1_range (irange
&r
, tree type
,
1454 relation_trio trio
) const
1456 if (lhs
.undefined_p ())
1458 // Start with the default operation.
1459 range_op_handler
minus (MINUS_EXPR
, type
);
1462 bool res
= minus
.fold_range (r
, type
, lhs
, op2
);
1463 relation_kind rel
= trio
.lhs_op2 ();
1464 // Check for a relation refinement.
1466 adjust_op1_for_overflow (r
, op2
, rel
, true /* PLUS_EXPR */);
1471 operator_plus::op2_range (irange
&r
, tree type
,
1474 relation_trio rel
) const
1476 return op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
// Range operator for MINUS_EXPR.  Declarations only; the method bodies
// follow below.  NOTE(review): extraction mangling split lines and dropped
// braces/visibility lines here — text kept byte-identical.
1480 class operator_minus
: public range_operator
// Pull in the base-class overloads that are not overridden here.
1482 using range_operator::fold_range
;
1483 using range_operator::op1_range
;
1484 using range_operator::op2_range
;
// Solve for op1 given the LHS and op2.
1486 virtual bool op1_range (irange
&r
, tree type
,
1489 relation_trio
) const;
// Solve for op2 given the LHS and op1.
1490 virtual bool op2_range (irange
&r
, tree type
,
1493 relation_trio
) const;
// Forward fold of a subtraction over wide-int endpoint pairs.
1494 virtual void wi_fold (irange
&r
, tree type
,
1495 const wide_int
&lh_lb
,
1496 const wide_int
&lh_ub
,
1497 const wide_int
&rh_lb
,
1498 const wide_int
&rh_ub
) const;
// Derive a LHS/op1 relation from the op1/op2 relation REL.
1499 virtual relation_kind
lhs_op1_relation (const irange
&lhs
,
1502 relation_kind rel
) const;
// Apply the effect of an op1/op2 relation REL to the LHS range.
1503 virtual bool op1_op2_relation_effect (irange
&lhs_range
,
1505 const irange
&op1_range
,
1506 const irange
&op2_range
,
1507 relation_kind rel
) const;
1511 operator_minus::wi_fold (irange
&r
, tree type
,
1512 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1513 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1515 wi::overflow_type ov_lb
, ov_ub
;
1516 signop s
= TYPE_SIGN (type
);
1517 wide_int new_lb
= wi::sub (lh_lb
, rh_ub
, s
, &ov_lb
);
1518 wide_int new_ub
= wi::sub (lh_ub
, rh_lb
, s
, &ov_ub
);
1519 value_range_with_overflow (r
, type
, new_lb
, new_ub
, ov_lb
, ov_ub
);
1523 // Return the relation between LHS and OP1 based on the relation between
1527 operator_minus::lhs_op1_relation (const irange
&, const irange
&op1
,
1528 const irange
&, relation_kind rel
) const
1530 if (!op1
.undefined_p () && TYPE_SIGN (op1
.type ()) == UNSIGNED
)
1539 return VREL_VARYING
;
1542 // Check to see if the relation REL between OP1 and OP2 has any effect on the
1543 // LHS of the expression. If so, apply it to LHS_RANGE. This is a helper
1544 // function for both MINUS_EXPR and POINTER_DIFF_EXPR.
1547 minus_op1_op2_relation_effect (irange
&lhs_range
, tree type
,
1548 const irange
&op1_range ATTRIBUTE_UNUSED
,
1549 const irange
&op2_range ATTRIBUTE_UNUSED
,
1552 if (rel
== VREL_VARYING
)
1555 int_range
<2> rel_range
;
1556 unsigned prec
= TYPE_PRECISION (type
);
1557 signop sgn
= TYPE_SIGN (type
);
1559 // == and != produce [0,0] and ~[0,0] regardless of wrapping.
1561 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
));
1562 else if (rel
== VREL_NE
)
1563 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
),
1565 else if (TYPE_OVERFLOW_WRAPS (type
))
1569 // For wrapping signed values and unsigned, if op1 > op2 or
1570 // op1 < op2, then op1 - op2 can be restricted to ~[0, 0].
1573 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
),
1584 // op1 > op2, op1 - op2 can be restricted to [1, +INF]
1586 rel_range
= int_range
<2> (type
, wi::one (prec
),
1587 wi::max_value (prec
, sgn
));
1589 // op1 >= op2, op1 - op2 can be restricted to [0, +INF]
1591 rel_range
= int_range
<2> (type
, wi::zero (prec
),
1592 wi::max_value (prec
, sgn
));
1594 // op1 < op2, op1 - op2 can be restricted to [-INF, -1]
1596 rel_range
= int_range
<2> (type
, wi::min_value (prec
, sgn
),
1597 wi::minus_one (prec
));
1599 // op1 <= op2, op1 - op2 can be restricted to [-INF, 0]
1601 rel_range
= int_range
<2> (type
, wi::min_value (prec
, sgn
),
1608 lhs_range
.intersect (rel_range
);
1613 operator_minus::op1_op2_relation_effect (irange
&lhs_range
, tree type
,
1614 const irange
&op1_range
,
1615 const irange
&op2_range
,
1616 relation_kind rel
) const
1618 return minus_op1_op2_relation_effect (lhs_range
, type
, op1_range
, op2_range
,
1623 operator_minus::op1_range (irange
&r
, tree type
,
1626 relation_trio trio
) const
1628 if (lhs
.undefined_p ())
1630 // Start with the default operation.
1631 range_op_handler
minus (PLUS_EXPR
, type
);
1634 bool res
= minus
.fold_range (r
, type
, lhs
, op2
);
1635 relation_kind rel
= trio
.lhs_op2 ();
1637 adjust_op1_for_overflow (r
, op2
, rel
, false /* PLUS_EXPR */);
1643 operator_minus::op2_range (irange
&r
, tree type
,
1646 relation_trio
) const
1648 if (lhs
.undefined_p ())
1650 return fold_range (r
, type
, op1
, lhs
);
// Range operator for POINTER_DIFF_EXPR.  Only overrides the op1/op2
// relation hook, which shares its logic with MINUS_EXPR.
// NOTE(review): braces/visibility lines dropped by extraction — text
// kept byte-identical.
1654 class operator_pointer_diff
: public range_operator
1656 virtual bool op1_op2_relation_effect (irange
&lhs_range
,
1658 const irange
&op1_range
,
1659 const irange
&op2_range
,
1660 relation_kind rel
) const;
1664 operator_pointer_diff::op1_op2_relation_effect (irange
&lhs_range
, tree type
,
1665 const irange
&op1_range
,
1666 const irange
&op2_range
,
1667 relation_kind rel
) const
1669 return minus_op1_op2_relation_effect (lhs_range
, type
, op1_range
, op2_range
,
// Range operator for MIN_EXPR.  Only the wide-int fold is overridden.
// NOTE(review): braces/visibility lines dropped by extraction — text
// kept byte-identical.
1674 class operator_min
: public range_operator
1677 virtual void wi_fold (irange
&r
, tree type
,
1678 const wide_int
&lh_lb
,
1679 const wide_int
&lh_ub
,
1680 const wide_int
&rh_lb
,
1681 const wide_int
&rh_ub
) const;
1685 operator_min::wi_fold (irange
&r
, tree type
,
1686 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1687 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1689 signop s
= TYPE_SIGN (type
);
1690 wide_int new_lb
= wi::min (lh_lb
, rh_lb
, s
);
1691 wide_int new_ub
= wi::min (lh_ub
, rh_ub
, s
);
1692 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
// Range operator for MAX_EXPR.  Only the wide-int fold is overridden.
// NOTE(review): braces/visibility lines dropped by extraction — text
// kept byte-identical.
1696 class operator_max
: public range_operator
1699 virtual void wi_fold (irange
&r
, tree type
,
1700 const wide_int
&lh_lb
,
1701 const wide_int
&lh_ub
,
1702 const wide_int
&rh_lb
,
1703 const wide_int
&rh_ub
) const;
1707 operator_max::wi_fold (irange
&r
, tree type
,
1708 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1709 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1711 signop s
= TYPE_SIGN (type
);
1712 wide_int new_lb
= wi::max (lh_lb
, rh_lb
, s
);
1713 wide_int new_ub
= wi::max (lh_ub
, rh_ub
, s
);
1714 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
// Base class for operators (multiply, divide, shifts) whose range is
// computed as the cross product of the endpoint combinations.
// NOTE(review): braces/visibility lines dropped by extraction — text
// kept byte-identical.
1718 class cross_product_operator
: public range_operator
1721 // Perform an operation between two wide-ints and place the result
1722 // in R. Return true if the operation overflowed.
1723 virtual bool wi_op_overflows (wide_int
&r
,
1726 const wide_int
&) const = 0;
1728 // Calculate the cross product of two sets of sub-ranges and return it.
1729 void wi_cross_product (irange
&r
, tree type
,
1730 const wide_int
&lh_lb
,
1731 const wide_int
&lh_ub
,
1732 const wide_int
&rh_lb
,
1733 const wide_int
&rh_ub
) const;
1736 // Calculate the cross product of two sets of ranges and return it.
1738 // Multiplications, divisions and shifts are a bit tricky to handle,
1739 // depending on the mix of signs we have in the two ranges, we need to
1740 // operate on different values to get the minimum and maximum values
1741 // for the new range. One approach is to figure out all the
1742 // variations of range combinations and do the operations.
1744 // However, this involves several calls to compare_values and it is
1745 // pretty convoluted. It's simpler to do the 4 operations (MIN0 OP
1746 // MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP MAX1) and then
1747 // figure the smallest and largest values to form the new range.
1750 cross_product_operator::wi_cross_product (irange
&r
, tree type
,
1751 const wide_int
&lh_lb
,
1752 const wide_int
&lh_ub
,
1753 const wide_int
&rh_lb
,
1754 const wide_int
&rh_ub
) const
1756 wide_int cp1
, cp2
, cp3
, cp4
;
1757 // Default to varying.
1758 r
.set_varying (type
);
1760 // Compute the 4 cross operations, bailing if we get an overflow we
1762 if (wi_op_overflows (cp1
, type
, lh_lb
, rh_lb
))
1764 if (wi::eq_p (lh_lb
, lh_ub
))
1766 else if (wi_op_overflows (cp3
, type
, lh_ub
, rh_lb
))
1768 if (wi::eq_p (rh_lb
, rh_ub
))
1770 else if (wi_op_overflows (cp2
, type
, lh_lb
, rh_ub
))
1772 if (wi::eq_p (lh_lb
, lh_ub
))
1774 else if (wi_op_overflows (cp4
, type
, lh_ub
, rh_ub
))
1778 signop sign
= TYPE_SIGN (type
);
1779 if (wi::gt_p (cp1
, cp2
, sign
))
1780 std::swap (cp1
, cp2
);
1781 if (wi::gt_p (cp3
, cp4
, sign
))
1782 std::swap (cp3
, cp4
);
1784 // Choose min and max from the ordered pairs.
1785 wide_int res_lb
= wi::min (cp1
, cp3
, sign
);
1786 wide_int res_ub
= wi::max (cp2
, cp4
, sign
);
1787 value_range_with_overflow (r
, type
, res_lb
, res_ub
);
// Range operator for MULT_EXPR; uses the cross-product machinery plus a
// wrapping-overflow fallback in wi_fold.
// NOTE(review): braces/visibility lines dropped by extraction — text
// kept byte-identical.
1791 class operator_mult
: public cross_product_operator
1793 using range_operator::fold_range
;
1794 using range_operator::op1_range
;
1795 using range_operator::op2_range
;
// Fold, then refine the result's known bitmask.
1797 virtual bool fold_range (irange
&r
, tree type
,
1798 const irange
&lh
, const irange
&rh
,
1799 relation_trio
= TRIO_VARYING
) const final override
;
1800 virtual void wi_fold (irange
&r
, tree type
,
1801 const wide_int
&lh_lb
,
1802 const wide_int
&lh_ub
,
1803 const wide_int
&rh_lb
,
1804 const wide_int
&rh_ub
) const final override
;
// Single multiply; reports overflow unless undefined-overflow saturates.
1805 virtual bool wi_op_overflows (wide_int
&res
, tree type
,
1806 const wide_int
&w0
, const wide_int
&w1
)
1807 const final override
;
1808 virtual bool op1_range (irange
&r
, tree type
,
1811 relation_trio
) const final override
;
1812 virtual bool op2_range (irange
&r
, tree type
,
1815 relation_trio
) const final override
;
1819 operator_mult::fold_range (irange
&r
, tree type
,
1820 const irange
&lh
, const irange
&rh
,
1821 relation_trio trio
) const
1823 if (!cross_product_operator::fold_range (r
, type
, lh
, rh
, trio
))
1826 update_known_bitmask (r
, MULT_EXPR
, lh
, rh
);
1831 operator_mult::op1_range (irange
&r
, tree type
,
1832 const irange
&lhs
, const irange
&op2
,
1833 relation_trio
) const
1836 if (lhs
.undefined_p ())
1839 // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
1840 // For example: For 0 = OP1 * 2, OP1 could be 0, or MAXINT, whereas
1841 // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit)
1842 if (TYPE_OVERFLOW_WRAPS (type
))
1845 if (op2
.singleton_p (&offset
) && !integer_zerop (offset
))
1846 return range_op_handler (TRUNC_DIV_EXPR
, type
).fold_range (r
, type
,
1852 operator_mult::op2_range (irange
&r
, tree type
,
1853 const irange
&lhs
, const irange
&op1
,
1854 relation_trio rel
) const
1856 return operator_mult::op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
1860 operator_mult::wi_op_overflows (wide_int
&res
, tree type
,
1861 const wide_int
&w0
, const wide_int
&w1
) const
1863 wi::overflow_type overflow
= wi::OVF_NONE
;
1864 signop sign
= TYPE_SIGN (type
);
1865 res
= wi::mul (w0
, w1
, sign
, &overflow
);
1866 if (overflow
&& TYPE_OVERFLOW_UNDEFINED (type
))
1868 // For multiplication, the sign of the overflow is given
1869 // by the comparison of the signs of the operands.
1870 if (sign
== UNSIGNED
|| w0
.sign_mask () == w1
.sign_mask ())
1871 res
= wi::max_value (w0
.get_precision (), sign
);
1873 res
= wi::min_value (w0
.get_precision (), sign
);
1880 operator_mult::wi_fold (irange
&r
, tree type
,
1881 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1882 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1884 if (TYPE_OVERFLOW_UNDEFINED (type
))
1886 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
1890 // Multiply the ranges when overflow wraps. This is basically fancy
1891 // code so we don't drop to varying with an unsigned
1894 // This test requires 2*prec bits if both operands are signed and
1895 // 2*prec + 2 bits if either is not. Therefore, extend the values
1896 // using the sign of the result to PREC2. From here on out,
1897 // everthing is just signed math no matter what the input types
1900 signop sign
= TYPE_SIGN (type
);
1901 unsigned prec
= TYPE_PRECISION (type
);
1902 widest2_int min0
= widest2_int::from (lh_lb
, sign
);
1903 widest2_int max0
= widest2_int::from (lh_ub
, sign
);
1904 widest2_int min1
= widest2_int::from (rh_lb
, sign
);
1905 widest2_int max1
= widest2_int::from (rh_ub
, sign
);
1906 widest2_int sizem1
= wi::mask
<widest2_int
> (prec
, false);
1907 widest2_int size
= sizem1
+ 1;
1909 // Canonicalize the intervals.
1910 if (sign
== UNSIGNED
)
1912 if (wi::ltu_p (size
, min0
+ max0
))
1917 if (wi::ltu_p (size
, min1
+ max1
))
1924 // Sort the 4 products so that min is in prod0 and max is in
1926 widest2_int prod0
= min0
* min1
;
1927 widest2_int prod1
= min0
* max1
;
1928 widest2_int prod2
= max0
* min1
;
1929 widest2_int prod3
= max0
* max1
;
1931 // min0min1 > max0max1
1933 std::swap (prod0
, prod3
);
1935 // min0max1 > max0min1
1937 std::swap (prod1
, prod2
);
1940 std::swap (prod0
, prod1
);
1943 std::swap (prod2
, prod3
);
1946 prod2
= prod3
- prod0
;
1947 if (wi::geu_p (prod2
, sizem1
))
1949 // Multiplying by X, where X is a power of 2 is [0,0][X,+INF].
1950 if (TYPE_UNSIGNED (type
) && rh_lb
== rh_ub
1951 && wi::exact_log2 (rh_lb
) != -1 && prec
> 1)
1953 r
.set (type
, rh_lb
, wi::max_value (prec
, sign
));
1955 zero
.set_zero (type
);
1959 // The range covers all values.
1960 r
.set_varying (type
);
1964 wide_int new_lb
= wide_int::from (prod0
, prec
, sign
);
1965 wide_int new_ub
= wide_int::from (prod3
, prec
, sign
);
1966 create_possibly_reversed_range (r
, type
, new_lb
, new_ub
);
// Range operator shared by all four division tree codes; the rounding
// behavior is selected by the CODE member set in the constructor.
// NOTE(review): braces/visibility lines dropped by extraction — text
// kept byte-identical.
1971 class operator_div
: public cross_product_operator
// Store the division variant (TRUNC/FLOOR/ROUND/CEIL_DIV_EXPR).
1974 operator_div (enum tree_code c
) { code
= c
; }
1975 virtual void wi_fold (irange
&r
, tree type
,
1976 const wide_int
&lh_lb
,
1977 const wide_int
&lh_ub
,
1978 const wide_int
&rh_lb
,
1979 const wide_int
&rh_ub
) const final override
;
1980 virtual bool wi_op_overflows (wide_int
&res
, tree type
,
1981 const wide_int
&, const wide_int
&)
1982 const final override
;
1983 virtual bool fold_range (irange
&r
, tree type
,
1984 const irange
&lh
, const irange
&rh
,
1985 relation_trio trio
) const final override
;
// Which division tree code this instance implements.
1987 enum tree_code code
;
1991 operator_div::fold_range (irange
&r
, tree type
,
1992 const irange
&lh
, const irange
&rh
,
1993 relation_trio trio
) const
1995 if (!cross_product_operator::fold_range (r
, type
, lh
, rh
, trio
))
1998 update_known_bitmask (r
, code
, lh
, rh
);
2003 operator_div::wi_op_overflows (wide_int
&res
, tree type
,
2004 const wide_int
&w0
, const wide_int
&w1
) const
2009 wi::overflow_type overflow
= wi::OVF_NONE
;
2010 signop sign
= TYPE_SIGN (type
);
2014 case EXACT_DIV_EXPR
:
2015 // EXACT_DIV_EXPR is implemented as TRUNC_DIV_EXPR in
2016 // operator_exact_divide. No need to handle it here.
2019 case TRUNC_DIV_EXPR
:
2020 res
= wi::div_trunc (w0
, w1
, sign
, &overflow
);
2022 case FLOOR_DIV_EXPR
:
2023 res
= wi::div_floor (w0
, w1
, sign
, &overflow
);
2025 case ROUND_DIV_EXPR
:
2026 res
= wi::div_round (w0
, w1
, sign
, &overflow
);
2029 res
= wi::div_ceil (w0
, w1
, sign
, &overflow
);
2035 if (overflow
&& TYPE_OVERFLOW_UNDEFINED (type
))
2037 // For division, the only case is -INF / -1 = +INF.
2038 res
= wi::max_value (w0
.get_precision (), sign
);
2045 operator_div::wi_fold (irange
&r
, tree type
,
2046 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2047 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2049 const wide_int dividend_min
= lh_lb
;
2050 const wide_int dividend_max
= lh_ub
;
2051 const wide_int divisor_min
= rh_lb
;
2052 const wide_int divisor_max
= rh_ub
;
2053 signop sign
= TYPE_SIGN (type
);
2054 unsigned prec
= TYPE_PRECISION (type
);
2055 wide_int extra_min
, extra_max
;
2057 // If we know we won't divide by zero, just do the division.
2058 if (!wi_includes_zero_p (type
, divisor_min
, divisor_max
))
2060 wi_cross_product (r
, type
, dividend_min
, dividend_max
,
2061 divisor_min
, divisor_max
);
2065 // If we're definitely dividing by zero, there's nothing to do.
2066 if (wi_zero_p (type
, divisor_min
, divisor_max
))
2072 // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
2073 // skip any division by zero.
2075 // First divide by the negative numbers, if any.
2076 if (wi::neg_p (divisor_min
, sign
))
2077 wi_cross_product (r
, type
, dividend_min
, dividend_max
,
2078 divisor_min
, wi::minus_one (prec
));
2082 // Then divide by the non-zero positive numbers, if any.
2083 if (wi::gt_p (divisor_max
, wi::zero (prec
), sign
))
2086 wi_cross_product (tmp
, type
, dividend_min
, dividend_max
,
2087 wi::one (prec
), divisor_max
);
2090 // We shouldn't still have undefined here.
2091 gcc_checking_assert (!r
.undefined_p ());
2094 operator_div
op_trunc_div (TRUNC_DIV_EXPR
);
2095 operator_div
op_floor_div (FLOOR_DIV_EXPR
);
2096 operator_div
op_round_div (ROUND_DIV_EXPR
);
2097 operator_div
op_ceil_div (CEIL_DIV_EXPR
);
// EXACT_DIV_EXPR: a TRUNC_DIV_EXPR that is known to have no remainder,
// which allows op1 to be recovered exactly by multiplication.
// NOTE(review): braces/visibility lines dropped by extraction — text
// kept byte-identical.
2100 class operator_exact_divide
: public operator_div
2102 using range_operator::op1_range
;
2104 operator_exact_divide () : operator_div (TRUNC_DIV_EXPR
) { }
2105 virtual bool op1_range (irange
&r
, tree type
,
2108 relation_trio
) const;
2113 operator_exact_divide::op1_range (irange
&r
, tree type
,
2116 relation_trio
) const
2118 if (lhs
.undefined_p ())
2121 // [2, 4] = op1 / [3,3] since its exact divide, no need to worry about
2122 // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
2123 // We wont bother trying to enumerate all the in between stuff :-P
2124 // TRUE accuraacy is [6,6][9,9][12,12]. This is unlikely to matter most of
2125 // the time however.
2126 // If op2 is a multiple of 2, we would be able to set some non-zero bits.
2127 if (op2
.singleton_p (&offset
)
2128 && !integer_zerop (offset
))
2129 return range_op_handler (MULT_EXPR
, type
).fold_range (r
, type
, lhs
, op2
);
// Range operator for LSHIFT_EXPR; constant shifts are rewritten as
// wrapping multiplies in fold_range.
// NOTE(review): braces/visibility lines dropped by extraction — text
// kept byte-identical.
2134 class operator_lshift
: public cross_product_operator
2136 using range_operator::fold_range
;
2137 using range_operator::op1_range
;
2139 virtual bool op1_range (irange
&r
, tree type
,
2142 relation_trio rel
= TRIO_VARYING
) const;
2143 virtual bool fold_range (irange
&r
, tree type
,
2146 relation_trio rel
= TRIO_VARYING
) const;
2148 virtual void wi_fold (irange
&r
, tree type
,
2149 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2150 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
2151 virtual bool wi_op_overflows (wide_int
&res
,
2154 const wide_int
&) const;
// Range operator for RSHIFT_EXPR.
// NOTE(review): braces/visibility lines dropped by extraction — text
// kept byte-identical.
2157 class operator_rshift
: public cross_product_operator
2159 using range_operator::fold_range
;
2160 using range_operator::op1_range
;
2161 using range_operator::lhs_op1_relation
;
2163 virtual bool fold_range (irange
&r
, tree type
,
2166 relation_trio rel
= TRIO_VARYING
) const;
2167 virtual void wi_fold (irange
&r
, tree type
,
2168 const wide_int
&lh_lb
,
2169 const wide_int
&lh_ub
,
2170 const wide_int
&rh_lb
,
2171 const wide_int
&rh_ub
) const;
2172 virtual bool wi_op_overflows (wide_int
&res
,
2175 const wide_int
&w1
) const;
2176 virtual bool op1_range (irange
&, tree type
,
2179 relation_trio rel
= TRIO_VARYING
) const;
// A nonnegative value shifted right cannot grow: may yield LHS <= op1.
2180 virtual relation_kind
lhs_op1_relation (const irange
&lhs
,
2183 relation_kind rel
) const;
2188 operator_rshift::lhs_op1_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
2191 relation_kind
) const
2193 // If both operands range are >= 0, then the LHS <= op1.
2194 if (!op1
.undefined_p () && !op2
.undefined_p ()
2195 && wi::ge_p (op1
.lower_bound (), 0, TYPE_SIGN (op1
.type ()))
2196 && wi::ge_p (op2
.lower_bound (), 0, TYPE_SIGN (op2
.type ())))
2198 return VREL_VARYING
;
2202 operator_lshift::fold_range (irange
&r
, tree type
,
2205 relation_trio rel
) const
2207 int_range_max shift_range
;
2208 if (!get_shift_range (shift_range
, type
, op2
))
2210 if (op2
.undefined_p ())
2213 r
.set_varying (type
);
2217 // Transform left shifts by constants into multiplies.
2218 if (shift_range
.singleton_p ())
2220 unsigned shift
= shift_range
.lower_bound ().to_uhwi ();
2221 wide_int tmp
= wi::set_bit_in_zero (shift
, TYPE_PRECISION (type
));
2222 int_range
<1> mult (type
, tmp
, tmp
);
2224 // Force wrapping multiplication.
2225 bool saved_flag_wrapv
= flag_wrapv
;
2226 bool saved_flag_wrapv_pointer
= flag_wrapv_pointer
;
2228 flag_wrapv_pointer
= 1;
2229 bool b
= op_mult
.fold_range (r
, type
, op1
, mult
);
2230 flag_wrapv
= saved_flag_wrapv
;
2231 flag_wrapv_pointer
= saved_flag_wrapv_pointer
;
2235 // Otherwise, invoke the generic fold routine.
2236 return range_operator::fold_range (r
, type
, op1
, shift_range
, rel
);
2240 operator_lshift::wi_fold (irange
&r
, tree type
,
2241 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2242 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2244 signop sign
= TYPE_SIGN (type
);
2245 unsigned prec
= TYPE_PRECISION (type
);
2246 int overflow_pos
= sign
== SIGNED
? prec
- 1 : prec
;
2247 int bound_shift
= overflow_pos
- rh_ub
.to_shwi ();
2248 // If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
2249 // overflow. However, for that to happen, rh.max needs to be zero,
2250 // which means rh is a singleton range of zero, which means we simply return
2251 // [lh_lb, lh_ub] as the range.
2252 if (wi::eq_p (rh_ub
, rh_lb
) && wi::eq_p (rh_ub
, 0))
2254 r
= int_range
<2> (type
, lh_lb
, lh_ub
);
2258 wide_int bound
= wi::set_bit_in_zero (bound_shift
, prec
);
2259 wide_int complement
= ~(bound
- 1);
2260 wide_int low_bound
, high_bound
;
2261 bool in_bounds
= false;
2263 if (sign
== UNSIGNED
)
2266 high_bound
= complement
;
2267 if (wi::ltu_p (lh_ub
, low_bound
))
2269 // [5, 6] << [1, 2] == [10, 24].
2270 // We're shifting out only zeroes, the value increases
2274 else if (wi::ltu_p (high_bound
, lh_lb
))
2276 // [0xffffff00, 0xffffffff] << [1, 2]
2277 // == [0xfffffc00, 0xfffffffe].
2278 // We're shifting out only ones, the value decreases
2285 // [-1, 1] << [1, 2] == [-4, 4]
2286 low_bound
= complement
;
2288 if (wi::lts_p (lh_ub
, high_bound
)
2289 && wi::lts_p (low_bound
, lh_lb
))
2291 // For non-negative numbers, we're shifting out only zeroes,
2292 // the value increases monotonically. For negative numbers,
2293 // we're shifting out only ones, the value decreases
2300 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
2302 r
.set_varying (type
);
2306 operator_lshift::wi_op_overflows (wide_int
&res
, tree type
,
2307 const wide_int
&w0
, const wide_int
&w1
) const
2309 signop sign
= TYPE_SIGN (type
);
2312 // It's unclear from the C standard whether shifts can overflow.
2313 // The following code ignores overflow; perhaps a C standard
2314 // interpretation ruling is needed.
2315 res
= wi::rshift (w0
, -w1
, sign
);
2318 res
= wi::lshift (w0
, w1
);
2323 operator_lshift::op1_range (irange
&r
,
2327 relation_trio
) const
2329 if (lhs
.undefined_p ())
2333 if (!lhs
.contains_p (build_zero_cst (type
)))
2334 r
.set_nonzero (type
);
2336 r
.set_varying (type
);
2338 if (op2
.singleton_p (&shift_amount
))
2340 wide_int shift
= wi::to_wide (shift_amount
);
2341 if (wi::lt_p (shift
, 0, SIGNED
))
2343 if (wi::ge_p (shift
, wi::uhwi (TYPE_PRECISION (type
),
2344 TYPE_PRECISION (op2
.type ())),
2353 // Work completely in unsigned mode to start.
2355 int_range_max tmp_range
;
2356 if (TYPE_SIGN (type
) == SIGNED
)
2358 int_range_max tmp
= lhs
;
2359 utype
= unsigned_type_for (type
);
2360 range_cast (tmp
, utype
);
2361 op_rshift
.fold_range (tmp_range
, utype
, tmp
, op2
);
2364 op_rshift
.fold_range (tmp_range
, utype
, lhs
, op2
);
2366 // Start with ranges which can produce the LHS by right shifting the
2367 // result by the shift amount.
2368 // ie [0x08, 0xF0] = op1 << 2 will start with
2369 // [00001000, 11110000] = op1 << 2
2370 // [0x02, 0x4C] aka [00000010, 00111100]
2372 // Then create a range from the LB with the least significant upper bit
2373 // set, to the upper bound with all the bits set.
2374 // This would be [0x42, 0xFC] aka [01000010, 11111100].
2376 // Ideally we do this for each subrange, but just lump them all for now.
2377 unsigned low_bits
= TYPE_PRECISION (utype
)
2378 - TREE_INT_CST_LOW (shift_amount
);
2379 wide_int up_mask
= wi::mask (low_bits
, true, TYPE_PRECISION (utype
));
2380 wide_int new_ub
= wi::bit_or (up_mask
, tmp_range
.upper_bound ());
2381 wide_int new_lb
= wi::set_bit (tmp_range
.lower_bound (), low_bits
);
2382 int_range
<2> fill_range (utype
, new_lb
, new_ub
);
2383 tmp_range
.union_ (fill_range
);
2386 range_cast (tmp_range
, type
);
2388 r
.intersect (tmp_range
);
2392 return !r
.varying_p ();
2396 operator_rshift::op1_range (irange
&r
,
2400 relation_trio
) const
2403 if (lhs
.undefined_p ())
2405 if (op2
.singleton_p (&shift
))
2407 // Ignore nonsensical shifts.
2408 unsigned prec
= TYPE_PRECISION (type
);
2409 if (wi::ge_p (wi::to_wide (shift
),
2410 wi::uhwi (prec
, TYPE_PRECISION (TREE_TYPE (shift
))),
2413 if (wi::to_wide (shift
) == 0)
2419 // Folding the original operation may discard some impossible
2420 // ranges from the LHS.
2421 int_range_max lhs_refined
;
2422 op_rshift
.fold_range (lhs_refined
, type
, int_range
<1> (type
), op2
);
2423 lhs_refined
.intersect (lhs
);
2424 if (lhs_refined
.undefined_p ())
2429 int_range_max
shift_range (shift
, shift
);
2430 int_range_max lb
, ub
;
2431 op_lshift
.fold_range (lb
, type
, lhs_refined
, shift_range
);
2433 // 0000 0111 = OP1 >> 3
2435 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
2436 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
2437 // right hand side (0x07).
2438 tree mask
= fold_build1 (BIT_NOT_EXPR
, type
,
2439 fold_build2 (LSHIFT_EXPR
, type
,
2440 build_minus_one_cst (type
),
2442 int_range_max
mask_range (build_zero_cst (type
), mask
);
2443 op_plus
.fold_range (ub
, type
, lb
, mask_range
);
2446 if (!lhs_refined
.contains_p (build_zero_cst (type
)))
2448 mask_range
.invert ();
2449 r
.intersect (mask_range
);
2457 operator_rshift::wi_op_overflows (wide_int
&res
,
2460 const wide_int
&w1
) const
2462 signop sign
= TYPE_SIGN (type
);
2464 res
= wi::lshift (w0
, -w1
);
2467 // It's unclear from the C standard whether shifts can overflow.
2468 // The following code ignores overflow; perhaps a C standard
2469 // interpretation ruling is needed.
2470 res
= wi::rshift (w0
, w1
, sign
);
2476 operator_rshift::fold_range (irange
&r
, tree type
,
2479 relation_trio rel
) const
2481 int_range_max shift
;
2482 if (!get_shift_range (shift
, type
, op2
))
2484 if (op2
.undefined_p ())
2487 r
.set_varying (type
);
2491 return range_operator::fold_range (r
, type
, op1
, shift
, rel
);
2495 operator_rshift::wi_fold (irange
&r
, tree type
,
2496 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2497 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2499 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
// Range operator for type conversions (CONVERT_EXPR / NOP_EXPR).
// NOTE(review): braces/visibility lines dropped by extraction — text
// kept byte-identical.
2503 class operator_cast
: public range_operator
2505 using range_operator::fold_range
;
2506 using range_operator::op1_range
;
2508 virtual bool fold_range (irange
&r
, tree type
,
2511 relation_trio rel
= TRIO_VARYING
) const;
2512 virtual bool op1_range (irange
&r
, tree type
,
2515 relation_trio rel
= TRIO_VARYING
) const;
// Casts may produce a partial equivalence between LHS and op1.
2516 virtual relation_kind
lhs_op1_relation (const irange
&lhs
,
2519 relation_kind
) const;
// Helpers: truncation test, domain check, and per-subrange fold.
2521 bool truncating_cast_p (const irange
&inner
, const irange
&outer
) const;
2522 bool inside_domain_p (const wide_int
&min
, const wide_int
&max
,
2523 const irange
&outer
) const;
2524 void fold_pair (irange
&r
, unsigned index
, const irange
&inner
,
2525 const irange
&outer
) const;
2528 // Add a partial equivalence between the LHS and op1 for casts.
2531 operator_cast::lhs_op1_relation (const irange
&lhs
,
2533 const irange
&op2 ATTRIBUTE_UNUSED
,
2534 relation_kind
) const
2536 if (lhs
.undefined_p () || op1
.undefined_p ())
2537 return VREL_VARYING
;
2538 unsigned lhs_prec
= TYPE_PRECISION (lhs
.type ());
2539 unsigned op1_prec
= TYPE_PRECISION (op1
.type ());
2540 // If the result gets sign extended into a larger type check first if this
2541 // qualifies as a partial equivalence.
2542 if (TYPE_SIGN (op1
.type ()) == SIGNED
&& lhs_prec
> op1_prec
)
2544 // If the result is sign extended, and the LHS is larger than op1,
2545 // check if op1's range can be negative as the sign extention will
2546 // cause the upper bits to be 1 instead of 0, invalidating the PE.
2547 int_range
<3> negs
= range_negatives (op1
.type ());
2548 negs
.intersect (op1
);
2549 if (!negs
.undefined_p ())
2550 return VREL_VARYING
;
2553 unsigned prec
= MIN (lhs_prec
, op1_prec
);
2554 return bits_to_pe (prec
);
2557 // Return TRUE if casting from INNER to OUTER is a truncating cast.
2560 operator_cast::truncating_cast_p (const irange
&inner
,
2561 const irange
&outer
) const
2563 return TYPE_PRECISION (outer
.type ()) < TYPE_PRECISION (inner
.type ());
2566 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
2569 operator_cast::inside_domain_p (const wide_int
&min
,
2570 const wide_int
&max
,
2571 const irange
&range
) const
2573 wide_int domain_min
= wi::to_wide (vrp_val_min (range
.type ()));
2574 wide_int domain_max
= wi::to_wide (vrp_val_max (range
.type ()));
2575 signop domain_sign
= TYPE_SIGN (range
.type ());
2576 return (wi::le_p (min
, domain_max
, domain_sign
)
2577 && wi::le_p (max
, domain_max
, domain_sign
)
2578 && wi::ge_p (min
, domain_min
, domain_sign
)
2579 && wi::ge_p (max
, domain_min
, domain_sign
));
2583 // Helper for fold_range which work on a pair at a time.
2586 operator_cast::fold_pair (irange
&r
, unsigned index
,
2587 const irange
&inner
,
2588 const irange
&outer
) const
2590 tree inner_type
= inner
.type ();
2591 tree outer_type
= outer
.type ();
2592 signop inner_sign
= TYPE_SIGN (inner_type
);
2593 unsigned outer_prec
= TYPE_PRECISION (outer_type
);
2595 // check to see if casting from INNER to OUTER is a conversion that
2596 // fits in the resulting OUTER type.
2597 wide_int inner_lb
= inner
.lower_bound (index
);
2598 wide_int inner_ub
= inner
.upper_bound (index
);
2599 if (truncating_cast_p (inner
, outer
))
2601 // We may be able to accomodate a truncating cast if the
2602 // resulting range can be represented in the target type...
2603 if (wi::rshift (wi::sub (inner_ub
, inner_lb
),
2604 wi::uhwi (outer_prec
, TYPE_PRECISION (inner
.type ())),
2607 r
.set_varying (outer_type
);
2611 // ...but we must still verify that the final range fits in the
2612 // domain. This catches -fstrict-enum restrictions where the domain
2613 // range is smaller than what fits in the underlying type.
2614 wide_int min
= wide_int::from (inner_lb
, outer_prec
, inner_sign
);
2615 wide_int max
= wide_int::from (inner_ub
, outer_prec
, inner_sign
);
2616 if (inside_domain_p (min
, max
, outer
))
2617 create_possibly_reversed_range (r
, outer_type
, min
, max
);
2619 r
.set_varying (outer_type
);
2624 operator_cast::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
2625 const irange
&inner
,
2626 const irange
&outer
,
2627 relation_trio
) const
2629 if (empty_range_varying (r
, type
, inner
, outer
))
2632 gcc_checking_assert (outer
.varying_p ());
2633 gcc_checking_assert (inner
.num_pairs () > 0);
2635 // Avoid a temporary by folding the first pair directly into the result.
2636 fold_pair (r
, 0, inner
, outer
);
2638 // Then process any additonal pairs by unioning with their results.
2639 for (unsigned x
= 1; x
< inner
.num_pairs (); ++x
)
2642 fold_pair (tmp
, x
, inner
, outer
);
2648 // Update the nonzero mask. Truncating casts are problematic unless
2649 // the conversion fits in the resulting outer type.
2650 wide_int nz
= inner
.get_nonzero_bits ();
2651 if (truncating_cast_p (inner
, outer
)
2652 && wi::rshift (nz
, wi::uhwi (TYPE_PRECISION (outer
.type ()),
2653 TYPE_PRECISION (inner
.type ())),
2654 TYPE_SIGN (inner
.type ())) != 0)
2656 nz
= wide_int::from (nz
, TYPE_PRECISION (type
), TYPE_SIGN (inner
.type ()));
2657 r
.set_nonzero_bits (nz
);
2663 operator_cast::op1_range (irange
&r
, tree type
,
2666 relation_trio
) const
2668 if (lhs
.undefined_p ())
2670 tree lhs_type
= lhs
.type ();
2671 gcc_checking_assert (types_compatible_p (op2
.type(), type
));
2673 // If we are calculating a pointer, shortcut to what we really care about.
2674 if (POINTER_TYPE_P (type
))
2676 // Conversion from other pointers or a constant (including 0/NULL)
2677 // are straightforward.
2678 if (POINTER_TYPE_P (lhs
.type ())
2679 || (lhs
.singleton_p ()
2680 && TYPE_PRECISION (lhs
.type ()) >= TYPE_PRECISION (type
)))
2683 range_cast (r
, type
);
2687 // If the LHS is not a pointer nor a singleton, then it is
2688 // either VARYING or non-zero.
2689 if (!lhs
.contains_p (build_zero_cst (lhs
.type ())))
2690 r
.set_nonzero (type
);
2692 r
.set_varying (type
);
2698 if (truncating_cast_p (op2
, lhs
))
2700 if (lhs
.varying_p ())
2701 r
.set_varying (type
);
2704 // We want to insert the LHS as an unsigned value since it
2705 // would not trigger the signed bit of the larger type.
2706 int_range_max converted_lhs
= lhs
;
2707 range_cast (converted_lhs
, unsigned_type_for (lhs_type
));
2708 range_cast (converted_lhs
, type
);
2709 // Start by building the positive signed outer range for the type.
2710 wide_int lim
= wi::set_bit_in_zero (TYPE_PRECISION (lhs_type
),
2711 TYPE_PRECISION (type
));
2712 r
= int_range
<1> (type
, lim
, wi::max_value (TYPE_PRECISION (type
),
2714 // For the signed part, we need to simply union the 2 ranges now.
2715 r
.union_ (converted_lhs
);
2717 // Create maximal negative number outside of LHS bits.
2718 lim
= wi::mask (TYPE_PRECISION (lhs_type
), true,
2719 TYPE_PRECISION (type
));
2720 // Add this to the unsigned LHS range(s).
2721 int_range_max
lim_range (type
, lim
, lim
);
2722 int_range_max lhs_neg
;
2723 range_op_handler (PLUS_EXPR
, type
).fold_range (lhs_neg
, type
,
2726 // lhs_neg now has all the negative versions of the LHS.
2727 // Now union in all the values from SIGNED MIN (0x80000) to
2728 // lim-1 in order to fill in all the ranges with the upper
2731 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
2732 // we don't need to create a range from min to lim-1
2733 // calculate neg range traps trying to create [lim, lim - 1].
2734 wide_int min_val
= wi::min_value (TYPE_PRECISION (type
), SIGNED
);
2737 int_range_max
neg (type
,
2738 wi::min_value (TYPE_PRECISION (type
),
2741 lhs_neg
.union_ (neg
);
2743 // And finally, munge the signed and unsigned portions.
2746 // And intersect with any known value passed in the extra operand.
2752 if (TYPE_PRECISION (lhs_type
) == TYPE_PRECISION (type
))
2756 // The cast is not truncating, and the range is restricted to
2757 // the range of the RHS by this assignment.
2759 // Cast the range of the RHS to the type of the LHS.
2760 fold_range (tmp
, lhs_type
, int_range
<1> (type
), int_range
<1> (lhs_type
));
2761 // Intersect this with the LHS range will produce the range,
2762 // which will be cast to the RHS type before returning.
2763 tmp
.intersect (lhs
);
2766 // Cast the calculated range to the type of the RHS.
2767 fold_range (r
, type
, tmp
, int_range
<1> (type
));
2772 class operator_logical_and
: public range_operator
2774 using range_operator::fold_range
;
2775 using range_operator::op1_range
;
2776 using range_operator::op2_range
;
2778 virtual bool fold_range (irange
&r
, tree type
,
2781 relation_trio rel
= TRIO_VARYING
) const;
2782 virtual bool op1_range (irange
&r
, tree type
,
2785 relation_trio rel
= TRIO_VARYING
) const;
2786 virtual bool op2_range (irange
&r
, tree type
,
2789 relation_trio rel
= TRIO_VARYING
) const;
2794 operator_logical_and::fold_range (irange
&r
, tree type
,
2797 relation_trio
) const
2799 if (empty_range_varying (r
, type
, lh
, rh
))
2802 // 0 && anything is 0.
2803 if ((wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (lh
.upper_bound (), 0))
2804 || (wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (rh
.upper_bound (), 0)))
2805 r
= range_false (type
);
2806 else if (lh
.contains_p (build_zero_cst (lh
.type ()))
2807 || rh
.contains_p (build_zero_cst (rh
.type ())))
2808 // To reach this point, there must be a logical 1 on each side, and
2809 // the only remaining question is whether there is a zero or not.
2810 r
= range_true_and_false (type
);
2812 r
= range_true (type
);
2817 operator_logical_and::op1_range (irange
&r
, tree type
,
2819 const irange
&op2 ATTRIBUTE_UNUSED
,
2820 relation_trio
) const
2822 switch (get_bool_state (r
, lhs
, type
))
2825 // A true result means both sides of the AND must be true.
2826 r
= range_true (type
);
2829 // Any other result means only one side has to be false, the
2830 // other side can be anything. So we cannot be sure of any
2832 r
= range_true_and_false (type
);
2839 operator_logical_and::op2_range (irange
&r
, tree type
,
2842 relation_trio
) const
2844 return operator_logical_and::op1_range (r
, type
, lhs
, op1
);
2848 class operator_bitwise_and
: public range_operator
2850 using range_operator::fold_range
;
2851 using range_operator::op1_range
;
2852 using range_operator::op2_range
;
2854 virtual bool fold_range (irange
&r
, tree type
,
2857 relation_trio rel
= TRIO_VARYING
) const;
2858 virtual bool op1_range (irange
&r
, tree type
,
2861 relation_trio rel
= TRIO_VARYING
) const;
2862 virtual bool op2_range (irange
&r
, tree type
,
2865 relation_trio rel
= TRIO_VARYING
) const;
2866 virtual void wi_fold (irange
&r
, tree type
,
2867 const wide_int
&lh_lb
,
2868 const wide_int
&lh_ub
,
2869 const wide_int
&rh_lb
,
2870 const wide_int
&rh_ub
) const;
2871 virtual relation_kind
lhs_op1_relation (const irange
&lhs
,
2874 relation_kind
) const;
2876 void simple_op1_range_solver (irange
&r
, tree type
,
2878 const irange
&op2
) const;
2882 operator_bitwise_and::fold_range (irange
&r
, tree type
,
2885 relation_trio
) const
2887 if (range_operator::fold_range (r
, type
, lh
, rh
))
2889 if (!lh
.undefined_p () && !rh
.undefined_p ())
2890 r
.set_nonzero_bits (wi::bit_and (lh
.get_nonzero_bits (),
2891 rh
.get_nonzero_bits ()));
2898 // Optimize BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR of signed types
2899 // by considering the number of leading redundant sign bit copies.
2900 // clrsb (X op Y) = min (clrsb (X), clrsb (Y)), so for example
2901 // [-1, 0] op [-1, 0] is [-1, 0] (where nonzero_bits doesn't help).
2903 wi_optimize_signed_bitwise_op (irange
&r
, tree type
,
2904 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2905 const wide_int
&rh_lb
, const wide_int
&rh_ub
)
2907 int lh_clrsb
= MIN (wi::clrsb (lh_lb
), wi::clrsb (lh_ub
));
2908 int rh_clrsb
= MIN (wi::clrsb (rh_lb
), wi::clrsb (rh_ub
));
2909 int new_clrsb
= MIN (lh_clrsb
, rh_clrsb
);
2912 int type_prec
= TYPE_PRECISION (type
);
2913 int rprec
= (type_prec
- new_clrsb
) - 1;
2914 value_range_with_overflow (r
, type
,
2915 wi::mask (rprec
, true, type_prec
),
2916 wi::mask (rprec
, false, type_prec
));
2920 // An AND of 8,16, 32 or 64 bits can produce a partial equivalence between
2924 operator_bitwise_and::lhs_op1_relation (const irange
&lhs
,
2927 relation_kind
) const
2929 if (lhs
.undefined_p () || op1
.undefined_p () || op2
.undefined_p ())
2930 return VREL_VARYING
;
2931 if (!op2
.singleton_p ())
2932 return VREL_VARYING
;
2933 // if val == 0xff or 0xFFFF OR 0Xffffffff OR 0Xffffffffffffffff, return TRUE
2934 int prec1
= TYPE_PRECISION (op1
.type ());
2935 int prec2
= TYPE_PRECISION (op2
.type ());
2937 wide_int mask
= op2
.lower_bound ();
2938 if (wi::eq_p (mask
, wi::mask (8, false, prec2
)))
2940 else if (wi::eq_p (mask
, wi::mask (16, false, prec2
)))
2942 else if (wi::eq_p (mask
, wi::mask (32, false, prec2
)))
2944 else if (wi::eq_p (mask
, wi::mask (64, false, prec2
)))
2946 return bits_to_pe (MIN (prec1
, mask_prec
));
2949 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
2950 // possible. Basically, see if we can optimize:
2954 // [LB op Z, UB op Z]
2956 // If the optimization was successful, accumulate the range in R and
2960 wi_optimize_and_or (irange
&r
,
2961 enum tree_code code
,
2963 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2964 const wide_int
&rh_lb
, const wide_int
&rh_ub
)
2966 // Calculate the singleton mask among the ranges, if any.
2967 wide_int lower_bound
, upper_bound
, mask
;
2968 if (wi::eq_p (rh_lb
, rh_ub
))
2971 lower_bound
= lh_lb
;
2972 upper_bound
= lh_ub
;
2974 else if (wi::eq_p (lh_lb
, lh_ub
))
2977 lower_bound
= rh_lb
;
2978 upper_bound
= rh_ub
;
2983 // If Z is a constant which (for op | its bitwise not) has n
2984 // consecutive least significant bits cleared followed by m 1
2985 // consecutive bits set immediately above it and either
2986 // m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
2988 // The least significant n bits of all the values in the range are
2989 // cleared or set, the m bits above it are preserved and any bits
2990 // above these are required to be the same for all values in the
2994 if (code
== BIT_IOR_EXPR
)
2996 if (wi::eq_p (w
, 0))
2997 n
= w
.get_precision ();
3001 w
= ~(w
| wi::mask (n
, false, w
.get_precision ()));
3002 if (wi::eq_p (w
, 0))
3003 m
= w
.get_precision () - n
;
3005 m
= wi::ctz (w
) - n
;
3007 wide_int new_mask
= wi::mask (m
+ n
, true, w
.get_precision ());
3008 if ((new_mask
& lower_bound
) != (new_mask
& upper_bound
))
3011 wide_int res_lb
, res_ub
;
3012 if (code
== BIT_AND_EXPR
)
3014 res_lb
= wi::bit_and (lower_bound
, mask
);
3015 res_ub
= wi::bit_and (upper_bound
, mask
);
3017 else if (code
== BIT_IOR_EXPR
)
3019 res_lb
= wi::bit_or (lower_bound
, mask
);
3020 res_ub
= wi::bit_or (upper_bound
, mask
);
3024 value_range_with_overflow (r
, type
, res_lb
, res_ub
);
3026 // Furthermore, if the mask is non-zero, an IOR cannot contain zero.
3027 if (code
== BIT_IOR_EXPR
&& wi::ne_p (mask
, 0))
3030 tmp
.set_nonzero (type
);
3036 // For range [LB, UB] compute two wide_int bit masks.
3038 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
3039 // for all numbers in the range the bit is 0, otherwise it might be 0
3042 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
3043 // for all numbers in the range the bit is 1, otherwise it might be 0
3047 wi_set_zero_nonzero_bits (tree type
,
3048 const wide_int
&lb
, const wide_int
&ub
,
3049 wide_int
&maybe_nonzero
,
3050 wide_int
&mustbe_nonzero
)
3052 signop sign
= TYPE_SIGN (type
);
3054 if (wi::eq_p (lb
, ub
))
3055 maybe_nonzero
= mustbe_nonzero
= lb
;
3056 else if (wi::ge_p (lb
, 0, sign
) || wi::lt_p (ub
, 0, sign
))
3058 wide_int xor_mask
= lb
^ ub
;
3059 maybe_nonzero
= lb
| ub
;
3060 mustbe_nonzero
= lb
& ub
;
3063 wide_int mask
= wi::mask (wi::floor_log2 (xor_mask
), false,
3064 maybe_nonzero
.get_precision ());
3065 maybe_nonzero
= maybe_nonzero
| mask
;
3066 mustbe_nonzero
= wi::bit_and_not (mustbe_nonzero
, mask
);
3071 maybe_nonzero
= wi::minus_one (lb
.get_precision ());
3072 mustbe_nonzero
= wi::zero (lb
.get_precision ());
3077 operator_bitwise_and::wi_fold (irange
&r
, tree type
,
3078 const wide_int
&lh_lb
,
3079 const wide_int
&lh_ub
,
3080 const wide_int
&rh_lb
,
3081 const wide_int
&rh_ub
) const
3083 if (wi_optimize_and_or (r
, BIT_AND_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
3086 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3087 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3088 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3089 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3090 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3091 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3093 wide_int new_lb
= mustbe_nonzero_lh
& mustbe_nonzero_rh
;
3094 wide_int new_ub
= maybe_nonzero_lh
& maybe_nonzero_rh
;
3095 signop sign
= TYPE_SIGN (type
);
3096 unsigned prec
= TYPE_PRECISION (type
);
3097 // If both input ranges contain only negative values, we can
3098 // truncate the result range maximum to the minimum of the
3099 // input range maxima.
3100 if (wi::lt_p (lh_ub
, 0, sign
) && wi::lt_p (rh_ub
, 0, sign
))
3102 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
3103 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
3105 // If either input range contains only non-negative values
3106 // we can truncate the result range maximum to the respective
3107 // maximum of the input range.
3108 if (wi::ge_p (lh_lb
, 0, sign
))
3109 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
3110 if (wi::ge_p (rh_lb
, 0, sign
))
3111 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
3112 // PR68217: In case of signed & sign-bit-CST should
3113 // result in [-INF, 0] instead of [-INF, INF].
3114 if (wi::gt_p (new_lb
, new_ub
, sign
))
3116 wide_int sign_bit
= wi::set_bit_in_zero (prec
- 1, prec
);
3118 && ((wi::eq_p (lh_lb
, lh_ub
)
3119 && !wi::cmps (lh_lb
, sign_bit
))
3120 || (wi::eq_p (rh_lb
, rh_ub
)
3121 && !wi::cmps (rh_lb
, sign_bit
))))
3123 new_lb
= wi::min_value (prec
, sign
);
3124 new_ub
= wi::zero (prec
);
3127 // If the limits got swapped around, return varying.
3128 if (wi::gt_p (new_lb
, new_ub
,sign
))
3131 && wi_optimize_signed_bitwise_op (r
, type
,
3135 r
.set_varying (type
);
3138 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3142 set_nonzero_range_from_mask (irange
&r
, tree type
, const irange
&lhs
)
3144 if (!lhs
.contains_p (build_zero_cst (type
)))
3145 r
= range_nonzero (type
);
3147 r
.set_varying (type
);
3150 // This was shamelessly stolen from register_edge_assert_for_2 and
3151 // adjusted to work with iranges.
3154 operator_bitwise_and::simple_op1_range_solver (irange
&r
, tree type
,
3156 const irange
&op2
) const
3158 if (!op2
.singleton_p ())
3160 set_nonzero_range_from_mask (r
, type
, lhs
);
3163 unsigned int nprec
= TYPE_PRECISION (type
);
3164 wide_int cst2v
= op2
.lower_bound ();
3165 bool cst2n
= wi::neg_p (cst2v
, TYPE_SIGN (type
));
3168 sgnbit
= wi::set_bit_in_zero (nprec
- 1, nprec
);
3170 sgnbit
= wi::zero (nprec
);
3172 // Solve [lhs.lower_bound (), +INF] = x & MASK.
3174 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
3175 // maximum unsigned value is ~0. For signed comparison, if CST2
3176 // doesn't have the most significant bit set, handle it similarly. If
3177 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
3178 wide_int valv
= lhs
.lower_bound ();
3179 wide_int minv
= valv
& cst2v
, maxv
;
3180 bool we_know_nothing
= false;
3183 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
3184 minv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3187 // If we can't determine anything on this bound, fall
3188 // through and conservatively solve for the other end point.
3189 we_know_nothing
= true;
3192 maxv
= wi::mask (nprec
- (cst2n
? 1 : 0), false, nprec
);
3193 if (we_know_nothing
)
3194 r
.set_varying (type
);
3196 r
= int_range
<1> (type
, minv
, maxv
);
3198 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
3200 // Minimum unsigned value for <= is 0 and maximum unsigned value is
3201 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
3203 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3205 // For signed comparison, if CST2 doesn't have most significant bit
3206 // set, handle it similarly. If CST2 has MSB set, the maximum is
3207 // the same and minimum is INT_MIN.
3208 valv
= lhs
.upper_bound ();
3209 minv
= valv
& cst2v
;
3214 maxv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3217 // If we couldn't determine anything on either bound, return
3219 if (we_know_nothing
)
3227 int_range
<1> upper_bits (type
, minv
, maxv
);
3228 r
.intersect (upper_bits
);
3232 operator_bitwise_and::op1_range (irange
&r
, tree type
,
3235 relation_trio
) const
3237 if (lhs
.undefined_p ())
3239 if (types_compatible_p (type
, boolean_type_node
))
3240 return op_logical_and
.op1_range (r
, type
, lhs
, op2
);
3243 for (unsigned i
= 0; i
< lhs
.num_pairs (); ++i
)
3245 int_range_max
chunk (lhs
.type (),
3246 lhs
.lower_bound (i
),
3247 lhs
.upper_bound (i
));
3249 simple_op1_range_solver (res
, type
, chunk
, op2
);
3252 if (r
.undefined_p ())
3253 set_nonzero_range_from_mask (r
, type
, lhs
);
3255 // For 0 = op1 & MASK, op1 is ~MASK.
3256 if (lhs
.zero_p () && op2
.singleton_p ())
3258 wide_int nz
= wi::bit_not (op2
.get_nonzero_bits ());
3259 int_range
<2> tmp (type
);
3260 tmp
.set_nonzero_bits (nz
);
3267 operator_bitwise_and::op2_range (irange
&r
, tree type
,
3270 relation_trio
) const
3272 return operator_bitwise_and::op1_range (r
, type
, lhs
, op1
);
3276 class operator_logical_or
: public range_operator
3278 using range_operator::fold_range
;
3279 using range_operator::op1_range
;
3280 using range_operator::op2_range
;
3282 virtual bool fold_range (irange
&r
, tree type
,
3285 relation_trio rel
= TRIO_VARYING
) const;
3286 virtual bool op1_range (irange
&r
, tree type
,
3289 relation_trio rel
= TRIO_VARYING
) const;
3290 virtual bool op2_range (irange
&r
, tree type
,
3293 relation_trio rel
= TRIO_VARYING
) const;
3297 operator_logical_or::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
3300 relation_trio
) const
3302 if (empty_range_varying (r
, type
, lh
, rh
))
3311 operator_logical_or::op1_range (irange
&r
, tree type
,
3313 const irange
&op2 ATTRIBUTE_UNUSED
,
3314 relation_trio
) const
3316 switch (get_bool_state (r
, lhs
, type
))
3319 // A false result means both sides of the OR must be false.
3320 r
= range_false (type
);
3323 // Any other result means only one side has to be true, the
3324 // other side can be anything. so we can't be sure of any result
3326 r
= range_true_and_false (type
);
3333 operator_logical_or::op2_range (irange
&r
, tree type
,
3336 relation_trio
) const
3338 return operator_logical_or::op1_range (r
, type
, lhs
, op1
);
3342 class operator_bitwise_or
: public range_operator
3344 using range_operator::op1_range
;
3345 using range_operator::op2_range
;
3347 virtual bool op1_range (irange
&r
, tree type
,
3350 relation_trio rel
= TRIO_VARYING
) const;
3351 virtual bool op2_range (irange
&r
, tree type
,
3354 relation_trio rel
= TRIO_VARYING
) const;
3355 virtual void wi_fold (irange
&r
, tree type
,
3356 const wide_int
&lh_lb
,
3357 const wide_int
&lh_ub
,
3358 const wide_int
&rh_lb
,
3359 const wide_int
&rh_ub
) const;
3363 operator_bitwise_or::wi_fold (irange
&r
, tree type
,
3364 const wide_int
&lh_lb
,
3365 const wide_int
&lh_ub
,
3366 const wide_int
&rh_lb
,
3367 const wide_int
&rh_ub
) const
3369 if (wi_optimize_and_or (r
, BIT_IOR_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
3372 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3373 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3374 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3375 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3376 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3377 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3378 wide_int new_lb
= mustbe_nonzero_lh
| mustbe_nonzero_rh
;
3379 wide_int new_ub
= maybe_nonzero_lh
| maybe_nonzero_rh
;
3380 signop sign
= TYPE_SIGN (type
);
3381 // If the input ranges contain only positive values we can
3382 // truncate the minimum of the result range to the maximum
3383 // of the input range minima.
3384 if (wi::ge_p (lh_lb
, 0, sign
)
3385 && wi::ge_p (rh_lb
, 0, sign
))
3387 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
3388 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
3390 // If either input range contains only negative values
3391 // we can truncate the minimum of the result range to the
3392 // respective minimum range.
3393 if (wi::lt_p (lh_ub
, 0, sign
))
3394 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
3395 if (wi::lt_p (rh_ub
, 0, sign
))
3396 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
3397 // If the limits got swapped around, return a conservative range.
3398 if (wi::gt_p (new_lb
, new_ub
, sign
))
3400 // Make sure that nonzero|X is nonzero.
3401 if (wi::gt_p (lh_lb
, 0, sign
)
3402 || wi::gt_p (rh_lb
, 0, sign
)
3403 || wi::lt_p (lh_ub
, 0, sign
)
3404 || wi::lt_p (rh_ub
, 0, sign
))
3405 r
.set_nonzero (type
);
3406 else if (sign
== SIGNED
3407 && wi_optimize_signed_bitwise_op (r
, type
,
3412 r
.set_varying (type
);
3415 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3419 operator_bitwise_or::op1_range (irange
&r
, tree type
,
3422 relation_trio
) const
3424 if (lhs
.undefined_p ())
3426 // If this is really a logical wi_fold, call that.
3427 if (types_compatible_p (type
, boolean_type_node
))
3428 return op_logical_or
.op1_range (r
, type
, lhs
, op2
);
3432 tree zero
= build_zero_cst (type
);
3433 r
= int_range
<1> (zero
, zero
);
3436 r
.set_varying (type
);
3441 operator_bitwise_or::op2_range (irange
&r
, tree type
,
3444 relation_trio
) const
3446 return operator_bitwise_or::op1_range (r
, type
, lhs
, op1
);
3450 class operator_bitwise_xor
: public range_operator
3452 using range_operator::op1_range
;
3453 using range_operator::op2_range
;
3455 virtual void wi_fold (irange
&r
, tree type
,
3456 const wide_int
&lh_lb
,
3457 const wide_int
&lh_ub
,
3458 const wide_int
&rh_lb
,
3459 const wide_int
&rh_ub
) const;
3460 virtual bool op1_range (irange
&r
, tree type
,
3463 relation_trio rel
= TRIO_VARYING
) const;
3464 virtual bool op2_range (irange
&r
, tree type
,
3467 relation_trio rel
= TRIO_VARYING
) const;
3468 virtual bool op1_op2_relation_effect (irange
&lhs_range
,
3470 const irange
&op1_range
,
3471 const irange
&op2_range
,
3472 relation_kind rel
) const;
3476 operator_bitwise_xor::wi_fold (irange
&r
, tree type
,
3477 const wide_int
&lh_lb
,
3478 const wide_int
&lh_ub
,
3479 const wide_int
&rh_lb
,
3480 const wide_int
&rh_ub
) const
3482 signop sign
= TYPE_SIGN (type
);
3483 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3484 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3485 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3486 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3487 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3488 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3490 wide_int result_zero_bits
= ((mustbe_nonzero_lh
& mustbe_nonzero_rh
)
3491 | ~(maybe_nonzero_lh
| maybe_nonzero_rh
));
3492 wide_int result_one_bits
3493 = (wi::bit_and_not (mustbe_nonzero_lh
, maybe_nonzero_rh
)
3494 | wi::bit_and_not (mustbe_nonzero_rh
, maybe_nonzero_lh
));
3495 wide_int new_ub
= ~result_zero_bits
;
3496 wide_int new_lb
= result_one_bits
;
3498 // If the range has all positive or all negative values, the result
3499 // is better than VARYING.
3500 if (wi::lt_p (new_lb
, 0, sign
) || wi::ge_p (new_ub
, 0, sign
))
3501 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3502 else if (sign
== SIGNED
3503 && wi_optimize_signed_bitwise_op (r
, type
,
3508 r
.set_varying (type
);
3510 /* Furthermore, XOR is non-zero if its arguments can't be equal. */
3511 if (wi::lt_p (lh_ub
, rh_lb
, sign
)
3512 || wi::lt_p (rh_ub
, lh_lb
, sign
)
3513 || wi::ne_p (result_one_bits
, 0))
3516 tmp
.set_nonzero (type
);
3522 operator_bitwise_xor::op1_op2_relation_effect (irange
&lhs_range
,
3526 relation_kind rel
) const
3528 if (rel
== VREL_VARYING
)
3531 int_range
<2> rel_range
;
3536 rel_range
.set_zero (type
);
3539 rel_range
.set_nonzero (type
);
3545 lhs_range
.intersect (rel_range
);
3550 operator_bitwise_xor::op1_range (irange
&r
, tree type
,
3553 relation_trio
) const
3555 if (lhs
.undefined_p () || lhs
.varying_p ())
3560 if (types_compatible_p (type
, boolean_type_node
))
3562 switch (get_bool_state (r
, lhs
, type
))
3565 if (op2
.varying_p ())
3566 r
.set_varying (type
);
3567 else if (op2
.zero_p ())
3568 r
= range_true (type
);
3570 r
= range_false (type
);
3580 r
.set_varying (type
);
3585 operator_bitwise_xor::op2_range (irange
&r
, tree type
,
3588 relation_trio
) const
3590 return operator_bitwise_xor::op1_range (r
, type
, lhs
, op1
);
3593 class operator_trunc_mod
: public range_operator
3595 using range_operator::op1_range
;
3596 using range_operator::op2_range
;
3598 virtual void wi_fold (irange
&r
, tree type
,
3599 const wide_int
&lh_lb
,
3600 const wide_int
&lh_ub
,
3601 const wide_int
&rh_lb
,
3602 const wide_int
&rh_ub
) const;
3603 virtual bool op1_range (irange
&r
, tree type
,
3606 relation_trio
) const;
3607 virtual bool op2_range (irange
&r
, tree type
,
3610 relation_trio
) const;
3614 operator_trunc_mod::wi_fold (irange
&r
, tree type
,
3615 const wide_int
&lh_lb
,
3616 const wide_int
&lh_ub
,
3617 const wide_int
&rh_lb
,
3618 const wide_int
&rh_ub
) const
3620 wide_int new_lb
, new_ub
, tmp
;
3621 signop sign
= TYPE_SIGN (type
);
3622 unsigned prec
= TYPE_PRECISION (type
);
3624 // Mod 0 is undefined.
3625 if (wi_zero_p (type
, rh_lb
, rh_ub
))
3631 // Check for constant and try to fold.
3632 if (lh_lb
== lh_ub
&& rh_lb
== rh_ub
)
3634 wi::overflow_type ov
= wi::OVF_NONE
;
3635 tmp
= wi::mod_trunc (lh_lb
, rh_lb
, sign
, &ov
);
3636 if (ov
== wi::OVF_NONE
)
3638 r
= int_range
<2> (type
, tmp
, tmp
);
3643 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
3648 new_ub
= wi::smax (new_ub
, tmp
);
3651 if (sign
== UNSIGNED
)
3652 new_lb
= wi::zero (prec
);
3657 if (wi::gts_p (tmp
, 0))
3658 tmp
= wi::zero (prec
);
3659 new_lb
= wi::smax (new_lb
, tmp
);
3662 if (sign
== SIGNED
&& wi::neg_p (tmp
))
3663 tmp
= wi::zero (prec
);
3664 new_ub
= wi::min (new_ub
, tmp
, sign
);
3666 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3670 operator_trunc_mod::op1_range (irange
&r
, tree type
,
3673 relation_trio
) const
3675 if (lhs
.undefined_p ())
3678 signop sign
= TYPE_SIGN (type
);
3679 unsigned prec
= TYPE_PRECISION (type
);
3680 // (a % b) >= x && x > 0 , then a >= x.
3681 if (wi::gt_p (lhs
.lower_bound (), 0, sign
))
3683 r
= value_range (type
, lhs
.lower_bound (), wi::max_value (prec
, sign
));
3686 // (a % b) <= x && x < 0 , then a <= x.
3687 if (wi::lt_p (lhs
.upper_bound (), 0, sign
))
3689 r
= value_range (type
, wi::min_value (prec
, sign
), lhs
.upper_bound ());
3696 operator_trunc_mod::op2_range (irange
&r
, tree type
,
3699 relation_trio
) const
3701 if (lhs
.undefined_p ())
3704 signop sign
= TYPE_SIGN (type
);
3705 unsigned prec
= TYPE_PRECISION (type
);
3706 // (a % b) >= x && x > 0 , then b is in ~[-x, x] for signed
3707 // or b > x for unsigned.
3708 if (wi::gt_p (lhs
.lower_bound (), 0, sign
))
3711 r
= value_range (type
, wi::neg (lhs
.lower_bound ()),
3712 lhs
.lower_bound (), VR_ANTI_RANGE
);
3713 else if (wi::lt_p (lhs
.lower_bound (), wi::max_value (prec
, sign
),
3715 r
= value_range (type
, lhs
.lower_bound () + 1,
3716 wi::max_value (prec
, sign
));
3721 // (a % b) <= x && x < 0 , then b is in ~[x, -x].
3722 if (wi::lt_p (lhs
.upper_bound (), 0, sign
))
3724 if (wi::gt_p (lhs
.upper_bound (), wi::min_value (prec
, sign
), sign
))
3725 r
= value_range (type
, lhs
.upper_bound (),
3726 wi::neg (lhs
.upper_bound ()), VR_ANTI_RANGE
);
3735 class operator_logical_not
: public range_operator
3737 using range_operator::fold_range
;
3738 using range_operator::op1_range
;
3740 virtual bool fold_range (irange
&r
, tree type
,
3743 relation_trio rel
= TRIO_VARYING
) const;
3744 virtual bool op1_range (irange
&r
, tree type
,
3747 relation_trio rel
= TRIO_VARYING
) const;
3750 // Folding a logical NOT, oddly enough, involves doing nothing on the
3751 // forward pass through. During the initial walk backwards, the
3752 // logical NOT reversed the desired outcome on the way back, so on the
3753 // way forward all we do is pass the range forward.
3758 // to determine the TRUE branch, walking backward
3759 // if (b_3) if ([1,1])
3760 // b_3 = !b_2 [1,1] = ![0,0]
3761 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
3762 // which is the result we are looking for.. so.. pass it through.
3765 operator_logical_not::fold_range (irange
&r
, tree type
,
3767 const irange
&rh ATTRIBUTE_UNUSED
,
3768 relation_trio
) const
3770 if (empty_range_varying (r
, type
, lh
, rh
))
3774 if (!lh
.varying_p () && !lh
.undefined_p ())
3781 operator_logical_not::op1_range (irange
&r
,
3785 relation_trio
) const
3787 // Logical NOT is involutary...do it again.
3788 return fold_range (r
, type
, lhs
, op2
);
3792 class operator_bitwise_not
: public range_operator
3794 using range_operator::fold_range
;
3795 using range_operator::op1_range
;
3797 virtual bool fold_range (irange
&r
, tree type
,
3800 relation_trio rel
= TRIO_VARYING
) const;
3801 virtual bool op1_range (irange
&r
, tree type
,
3804 relation_trio rel
= TRIO_VARYING
) const;
3808 operator_bitwise_not::fold_range (irange
&r
, tree type
,
3811 relation_trio
) const
3813 if (empty_range_varying (r
, type
, lh
, rh
))
3816 if (types_compatible_p (type
, boolean_type_node
))
3817 return op_logical_not
.fold_range (r
, type
, lh
, rh
);
3819 // ~X is simply -1 - X.
3820 int_range
<1> minusone (type
, wi::minus_one (TYPE_PRECISION (type
)),
3821 wi::minus_one (TYPE_PRECISION (type
)));
3822 return range_op_handler (MINUS_EXPR
, type
).fold_range (r
, type
, minusone
, lh
);
3826 operator_bitwise_not::op1_range (irange
&r
, tree type
,
3829 relation_trio
) const
3831 if (lhs
.undefined_p ())
3833 if (types_compatible_p (type
, boolean_type_node
))
3834 return op_logical_not
.op1_range (r
, type
, lhs
, op2
);
3836 // ~X is -1 - X and since bitwise NOT is involutary...do it again.
3837 return fold_range (r
, type
, lhs
, op2
);
3841 class operator_cst
: public range_operator
3843 using range_operator::fold_range
;
3845 virtual bool fold_range (irange
&r
, tree type
,
3848 relation_trio rel
= TRIO_VARYING
) const;
3852 operator_cst::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
3854 const irange
&rh ATTRIBUTE_UNUSED
,
3855 relation_trio
) const
3862 class operator_identity
: public range_operator
3864 using range_operator::fold_range
;
3865 using range_operator::op1_range
;
3866 using range_operator::lhs_op1_relation
;
3868 virtual bool fold_range (irange
&r
, tree type
,
3871 relation_trio rel
= TRIO_VARYING
) const;
3872 virtual bool op1_range (irange
&r
, tree type
,
3875 relation_trio rel
= TRIO_VARYING
) const;
3876 virtual relation_kind
lhs_op1_relation (const irange
&lhs
,
3879 relation_kind rel
) const;
3882 // Determine if there is a relationship between LHS and OP1.
3885 operator_identity::lhs_op1_relation (const irange
&lhs
,
3886 const irange
&op1 ATTRIBUTE_UNUSED
,
3887 const irange
&op2 ATTRIBUTE_UNUSED
,
3888 relation_kind
) const
3890 if (lhs
.undefined_p ())
3891 return VREL_VARYING
;
3892 // Simply a copy, so they are equivalent.
3897 operator_identity::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
3899 const irange
&rh ATTRIBUTE_UNUSED
,
3900 relation_trio
) const
3907 operator_identity::op1_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
3909 const irange
&op2 ATTRIBUTE_UNUSED
,
3910 relation_trio
) const
3917 class operator_unknown
: public range_operator
3919 using range_operator::fold_range
;
3921 virtual bool fold_range (irange
&r
, tree type
,
3924 relation_trio rel
= TRIO_VARYING
) const;
3928 operator_unknown::fold_range (irange
&r
, tree type
,
3929 const irange
&lh ATTRIBUTE_UNUSED
,
3930 const irange
&rh ATTRIBUTE_UNUSED
,
3931 relation_trio
) const
3933 r
.set_varying (type
);
3938 class operator_abs
: public range_operator
3940 using range_operator::op1_range
;
3942 virtual void wi_fold (irange
&r
, tree type
,
3943 const wide_int
&lh_lb
,
3944 const wide_int
&lh_ub
,
3945 const wide_int
&rh_lb
,
3946 const wide_int
&rh_ub
) const;
3947 virtual bool op1_range (irange
&r
, tree type
,
3950 relation_trio
) const;
3954 operator_abs::wi_fold (irange
&r
, tree type
,
3955 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3956 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
3957 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
3960 signop sign
= TYPE_SIGN (type
);
3961 unsigned prec
= TYPE_PRECISION (type
);
3963 // Pass through LH for the easy cases.
3964 if (sign
== UNSIGNED
|| wi::ge_p (lh_lb
, 0, sign
))
3966 r
= int_range
<1> (type
, lh_lb
, lh_ub
);
3970 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
3972 wide_int min_value
= wi::min_value (prec
, sign
);
3973 wide_int max_value
= wi::max_value (prec
, sign
);
3974 if (!TYPE_OVERFLOW_UNDEFINED (type
) && wi::eq_p (lh_lb
, min_value
))
3976 r
.set_varying (type
);
3980 // ABS_EXPR may flip the range around, if the original range
3981 // included negative values.
3982 if (wi::eq_p (lh_lb
, min_value
))
3984 // ABS ([-MIN, -MIN]) isn't representable, but we have traditionally
3985 // returned [-MIN,-MIN] so this preserves that behaviour. PR37078
3986 if (wi::eq_p (lh_ub
, min_value
))
3988 r
= int_range
<1> (type
, min_value
, min_value
);
3994 min
= wi::abs (lh_lb
);
3996 if (wi::eq_p (lh_ub
, min_value
))
3999 max
= wi::abs (lh_ub
);
4001 // If the range contains zero then we know that the minimum value in the
4002 // range will be zero.
4003 if (wi::le_p (lh_lb
, 0, sign
) && wi::ge_p (lh_ub
, 0, sign
))
4005 if (wi::gt_p (min
, max
, sign
))
4007 min
= wi::zero (prec
);
4011 // If the range was reversed, swap MIN and MAX.
4012 if (wi::gt_p (min
, max
, sign
))
4013 std::swap (min
, max
);
4016 // If the new range has its limits swapped around (MIN > MAX), then
4017 // the operation caused one of them to wrap around. The only thing
4018 // we know is that the result is positive.
4019 if (wi::gt_p (min
, max
, sign
))
4021 min
= wi::zero (prec
);
4024 r
= int_range
<1> (type
, min
, max
);
4028 operator_abs::op1_range (irange
&r
, tree type
,
4031 relation_trio
) const
4033 if (empty_range_varying (r
, type
, lhs
, op2
))
4035 if (TYPE_UNSIGNED (type
))
4040 // Start with the positives because negatives are an impossible result.
4041 int_range_max positives
= range_positives (type
);
4042 positives
.intersect (lhs
);
4044 // Then add the negative of each pair:
4045 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
4046 for (unsigned i
= 0; i
< positives
.num_pairs (); ++i
)
4047 r
.union_ (int_range
<1> (type
,
4048 -positives
.upper_bound (i
),
4049 -positives
.lower_bound (i
)));
4050 // With flag_wrapv, -TYPE_MIN_VALUE = TYPE_MIN_VALUE which is
4051 // unrepresentable. Add -TYPE_MIN_VALUE in this case.
4052 wide_int min_value
= wi::min_value (TYPE_PRECISION (type
), TYPE_SIGN (type
));
4053 wide_int lb
= lhs
.lower_bound ();
4054 if (!TYPE_OVERFLOW_UNDEFINED (type
) && wi::eq_p (lb
, min_value
))
4055 r
.union_ (int_range
<2> (type
, lb
, lb
));
4060 class operator_absu
: public range_operator
4063 virtual void wi_fold (irange
&r
, tree type
,
4064 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4065 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
4069 operator_absu::wi_fold (irange
&r
, tree type
,
4070 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4071 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
4072 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
4074 wide_int new_lb
, new_ub
;
4076 // Pass through VR0 the easy cases.
4077 if (wi::ges_p (lh_lb
, 0))
4084 new_lb
= wi::abs (lh_lb
);
4085 new_ub
= wi::abs (lh_ub
);
4087 // If the range contains zero then we know that the minimum
4088 // value in the range will be zero.
4089 if (wi::ges_p (lh_ub
, 0))
4091 if (wi::gtu_p (new_lb
, new_ub
))
4093 new_lb
= wi::zero (TYPE_PRECISION (type
));
4096 std::swap (new_lb
, new_ub
);
4099 gcc_checking_assert (TYPE_UNSIGNED (type
));
4100 r
= int_range
<1> (type
, new_lb
, new_ub
);
4104 class operator_negate
: public range_operator
4106 using range_operator::fold_range
;
4107 using range_operator::op1_range
;
4109 virtual bool fold_range (irange
&r
, tree type
,
4112 relation_trio rel
= TRIO_VARYING
) const;
4113 virtual bool op1_range (irange
&r
, tree type
,
4116 relation_trio rel
= TRIO_VARYING
) const;
4120 operator_negate::fold_range (irange
&r
, tree type
,
4123 relation_trio
) const
4125 if (empty_range_varying (r
, type
, lh
, rh
))
4127 // -X is simply 0 - X.
4128 return range_op_handler (MINUS_EXPR
, type
).fold_range (r
, type
,
4129 range_zero (type
), lh
);
4133 operator_negate::op1_range (irange
&r
, tree type
,
4136 relation_trio
) const
4138 // NEGATE is involutory.
4139 return fold_range (r
, type
, lhs
, op2
);
4143 class operator_addr_expr
: public range_operator
4145 using range_operator::fold_range
;
4146 using range_operator::op1_range
;
4148 virtual bool fold_range (irange
&r
, tree type
,
4151 relation_trio rel
= TRIO_VARYING
) const;
4152 virtual bool op1_range (irange
&r
, tree type
,
4155 relation_trio rel
= TRIO_VARYING
) const;
4159 operator_addr_expr::fold_range (irange
&r
, tree type
,
4162 relation_trio
) const
4164 if (empty_range_varying (r
, type
, lh
, rh
))
4167 // Return a non-null pointer of the LHS type (passed in op2).
4169 r
= range_zero (type
);
4170 else if (!lh
.contains_p (build_zero_cst (lh
.type ())))
4171 r
= range_nonzero (type
);
4173 r
.set_varying (type
);
4178 operator_addr_expr::op1_range (irange
&r
, tree type
,
4181 relation_trio
) const
4183 return operator_addr_expr::fold_range (r
, type
, lhs
, op2
);
4187 class pointer_plus_operator
: public range_operator
4190 virtual void wi_fold (irange
&r
, tree type
,
4191 const wide_int
&lh_lb
,
4192 const wide_int
&lh_ub
,
4193 const wide_int
&rh_lb
,
4194 const wide_int
&rh_ub
) const;
4198 pointer_plus_operator::wi_fold (irange
&r
, tree type
,
4199 const wide_int
&lh_lb
,
4200 const wide_int
&lh_ub
,
4201 const wide_int
&rh_lb
,
4202 const wide_int
&rh_ub
) const
4204 // Check for [0,0] + const, and simply return the const.
4205 if (lh_lb
== 0 && lh_ub
== 0 && rh_lb
== rh_ub
)
4207 tree val
= wide_int_to_tree (type
, rh_lb
);
4212 // For pointer types, we are really only interested in asserting
4213 // whether the expression evaluates to non-NULL.
4215 // With -fno-delete-null-pointer-checks we need to be more
4216 // conservative. As some object might reside at address 0,
4217 // then some offset could be added to it and the same offset
4218 // subtracted again and the result would be NULL.
4220 // static int a[12]; where &a[0] is NULL and
4223 // ptr will be NULL here, even when there is POINTER_PLUS_EXPR
4224 // where the first range doesn't include zero and the second one
4225 // doesn't either. As the second operand is sizetype (unsigned),
4226 // consider all ranges where the MSB could be set as possible
4227 // subtractions where the result might be NULL.
4228 if ((!wi_includes_zero_p (type
, lh_lb
, lh_ub
)
4229 || !wi_includes_zero_p (type
, rh_lb
, rh_ub
))
4230 && !TYPE_OVERFLOW_WRAPS (type
)
4231 && (flag_delete_null_pointer_checks
4232 || !wi::sign_mask (rh_ub
)))
4233 r
= range_nonzero (type
);
4234 else if (lh_lb
== lh_ub
&& lh_lb
== 0
4235 && rh_lb
== rh_ub
&& rh_lb
== 0)
4236 r
= range_zero (type
);
4238 r
.set_varying (type
);
4242 class pointer_min_max_operator
: public range_operator
4245 virtual void wi_fold (irange
& r
, tree type
,
4246 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4247 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
4251 pointer_min_max_operator::wi_fold (irange
&r
, tree type
,
4252 const wide_int
&lh_lb
,
4253 const wide_int
&lh_ub
,
4254 const wide_int
&rh_lb
,
4255 const wide_int
&rh_ub
) const
4257 // For MIN/MAX expressions with pointers, we only care about
4258 // nullness. If both are non null, then the result is nonnull.
4259 // If both are null, then the result is null. Otherwise they
4261 if (!wi_includes_zero_p (type
, lh_lb
, lh_ub
)
4262 && !wi_includes_zero_p (type
, rh_lb
, rh_ub
))
4263 r
= range_nonzero (type
);
4264 else if (wi_zero_p (type
, lh_lb
, lh_ub
) && wi_zero_p (type
, rh_lb
, rh_ub
))
4265 r
= range_zero (type
);
4267 r
.set_varying (type
);
4271 class pointer_and_operator
: public range_operator
4274 virtual void wi_fold (irange
&r
, tree type
,
4275 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4276 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
4280 pointer_and_operator::wi_fold (irange
&r
, tree type
,
4281 const wide_int
&lh_lb
,
4282 const wide_int
&lh_ub
,
4283 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
4284 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
4286 // For pointer types, we are really only interested in asserting
4287 // whether the expression evaluates to non-NULL.
4288 if (wi_zero_p (type
, lh_lb
, lh_ub
) || wi_zero_p (type
, lh_lb
, lh_ub
))
4289 r
= range_zero (type
);
4291 r
.set_varying (type
);
4295 class pointer_or_operator
: public range_operator
4297 using range_operator::op1_range
;
4298 using range_operator::op2_range
;
4300 virtual bool op1_range (irange
&r
, tree type
,
4303 relation_trio rel
= TRIO_VARYING
) const;
4304 virtual bool op2_range (irange
&r
, tree type
,
4307 relation_trio rel
= TRIO_VARYING
) const;
4308 virtual void wi_fold (irange
&r
, tree type
,
4309 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4310 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
4314 pointer_or_operator::op1_range (irange
&r
, tree type
,
4316 const irange
&op2 ATTRIBUTE_UNUSED
,
4317 relation_trio
) const
4319 if (lhs
.undefined_p ())
4323 tree zero
= build_zero_cst (type
);
4324 r
= int_range
<1> (zero
, zero
);
4327 r
.set_varying (type
);
4332 pointer_or_operator::op2_range (irange
&r
, tree type
,
4335 relation_trio
) const
4337 return pointer_or_operator::op1_range (r
, type
, lhs
, op1
);
4341 pointer_or_operator::wi_fold (irange
&r
, tree type
,
4342 const wide_int
&lh_lb
,
4343 const wide_int
&lh_ub
,
4344 const wide_int
&rh_lb
,
4345 const wide_int
&rh_ub
) const
4347 // For pointer types, we are really only interested in asserting
4348 // whether the expression evaluates to non-NULL.
4349 if (!wi_includes_zero_p (type
, lh_lb
, lh_ub
)
4350 && !wi_includes_zero_p (type
, rh_lb
, rh_ub
))
4351 r
= range_nonzero (type
);
4352 else if (wi_zero_p (type
, lh_lb
, lh_ub
) && wi_zero_p (type
, rh_lb
, rh_ub
))
4353 r
= range_zero (type
);
4355 r
.set_varying (type
);
4358 // Return a pointer to the range_operator instance, if there is one
4359 // associated with tree_code CODE.
4362 range_op_table::operator[] (enum tree_code code
)
4364 gcc_checking_assert (code
> 0 && code
< MAX_TREE_CODES
);
4365 return m_range_tree
[code
];
4368 // Add OP to the handler table for CODE.
4371 range_op_table::set (enum tree_code code
, range_operator
&op
)
4373 gcc_checking_assert (m_range_tree
[code
] == NULL
);
4374 m_range_tree
[code
] = &op
;
4377 // Instantiate a range op table for integral operations.
4379 class integral_table
: public range_op_table
4383 } integral_tree_table
;
4385 integral_table::integral_table ()
4387 set (EQ_EXPR
, op_equal
);
4388 set (NE_EXPR
, op_not_equal
);
4389 set (LT_EXPR
, op_lt
);
4390 set (LE_EXPR
, op_le
);
4391 set (GT_EXPR
, op_gt
);
4392 set (GE_EXPR
, op_ge
);
4393 set (PLUS_EXPR
, op_plus
);
4394 set (MINUS_EXPR
, op_minus
);
4395 set (MIN_EXPR
, op_min
);
4396 set (MAX_EXPR
, op_max
);
4397 set (MULT_EXPR
, op_mult
);
4398 set (TRUNC_DIV_EXPR
, op_trunc_div
);
4399 set (FLOOR_DIV_EXPR
, op_floor_div
);
4400 set (ROUND_DIV_EXPR
, op_round_div
);
4401 set (CEIL_DIV_EXPR
, op_ceil_div
);
4402 set (EXACT_DIV_EXPR
, op_exact_div
);
4403 set (LSHIFT_EXPR
, op_lshift
);
4404 set (RSHIFT_EXPR
, op_rshift
);
4405 set (NOP_EXPR
, op_convert
);
4406 set (CONVERT_EXPR
, op_convert
);
4407 set (TRUTH_AND_EXPR
, op_logical_and
);
4408 set (BIT_AND_EXPR
, op_bitwise_and
);
4409 set (TRUTH_OR_EXPR
, op_logical_or
);
4410 set (BIT_IOR_EXPR
, op_bitwise_or
);
4411 set (BIT_XOR_EXPR
, op_bitwise_xor
);
4412 set (TRUNC_MOD_EXPR
, op_trunc_mod
);
4413 set (TRUTH_NOT_EXPR
, op_logical_not
);
4414 set (BIT_NOT_EXPR
, op_bitwise_not
);
4415 set (INTEGER_CST
, op_integer_cst
);
4416 set (SSA_NAME
, op_identity
);
4417 set (PAREN_EXPR
, op_identity
);
4418 set (OBJ_TYPE_REF
, op_identity
);
4419 set (IMAGPART_EXPR
, op_unknown
);
4420 set (REALPART_EXPR
, op_unknown
);
4421 set (POINTER_DIFF_EXPR
, op_pointer_diff
);
4422 set (ABS_EXPR
, op_abs
);
4423 set (ABSU_EXPR
, op_absu
);
4424 set (NEGATE_EXPR
, op_negate
);
4425 set (ADDR_EXPR
, op_addr
);
4428 // Instantiate a range op table for pointer operations.
4430 class pointer_table
: public range_op_table
4434 } pointer_tree_table
;
4436 pointer_table::pointer_table ()
4438 set (BIT_AND_EXPR
, op_pointer_and
);
4439 set (BIT_IOR_EXPR
, op_pointer_or
);
4440 set (MIN_EXPR
, op_ptr_min_max
);
4441 set (MAX_EXPR
, op_ptr_min_max
);
4442 set (POINTER_PLUS_EXPR
, op_pointer_plus
);
4444 set (EQ_EXPR
, op_equal
);
4445 set (NE_EXPR
, op_not_equal
);
4446 set (LT_EXPR
, op_lt
);
4447 set (LE_EXPR
, op_le
);
4448 set (GT_EXPR
, op_gt
);
4449 set (GE_EXPR
, op_ge
);
4450 set (SSA_NAME
, op_identity
);
4451 set (INTEGER_CST
, op_integer_cst
);
4452 set (ADDR_EXPR
, op_addr
);
4453 set (NOP_EXPR
, op_convert
);
4454 set (CONVERT_EXPR
, op_convert
);
4456 set (BIT_NOT_EXPR
, op_bitwise_not
);
4457 set (BIT_XOR_EXPR
, op_bitwise_xor
);
4460 // The tables are hidden and accessed via a simple extern function.
4462 static inline range_operator
*
4463 get_handler (enum tree_code code
, tree type
)
4465 // First check if there is a pointer specialization.
4466 if (POINTER_TYPE_P (type
))
4467 return pointer_tree_table
[code
];
4468 if (INTEGRAL_TYPE_P (type
))
4469 return integral_tree_table
[code
];
4473 // Return the floating point operator for CODE or NULL if none available.
4475 static inline range_operator_float
*
4476 get_float_handler (enum tree_code code
, tree
)
4478 return (*floating_tree_table
)[code
];
4482 range_op_handler::set_op_handler (tree_code code
, tree type
)
4484 if (irange::supports_p (type
))
4487 m_int
= get_handler (code
, type
);
4488 m_valid
= m_int
!= NULL
;
4490 else if (frange::supports_p (type
))
4493 m_float
= get_float_handler (code
, type
);
4494 m_valid
= m_float
!= NULL
;
4504 range_op_handler::range_op_handler ()
4511 range_op_handler::range_op_handler (tree_code code
, tree type
)
4513 set_op_handler (code
, type
);
4518 range_op_handler::fold_range (vrange
&r
, tree type
,
4521 relation_trio rel
) const
4523 gcc_checking_assert (m_valid
);
4525 return m_int
->fold_range (as_a
<irange
> (r
), type
,
4527 as_a
<irange
> (rh
), rel
);
4529 if (is_a
<irange
> (r
))
4531 if (is_a
<irange
> (rh
))
4532 return m_float
->fold_range (as_a
<irange
> (r
), type
,
4534 as_a
<irange
> (rh
), rel
);
4536 return m_float
->fold_range (as_a
<irange
> (r
), type
,
4538 as_a
<frange
> (rh
), rel
);
4540 return m_float
->fold_range (as_a
<frange
> (r
), type
,
4542 as_a
<frange
> (rh
), rel
);
4546 range_op_handler::op1_range (vrange
&r
, tree type
,
4549 relation_trio rel
) const
4551 gcc_checking_assert (m_valid
);
4553 if (lhs
.undefined_p ())
4556 return m_int
->op1_range (as_a
<irange
> (r
), type
,
4557 as_a
<irange
> (lhs
),
4558 as_a
<irange
> (op2
), rel
);
4560 if (is_a
<irange
> (lhs
))
4561 return m_float
->op1_range (as_a
<frange
> (r
), type
,
4562 as_a
<irange
> (lhs
),
4563 as_a
<frange
> (op2
), rel
);
4564 return m_float
->op1_range (as_a
<frange
> (r
), type
,
4565 as_a
<frange
> (lhs
),
4566 as_a
<frange
> (op2
), rel
);
4570 range_op_handler::op2_range (vrange
&r
, tree type
,
4573 relation_trio rel
) const
4575 gcc_checking_assert (m_valid
);
4576 if (lhs
.undefined_p ())
4579 return m_int
->op2_range (as_a
<irange
> (r
), type
,
4580 as_a
<irange
> (lhs
),
4581 as_a
<irange
> (op1
), rel
);
4583 if (is_a
<irange
> (lhs
))
4584 return m_float
->op2_range (as_a
<frange
> (r
), type
,
4585 as_a
<irange
> (lhs
),
4586 as_a
<frange
> (op1
), rel
);
4587 return m_float
->op2_range (as_a
<frange
> (r
), type
,
4588 as_a
<frange
> (lhs
),
4589 as_a
<frange
> (op1
), rel
);
4593 range_op_handler::lhs_op1_relation (const vrange
&lhs
,
4596 relation_kind rel
) const
4598 gcc_checking_assert (m_valid
);
4600 return m_int
->lhs_op1_relation (as_a
<irange
> (lhs
),
4601 as_a
<irange
> (op1
),
4602 as_a
<irange
> (op2
), rel
);
4604 if (is_a
<irange
> (lhs
))
4605 return m_float
->lhs_op1_relation (as_a
<irange
> (lhs
),
4606 as_a
<frange
> (op1
),
4607 as_a
<frange
> (op2
), rel
);
4608 return m_float
->lhs_op1_relation (as_a
<frange
> (lhs
),
4609 as_a
<frange
> (op1
),
4610 as_a
<frange
> (op2
), rel
);
4614 range_op_handler::lhs_op2_relation (const vrange
&lhs
,
4617 relation_kind rel
) const
4619 gcc_checking_assert (m_valid
);
4621 return m_int
->lhs_op2_relation (as_a
<irange
> (lhs
),
4622 as_a
<irange
> (op1
),
4623 as_a
<irange
> (op2
), rel
);
4625 if (is_a
<irange
> (lhs
))
4626 return m_float
->lhs_op2_relation (as_a
<irange
> (lhs
),
4627 as_a
<frange
> (op1
),
4628 as_a
<frange
> (op2
), rel
);
4629 return m_float
->lhs_op2_relation (as_a
<frange
> (lhs
),
4630 as_a
<frange
> (op1
),
4631 as_a
<frange
> (op2
), rel
);
4635 range_op_handler::op1_op2_relation (const vrange
&lhs
) const
4637 gcc_checking_assert (m_valid
);
4639 return m_int
->op1_op2_relation (as_a
<irange
> (lhs
));
4640 if (is_a
<irange
> (lhs
))
4641 return m_float
->op1_op2_relation (as_a
<irange
> (lhs
));
4642 return m_float
->op1_op2_relation (as_a
<frange
> (lhs
));
4645 // Cast the range in R to TYPE.
4648 range_cast (vrange
&r
, tree type
)
4650 Value_Range
tmp (r
);
4651 Value_Range
varying (type
);
4652 varying
.set_varying (type
);
4653 range_op_handler
op (CONVERT_EXPR
, type
);
4654 // Call op_convert, if it fails, the result is varying.
4655 if (!op
|| !op
.fold_range (r
, type
, tmp
, varying
))
4657 r
.set_varying (type
);
4664 #include "selftest.h"
4668 #define INT(N) build_int_cst (integer_type_node, (N))
4669 #define UINT(N) build_int_cstu (unsigned_type_node, (N))
4670 #define INT16(N) build_int_cst (short_integer_type_node, (N))
4671 #define UINT16(N) build_int_cstu (short_unsigned_type_node, (N))
4672 #define SCHAR(N) build_int_cst (signed_char_type_node, (N))
4673 #define UCHAR(N) build_int_cstu (unsigned_char_type_node, (N))
4676 range_op_cast_tests ()
4678 int_range
<1> r0
, r1
, r2
, rold
;
4679 r0
.set_varying (integer_type_node
);
4680 tree maxint
= wide_int_to_tree (integer_type_node
, r0
.upper_bound ());
4682 // If a range is in any way outside of the range for the converted
4683 // to range, default to the range for the new type.
4684 r0
.set_varying (short_integer_type_node
);
4685 tree minshort
= wide_int_to_tree (short_integer_type_node
, r0
.lower_bound ());
4686 tree maxshort
= wide_int_to_tree (short_integer_type_node
, r0
.upper_bound ());
4687 if (TYPE_PRECISION (TREE_TYPE (maxint
))
4688 > TYPE_PRECISION (short_integer_type_node
))
4690 r1
= int_range
<1> (integer_zero_node
, maxint
);
4691 range_cast (r1
, short_integer_type_node
);
4692 ASSERT_TRUE (r1
.lower_bound () == wi::to_wide (minshort
)
4693 && r1
.upper_bound() == wi::to_wide (maxshort
));
4696 // (unsigned char)[-5,-1] => [251,255].
4697 r0
= rold
= int_range
<1> (SCHAR (-5), SCHAR (-1));
4698 range_cast (r0
, unsigned_char_type_node
);
4699 ASSERT_TRUE (r0
== int_range
<1> (UCHAR (251), UCHAR (255)));
4700 range_cast (r0
, signed_char_type_node
);
4701 ASSERT_TRUE (r0
== rold
);
4703 // (signed char)[15, 150] => [-128,-106][15,127].
4704 r0
= rold
= int_range
<1> (UCHAR (15), UCHAR (150));
4705 range_cast (r0
, signed_char_type_node
);
4706 r1
= int_range
<1> (SCHAR (15), SCHAR (127));
4707 r2
= int_range
<1> (SCHAR (-128), SCHAR (-106));
4709 ASSERT_TRUE (r1
== r0
);
4710 range_cast (r0
, unsigned_char_type_node
);
4711 ASSERT_TRUE (r0
== rold
);
4713 // (unsigned char)[-5, 5] => [0,5][251,255].
4714 r0
= rold
= int_range
<1> (SCHAR (-5), SCHAR (5));
4715 range_cast (r0
, unsigned_char_type_node
);
4716 r1
= int_range
<1> (UCHAR (251), UCHAR (255));
4717 r2
= int_range
<1> (UCHAR (0), UCHAR (5));
4719 ASSERT_TRUE (r0
== r1
);
4720 range_cast (r0
, signed_char_type_node
);
4721 ASSERT_TRUE (r0
== rold
);
4723 // (unsigned char)[-5,5] => [0,5][251,255].
4724 r0
= int_range
<1> (INT (-5), INT (5));
4725 range_cast (r0
, unsigned_char_type_node
);
4726 r1
= int_range
<1> (UCHAR (0), UCHAR (5));
4727 r1
.union_ (int_range
<1> (UCHAR (251), UCHAR (255)));
4728 ASSERT_TRUE (r0
== r1
);
4730 // (unsigned char)[5U,1974U] => [0,255].
4731 r0
= int_range
<1> (UINT (5), UINT (1974));
4732 range_cast (r0
, unsigned_char_type_node
);
4733 ASSERT_TRUE (r0
== int_range
<1> (UCHAR (0), UCHAR (255)));
4734 range_cast (r0
, integer_type_node
);
4735 // Going to a wider range should not sign extend.
4736 ASSERT_TRUE (r0
== int_range
<1> (INT (0), INT (255)));
4738 // (unsigned char)[-350,15] => [0,255].
4739 r0
= int_range
<1> (INT (-350), INT (15));
4740 range_cast (r0
, unsigned_char_type_node
);
4741 ASSERT_TRUE (r0
== (int_range
<1>
4742 (TYPE_MIN_VALUE (unsigned_char_type_node
),
4743 TYPE_MAX_VALUE (unsigned_char_type_node
))));
4745 // Casting [-120,20] from signed char to unsigned short.
4746 // => [0, 20][0xff88, 0xffff].
4747 r0
= int_range
<1> (SCHAR (-120), SCHAR (20));
4748 range_cast (r0
, short_unsigned_type_node
);
4749 r1
= int_range
<1> (UINT16 (0), UINT16 (20));
4750 r2
= int_range
<1> (UINT16 (0xff88), UINT16 (0xffff));
4752 ASSERT_TRUE (r0
== r1
);
4753 // A truncating cast back to signed char will work because [-120, 20]
4754 // is representable in signed char.
4755 range_cast (r0
, signed_char_type_node
);
4756 ASSERT_TRUE (r0
== int_range
<1> (SCHAR (-120), SCHAR (20)));
4758 // unsigned char -> signed short
4759 // (signed short)[(unsigned char)25, (unsigned char)250]
4760 // => [(signed short)25, (signed short)250]
4761 r0
= rold
= int_range
<1> (UCHAR (25), UCHAR (250));
4762 range_cast (r0
, short_integer_type_node
);
4763 r1
= int_range
<1> (INT16 (25), INT16 (250));
4764 ASSERT_TRUE (r0
== r1
);
4765 range_cast (r0
, unsigned_char_type_node
);
4766 ASSERT_TRUE (r0
== rold
);
4768 // Test casting a wider signed [-MIN,MAX] to a nar`rower unsigned.
4769 r0
= int_range
<1> (TYPE_MIN_VALUE (long_long_integer_type_node
),
4770 TYPE_MAX_VALUE (long_long_integer_type_node
));
4771 range_cast (r0
, short_unsigned_type_node
);
4772 r1
= int_range
<1> (TYPE_MIN_VALUE (short_unsigned_type_node
),
4773 TYPE_MAX_VALUE (short_unsigned_type_node
));
4774 ASSERT_TRUE (r0
== r1
);
4776 // Casting NONZERO to a narrower type will wrap/overflow so
4777 // it's just the entire range for the narrower type.
4779 // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32]. This is
4780 // is outside of the range of a smaller range, return the full
4782 if (TYPE_PRECISION (integer_type_node
)
4783 > TYPE_PRECISION (short_integer_type_node
))
4785 r0
= range_nonzero (integer_type_node
);
4786 range_cast (r0
, short_integer_type_node
);
4787 r1
= int_range
<1> (TYPE_MIN_VALUE (short_integer_type_node
),
4788 TYPE_MAX_VALUE (short_integer_type_node
));
4789 ASSERT_TRUE (r0
== r1
);
4792 // Casting NONZERO from a narrower signed to a wider signed.
4794 // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
4795 // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
4796 r0
= range_nonzero (short_integer_type_node
);
4797 range_cast (r0
, integer_type_node
);
4798 r1
= int_range
<1> (INT (-32768), INT (-1));
4799 r2
= int_range
<1> (INT (1), INT (32767));
4801 ASSERT_TRUE (r0
== r1
);
4805 range_op_lshift_tests ()
4807 // Test that 0x808.... & 0x8.... still contains 0x8....
4808 // for a large set of numbers.
4811 tree big_type
= long_long_unsigned_type_node
;
4812 // big_num = 0x808,0000,0000,0000
4813 tree big_num
= fold_build2 (LSHIFT_EXPR
, big_type
,
4814 build_int_cst (big_type
, 0x808),
4815 build_int_cst (big_type
, 48));
4816 op_bitwise_and
.fold_range (res
, big_type
,
4817 int_range
<1> (big_type
),
4818 int_range
<1> (big_num
, big_num
));
4819 // val = 0x8,0000,0000,0000
4820 tree val
= fold_build2 (LSHIFT_EXPR
, big_type
,
4821 build_int_cst (big_type
, 0x8),
4822 build_int_cst (big_type
, 48));
4823 ASSERT_TRUE (res
.contains_p (val
));
4826 if (TYPE_PRECISION (unsigned_type_node
) > 31)
4828 // unsigned VARYING = op1 << 1 should be VARYING.
4829 int_range
<2> lhs (unsigned_type_node
);
4830 int_range
<2> shift (INT (1), INT (1));
4832 op_lshift
.op1_range (op1
, unsigned_type_node
, lhs
, shift
);
4833 ASSERT_TRUE (op1
.varying_p ());
4835 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
4836 int_range
<2> zero (UINT (0), UINT (0));
4837 op_lshift
.op1_range (op1
, unsigned_type_node
, zero
, shift
);
4838 ASSERT_TRUE (op1
.num_pairs () == 2);
4839 // Remove the [0,0] range.
4840 op1
.intersect (zero
);
4841 ASSERT_TRUE (op1
.num_pairs () == 1);
4842 // op1 << 1 should be [0x8000,0x8000] << 1,
4843 // which should result in [0,0].
4844 int_range_max result
;
4845 op_lshift
.fold_range (result
, unsigned_type_node
, op1
, shift
);
4846 ASSERT_TRUE (result
== zero
);
4848 // signed VARYING = op1 << 1 should be VARYING.
4849 if (TYPE_PRECISION (integer_type_node
) > 31)
4851 // unsigned VARYING = op1 << 1 hould be VARYING.
4852 int_range
<2> lhs (integer_type_node
);
4853 int_range
<2> shift (INT (1), INT (1));
4855 op_lshift
.op1_range (op1
, integer_type_node
, lhs
, shift
);
4856 ASSERT_TRUE (op1
.varying_p ());
4858 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
4859 int_range
<2> zero (INT (0), INT (0));
4860 op_lshift
.op1_range (op1
, integer_type_node
, zero
, shift
);
4861 ASSERT_TRUE (op1
.num_pairs () == 2);
4862 // Remove the [0,0] range.
4863 op1
.intersect (zero
);
4864 ASSERT_TRUE (op1
.num_pairs () == 1);
4865 // op1 << 1 shuould be [0x8000,0x8000] << 1,
4866 // which should result in [0,0].
4867 int_range_max result
;
4868 op_lshift
.fold_range (result
, unsigned_type_node
, op1
, shift
);
4869 ASSERT_TRUE (result
== zero
);
4874 range_op_rshift_tests ()
4876 // unsigned: [3, MAX] = OP1 >> 1
4878 int_range_max
lhs (build_int_cst (unsigned_type_node
, 3),
4879 TYPE_MAX_VALUE (unsigned_type_node
));
4880 int_range_max
one (build_one_cst (unsigned_type_node
),
4881 build_one_cst (unsigned_type_node
));
4883 op_rshift
.op1_range (op1
, unsigned_type_node
, lhs
, one
);
4884 ASSERT_FALSE (op1
.contains_p (UINT (3)));
4887 // signed: [3, MAX] = OP1 >> 1
4889 int_range_max
lhs (INT (3), TYPE_MAX_VALUE (integer_type_node
));
4890 int_range_max
one (INT (1), INT (1));
4892 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, one
);
4893 ASSERT_FALSE (op1
.contains_p (INT (-2)));
4896 // This is impossible, so OP1 should be [].
4897 // signed: [MIN, MIN] = OP1 >> 1
4899 int_range_max
lhs (TYPE_MIN_VALUE (integer_type_node
),
4900 TYPE_MIN_VALUE (integer_type_node
));
4901 int_range_max
one (INT (1), INT (1));
4903 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, one
);
4904 ASSERT_TRUE (op1
.undefined_p ());
4907 // signed: ~[-1] = OP1 >> 31
4908 if (TYPE_PRECISION (integer_type_node
) > 31)
4910 int_range_max
lhs (INT (-1), INT (-1), VR_ANTI_RANGE
);
4911 int_range_max
shift (INT (31), INT (31));
4913 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, shift
);
4914 int_range_max negatives
= range_negatives (integer_type_node
);
4915 negatives
.intersect (op1
);
4916 ASSERT_TRUE (negatives
.undefined_p ());
4921 range_op_bitwise_and_tests ()
4924 tree min
= vrp_val_min (integer_type_node
);
4925 tree max
= vrp_val_max (integer_type_node
);
4926 tree tiny
= fold_build2 (PLUS_EXPR
, integer_type_node
, min
,
4927 build_one_cst (integer_type_node
));
4928 int_range_max
i1 (tiny
, max
);
4929 int_range_max
i2 (build_int_cst (integer_type_node
, 255),
4930 build_int_cst (integer_type_node
, 255));
4932 // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
4933 op_bitwise_and
.op1_range (res
, integer_type_node
, i1
, i2
);
4934 ASSERT_TRUE (res
== int_range
<1> (integer_type_node
));
4936 // VARYING = OP1 & 255: OP1 is VARYING
4937 i1
= int_range
<1> (integer_type_node
);
4938 op_bitwise_and
.op1_range (res
, integer_type_node
, i1
, i2
);
4939 ASSERT_TRUE (res
== int_range
<1> (integer_type_node
));
4941 // For 0 = x & MASK, x is ~MASK.
4943 int_range
<2> zero (integer_zero_node
, integer_zero_node
);
4944 int_range
<2> mask
= int_range
<2> (INT (7), INT (7));
4945 op_bitwise_and
.op1_range (res
, integer_type_node
, zero
, mask
);
4946 wide_int inv
= wi::shwi (~7U, TYPE_PRECISION (integer_type_node
));
4947 ASSERT_TRUE (res
.get_nonzero_bits () == inv
);
4950 // (NONZERO | X) is nonzero.
4951 i1
.set_nonzero (integer_type_node
);
4952 i2
.set_varying (integer_type_node
);
4953 op_bitwise_or
.fold_range (res
, integer_type_node
, i1
, i2
);
4954 ASSERT_TRUE (res
.nonzero_p ());
4956 // (NEGATIVE | X) is nonzero.
4957 i1
= int_range
<1> (INT (-5), INT (-3));
4958 i2
.set_varying (integer_type_node
);
4959 op_bitwise_or
.fold_range (res
, integer_type_node
, i1
, i2
);
4960 ASSERT_FALSE (res
.contains_p (INT (0)));
4964 range_relational_tests ()
4966 int_range
<2> lhs (unsigned_char_type_node
);
4967 int_range
<2> op1 (UCHAR (8), UCHAR (10));
4968 int_range
<2> op2 (UCHAR (20), UCHAR (20));
4970 // Never wrapping additions mean LHS > OP1.
4971 relation_kind code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
4972 ASSERT_TRUE (code
== VREL_GT
);
4974 // Most wrapping additions mean nothing...
4975 op1
= int_range
<2> (UCHAR (8), UCHAR (10));
4976 op2
= int_range
<2> (UCHAR (0), UCHAR (255));
4977 code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
4978 ASSERT_TRUE (code
== VREL_VARYING
);
4980 // However, always wrapping additions mean LHS < OP1.
4981 op1
= int_range
<2> (UCHAR (1), UCHAR (255));
4982 op2
= int_range
<2> (UCHAR (255), UCHAR (255));
4983 code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
4984 ASSERT_TRUE (code
== VREL_LT
);
4990 range_op_rshift_tests ();
4991 range_op_lshift_tests ();
4992 range_op_bitwise_and_tests ();
4993 range_op_cast_tests ();
4994 range_relational_tests ();
4996 extern void range_op_float_tests ();
4997 range_op_float_tests ();
5000 } // namespace selftest
5002 #endif // CHECKING_P