[gcc.git] / gcc / tree-ssa-ifcombine.cc
1 /* Combining of if-expressions on trees.
2 Copyright (C) 2007-2022 Free Software Foundation, Inc.
3 Contributed by Richard Guenther <rguenther@suse.de>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "cfghooks.h"
29 #include "tree-pass.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "ssa.h"
33 #include "tree-pretty-print.h"
34 /* rtl is needed only because arm back-end requires it for
35 BRANCH_COST. */
36 #include "fold-const.h"
37 #include "cfganal.h"
38 #include "gimple-fold.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-cfg.h"
42 #include "tree-ssa.h"
43 #include "attribs.h"
44 #include "asan.h"
45
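/* Note (added for clarity): if the target does not define
   LOGICAL_OP_NON_SHORT_CIRCUIT itself, default to combining conditions
   into straight-line code whenever the target's BRANCH_COST for a
   not-predictable branch is at least 2, i.e. whenever branches are
   considered relatively expensive.  */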
46 #ifndef LOGICAL_OP_NON_SHORT_CIRCUIT
47 #define LOGICAL_OP_NON_SHORT_CIRCUIT \
48 (BRANCH_COST (optimize_function_for_speed_p (cfun), \
49 false) >= 2)
50 #endif
51
52 /* This pass combines COND_EXPRs to simplify control flow. It
53 currently recognizes bit tests and comparisons in chains that
54 represent the logical AND or logical OR of two COND_EXPRs.
55
56 It does so by walking basic blocks in an approximate reverse
57 post-dominator order and trying to match CFG patterns that
58 represent the logical AND or logical OR of two COND_EXPRs.
59 Transformations are done if the COND_EXPR conditions match
60 either
61
62 1. two single bit tests X & (1 << Yn) (for logical AND)
63
64 2. two bit tests X & Yn (for logical OR)
65
66 3. two comparisons X OPn Y (for logical OR)
67
68 To simplify this pass, removing basic blocks and dead code
69 is left to CFG cleanup and DCE. */
70
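/* As a purely illustrative sketch (variable names made up; the exact
   GIMPLE shapes vary), source code such as

     if (x & (1 << a))
       if (x & (1 << b))
         foo ();

   can be combined by this pass into the single test

     if ((x & ((1 << a) | (1 << b))) == ((1 << a) | (1 << b)))
       foo ();

   and, likewise, two bit tests or two comparisons of a common operand
   joined by a logical OR can be merged into one condition.  */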
71
72 /* Recognize an if-then-else CFG pattern, starting the match at the
73 COND_BB basic-block containing the COND_EXPR. The recognized
74 then and else blocks are stored to *THEN_BB and *ELSE_BB. If
75 *THEN_BB and/or *ELSE_BB are already set, they are required to
76 match the then and else basic-blocks to make the pattern match.
77 Returns true if the pattern matched, false otherwise. */
78
79 static bool
80 recognize_if_then_else (basic_block cond_bb,
81 basic_block *then_bb, basic_block *else_bb)
82 {
83 edge t, e;
84
85 if (EDGE_COUNT (cond_bb->succs) != 2)
86 return false;
87
88 /* Find the then/else edges. */
89 t = EDGE_SUCC (cond_bb, 0);
90 e = EDGE_SUCC (cond_bb, 1);
91 if (!(t->flags & EDGE_TRUE_VALUE))
92 std::swap (t, e);
93 if (!(t->flags & EDGE_TRUE_VALUE)
94 || !(e->flags & EDGE_FALSE_VALUE))
95 return false;
96
97 /* Check if the edge destinations point to the required block. */
98 if (*then_bb
99 && t->dest != *then_bb)
100 return false;
101 if (*else_bb
102 && e->dest != *else_bb)
103 return false;
104
105 if (!*then_bb)
106 *then_bb = t->dest;
107 if (!*else_bb)
108 *else_bb = e->dest;
109
110 return true;
111 }
112
113 /* Verify that the basic block BB has no side-effects. Return
114 true in that case, false otherwise. */
115
116 static bool
117 bb_no_side_effects_p (basic_block bb)
118 {
119 gimple_stmt_iterator gsi;
120
121 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
122 {
123 gimple *stmt = gsi_stmt (gsi);
124
125 if (is_gimple_debug (stmt))
126 continue;
127
128 if (gimple_has_side_effects (stmt)
129 || gimple_uses_undefined_value_p (stmt)
130 || gimple_could_trap_p (stmt)
131 || gimple_vuse (stmt)
132 /* const calls don't match any of the above, yet they could
133 still have some side-effects - they could contain
134 gimple_could_trap_p statements, like floating point
135 exceptions or integer division by zero. See PR70586.
136 FIXME: perhaps gimple_has_side_effects or gimple_could_trap_p
137 should handle this. */
138 || is_gimple_call (stmt))
139 return false;
140 }
141
142 return true;
143 }
144
145 /* Return true if BB is an empty forwarder block to TO_BB. */
146
147 static bool
148 forwarder_block_to (basic_block bb, basic_block to_bb)
149 {
150 return empty_block_p (bb)
151 && single_succ_p (bb)
152 && single_succ (bb) == to_bb;
153 }
154
155 /* Verify that all PHI node arguments in DEST for edges from BB1 or
156 BB2 to DEST are the same. This makes the CFG merge point
157 free from side-effects. Return true in this case, else false. */
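/* For instance (made-up GIMPLE), with a merge point containing
     x_1 = PHI <5(bb1), 5(bb2), 7(bb3)>
   the edges from bb1 and bb2 can be merged, whereas differing
   arguments on those two edges would make merging change x_1.  */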
158
159 static bool
160 same_phi_args_p (basic_block bb1, basic_block bb2, basic_block dest)
161 {
162 edge e1 = find_edge (bb1, dest);
163 edge e2 = find_edge (bb2, dest);
164 gphi_iterator gsi;
165 gphi *phi;
166
167 for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
168 {
169 phi = gsi.phi ();
170 if (!operand_equal_p (PHI_ARG_DEF_FROM_EDGE (phi, e1),
171 PHI_ARG_DEF_FROM_EDGE (phi, e2), 0))
172 return false;
173 }
174
175 return true;
176 }
177
178 /* Return the best representative SSA name for CANDIDATE which is used
179 in a bit test. */
180
181 static tree
182 get_name_for_bit_test (tree candidate)
183 {
184 /* Skip single-use names in favor of using the name from a
185 non-widening conversion definition. */
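   /* For example (hypothetical SSA names): if CANDIDATE is _2, defined
      by the narrowing conversion  _2 = (unsigned char) x_1  and used
      only in the bit test, return x_1, so bit tests on _2 and on x_1
      are recognized as testing the same name.  */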
186 if (TREE_CODE (candidate) == SSA_NAME
187 && has_single_use (candidate))
188 {
189 gimple *def_stmt = SSA_NAME_DEF_STMT (candidate);
190 if (is_gimple_assign (def_stmt)
191 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
192 {
193 if (TYPE_PRECISION (TREE_TYPE (candidate))
194 <= TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
195 return gimple_assign_rhs1 (def_stmt);
196 }
197 }
198
199 return candidate;
200 }
201
202 /* Recognize a single bit test pattern in a GIMPLE_COND and its defining
203 statements. Store the name being tested in *NAME and the bit
204 in *BIT. The GIMPLE_COND computes *NAME & (1 << *BIT).
205 Returns true if the pattern matched, false otherwise. */
206
207 static bool
208 recognize_single_bit_test (gcond *cond, tree *name, tree *bit, bool inv)
209 {
210 gimple *stmt;
211
212 /* Get at the definition of the result of the bit test. */
213 if (gimple_cond_code (cond) != (inv ? EQ_EXPR : NE_EXPR)
214 || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
215 || !integer_zerop (gimple_cond_rhs (cond)))
216 return false;
217 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
218 if (!is_gimple_assign (stmt))
219 return false;
220
221 /* Look at which bit is tested. One form to recognize is
222 D.1985_5 = state_3(D) >> control1_4(D);
223 D.1986_6 = (int) D.1985_5;
224 D.1987_7 = op0 & 1;
225 if (D.1987_7 != 0) */
226 if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
227 && integer_onep (gimple_assign_rhs2 (stmt))
228 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
229 {
230 tree orig_name = gimple_assign_rhs1 (stmt);
231
232 /* Look through copies and conversions to eventually
233 find the stmt that computes the shift. */
234 stmt = SSA_NAME_DEF_STMT (orig_name);
235
236 while (is_gimple_assign (stmt)
237 && ((CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt))
238 && (TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (stmt)))
239 <= TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt))))
240 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
241 || gimple_assign_ssa_name_copy_p (stmt)))
242 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
243
244 /* If we found such, decompose it. */
245 if (is_gimple_assign (stmt)
246 && gimple_assign_rhs_code (stmt) == RSHIFT_EXPR)
247 {
248 /* op0 & (1 << op1) */
249 *bit = gimple_assign_rhs2 (stmt);
250 *name = gimple_assign_rhs1 (stmt);
251 }
252 else
253 {
254 /* t & 1 */
255 *bit = integer_zero_node;
256 *name = get_name_for_bit_test (orig_name);
257 }
258
259 return true;
260 }
261
262 /* Another form is
263 D.1987_7 = op0 & (1 << CST)
264 if (D.1987_7 != 0) */
265 if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
266 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
267 && integer_pow2p (gimple_assign_rhs2 (stmt)))
268 {
269 *name = gimple_assign_rhs1 (stmt);
270 *bit = build_int_cst (integer_type_node,
271 tree_log2 (gimple_assign_rhs2 (stmt)));
272 return true;
273 }
274
275 /* Another form is
276 D.1986_6 = 1 << control1_4(D)
277 D.1987_7 = op0 & D.1986_6
278 if (D.1987_7 != 0) */
279 if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
280 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
281 && TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME)
282 {
283 gimple *tmp;
284
285 /* Both arguments of the BIT_AND_EXPR can be the single-bit
286 specifying expression. */
287 tmp = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
288 if (is_gimple_assign (tmp)
289 && gimple_assign_rhs_code (tmp) == LSHIFT_EXPR
290 && integer_onep (gimple_assign_rhs1 (tmp)))
291 {
292 *name = gimple_assign_rhs2 (stmt);
293 *bit = gimple_assign_rhs2 (tmp);
294 return true;
295 }
296
297 tmp = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
298 if (is_gimple_assign (tmp)
299 && gimple_assign_rhs_code (tmp) == LSHIFT_EXPR
300 && integer_onep (gimple_assign_rhs1 (tmp)))
301 {
302 *name = gimple_assign_rhs1 (stmt);
303 *bit = gimple_assign_rhs2 (tmp);
304 return true;
305 }
306 }
307
308 return false;
309 }
310
311 /* Recognize a bit test pattern in a GIMPLE_COND and its defining
312 statements. Store the name being tested in *NAME and the bits
313 in *BITS. The COND_EXPR computes *NAME & *BITS.
314 Returns true if the pattern matched, false otherwise. */
315
316 static bool
317 recognize_bits_test (gcond *cond, tree *name, tree *bits, bool inv)
318 {
319 gimple *stmt;
320
321 /* Get at the definition of the result of the bit test. */
322 if (gimple_cond_code (cond) != (inv ? EQ_EXPR : NE_EXPR)
323 || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
324 || !integer_zerop (gimple_cond_rhs (cond)))
325 return false;
326 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
327 if (!is_gimple_assign (stmt)
328 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR)
329 return false;
330
331 *name = get_name_for_bit_test (gimple_assign_rhs1 (stmt));
332 *bits = gimple_assign_rhs2 (stmt);
333
334 return true;
335 }
336
337
338 /* Update the profile after the code in outer_cond_bb was adjusted so
339 that outer_cond_bb no longer has a condition. */
340
341 static void
342 update_profile_after_ifcombine (basic_block inner_cond_bb,
343 basic_block outer_cond_bb)
344 {
345 edge outer_to_inner = find_edge (outer_cond_bb, inner_cond_bb);
346 edge outer2 = (EDGE_SUCC (outer_cond_bb, 0) == outer_to_inner
347 ? EDGE_SUCC (outer_cond_bb, 1)
348 : EDGE_SUCC (outer_cond_bb, 0));
349 edge inner_taken = EDGE_SUCC (inner_cond_bb, 0);
350 edge inner_not_taken = EDGE_SUCC (inner_cond_bb, 1);
351
352 if (inner_taken->dest != outer2->dest)
353 std::swap (inner_taken, inner_not_taken);
354 gcc_assert (inner_taken->dest == outer2->dest);
355
356 /* In the following we assume that inner_cond_bb has a single predecessor. */
357 gcc_assert (single_pred_p (inner_cond_bb));
358
359 /* Path outer_cond_bb->(outer2) needs to be merged into path
360 outer_cond_bb->(outer_to_inner)->inner_cond_bb->(inner_taken)
361 and probability of inner_not_taken updated. */
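/* Worked example with made-up numbers: if outer_to_inner has
   probability 60% (so outer2 has 40%) and inner_taken had 50%,
   the merged inner_taken probability becomes 40% + 60% * 50% = 70%,
   leaving 30% for inner_not_taken.  */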
362
363 inner_cond_bb->count = outer_cond_bb->count;
364
365 /* Handle the special case where the inner_taken probability is always. In
366 this case we know that the overall outcome will be always as well, but
367 combining the probabilities would be conservative because it does not know
368 that outer2->probability is the inverse of outer_to_inner->probability. */
369 if (inner_taken->probability == profile_probability::always ())
370 ;
371 else
372 inner_taken->probability = outer2->probability + outer_to_inner->probability
373 * inner_taken->probability;
374 inner_not_taken->probability = profile_probability::always ()
375 - inner_taken->probability;
376
377 outer_to_inner->probability = profile_probability::always ();
378 outer2->probability = profile_probability::never ();
379 }
380
381 /* If-convert an && pattern with a common else block. The inner
382 if is specified by its INNER_COND_BB, the outer by OUTER_COND_BB.
383 inner_inv, outer_inv and result_inv indicate whether the conditions
384 are inverted.
385 Returns true if the edges to the common else basic-block were merged. */
386
387 static bool
388 ifcombine_ifandif (basic_block inner_cond_bb, bool inner_inv,
389 basic_block outer_cond_bb, bool outer_inv, bool result_inv)
390 {
391 gimple_stmt_iterator gsi;
392 gimple *inner_stmt, *outer_stmt;
393 gcond *inner_cond, *outer_cond;
394 tree name1, name2, bit1, bit2, bits1, bits2;
395
396 inner_stmt = last_stmt (inner_cond_bb);
397 if (!inner_stmt
398 || gimple_code (inner_stmt) != GIMPLE_COND)
399 return false;
400 inner_cond = as_a <gcond *> (inner_stmt);
401
402 outer_stmt = last_stmt (outer_cond_bb);
403 if (!outer_stmt
404 || gimple_code (outer_stmt) != GIMPLE_COND)
405 return false;
406 outer_cond = as_a <gcond *> (outer_stmt);
407
408 /* See if we test a single bit of the same name in both tests. In
409 that case remove the outer test, merging both else edges,
410 and change the inner one to test for
411 name & ((1 << bit1) | (1 << bit2)) == ((1 << bit1) | (1 << bit2)). */
412 if (recognize_single_bit_test (inner_cond, &name1, &bit1, inner_inv)
413 && recognize_single_bit_test (outer_cond, &name2, &bit2, outer_inv)
414 && name1 == name2)
415 {
416 tree t, t2;
417
418 /* Do it. */
419 gsi = gsi_for_stmt (inner_cond);
420 t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1),
421 build_int_cst (TREE_TYPE (name1), 1), bit1);
422 t2 = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1),
423 build_int_cst (TREE_TYPE (name1), 1), bit2);
424 t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), t, t2);
425 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
426 true, GSI_SAME_STMT);
427 t2 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
428 t2 = force_gimple_operand_gsi (&gsi, t2, true, NULL_TREE,
429 true, GSI_SAME_STMT);
430 t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR,
431 boolean_type_node, t2, t);
432 t = canonicalize_cond_expr_cond (t);
433 if (!t)
434 return false;
435 gimple_cond_set_condition_from_tree (inner_cond, t);
436 update_stmt (inner_cond);
437
438 /* Leave CFG optimization to cfg_cleanup. */
439 gimple_cond_set_condition_from_tree (outer_cond,
440 outer_inv ? boolean_false_node : boolean_true_node);
441 update_stmt (outer_cond);
442
443 update_profile_after_ifcombine (inner_cond_bb, outer_cond_bb);
444
445 if (dump_file)
446 {
447 fprintf (dump_file, "optimizing double bit test to ");
448 print_generic_expr (dump_file, name1);
449 fprintf (dump_file, " & T == T\nwith temporary T = (1 << ");
450 print_generic_expr (dump_file, bit1);
451 fprintf (dump_file, ") | (1 << ");
452 print_generic_expr (dump_file, bit2);
453 fprintf (dump_file, ")\n");
454 }
455
456 return true;
457 }
458
459 /* See if we have two bit tests of the same name in both tests.
460 In that case remove the outer test and change the inner one to
461 test for name & (bits1 | bits2) != 0. */
462 else if (recognize_bits_test (inner_cond, &name1, &bits1, !inner_inv)
463 && recognize_bits_test (outer_cond, &name2, &bits2, !outer_inv))
464 {
465 gimple_stmt_iterator gsi;
466 tree t;
467
468 /* Find the common name which is bit-tested. */
469 if (name1 == name2)
470 ;
471 else if (bits1 == bits2)
472 {
473 std::swap (name2, bits2);
474 std::swap (name1, bits1);
475 }
476 else if (name1 == bits2)
477 std::swap (name2, bits2);
478 else if (bits1 == name2)
479 std::swap (name1, bits1);
480 else
481 return false;
482
483 /* As we strip non-widening conversions in finding a common
484 name that is tested, make sure to end up with an integral
485 type for building the bit operations. */
486 if (TYPE_PRECISION (TREE_TYPE (bits1))
487 >= TYPE_PRECISION (TREE_TYPE (bits2)))
488 {
489 bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
490 name1 = fold_convert (TREE_TYPE (bits1), name1);
491 bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
492 bits2 = fold_convert (TREE_TYPE (bits1), bits2);
493 }
494 else
495 {
496 bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
497 name1 = fold_convert (TREE_TYPE (bits2), name1);
498 bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
499 bits1 = fold_convert (TREE_TYPE (bits2), bits1);
500 }
501
502 /* Do it. */
503 gsi = gsi_for_stmt (inner_cond);
504 t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), bits1, bits2);
505 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
506 true, GSI_SAME_STMT);
507 t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
508 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
509 true, GSI_SAME_STMT);
510 t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR, boolean_type_node, t,
511 build_int_cst (TREE_TYPE (t), 0));
512 t = canonicalize_cond_expr_cond (t);
513 if (!t)
514 return false;
515 gimple_cond_set_condition_from_tree (inner_cond, t);
516 update_stmt (inner_cond);
517
518 /* Leave CFG optimization to cfg_cleanup. */
519 gimple_cond_set_condition_from_tree (outer_cond,
520 outer_inv ? boolean_false_node : boolean_true_node);
521 update_stmt (outer_cond);
522 update_profile_after_ifcombine (inner_cond_bb, outer_cond_bb);
523
524 if (dump_file)
525 {
526 fprintf (dump_file, "optimizing bits or bits test to ");
527 print_generic_expr (dump_file, name1);
528 fprintf (dump_file, " & T != 0\nwith temporary T = ");
529 print_generic_expr (dump_file, bits1);
530 fprintf (dump_file, " | ");
531 print_generic_expr (dump_file, bits2);
532 fprintf (dump_file, "\n");
533 }
534
535 return true;
536 }
537
538 /* See if we have two comparisons that we can merge into one. */
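  /* Illustrative example (made-up values): for
       if (x > 5) ... if (x > 3) ...
     maybe_fold_and_comparisons folds the conjunction to the single test
     x > 5.  If no folding is found, and the target prefers
     non-short-circuit logical ops while -fsanitize-coverage is not
     enabled, the two conditions are instead evaluated together as
     (x > 5) & (x > 3) in the inner block.  */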
539 else if (TREE_CODE_CLASS (gimple_cond_code (inner_cond)) == tcc_comparison
540 && TREE_CODE_CLASS (gimple_cond_code (outer_cond)) == tcc_comparison)
541 {
542 tree t;
543 enum tree_code inner_cond_code = gimple_cond_code (inner_cond);
544 enum tree_code outer_cond_code = gimple_cond_code (outer_cond);
545
546 /* Invert comparisons if necessary (and possible). */
547 if (inner_inv)
548 inner_cond_code = invert_tree_comparison (inner_cond_code,
549 HONOR_NANS (gimple_cond_lhs (inner_cond)));
550 if (inner_cond_code == ERROR_MARK)
551 return false;
552 if (outer_inv)
553 outer_cond_code = invert_tree_comparison (outer_cond_code,
554 HONOR_NANS (gimple_cond_lhs (outer_cond)));
555 if (outer_cond_code == ERROR_MARK)
556 return false;
557 /* Don't return false so fast, try maybe_fold_or_comparisons? */
558
559 if (!(t = maybe_fold_and_comparisons (boolean_type_node, inner_cond_code,
560 gimple_cond_lhs (inner_cond),
561 gimple_cond_rhs (inner_cond),
562 outer_cond_code,
563 gimple_cond_lhs (outer_cond),
564 gimple_cond_rhs (outer_cond),
565 gimple_bb (outer_cond))))
566 {
567 tree t1, t2;
568 gimple_stmt_iterator gsi;
569 bool logical_op_non_short_circuit = LOGICAL_OP_NON_SHORT_CIRCUIT;
570 if (param_logical_op_non_short_circuit != -1)
571 logical_op_non_short_circuit
572 = param_logical_op_non_short_circuit;
573 if (!logical_op_non_short_circuit || sanitize_coverage_p ())
574 return false;
575 /* Only do this optimization if the inner bb contains only the conditional. */
576 if (!gsi_one_before_end_p (gsi_start_nondebug_after_labels_bb (inner_cond_bb)))
577 return false;
578 t1 = fold_build2_loc (gimple_location (inner_cond),
579 inner_cond_code,
580 boolean_type_node,
581 gimple_cond_lhs (inner_cond),
582 gimple_cond_rhs (inner_cond));
583 t2 = fold_build2_loc (gimple_location (outer_cond),
584 outer_cond_code,
585 boolean_type_node,
586 gimple_cond_lhs (outer_cond),
587 gimple_cond_rhs (outer_cond));
588 t = fold_build2_loc (gimple_location (inner_cond),
589 TRUTH_AND_EXPR, boolean_type_node, t1, t2);
590 if (result_inv)
591 {
592 t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t);
593 result_inv = false;
594 }
595 gsi = gsi_for_stmt (inner_cond);
596 t = force_gimple_operand_gsi_1 (&gsi, t, is_gimple_condexpr, NULL, true,
597 GSI_SAME_STMT);
598 }
599 if (result_inv)
600 t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t);
601 t = canonicalize_cond_expr_cond (t);
602 if (!t)
603 return false;
604 if (!is_gimple_condexpr_for_cond (t))
605 {
606 gsi = gsi_for_stmt (inner_cond);
607 t = force_gimple_operand_gsi_1 (&gsi, t, is_gimple_condexpr_for_cond,
608 NULL, true, GSI_SAME_STMT);
609 }
610 gimple_cond_set_condition_from_tree (inner_cond, t);
611 update_stmt (inner_cond);
612
613 /* Leave CFG optimization to cfg_cleanup. */
614 gimple_cond_set_condition_from_tree (outer_cond,
615 outer_inv ? boolean_false_node : boolean_true_node);
616 update_stmt (outer_cond);
617 update_profile_after_ifcombine (inner_cond_bb, outer_cond_bb);
618
619 if (dump_file)
620 {
621 fprintf (dump_file, "optimizing two comparisons to ");
622 print_generic_expr (dump_file, t);
623 fprintf (dump_file, "\n");
624 }
625
626 return true;
627 }
628
629 return false;
630 }
631
632 /* Helper function for tree_ssa_ifcombine_bb. Recognize a CFG pattern and
633 dispatch to the appropriate if-conversion helper for a particular
634 set of INNER_COND_BB, OUTER_COND_BB, THEN_BB and ELSE_BB.
635 PHI_PRED_BB should be one of INNER_COND_BB, THEN_BB or ELSE_BB. */
636
637 static bool
638 tree_ssa_ifcombine_bb_1 (basic_block inner_cond_bb, basic_block outer_cond_bb,
639 basic_block then_bb, basic_block else_bb,
640 basic_block phi_pred_bb)
641 {
642 /* The && form is characterized by a common else_bb with
643 the two edges leading to it mergeable. The latter is
644 guaranteed by matching PHI arguments in the else_bb and
645 the inner cond_bb having no side-effects. */
646 if (phi_pred_bb != else_bb
647 && recognize_if_then_else (outer_cond_bb, &inner_cond_bb, &else_bb)
648 && same_phi_args_p (outer_cond_bb, phi_pred_bb, else_bb))
649 {
650 /* We have
651 <outer_cond_bb>
652 if (q) goto inner_cond_bb; else goto else_bb;
653 <inner_cond_bb>
654 if (p) goto ...; else goto else_bb;
655 ...
656 <else_bb>
657 ...
658 */
659 return ifcombine_ifandif (inner_cond_bb, false, outer_cond_bb, false,
660 false);
661 }
662
663 /* And a version where the outer condition is negated. */
664 if (phi_pred_bb != else_bb
665 && recognize_if_then_else (outer_cond_bb, &else_bb, &inner_cond_bb)
666 && same_phi_args_p (outer_cond_bb, phi_pred_bb, else_bb))
667 {
668 /* We have
669 <outer_cond_bb>
670 if (q) goto else_bb; else goto inner_cond_bb;
671 <inner_cond_bb>
672 if (p) goto ...; else goto else_bb;
673 ...
674 <else_bb>
675 ...
676 */
677 return ifcombine_ifandif (inner_cond_bb, false, outer_cond_bb, true,
678 false);
679 }
680
681 /* The || form is characterized by a common then_bb with the
682 two edges leading to it mergeable. The latter is guaranteed
683 by matching PHI arguments in the then_bb and the inner cond_bb
684 having no side-effects. */
685 if (phi_pred_bb != then_bb
686 && recognize_if_then_else (outer_cond_bb, &then_bb, &inner_cond_bb)
687 && same_phi_args_p (outer_cond_bb, phi_pred_bb, then_bb))
688 {
689 /* We have
690 <outer_cond_bb>
691 if (q) goto then_bb; else goto inner_cond_bb;
692 <inner_cond_bb>
693 if (p) goto then_bb; else goto ...;
694 <then_bb>
695 ...
696 */
697 return ifcombine_ifandif (inner_cond_bb, true, outer_cond_bb, true,
698 true);
699 }
700
701 /* And a version where the outer condition is negated. */
702 if (phi_pred_bb != then_bb
703 && recognize_if_then_else (outer_cond_bb, &inner_cond_bb, &then_bb)
704 && same_phi_args_p (outer_cond_bb, phi_pred_bb, then_bb))
705 {
706 /* We have
707 <outer_cond_bb>
708 if (q) goto inner_cond_bb; else goto then_bb;
709 <inner_cond_bb>
710 if (p) goto then_bb; else goto ...;
711 <then_bb>
712 ...
713 */
714 return ifcombine_ifandif (inner_cond_bb, true, outer_cond_bb, false,
715 true);
716 }
717
718 return false;
719 }
720
721 /* Recognize a CFG pattern and dispatch to the appropriate
722 if-conversion helper. We start with BB as the innermost
723 worker basic-block. Returns true if a transformation was done. */
724
725 static bool
726 tree_ssa_ifcombine_bb (basic_block inner_cond_bb)
727 {
728 basic_block then_bb = NULL, else_bb = NULL;
729
730 if (!recognize_if_then_else (inner_cond_bb, &then_bb, &else_bb))
731 return false;
732
733 /* Recognize && and || of two conditions with a common
734 then/else block whose entry edges we can merge. That is:
735 if (a || b)
736 ;
737 and
738 if (a && b)
739 ;
740 This requires a single predecessor of the inner cond_bb. */
741 if (single_pred_p (inner_cond_bb)
742 && bb_no_side_effects_p (inner_cond_bb))
743 {
744 basic_block outer_cond_bb = single_pred (inner_cond_bb);
745
746 if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb,
747 then_bb, else_bb, inner_cond_bb))
748 return true;
749
750 if (forwarder_block_to (else_bb, then_bb))
751 {
752 /* Other possibilities for the && form, if else_bb is
753 an empty forwarder block to then_bb. Compared to the above simpler
754 forms this can be treated as if then_bb and else_bb were swapped,
755 and the corresponding inner_cond_bb not inverted because of that.
756 For same_phi_args_p we look at equality of arguments between
757 edge from outer_cond_bb and the forwarder block. */
758 if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb, else_bb,
759 then_bb, else_bb))
760 return true;
761 }
762 else if (forwarder_block_to (then_bb, else_bb))
763 {
764 /* Other possibilities for the || form, if then_bb is
765 an empty forwarder block to else_bb. Compared to the above simpler
766 forms this can be treated as if then_bb and else_bb were swapped,
767 and the corresponding inner_cond_bb not inverted because of that.
768 For same_phi_args_p we look at equality of arguments between
769 edge from outer_cond_bb and the forwarder block. */
770 if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb, else_bb,
771 then_bb, then_bb))
772 return true;
773 }
774 }
775
776 return false;
777 }
778
779 /* Main entry for the tree if-combine pass. */
780
781 namespace {
782
783 const pass_data pass_data_tree_ifcombine =
784 {
785 GIMPLE_PASS, /* type */
786 "ifcombine", /* name */
787 OPTGROUP_NONE, /* optinfo_flags */
788 TV_TREE_IFCOMBINE, /* tv_id */
789 ( PROP_cfg | PROP_ssa ), /* properties_required */
790 0, /* properties_provided */
791 0, /* properties_destroyed */
792 0, /* todo_flags_start */
793 TODO_update_ssa, /* todo_flags_finish */
794 };
795
796 class pass_tree_ifcombine : public gimple_opt_pass
797 {
798 public:
799 pass_tree_ifcombine (gcc::context *ctxt)
800 : gimple_opt_pass (pass_data_tree_ifcombine, ctxt)
801 {}
802
803 /* opt_pass methods: */
804 virtual unsigned int execute (function *);
805
806 }; // class pass_tree_ifcombine
807
808 unsigned int
809 pass_tree_ifcombine::execute (function *fun)
810 {
811 basic_block *bbs;
812 bool cfg_changed = false;
813 int i;
814
815 bbs = single_pred_before_succ_order ();
816 calculate_dominance_info (CDI_DOMINATORS);
817
818 /* Search every basic block for COND_EXPRs we may be able to optimize.
819
820 We walk the blocks in an order that guarantees that a block with
821 a single predecessor is processed after the predecessor.
822 This ensures that we collapse outer ifs before visiting the
823 inner ones, and also that we do not try to visit a removed
824 block. This is the opposite of PHI-OPT, because we cascade the
825 combining rather than cascading PHIs. */
826 for (i = n_basic_blocks_for_fn (fun) - NUM_FIXED_BLOCKS - 1; i >= 0; i--)
827 {
828 basic_block bb = bbs[i];
829 gimple *stmt = last_stmt (bb);
830
831 if (stmt
832 && gimple_code (stmt) == GIMPLE_COND)
833 if (tree_ssa_ifcombine_bb (bb))
834 {
835 /* Clear range info from all stmts in BB, which is now executed
836 conditionally on an always true/false condition. */
837 reset_flow_sensitive_info_in_bb (bb);
838 cfg_changed |= true;
839 }
840 }
841
842 free (bbs);
843
844 return cfg_changed ? TODO_cleanup_cfg : 0;
845 }
846
847 } // anon namespace
848
849 gimple_opt_pass *
850 make_pass_tree_ifcombine (gcc::context *ctxt)
851 {
852 return new pass_tree_ifcombine (ctxt);
853 }