1 /* Optimization of PHI nodes by converting them into straightline code.
2 Copyright (C) 2004-2022 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "insn-codes.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "cfghooks.h"
29 #include "tree-pass.h"
30 #include "ssa.h"
31 #include "tree-ssa.h"
32 #include "optabs-tree.h"
33 #include "insn-config.h"
34 #include "gimple-pretty-print.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "cfganal.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-cfg.h"
42 #include "tree-dfa.h"
43 #include "domwalk.h"
44 #include "cfgloop.h"
45 #include "tree-data-ref.h"
46 #include "tree-scalar-evolution.h"
47 #include "tree-inline.h"
48 #include "case-cfn-macros.h"
49 #include "tree-eh.h"
50 #include "gimple-fold.h"
51 #include "internal-fn.h"
52 #include "gimple-range.h"
53 #include "gimple-match.h"
54 #include "dbgcnt.h"
55
56 static unsigned int tree_ssa_phiopt_worker (bool, bool, bool);
57 static bool two_value_replacement (basic_block, basic_block, edge, gphi *,
58 tree, tree);
59 static bool match_simplify_replacement (basic_block, basic_block,
60 edge, edge, gphi *, tree, tree, bool);
61 static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree,
62 gimple *);
63 static int value_replacement (basic_block, basic_block,
64 edge, edge, gphi *, tree, tree);
65 static bool minmax_replacement (basic_block, basic_block,
66 edge, edge, gphi *, tree, tree);
67 static bool spaceship_replacement (basic_block, basic_block,
68 edge, edge, gphi *, tree, tree);
69 static bool cond_removal_in_builtin_zero_pattern (basic_block, basic_block,
70 edge, edge, gphi *,
71 tree, tree);
72 static bool cond_store_replacement (basic_block, basic_block, edge, edge,
73 hash_set<tree> *);
74 static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
75 static hash_set<tree> * get_non_trapping ();
76 static void replace_phi_edge_with_variable (basic_block, edge, gphi *, tree);
77 static void hoist_adjacent_loads (basic_block, basic_block,
78 basic_block, basic_block);
79 static bool gate_hoist_loads (void);
80
81 /* This pass tries to transform conditional stores into unconditional
82 ones, enabling further simplifications with the simpler then and else
83 blocks. In particular it replaces this:
84
85 bb0:
86 if (cond) goto bb2; else goto bb1;
87 bb1:
88 *p = RHS;
89 bb2:
90
91 with
92
93 bb0:
94 if (cond) goto bb1; else goto bb2;
95 bb1:
96 condtmp' = *p;
97 bb2:
98 condtmp = PHI <RHS, condtmp'>
99 *p = condtmp;
100
101 This transformation can only be done under several constraints,
102 documented below. It also replaces:
103
104 bb0:
105 if (cond) goto bb2; else goto bb1;
106 bb1:
107 *p = RHS1;
108 goto bb3;
109 bb2:
110 *p = RHS2;
111 bb3:
112
113 with
114
115 bb0:
116 if (cond) goto bb3; else goto bb1;
117 bb1:
118 bb3:
119 condtmp = PHI <RHS1, RHS2>
120 *p = condtmp; */
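
/* At the source level the first pattern above corresponds roughly to

     if (cond)
       *p = rhs;

   which, assuming *p is known not to trap (see get_non_trapping below),
   becomes an unconditional store of a selected value:

     tmp = cond ? rhs : *p;
     *p = tmp;

   This is only a sketch of the intent; the transformation itself is
   performed on the GIMPLE form shown above.  */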
121
122 static unsigned int
123 tree_ssa_cs_elim (void)
124 {
125 unsigned todo;
126 /* ??? We are not interested in loop related info, but the following
127 will create it, ICEing as we didn't init loops with pre-headers.
128 An interfacing issue of find_data_references_in_bb. */
129 loop_optimizer_init (LOOPS_NORMAL);
130 scev_initialize ();
131 todo = tree_ssa_phiopt_worker (true, false, false);
132 scev_finalize ();
133 loop_optimizer_finalize ();
134 return todo;
135 }
136
137 /* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1. */
138
139 static gphi *
140 single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
141 {
142 gimple_stmt_iterator i;
143 gphi *phi = NULL;
144 if (gimple_seq_singleton_p (seq))
145 return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
146 for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
147 {
148 gphi *p = as_a <gphi *> (gsi_stmt (i));
149 /* If the PHI arguments are equal then we can skip this PHI. */
150 if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
151 gimple_phi_arg_def (p, e1->dest_idx)))
152 continue;
153
154 /* If we already found a PHI whose arguments for the two edges
155 differ, then there is no single such PHI; return NULL. */
156 if (phi)
157 return NULL;
158
159 phi = p;
160 }
161 return phi;
162 }
163
164 /* The core routine of conditional store replacement and normal
165 phi optimizations. Both share much of the infrastructure in how
166 to match applicable basic block patterns. DO_STORE_ELIM is true
167 when we want to do conditional store replacement, false otherwise.
168 DO_HOIST_LOADS is true when we want to hoist adjacent loads out
169 of diamond control flow patterns, false otherwise. */
170 static unsigned int
171 tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads, bool early_p)
172 {
173 basic_block bb;
174 basic_block *bb_order;
175 unsigned n, i;
176 bool cfgchanged = false;
177 hash_set<tree> *nontrap = 0;
178
179 calculate_dominance_info (CDI_DOMINATORS);
180
181 if (do_store_elim)
182 /* Calculate the set of non-trapping memory accesses. */
183 nontrap = get_non_trapping ();
184
185 /* Search every basic block for COND_EXPR we may be able to optimize.
186
187 We walk the blocks in order that guarantees that a block with
188 a single predecessor is processed before the predecessor.
189 This ensures that we collapse inner ifs before visiting the
190 outer ones, and also that we do not try to visit a removed
191 block. */
192 bb_order = single_pred_before_succ_order ();
193 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
194
195 for (i = 0; i < n; i++)
196 {
197 gimple *cond_stmt;
198 gphi *phi;
199 basic_block bb1, bb2;
200 edge e1, e2;
201 tree arg0, arg1;
202
203 bb = bb_order[i];
204
205 cond_stmt = last_stmt (bb);
206 /* Check to see if the last statement is a GIMPLE_COND. */
207 if (!cond_stmt
208 || gimple_code (cond_stmt) != GIMPLE_COND)
209 continue;
210
211 e1 = EDGE_SUCC (bb, 0);
212 bb1 = e1->dest;
213 e2 = EDGE_SUCC (bb, 1);
214 bb2 = e2->dest;
215
216 /* We cannot do the optimization on abnormal edges. */
217 if ((e1->flags & EDGE_ABNORMAL) != 0
218 || (e2->flags & EDGE_ABNORMAL) != 0)
219 continue;
220
221 /* Both bb1 and bb2 must have at least one successor. */
222 if (EDGE_COUNT (bb1->succs) == 0
223 || EDGE_COUNT (bb2->succs) == 0)
224 continue;
225
226 /* Find the bb which is the fall through to the other. */
227 if (EDGE_SUCC (bb1, 0)->dest == bb2)
228 ;
229 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
230 {
231 std::swap (bb1, bb2);
232 std::swap (e1, e2);
233 }
234 else if (do_store_elim
235 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
236 {
237 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
238
239 if (!single_succ_p (bb1)
240 || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
241 || !single_succ_p (bb2)
242 || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
243 || EDGE_COUNT (bb3->preds) != 2)
244 continue;
245 if (cond_if_else_store_replacement (bb1, bb2, bb3))
246 cfgchanged = true;
247 continue;
248 }
249 else if (do_hoist_loads
250 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
251 {
252 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
253
254 if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
255 && single_succ_p (bb1)
256 && single_succ_p (bb2)
257 && single_pred_p (bb1)
258 && single_pred_p (bb2)
259 && EDGE_COUNT (bb->succs) == 2
260 && EDGE_COUNT (bb3->preds) == 2
261 /* If one edge or the other is dominant, a conditional move
262 is likely to perform worse than the well-predicted branch. */
263 && !predictable_edge_p (EDGE_SUCC (bb, 0))
264 && !predictable_edge_p (EDGE_SUCC (bb, 1)))
265 hoist_adjacent_loads (bb, bb1, bb2, bb3);
266 continue;
267 }
268 else
269 continue;
270
271 e1 = EDGE_SUCC (bb1, 0);
272
273 /* Make sure that bb1 is just a fall through. */
274 if (!single_succ_p (bb1)
275 || (e1->flags & EDGE_FALLTHRU) == 0)
276 continue;
277
278 if (do_store_elim)
279 {
280 /* Also make sure that bb1 has only one predecessor and that it
281 is bb. */
282 if (!single_pred_p (bb1)
283 || single_pred (bb1) != bb)
284 continue;
285
286 /* bb1 is the middle block, bb2 the join block, bb the split block,
287 e1 the fallthrough edge from bb1 to bb2. We can't do the
288 optimization if the join block has more than two predecessors. */
289 if (EDGE_COUNT (bb2->preds) > 2)
290 continue;
291 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
292 cfgchanged = true;
293 }
294 else
295 {
296 gimple_seq phis = phi_nodes (bb2);
297 gimple_stmt_iterator gsi;
298 bool candorest = true;
299
300 /* Value replacement can work with more than one PHI
301 so try that first. */
302 if (!early_p)
303 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
304 {
305 phi = as_a <gphi *> (gsi_stmt (gsi));
306 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
307 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
308 if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
309 {
310 candorest = false;
311 cfgchanged = true;
312 break;
313 }
314 }
315
316 if (!candorest)
317 continue;
318
319 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
320 if (!phi)
321 continue;
322
323 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
324 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
325
326 /* Something is wrong if we cannot find the arguments in the PHI
327 node. */
328 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
329
330 gphi *newphi;
331 if (single_pred_p (bb1)
332 && (newphi = factor_out_conditional_conversion (e1, e2, phi,
333 arg0, arg1,
334 cond_stmt)))
335 {
336 phi = newphi;
337 /* factor_out_conditional_conversion may create a new PHI in
338 BB2 and eliminate an existing PHI in BB2. Recompute values
339 that may be affected by that change. */
340 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
341 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
342 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
343 }
344
345 /* Do the replacement of conditional if it can be done. */
346 if (!early_p && two_value_replacement (bb, bb1, e2, phi, arg0, arg1))
347 cfgchanged = true;
348 else if (match_simplify_replacement (bb, bb1, e1, e2, phi,
349 arg0, arg1,
350 early_p))
351 cfgchanged = true;
352 else if (!early_p
353 && single_pred_p (bb1)
354 && cond_removal_in_builtin_zero_pattern (bb, bb1, e1, e2,
355 phi, arg0, arg1))
356 cfgchanged = true;
357 else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
358 cfgchanged = true;
359 else if (single_pred_p (bb1)
360 && spaceship_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
361 cfgchanged = true;
362 }
363 }
364
365 free (bb_order);
366
367 if (do_store_elim)
368 delete nontrap;
369 /* If the CFG has changed, we should cleanup the CFG. */
370 if (cfgchanged && do_store_elim)
371 {
372 /* In cond-store replacement we have added some loads on edges
373 and new VOPS (as we moved the store, and created a load). */
374 gsi_commit_edge_inserts ();
375 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
376 }
377 else if (cfgchanged)
378 return TODO_cleanup_cfg;
379 return 0;
380 }
381
382 /* Replace PHI node element whose edge is E in block BB with variable NEW.
383 Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
384 is known to have two edges, one of which must reach BB). */
385
386 static void
387 replace_phi_edge_with_variable (basic_block cond_block,
388 edge e, gphi *phi, tree new_tree)
389 {
390 basic_block bb = gimple_bb (phi);
391 gimple_stmt_iterator gsi;
392 tree phi_result = PHI_RESULT (phi);
393
394 /* Duplicate the range info of the PHI result if new_tree is the only
395 thing setting it. This is needed as later on, new_tree will be
396 replacing the assignment of the PHI.
397 For example:
398 bb1:
399 _4 = min<a_1, 255>
400 goto bb2
401 bb2:
402 # RANGE [-INF, 255]
403 a_3 = PHI<_4(1)>
404 bb3:
405
406 use(a_3)
407 Here _4 gets propagated into the use of a_3, losing the range info.
408 This can't be done for more than 2 incoming edges as the propagation
409 won't happen.
410 The new_tree needs to be defined in the same basic block as the conditional. */
411 if (TREE_CODE (new_tree) == SSA_NAME
412 && EDGE_COUNT (gimple_bb (phi)->preds) == 2
413 && INTEGRAL_TYPE_P (TREE_TYPE (phi_result))
414 && !SSA_NAME_RANGE_INFO (new_tree)
415 && SSA_NAME_RANGE_INFO (phi_result)
416 && gimple_bb (SSA_NAME_DEF_STMT (new_tree)) == cond_block
417 && dbg_cnt (phiopt_edge_range))
418 duplicate_ssa_name_range_info (new_tree,
419 SSA_NAME_RANGE_TYPE (phi_result),
420 SSA_NAME_RANGE_INFO (phi_result));
421
422 /* Change the PHI argument to new. */
423 SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);
424
425 /* Remove the empty basic block. */
426 edge edge_to_remove;
427 if (EDGE_SUCC (cond_block, 0)->dest == bb)
428 edge_to_remove = EDGE_SUCC (cond_block, 1);
429 else
430 edge_to_remove = EDGE_SUCC (cond_block, 0);
431 if (EDGE_COUNT (edge_to_remove->dest->preds) == 1)
432 {
433 e->flags |= EDGE_FALLTHRU;
434 e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
435 e->probability = profile_probability::always ();
436 delete_basic_block (edge_to_remove->dest);
437
438 /* Eliminate the COND_EXPR at the end of COND_BLOCK. */
439 gsi = gsi_last_bb (cond_block);
440 gsi_remove (&gsi, true);
441 }
442 else
443 {
444 /* If there are other edges into the middle block make
445 CFG cleanup deal with the edge removal to avoid
446 updating dominators here in a non-trivial way. */
447 gcond *cond = as_a <gcond *> (last_stmt (cond_block));
448 if (edge_to_remove->flags & EDGE_TRUE_VALUE)
449 gimple_cond_make_false (cond);
450 else
451 gimple_cond_make_true (cond);
452 }
453
454 statistics_counter_event (cfun, "Replace PHI with variable", 1);
455
456 if (dump_file && (dump_flags & TDF_DETAILS))
457 fprintf (dump_file,
458 "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
459 cond_block->index,
460 bb->index);
461 }
462
463 /* PR66726: Factor conversion out of COND_EXPR. If the arguments of the PHI
464 stmt are conversions, factor the conversion out and apply it to the
465 result of the PHI stmt instead. COND_STMT is the controlling predicate.
466 Return the newly-created PHI, if any. */
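
/* As a rough sketch of the intended effect, a PHI such as

     # t_3 = PHI <x_2(bb1), 5(bb0)>   where x_2 = (int) a_1;

   with the cast having a single use can become

     # t_4 = PHI <a_1(bb1), 5(bb0)>   with 5 folded to a_1's type
     t_3 = (int) t_4;

   so the conversion happens once after the join, which can expose
   further simplifications (e.g. MIN/MAX) on the new PHI.  */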
467
468 static gphi *
469 factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
470 tree arg0, tree arg1, gimple *cond_stmt)
471 {
472 gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
473 tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
474 tree temp, result;
475 gphi *newphi;
476 gimple_stmt_iterator gsi, gsi_for_def;
477 location_t locus = gimple_location (phi);
478 enum tree_code convert_code;
479
480 /* Handle only PHI statements with two arguments. TODO: If all
481 other arguments to PHI are INTEGER_CST or if their defining
482 statements have the same unary operation, we can handle more
483 than two arguments too. */
484 if (gimple_phi_num_args (phi) != 2)
485 return NULL;
486
487 /* First canonicalize to simplify tests. */
488 if (TREE_CODE (arg0) != SSA_NAME)
489 {
490 std::swap (arg0, arg1);
491 std::swap (e0, e1);
492 }
493
494 if (TREE_CODE (arg0) != SSA_NAME
495 || (TREE_CODE (arg1) != SSA_NAME
496 && TREE_CODE (arg1) != INTEGER_CST))
497 return NULL;
498
499 /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
500 a conversion. */
501 arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
502 if (!gimple_assign_cast_p (arg0_def_stmt))
503 return NULL;
504
505 /* Use the RHS as new_arg0. */
506 convert_code = gimple_assign_rhs_code (arg0_def_stmt);
507 new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
508 if (convert_code == VIEW_CONVERT_EXPR)
509 {
510 new_arg0 = TREE_OPERAND (new_arg0, 0);
511 if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
512 return NULL;
513 }
514 if (TREE_CODE (new_arg0) == SSA_NAME
515 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_arg0))
516 return NULL;
517
518 if (TREE_CODE (arg1) == SSA_NAME)
519 {
520 /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
521 is a conversion. */
522 arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
523 if (!is_gimple_assign (arg1_def_stmt)
524 || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
525 return NULL;
526
527 /* At least one of arg0_def_stmt and arg1_def_stmt needs to be conditionally executed. */
528 if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), gimple_bb (arg0_def_stmt))
529 && dominated_by_p (CDI_DOMINATORS,
530 gimple_bb (phi), gimple_bb (arg1_def_stmt)))
531 return NULL;
532
533 /* Use the RHS as new_arg1. */
534 new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
535 if (convert_code == VIEW_CONVERT_EXPR)
536 new_arg1 = TREE_OPERAND (new_arg1, 0);
537 if (TREE_CODE (new_arg1) == SSA_NAME
538 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_arg1))
539 return NULL;
540 }
541 else
542 {
543 /* arg0_def_stmt needs to be conditionally executed. */
544 if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), gimple_bb (arg0_def_stmt)))
545 return NULL;
546 /* If arg1 is an INTEGER_CST, fold it to new type. */
547 if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
548 && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
549 {
550 if (gimple_assign_cast_p (arg0_def_stmt))
551 {
552 /* For the INTEGER_CST case, we are just moving the
553 conversion from one place to another, which can often
554 hurt as the conversion moves further away from the
555 statement that computes the value. So, perform this
556 only if new_arg0 is an operand of COND_STMT, or
557 if arg0_def_stmt is the only non-debug stmt in
558 its basic block, because then it is possible this
559 could enable further optimizations (minmax replacement
560 etc.). See PR71016. */
561 if (new_arg0 != gimple_cond_lhs (cond_stmt)
562 && new_arg0 != gimple_cond_rhs (cond_stmt)
563 && gimple_bb (arg0_def_stmt) == e0->src)
564 {
565 gsi = gsi_for_stmt (arg0_def_stmt);
566 gsi_prev_nondebug (&gsi);
567 if (!gsi_end_p (gsi))
568 {
569 if (gassign *assign
570 = dyn_cast <gassign *> (gsi_stmt (gsi)))
571 {
572 tree lhs = gimple_assign_lhs (assign);
573 enum tree_code ass_code
574 = gimple_assign_rhs_code (assign);
575 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
576 return NULL;
577 if (lhs != gimple_assign_rhs1 (arg0_def_stmt))
578 return NULL;
579 gsi_prev_nondebug (&gsi);
580 if (!gsi_end_p (gsi))
581 return NULL;
582 }
583 else
584 return NULL;
585 }
586 gsi = gsi_for_stmt (arg0_def_stmt);
587 gsi_next_nondebug (&gsi);
588 if (!gsi_end_p (gsi))
589 return NULL;
590 }
591 new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
592 }
593 else
594 return NULL;
595 }
596 else
597 return NULL;
598 }
599
600 /* If arg0/arg1 have > 1 use, then this transformation actually increases
601 the number of expressions evaluated at runtime. */
602 if (!has_single_use (arg0)
603 || (arg1_def_stmt && !has_single_use (arg1)))
604 return NULL;
605
606 /* If the types of new_arg0 and new_arg1 are different, bail out. */
607 if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
608 return NULL;
609
610 /* Create a new PHI stmt. */
611 result = PHI_RESULT (phi);
612 temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
613 newphi = create_phi_node (temp, gimple_bb (phi));
614
615 if (dump_file && (dump_flags & TDF_DETAILS))
616 {
617 fprintf (dump_file, "PHI ");
618 print_generic_expr (dump_file, gimple_phi_result (phi));
619 fprintf (dump_file,
620 " changed to factor conversion out from COND_EXPR.\n");
621 fprintf (dump_file, "New stmt with CAST that defines ");
622 print_generic_expr (dump_file, result);
623 fprintf (dump_file, ".\n");
624 }
625
626 /* Remove the old cast(s), each of which has a single use. */
627 gsi_for_def = gsi_for_stmt (arg0_def_stmt);
628 gsi_remove (&gsi_for_def, true);
629 release_defs (arg0_def_stmt);
630
631 if (arg1_def_stmt)
632 {
633 gsi_for_def = gsi_for_stmt (arg1_def_stmt);
634 gsi_remove (&gsi_for_def, true);
635 release_defs (arg1_def_stmt);
636 }
637
638 add_phi_arg (newphi, new_arg0, e0, locus);
639 add_phi_arg (newphi, new_arg1, e1, locus);
640
641 /* Create the conversion stmt and insert it. */
642 if (convert_code == VIEW_CONVERT_EXPR)
643 {
644 temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
645 new_stmt = gimple_build_assign (result, temp);
646 }
647 else
648 new_stmt = gimple_build_assign (result, convert_code, temp);
649 gsi = gsi_after_labels (gimple_bb (phi));
650 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
651
652 /* Remove the original PHI stmt. */
653 gsi = gsi_for_stmt (phi);
654 gsi_remove (&gsi, true);
655
656 statistics_counter_event (cfun, "factored out cast", 1);
657
658 return newphi;
659 }
660
661 /* Optimize
662 # x_5 in range [cst1, cst2] where cst2 = cst1 + 1
663 if (x_5 op cstN) # where op is == or != and N is 1 or 2
664 goto bb3;
665 else
666 goto bb4;
667 bb3:
668 bb4:
669 # r_6 = PHI<cst3(2), cst4(3)> # where cst3 == cst4 + 1 or cst4 == cst3 + 1
670
671 to r_6 = x_5 + (min (cst3, cst4) - cst1) or
672 r_6 = (min (cst3, cst4) + cst1) - x_5 depending on op, N and which
673 of cst3 and cst4 is smaller. */
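
/* As a concrete sketch, with x_5 known to be in the range [0, 1]:

     if (x_5 == 0)
       r_6 = 2;
     else
       r_6 = 3;

   becomes r_6 = x_5 + 2, removing the branch entirely.  */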
674
675 static bool
676 two_value_replacement (basic_block cond_bb, basic_block middle_bb,
677 edge e1, gphi *phi, tree arg0, tree arg1)
678 {
679 /* Only look for adjacent integer constants. */
680 if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
681 || !INTEGRAL_TYPE_P (TREE_TYPE (arg1))
682 || TREE_CODE (arg0) != INTEGER_CST
683 || TREE_CODE (arg1) != INTEGER_CST
684 || (tree_int_cst_lt (arg0, arg1)
685 ? wi::to_widest (arg0) + 1 != wi::to_widest (arg1)
686 : wi::to_widest (arg1) + 1 != wi::to_widest (arg0)))
687 return false;
688
689 if (!empty_block_p (middle_bb))
690 return false;
691
692 gimple *stmt = last_stmt (cond_bb);
693 tree lhs = gimple_cond_lhs (stmt);
694 tree rhs = gimple_cond_rhs (stmt);
695
696 if (TREE_CODE (lhs) != SSA_NAME
697 || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
698 || TREE_CODE (rhs) != INTEGER_CST)
699 return false;
700
701 switch (gimple_cond_code (stmt))
702 {
703 case EQ_EXPR:
704 case NE_EXPR:
705 break;
706 default:
707 return false;
708 }
709
710 /* Defer boolean x ? 0 : {1,-1} or x ? {1,-1} : 0 to
711 match_simplify_replacement. */
712 if (TREE_CODE (TREE_TYPE (lhs)) == BOOLEAN_TYPE
713 && (integer_zerop (arg0)
714 || integer_zerop (arg1)
715 || TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE
716 || (TYPE_PRECISION (TREE_TYPE (arg0))
717 <= TYPE_PRECISION (TREE_TYPE (lhs)))))
718 return false;
719
720 wide_int min, max;
721 value_range r;
722 get_range_query (cfun)->range_of_expr (r, lhs);
723
724 if (r.kind () == VR_RANGE)
725 {
726 min = r.lower_bound ();
727 max = r.upper_bound ();
728 }
729 else
730 {
731 int prec = TYPE_PRECISION (TREE_TYPE (lhs));
732 signop sgn = TYPE_SIGN (TREE_TYPE (lhs));
733 min = wi::min_value (prec, sgn);
734 max = wi::max_value (prec, sgn);
735 }
736 if (min + 1 != max
737 || (wi::to_wide (rhs) != min
738 && wi::to_wide (rhs) != max))
739 return false;
740
741 /* We need to know which is the true edge and which is the false
742 edge so that we know when to invert the condition below. */
743 edge true_edge, false_edge;
744 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
745 if ((gimple_cond_code (stmt) == EQ_EXPR)
746 ^ (wi::to_wide (rhs) == max)
747 ^ (e1 == false_edge))
748 std::swap (arg0, arg1);
749
750 tree type;
751 if (TYPE_PRECISION (TREE_TYPE (lhs)) == TYPE_PRECISION (TREE_TYPE (arg0)))
752 {
753 /* Avoid performing the arithmetic in a boolean type, which has
754 different semantics; otherwise, of the two types with the same
755 precision, prefer the unsigned one. */
756 if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE
757 || !TYPE_UNSIGNED (TREE_TYPE (arg0)))
758 type = TREE_TYPE (lhs);
759 else
760 type = TREE_TYPE (arg0);
761 }
762 else if (TYPE_PRECISION (TREE_TYPE (lhs)) > TYPE_PRECISION (TREE_TYPE (arg0)))
763 type = TREE_TYPE (lhs);
764 else
765 type = TREE_TYPE (arg0);
766
767 min = wide_int::from (min, TYPE_PRECISION (type),
768 TYPE_SIGN (TREE_TYPE (lhs)));
769 wide_int a = wide_int::from (wi::to_wide (arg0), TYPE_PRECISION (type),
770 TYPE_SIGN (TREE_TYPE (arg0)));
771 enum tree_code code;
772 wi::overflow_type ovf;
773 if (tree_int_cst_lt (arg0, arg1))
774 {
775 code = PLUS_EXPR;
776 a -= min;
777 if (!TYPE_UNSIGNED (type))
778 {
779 /* lhs is known to be in range [min, min+1] and we want to add a
780 to it. Check if that operation can overflow for those 2 values
781 and if yes, force unsigned type. */
782 wi::add (min + (wi::neg_p (a) ? 0 : 1), a, SIGNED, &ovf);
783 if (ovf)
784 type = unsigned_type_for (type);
785 }
786 }
787 else
788 {
789 code = MINUS_EXPR;
790 a += min;
791 if (!TYPE_UNSIGNED (type))
792 {
793 /* lhs is known to be in range [min, min+1] and we want to subtract
794 it from a. Check if that operation can overflow for those 2
795 values and if yes, force unsigned type. */
796 wi::sub (a, min + (wi::neg_p (min) ? 0 : 1), SIGNED, &ovf);
797 if (ovf)
798 type = unsigned_type_for (type);
799 }
800 }
801
802 tree arg = wide_int_to_tree (type, a);
803 gimple_seq stmts = NULL;
804 lhs = gimple_convert (&stmts, type, lhs);
805 tree new_rhs;
806 if (code == PLUS_EXPR)
807 new_rhs = gimple_build (&stmts, PLUS_EXPR, type, lhs, arg);
808 else
809 new_rhs = gimple_build (&stmts, MINUS_EXPR, type, arg, lhs);
810 new_rhs = gimple_convert (&stmts, TREE_TYPE (arg0), new_rhs);
811 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
812 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
813
814 replace_phi_edge_with_variable (cond_bb, e1, phi, new_rhs);
815
816 /* Note that we optimized this PHI. */
817 return true;
818 }
819
820 /* Return TRUE if SEQ/OP pair should be allowed during early phiopt.
821 Currently only MIN/MAX, ABS/NEGATE and constants are allowed. */
822 static bool
823 phiopt_early_allow (gimple_seq &seq, gimple_match_op &op)
824 {
825 /* Don't allow functions. */
826 if (!op.code.is_tree_code ())
827 return false;
828 tree_code code = (tree_code)op.code;
829
830 /* For a non-empty sequence, allow only a single statement. */
831 if (!gimple_seq_empty_p (seq))
832 {
833 /* Check to make sure op was already a SSA_NAME. */
834 if (code != SSA_NAME)
835 return false;
836 if (!gimple_seq_singleton_p (seq))
837 return false;
838 gimple *stmt = gimple_seq_first_stmt (seq);
839 /* Only allow assignments. */
840 if (!is_gimple_assign (stmt))
841 return false;
842 if (gimple_assign_lhs (stmt) != op.ops[0])
843 return false;
844 code = gimple_assign_rhs_code (stmt);
845 }
846
847 switch (code)
848 {
849 case MIN_EXPR:
850 case MAX_EXPR:
851 case ABS_EXPR:
852 case ABSU_EXPR:
853 case NEGATE_EXPR:
854 case SSA_NAME:
855 return true;
856 case INTEGER_CST:
857 case REAL_CST:
858 case VECTOR_CST:
859 case FIXED_CST:
860 return true;
861 default:
862 return false;
863 }
864 }
865
866 /* gimple_simplify_phiopt is like gimple_simplify but designed for PHIOPT.
867 Return NULL if nothing can be simplified; otherwise return the simplified
868 value, with any new statements pushed onto SEQ. If EARLY_P is set, also
869 reject tree codes that are not allowed during early phiopt.
870 Takes the comparison from COMP_STMT and two args, ARG0 and ARG1 and tries
871 to simplify CMP ? ARG0 : ARG1.
872 Also try to simplify (!CMP) ? ARG1 : ARG0 if the non-inverse failed. */
873 static tree
874 gimple_simplify_phiopt (bool early_p, tree type, gimple *comp_stmt,
875 tree arg0, tree arg1,
876 gimple_seq *seq)
877 {
878 tree result;
879 gimple_seq seq1 = NULL;
880 enum tree_code comp_code = gimple_cond_code (comp_stmt);
881 location_t loc = gimple_location (comp_stmt);
882 tree cmp0 = gimple_cond_lhs (comp_stmt);
883 tree cmp1 = gimple_cond_rhs (comp_stmt);
884 /* To handle special cases like floating point comparison, it is easier and
885 less error-prone to build a tree and gimplify it on the fly though it is
886 less efficient.
887 Don't use fold_build2 here as that might create (bool)a instead of just
888 "a != 0". */
889 tree cond = build2_loc (loc, comp_code, boolean_type_node,
890 cmp0, cmp1);
891 gimple_match_op op (gimple_match_cond::UNCOND,
892 COND_EXPR, type, cond, arg0, arg1);
893
894 if (op.resimplify (&seq1, follow_all_ssa_edges))
895 {
896 /* During early phiopt, only some generated tree codes are allowed. */
897 if (!early_p
898 || phiopt_early_allow (seq1, op))
899 {
900 result = maybe_push_res_to_seq (&op, &seq1);
901 if (result)
902 {
903 if (loc != UNKNOWN_LOCATION)
904 annotate_all_with_location (seq1, loc);
905 gimple_seq_add_seq_without_update (seq, seq1);
906 return result;
907 }
908 }
909 }
910 gimple_seq_discard (seq1);
911 seq1 = NULL;
912
913 /* Try the inverted comparison, that is !COMP ? ARG1 : ARG0. */
914 comp_code = invert_tree_comparison (comp_code, HONOR_NANS (cmp0));
915
916 if (comp_code == ERROR_MARK)
917 return NULL;
918
919 cond = build2_loc (loc,
920 comp_code, boolean_type_node,
921 cmp0, cmp1);
922 gimple_match_op op1 (gimple_match_cond::UNCOND,
923 COND_EXPR, type, cond, arg1, arg0);
924
925 if (op1.resimplify (&seq1, follow_all_ssa_edges))
926 {
927 /* During early phiopt, only some generated tree codes are allowed. */
928 if (!early_p
929 || phiopt_early_allow (seq1, op1))
930 {
931 result = maybe_push_res_to_seq (&op1, &seq1);
932 if (result)
933 {
934 if (loc != UNKNOWN_LOCATION)
935 annotate_all_with_location (seq1, loc);
936 gimple_seq_add_seq_without_update (seq, seq1);
937 return result;
938 }
939 }
940 }
941 gimple_seq_discard (seq1);
942
943 return NULL;
944 }
945
946 /* The function match_simplify_replacement does the main work of doing the
947 replacement using match and simplify. Return true if the replacement is done.
948 Otherwise return false.
949 BB is the basic block where the replacement is going to be done. ARG0
950 is argument 0 from PHI. Likewise for ARG1. */
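
/* As an illustrative sketch, a diamond like

     if (a_1 < 0)
       b_2 = -a_1;
     # r_3 = PHI <b_2(then), a_1(else)>

   amounts to simplifying a_1 < 0 ? -a_1 : a_1, which match.pd can fold
   to ABS_EXPR <a_1>, after which the middle block goes away.  */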
951
952 static bool
953 match_simplify_replacement (basic_block cond_bb, basic_block middle_bb,
954 edge e0, edge e1, gphi *phi,
955 tree arg0, tree arg1, bool early_p)
956 {
957 gimple *stmt;
958 gimple_stmt_iterator gsi;
959 edge true_edge, false_edge;
960 gimple_seq seq = NULL;
961 tree result;
962 gimple *stmt_to_move = NULL;
963
964 /* Special case A ? B : B as this will always simplify to B. */
965 if (operand_equal_for_phi_arg_p (arg0, arg1))
966 return false;
967
968 /* If the basic block only has a cheap preparation statement,
969 allow it and move it once the transformation is done. */
970 if (!empty_block_p (middle_bb))
971 {
972 if (!single_pred_p (middle_bb))
973 return false;
974
975 stmt_to_move = last_and_only_stmt (middle_bb);
976 if (!stmt_to_move)
977 return false;
978
979 if (gimple_vuse (stmt_to_move))
980 return false;
981
982 if (gimple_could_trap_p (stmt_to_move)
983 || gimple_has_side_effects (stmt_to_move))
984 return false;
985
986 if (gimple_uses_undefined_value_p (stmt_to_move))
987 return false;
988
989 /* Allow only assignments and no calls.
990 Const calls don't match any of the checks above, yet they could
991 still have some side-effects - they could contain
992 gimple_could_trap_p statements, like floating point
993 exceptions or integer division by zero. See PR70586.
994 FIXME: perhaps gimple_has_side_effects or gimple_could_trap_p
995 should handle this. */
996 if (!is_gimple_assign (stmt_to_move))
997 return false;
998
999 tree lhs = gimple_assign_lhs (stmt_to_move);
1000 gimple *use_stmt;
1001 use_operand_p use_p;
1002
1003 /* Allow only a statement which feeds into the phi. */
1004 if (!lhs || TREE_CODE (lhs) != SSA_NAME
1005 || !single_imm_use (lhs, &use_p, &use_stmt)
1006 || use_stmt != phi)
1007 return false;
1008 }
1009
1010 /* At this point we know we have a GIMPLE_COND with two successors.
1011 One successor is BB, the other successor is an empty block which
1012 falls through into BB.
1013
1014 There is a single PHI node at the join point (BB).
1015
1016 So, given the condition COND, and the two PHI arguments, match and simplify
1017 can happen on (COND) ? arg0 : arg1. */
1018
1019 stmt = last_stmt (cond_bb);
1020
1021 /* We need to know which is the true edge and which is the false
1022 edge so that we know when to invert the condition below. */
1023 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1024 if (e1 == true_edge || e0 == false_edge)
1025 std::swap (arg0, arg1);
1026
1027 tree type = TREE_TYPE (gimple_phi_result (phi));
1028 result = gimple_simplify_phiopt (early_p, type, stmt,
1029 arg0, arg1,
1030 &seq);
1031 if (!result)
1032 return false;
1033
1034 gsi = gsi_last_bb (cond_bb);
1035 /* Insert the sequence generated from gimple_simplify_phiopt. */
1036 if (seq)
1037 gsi_insert_seq_before (&gsi, seq, GSI_CONTINUE_LINKING);
1038
1039 /* If there was a statement to move and the result of the statement
1040 is going to be used, move it right before the original
1041 conditional. */
1042 if (stmt_to_move
1043 && (gimple_assign_lhs (stmt_to_move) == result
1044 || !has_single_use (gimple_assign_lhs (stmt_to_move))))
1045 {
1046 if (dump_file && (dump_flags & TDF_DETAILS))
1047 {
1048 fprintf (dump_file, "statement un-sinked:\n");
1049 print_gimple_stmt (dump_file, stmt_to_move, 0,
1050 TDF_VOPS|TDF_MEMSYMS);
1051 }
1052 gimple_stmt_iterator gsi1 = gsi_for_stmt (stmt_to_move);
1053 gsi_move_before (&gsi1, &gsi);
1054 reset_flow_sensitive_info (gimple_assign_lhs (stmt_to_move));
1055 }
1056
1057 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1058
1059 /* Record a statistic here even though replace_phi_edge_with_variable
1060 already does so, as we want to count how often match-simplify succeeds
1061 versus the other replacements. */
1062 statistics_counter_event (cfun, "match-simplify PHI replacement", 1);
1063
1064 /* Note that we optimized this PHI. */
1065 return true;
1066 }
1067
1068 /* Update *ARG which is defined in STMT so that it contains the
1069 computed value if that seems profitable. Return true if the
1070 statement is made dead by that rewriting. */
1071
1072 static bool
1073 jump_function_from_stmt (tree *arg, gimple *stmt)
1074 {
1075 enum tree_code code = gimple_assign_rhs_code (stmt);
1076 if (code == ADDR_EXPR)
1077 {
1078 /* For arg = &p->i transform it to p, if possible. */
1079 tree rhs1 = gimple_assign_rhs1 (stmt);
1080 poly_int64 offset;
1081 tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
1082 &offset);
1083 if (tem
1084 && TREE_CODE (tem) == MEM_REF
1085 && known_eq (mem_ref_offset (tem) + offset, 0))
1086 {
1087 *arg = TREE_OPERAND (tem, 0);
1088 return true;
1089 }
1090 }
1091 /* TODO: Much like IPA-CP jump-functions we want to handle constant
1092 additions symbolically here, and we'd need to update the comparison
1093 code that compares the arg + cst tuples in our caller. For now the
1094 code above exactly handles the VEC_BASE pattern from vec.h. */
1095 return false;
1096 }
1097
1098 /* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
1099 of the form SSA_NAME NE 0.
1100
1101 If RHS is fed by a simple EQ_EXPR comparison of two values, see if
1102 the two input values of the EQ_EXPR match arg0 and arg1.
1103
1104 If so update *code and return TRUE. Otherwise return FALSE. */
1105
1106 static bool
1107 rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
1108 enum tree_code *code, const_tree rhs)
1109 {
1110 /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
1111 statement. */
1112 if (TREE_CODE (rhs) == SSA_NAME)
1113 {
1114 gimple *def1 = SSA_NAME_DEF_STMT (rhs);
1115
1116 /* Verify the defining statement has an EQ_EXPR on the RHS. */
1117 if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
1118 {
1119 /* Finally verify the source operands of the EQ_EXPR are equal
1120 to arg0 and arg1. */
1121 tree op0 = gimple_assign_rhs1 (def1);
1122 tree op1 = gimple_assign_rhs2 (def1);
1123 if ((operand_equal_for_phi_arg_p (arg0, op0)
1124 && operand_equal_for_phi_arg_p (arg1, op1))
1125 || (operand_equal_for_phi_arg_p (arg0, op1)
1126 && operand_equal_for_phi_arg_p (arg1, op0)))
1127 {
1128 /* We will perform the optimization. */
1129 *code = gimple_assign_rhs_code (def1);
1130 return true;
1131 }
1132 }
1133 }
1134 return false;
1135 }
1136
1137 /* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.
1138
1139 Also return TRUE if arg0/arg1 are equal to the source arguments of
1140 an EQ comparison feeding a BIT_AND_EXPR which feeds COND.
1141
1142 Return FALSE otherwise. */
1143
1144 static bool
1145 operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
1146 enum tree_code *code, gimple *cond)
1147 {
1148 gimple *def;
1149 tree lhs = gimple_cond_lhs (cond);
1150 tree rhs = gimple_cond_rhs (cond);
1151
1152 if ((operand_equal_for_phi_arg_p (arg0, lhs)
1153 && operand_equal_for_phi_arg_p (arg1, rhs))
1154 || (operand_equal_for_phi_arg_p (arg1, lhs)
1155 && operand_equal_for_phi_arg_p (arg0, rhs)))
1156 return true;
1157
1158 /* Now handle more complex case where we have an EQ comparison
1159 which feeds a BIT_AND_EXPR which feeds COND.
1160
1161 First verify that COND is of the form SSA_NAME NE 0. */
1162 if (*code != NE_EXPR || !integer_zerop (rhs)
1163 || TREE_CODE (lhs) != SSA_NAME)
1164 return false;
1165
1166 /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR. */
1167 def = SSA_NAME_DEF_STMT (lhs);
1168 if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
1169 return false;
1170
1171 /* Now verify arg0/arg1 correspond to the source arguments of an
1172 EQ comparison feeding the BIT_AND_EXPR. */
1173
1174 tree tmp = gimple_assign_rhs1 (def);
1175 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
1176 return true;
1177
1178 tmp = gimple_assign_rhs2 (def);
1179 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
1180 return true;
1181
1182 return false;
1183 }
1184
1185 /* Returns true if ARG is a neutral element for operation CODE
1186 on the RIGHT side. */
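/* E.g. x + 0, x | 0, x ^ 0, x * 1 and x & -1 all equal x, while x - 0
   and x >> 0 equal x only when the constant is the RIGHT operand.  */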
1187
1188 static bool
1189 neutral_element_p (tree_code code, tree arg, bool right)
1190 {
1191 switch (code)
1192 {
1193 case PLUS_EXPR:
1194 case BIT_IOR_EXPR:
1195 case BIT_XOR_EXPR:
1196 return integer_zerop (arg);
1197
1198 case LROTATE_EXPR:
1199 case RROTATE_EXPR:
1200 case LSHIFT_EXPR:
1201 case RSHIFT_EXPR:
1202 case MINUS_EXPR:
1203 case POINTER_PLUS_EXPR:
1204 return right && integer_zerop (arg);
1205
1206 case MULT_EXPR:
1207 return integer_onep (arg);
1208
1209 case TRUNC_DIV_EXPR:
1210 case CEIL_DIV_EXPR:
1211 case FLOOR_DIV_EXPR:
1212 case ROUND_DIV_EXPR:
1213 case EXACT_DIV_EXPR:
1214 return right && integer_onep (arg);
1215
1216 case BIT_AND_EXPR:
1217 return integer_all_onesp (arg);
1218
1219 default:
1220 return false;
1221 }
1222 }
1223
1224 /* Returns true if ARG is an absorbing element for operation CODE. */
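/* E.g. x * 0 and x & 0 are 0 for any x, 0 << x and 0 >> x are 0 as long
   as the zero is the LEFT operand, and 0 / x can only be treated as 0
   when x is known to be nonzero.  */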
1225
1226 static bool
1227 absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
1228 {
1229 switch (code)
1230 {
1231 case BIT_IOR_EXPR:
1232 return integer_all_onesp (arg);
1233
1234 case MULT_EXPR:
1235 case BIT_AND_EXPR:
1236 return integer_zerop (arg);
1237
1238 case LSHIFT_EXPR:
1239 case RSHIFT_EXPR:
1240 case LROTATE_EXPR:
1241 case RROTATE_EXPR:
1242 return !right && integer_zerop (arg);
1243
1244 case TRUNC_DIV_EXPR:
1245 case CEIL_DIV_EXPR:
1246 case FLOOR_DIV_EXPR:
1247 case ROUND_DIV_EXPR:
1248 case EXACT_DIV_EXPR:
1249 case TRUNC_MOD_EXPR:
1250 case CEIL_MOD_EXPR:
1251 case FLOOR_MOD_EXPR:
1252 case ROUND_MOD_EXPR:
1253 return (!right
1254 && integer_zerop (arg)
1255 && tree_single_nonzero_warnv_p (rval, NULL));
1256
1257 default:
1258 return false;
1259 }
1260 }
1261
1262 /* The function value_replacement does the main work of doing the value
1263 replacement. Return non-zero if the replacement is done. Otherwise return
1264 0. If we remove the middle basic block, return 2.
1265 BB is the basic block where the replacement is going to be done on. ARG0
1266 is argument 0 from the PHI. Likewise for ARG1. */
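
/* As a minimal sketch of the simplest case handled here:

     if (x_1 == 0)
       r_2 = 0;
     else
       r_2 = x_1;

   collapses to r_2 = x_1, since on the edge where the constant is used
   the condition guarantees x_1 has that very value.  */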
1267
1268 static int
1269 value_replacement (basic_block cond_bb, basic_block middle_bb,
1270 edge e0, edge e1, gphi *phi, tree arg0, tree arg1)
1271 {
1272 gimple_stmt_iterator gsi;
1273 gimple *cond;
1274 edge true_edge, false_edge;
1275 enum tree_code code;
1276 bool empty_or_with_defined_p = true;
1277
1278 /* If the type says honor signed zeros we cannot do this
1279 optimization. */
1280 if (HONOR_SIGNED_ZEROS (arg1))
1281 return 0;
1282
1283 /* If there is a statement in MIDDLE_BB that defines one of the PHI
1284 arguments, then adjust arg0 or arg1. */
1285 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
1286 while (!gsi_end_p (gsi))
1287 {
1288 gimple *stmt = gsi_stmt (gsi);
1289 tree lhs;
1290 gsi_next_nondebug (&gsi);
1291 if (!is_gimple_assign (stmt))
1292 {
1293 if (gimple_code (stmt) != GIMPLE_PREDICT
1294 && gimple_code (stmt) != GIMPLE_NOP)
1295 empty_or_with_defined_p = false;
1296 continue;
1297 }
1298 /* Now try to adjust arg0 or arg1 according to the computation
1299 in the statement. */
1300 lhs = gimple_assign_lhs (stmt);
1301 if (!(lhs == arg0
1302 && jump_function_from_stmt (&arg0, stmt))
1303 || (lhs == arg1
1304 && jump_function_from_stmt (&arg1, stmt)))
1305 empty_or_with_defined_p = false;
1306 }
1307
1308 cond = last_stmt (cond_bb);
1309 code = gimple_cond_code (cond);
1310
1311 /* This transformation is only valid for equality comparisons. */
1312 if (code != NE_EXPR && code != EQ_EXPR)
1313 return 0;
1314
1315 /* We need to know which is the true edge and which is the false
1316 edge so that we know which PHI argument goes with which edge. */
1317 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1318
1319 /* At this point we know we have a COND_EXPR with two successors.
1320 One successor is BB, the other successor is an empty block which
1321 falls through into BB.
1322
1323 The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.
1324
1325 There is a single PHI node at the join point (BB) with two arguments.
1326
1327 We now need to verify that the two arguments in the PHI node match
1328 the two arguments to the equality comparison. */
1329
1330 if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
1331 {
1332 edge e;
1333 tree arg;
1334
1335 /* For NE_EXPR, we want to build an assignment result = arg where
1336 arg is the PHI argument associated with the true edge. For
1337 EQ_EXPR we want the PHI argument associated with the false edge. */
1338 e = (code == NE_EXPR ? true_edge : false_edge);
1339
1340 /* Unfortunately, E may not reach BB (it may instead have gone to
1341 OTHER_BLOCK). If that is the case, then we want the single outgoing
1342 edge from OTHER_BLOCK which reaches BB and represents the desired
1343 path from COND_BLOCK. */
1344 if (e->dest == middle_bb)
1345 e = single_succ_edge (e->dest);
1346
1347 /* Now we know the incoming edge to BB that has the argument for the
1348 RHS of our new assignment statement. */
1349 if (e0 == e)
1350 arg = arg0;
1351 else
1352 arg = arg1;
1353
1354 /* If the middle basic block was empty or only defines the
1355 PHI arguments, and this is the single PHI whose args differ
1356 for the edges e0 and e1, then we can remove the middle basic block. */
1357 if (empty_or_with_defined_p
1358 && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
1359 e0, e1) == phi)
1360 {
1361 replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
1362 /* Note that we optimized this PHI. */
1363 return 2;
1364 }
1365 else
1366 {
1367 if (!single_pred_p (middle_bb))
1368 return 0;
1369 statistics_counter_event (cfun, "Replace PHI with "
1370 "variable/value_replacement", 1);
1371
1372 /* Replace the PHI arguments with arg. */
1373 SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
1374 SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
1375 if (dump_file && (dump_flags & TDF_DETAILS))
1376 {
1377 fprintf (dump_file, "PHI ");
1378 print_generic_expr (dump_file, gimple_phi_result (phi));
1379 fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
1380 cond_bb->index);
1381 print_generic_expr (dump_file, arg);
1382 fprintf (dump_file, ".\n");
1383 }
1384 return 1;
1385 }
1386 }
1387
1388 if (!single_pred_p (middle_bb))
1389 return 0;
1390
1391 /* Now optimize (x != 0) ? x + y : y to just x + y. */
1392 gsi = gsi_last_nondebug_bb (middle_bb);
1393 if (gsi_end_p (gsi))
1394 return 0;
1395
1396 gimple *assign = gsi_stmt (gsi);
1397 if (!is_gimple_assign (assign)
1398 || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
1399 && !POINTER_TYPE_P (TREE_TYPE (arg0))))
1400 return 0;
1401
1402 if (gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS)
1403 {
1404 /* If last stmt of the middle_bb is a conversion, handle it like
1405 a preparation statement through constant evaluation with
1406 checking for UB. */
1407 enum tree_code sc = gimple_assign_rhs_code (assign);
1408 if (CONVERT_EXPR_CODE_P (sc))
1409 assign = NULL;
1410 else
1411 return 0;
1412 }
1413
1414 /* Punt if there are (degenerate) PHIs in middle_bb; there should not be. */
1415 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
1416 return 0;
1417
1418 /* Allow up to 2 cheap preparation statements that prepare argument
1419 for assign, e.g.:
1420 if (y_4 != 0)
1421 goto <bb 3>;
1422 else
1423 goto <bb 4>;
1424 <bb 3>:
1425 _1 = (int) y_4;
1426 iftmp.0_6 = x_5(D) r<< _1;
1427 <bb 4>:
1428 # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>
1429 or:
1430 if (y_3(D) == 0)
1431 goto <bb 4>;
1432 else
1433 goto <bb 3>;
1434 <bb 3>:
1435 y_4 = y_3(D) & 31;
1436 _1 = (int) y_4;
1437 _6 = x_5(D) r<< _1;
1438 <bb 4>:
1439 # _2 = PHI <x_5(D)(2), _6(3)> */
1440 gimple *prep_stmt[2] = { NULL, NULL };
1441 int prep_cnt;
1442 for (prep_cnt = 0; ; prep_cnt++)
1443 {
1444 if (prep_cnt || assign)
1445 gsi_prev_nondebug (&gsi);
1446 if (gsi_end_p (gsi))
1447 break;
1448
1449 gimple *g = gsi_stmt (gsi);
1450 if (gimple_code (g) == GIMPLE_LABEL)
1451 break;
1452
1453 if (prep_cnt == 2 || !is_gimple_assign (g))
1454 return 0;
1455
1456 tree lhs = gimple_assign_lhs (g);
1457 tree rhs1 = gimple_assign_rhs1 (g);
1458 use_operand_p use_p;
1459 gimple *use_stmt;
1460 if (TREE_CODE (lhs) != SSA_NAME
1461 || TREE_CODE (rhs1) != SSA_NAME
1462 || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1463 || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1464 || !single_imm_use (lhs, &use_p, &use_stmt)
1465 || ((prep_cnt || assign)
1466 && use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign)))
1467 return 0;
1468 switch (gimple_assign_rhs_code (g))
1469 {
1470 CASE_CONVERT:
1471 break;
1472 case PLUS_EXPR:
1473 case BIT_AND_EXPR:
1474 case BIT_IOR_EXPR:
1475 case BIT_XOR_EXPR:
1476 if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
1477 return 0;
1478 break;
1479 default:
1480 return 0;
1481 }
1482 prep_stmt[prep_cnt] = g;
1483 }
1484
1485 /* Only transform if it removes the condition. */
1486 if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
1487 return 0;
1488
1489 /* Size-wise, this is always profitable. */
1490 if (optimize_bb_for_speed_p (cond_bb)
1491 /* The special case is useless if it has a low probability. */
1492 && profile_status_for_fn (cfun) != PROFILE_ABSENT
1493 && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
1494 /* If assign is cheap, there is no point avoiding it. */
1495 && estimate_num_insns_seq (bb_seq (middle_bb), &eni_time_weights)
1496 >= 3 * estimate_num_insns (cond, &eni_time_weights))
1497 return 0;
1498
1499 tree cond_lhs = gimple_cond_lhs (cond);
1500 tree cond_rhs = gimple_cond_rhs (cond);
1501
1502 /* Propagate the cond_rhs constant through preparation stmts,
1503 make sure UB isn't invoked while doing that. */
1504 for (int i = prep_cnt - 1; i >= 0; --i)
1505 {
1506 gimple *g = prep_stmt[i];
1507 tree grhs1 = gimple_assign_rhs1 (g);
1508 if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
1509 return 0;
1510 cond_lhs = gimple_assign_lhs (g);
1511 cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
1512 if (TREE_CODE (cond_rhs) != INTEGER_CST
1513 || TREE_OVERFLOW (cond_rhs))
1514 return 0;
1515 if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
1516 {
1517 cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
1518 gimple_assign_rhs2 (g));
1519 if (TREE_OVERFLOW (cond_rhs))
1520 return 0;
1521 }
1522 cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
1523 if (TREE_CODE (cond_rhs) != INTEGER_CST
1524 || TREE_OVERFLOW (cond_rhs))
1525 return 0;
1526 }
1527
1528 tree lhs, rhs1, rhs2;
1529 enum tree_code code_def;
1530 if (assign)
1531 {
1532 lhs = gimple_assign_lhs (assign);
1533 rhs1 = gimple_assign_rhs1 (assign);
1534 rhs2 = gimple_assign_rhs2 (assign);
1535 code_def = gimple_assign_rhs_code (assign);
1536 }
1537 else
1538 {
1539 gcc_assert (prep_cnt > 0);
1540 lhs = cond_lhs;
1541 rhs1 = NULL_TREE;
1542 rhs2 = NULL_TREE;
1543 code_def = ERROR_MARK;
1544 }
1545
1546 if (((code == NE_EXPR && e1 == false_edge)
1547 || (code == EQ_EXPR && e1 == true_edge))
1548 && arg0 == lhs
1549 && ((assign == NULL
1550 && operand_equal_for_phi_arg_p (arg1, cond_rhs))
1551 || (assign
1552 && arg1 == rhs1
1553 && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
1554 && neutral_element_p (code_def, cond_rhs, true))
1555 || (assign
1556 && arg1 == rhs2
1557 && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
1558 && neutral_element_p (code_def, cond_rhs, false))
1559 || (assign
1560 && operand_equal_for_phi_arg_p (arg1, cond_rhs)
1561 && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
1562 && absorbing_element_p (code_def, cond_rhs, true, rhs2))
1563 || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
1564 && absorbing_element_p (code_def,
1565 cond_rhs, false, rhs2))))))
1566 {
1567 gsi = gsi_for_stmt (cond);
1568 /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
1569 def-stmt in:
1570 if (n_5 != 0)
1571 goto <bb 3>;
1572 else
1573 goto <bb 4>;
1574
1575 <bb 3>:
1576 # RANGE [0, 4294967294]
1577 u_6 = n_5 + 4294967295;
1578
1579 <bb 4>:
1580 # u_3 = PHI <u_6(3), 4294967295(2)> */
1581 reset_flow_sensitive_info (lhs);
1582 gimple_stmt_iterator gsi_from;
1583 for (int i = prep_cnt - 1; i >= 0; --i)
1584 {
1585 tree plhs = gimple_assign_lhs (prep_stmt[i]);
1586 reset_flow_sensitive_info (plhs);
1587 gsi_from = gsi_for_stmt (prep_stmt[i]);
1588 gsi_move_before (&gsi_from, &gsi);
1589 }
1590 if (assign)
1591 {
1592 gsi_from = gsi_for_stmt (assign);
1593 gsi_move_before (&gsi_from, &gsi);
1594 }
1595 replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
1596 return 2;
1597 }
1598
1599 return 0;
1600 }
1601
1602 /* The function minmax_replacement does the main work of doing the minmax
1603 replacement. Return true if the replacement is done. Otherwise return
1604 false.
1605 BB is the basic block where the replacement is going to be done on. ARG0
1606 is argument 0 from the PHI. Likewise for ARG1. */
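
/* As a sketch of the empty-middle-block case:

     if (a_1 < b_2)
       r_3 = a_1;
     else
       r_3 = b_2;

   becomes r_3 = MIN_EXPR <a_1, b_2>, provided the type honors neither
   NaNs nor signed zeros.  */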
1607
1608 static bool
1609 minmax_replacement (basic_block cond_bb, basic_block middle_bb,
1610 edge e0, edge e1, gphi *phi, tree arg0, tree arg1)
1611 {
1612 tree result;
1613 edge true_edge, false_edge;
1614 enum tree_code minmax, ass_code;
1615 tree smaller, larger, arg_true, arg_false;
1616 gimple_stmt_iterator gsi, gsi_from;
1617
1618 tree type = TREE_TYPE (PHI_RESULT (phi));
1619
1620 /* The optimization may be unsafe due to NaNs. */
1621 if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
1622 return false;
1623
1624 gcond *cond = as_a <gcond *> (last_stmt (cond_bb));
1625 enum tree_code cmp = gimple_cond_code (cond);
1626 tree rhs = gimple_cond_rhs (cond);
1627
1628 /* Turn EQ/NE of extreme values to order comparisons. */
1629 if ((cmp == NE_EXPR || cmp == EQ_EXPR)
1630 && TREE_CODE (rhs) == INTEGER_CST
1631 && INTEGRAL_TYPE_P (TREE_TYPE (rhs)))
1632 {
1633 if (wi::eq_p (wi::to_wide (rhs), wi::min_value (TREE_TYPE (rhs))))
1634 {
1635 cmp = (cmp == EQ_EXPR) ? LT_EXPR : GE_EXPR;
1636 rhs = wide_int_to_tree (TREE_TYPE (rhs),
1637 wi::min_value (TREE_TYPE (rhs)) + 1);
1638 }
1639 else if (wi::eq_p (wi::to_wide (rhs), wi::max_value (TREE_TYPE (rhs))))
1640 {
1641 cmp = (cmp == EQ_EXPR) ? GT_EXPR : LE_EXPR;
1642 rhs = wide_int_to_tree (TREE_TYPE (rhs),
1643 wi::max_value (TREE_TYPE (rhs)) - 1);
1644 }
1645 }
1646
1647 /* This transformation is only valid for order comparisons. Record which
1648 operand is smaller/larger if the result of the comparison is true. */
1649 tree alt_smaller = NULL_TREE;
1650 tree alt_larger = NULL_TREE;
1651 if (cmp == LT_EXPR || cmp == LE_EXPR)
1652 {
1653 smaller = gimple_cond_lhs (cond);
1654 larger = rhs;
1655 /* If we have smaller < CST it is equivalent to smaller <= CST-1.
1656 Likewise smaller <= CST is equivalent to smaller < CST+1. */
1657 if (TREE_CODE (larger) == INTEGER_CST
1658 && INTEGRAL_TYPE_P (TREE_TYPE (larger)))
1659 {
1660 if (cmp == LT_EXPR)
1661 {
1662 wi::overflow_type overflow;
1663 wide_int alt = wi::sub (wi::to_wide (larger), 1,
1664 TYPE_SIGN (TREE_TYPE (larger)),
1665 &overflow);
1666 if (! overflow)
1667 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1668 }
1669 else
1670 {
1671 wi::overflow_type overflow;
1672 wide_int alt = wi::add (wi::to_wide (larger), 1,
1673 TYPE_SIGN (TREE_TYPE (larger)),
1674 &overflow);
1675 if (! overflow)
1676 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1677 }
1678 }
1679 }
1680 else if (cmp == GT_EXPR || cmp == GE_EXPR)
1681 {
1682 smaller = rhs;
1683 larger = gimple_cond_lhs (cond);
1684 /* If we have larger > CST it is equivalent to larger >= CST+1.
1685 Likewise larger >= CST is equivalent to larger > CST-1. */
1686 if (TREE_CODE (smaller) == INTEGER_CST
1687 && INTEGRAL_TYPE_P (TREE_TYPE (smaller)))
1688 {
1689 wi::overflow_type overflow;
1690 if (cmp == GT_EXPR)
1691 {
1692 wide_int alt = wi::add (wi::to_wide (smaller), 1,
1693 TYPE_SIGN (TREE_TYPE (smaller)),
1694 &overflow);
1695 if (! overflow)
1696 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1697 }
1698 else
1699 {
1700 wide_int alt = wi::sub (wi::to_wide (smaller), 1,
1701 TYPE_SIGN (TREE_TYPE (smaller)),
1702 &overflow);
1703 if (! overflow)
1704 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1705 }
1706 }
1707 }
1708 else
1709 return false;
1710
1711 /* Handle the special case of (signed_type)x < 0 being equivalent
1712 to x > MAX_VAL(signed_type) and (signed_type)x >= 0 equivalent
1713 to x <= MAX_VAL(signed_type). */
1714 if ((cmp == GE_EXPR || cmp == LT_EXPR)
1715 && INTEGRAL_TYPE_P (type)
1716 && TYPE_UNSIGNED (type)
1717 && integer_zerop (rhs))
1718 {
1719 tree op = gimple_cond_lhs (cond);
1720 if (TREE_CODE (op) == SSA_NAME
1721 && INTEGRAL_TYPE_P (TREE_TYPE (op))
1722 && !TYPE_UNSIGNED (TREE_TYPE (op)))
1723 {
1724 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1725 if (gimple_assign_cast_p (def_stmt))
1726 {
1727 tree op1 = gimple_assign_rhs1 (def_stmt);
1728 if (INTEGRAL_TYPE_P (TREE_TYPE (op1))
1729 && TYPE_UNSIGNED (TREE_TYPE (op1))
1730 && (TYPE_PRECISION (TREE_TYPE (op))
1731 == TYPE_PRECISION (TREE_TYPE (op1)))
1732 && useless_type_conversion_p (type, TREE_TYPE (op1)))
1733 {
1734 wide_int w1 = wi::max_value (TREE_TYPE (op));
1735 wide_int w2 = wi::add (w1, 1);
1736 if (cmp == LT_EXPR)
1737 {
1738 larger = op1;
1739 smaller = wide_int_to_tree (TREE_TYPE (op1), w1);
1740 alt_smaller = wide_int_to_tree (TREE_TYPE (op1), w2);
1741 alt_larger = NULL_TREE;
1742 }
1743 else
1744 {
1745 smaller = op1;
1746 larger = wide_int_to_tree (TREE_TYPE (op1), w1);
1747 alt_larger = wide_int_to_tree (TREE_TYPE (op1), w2);
1748 alt_smaller = NULL_TREE;
1749 }
1750 }
1751 }
1752 }
1753 }
1754
1755 /* We need to know which is the true edge and which is the false
1756 edge so that we know which PHI argument is taken on which edge. */
1757 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1758
1759 /* Forward the edges over the middle basic block. */
1760 if (true_edge->dest == middle_bb)
1761 true_edge = EDGE_SUCC (true_edge->dest, 0);
1762 if (false_edge->dest == middle_bb)
1763 false_edge = EDGE_SUCC (false_edge->dest, 0);
1764
1765 if (true_edge == e0)
1766 {
1767 gcc_assert (false_edge == e1);
1768 arg_true = arg0;
1769 arg_false = arg1;
1770 }
1771 else
1772 {
1773 gcc_assert (false_edge == e0);
1774 gcc_assert (true_edge == e1);
1775 arg_true = arg1;
1776 arg_false = arg0;
1777 }
1778
1779 if (empty_block_p (middle_bb))
1780 {
1781 if ((operand_equal_for_phi_arg_p (arg_true, smaller)
1782 || (alt_smaller
1783 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1784 && (operand_equal_for_phi_arg_p (arg_false, larger)
1785 || (alt_larger
1786 && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
1787 {
1788 /* Case
1789
1790 if (smaller < larger)
1791 rslt = smaller;
1792 else
1793 rslt = larger; */
1794 minmax = MIN_EXPR;
1795 }
1796 else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
1797 || (alt_smaller
1798 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1799 && (operand_equal_for_phi_arg_p (arg_true, larger)
1800 || (alt_larger
1801 && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
1802 minmax = MAX_EXPR;
1803 else
1804 return false;
1805 }
1806 else
1807 {
1808 /* Recognize the following case, assuming d <= u:
1809
1810 if (a <= u)
1811 b = MAX (a, d);
1812 x = PHI <b, u>
1813
1814 This is equivalent to
1815
1816 b = MAX (a, d);
1817 x = MIN (b, u); */
1818
1819 gimple *assign = last_and_only_stmt (middle_bb);
1820 tree lhs, op0, op1, bound;
1821
1822 if (!single_pred_p (middle_bb))
1823 return false;
1824
1825 if (!assign
1826 || gimple_code (assign) != GIMPLE_ASSIGN)
1827 return false;
1828
1829 lhs = gimple_assign_lhs (assign);
1830 ass_code = gimple_assign_rhs_code (assign);
1831 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1832 return false;
1833 op0 = gimple_assign_rhs1 (assign);
1834 op1 = gimple_assign_rhs2 (assign);
1835
1836 if (true_edge->src == middle_bb)
1837 {
1838 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1839 if (!operand_equal_for_phi_arg_p (lhs, arg_true))
1840 return false;
1841
1842 if (operand_equal_for_phi_arg_p (arg_false, larger)
1843 || (alt_larger
1844 && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
1845 {
1846 /* Case
1847
1848 if (smaller < larger)
1849 {
1850 r' = MAX_EXPR (smaller, bound)
1851 }
1852 r = PHI <r', larger> --> to be turned to MIN_EXPR. */
1853 if (ass_code != MAX_EXPR)
1854 return false;
1855
1856 minmax = MIN_EXPR;
1857 if (operand_equal_for_phi_arg_p (op0, smaller)
1858 || (alt_smaller
1859 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1860 bound = op1;
1861 else if (operand_equal_for_phi_arg_p (op1, smaller)
1862 || (alt_smaller
1863 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
1864 bound = op0;
1865 else
1866 return false;
1867
1868 /* We need BOUND <= LARGER. */
1869 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1870 bound, larger)))
1871 return false;
1872 }
1873 else if (operand_equal_for_phi_arg_p (arg_false, smaller)
1874 || (alt_smaller
1875 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1876 {
1877 /* Case
1878
1879 if (smaller < larger)
1880 {
1881 r' = MIN_EXPR (larger, bound)
1882 }
1883 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
1884 if (ass_code != MIN_EXPR)
1885 return false;
1886
1887 minmax = MAX_EXPR;
1888 if (operand_equal_for_phi_arg_p (op0, larger)
1889 || (alt_larger
1890 && operand_equal_for_phi_arg_p (op0, alt_larger)))
1891 bound = op1;
1892 else if (operand_equal_for_phi_arg_p (op1, larger)
1893 || (alt_larger
1894 && operand_equal_for_phi_arg_p (op1, alt_larger)))
1895 bound = op0;
1896 else
1897 return false;
1898
1899 /* We need BOUND >= SMALLER. */
1900 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1901 bound, smaller)))
1902 return false;
1903 }
1904 else
1905 return false;
1906 }
1907 else
1908 {
1909 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
1910 if (!operand_equal_for_phi_arg_p (lhs, arg_false))
1911 return false;
1912
1913 if (operand_equal_for_phi_arg_p (arg_true, larger)
1914 || (alt_larger
1915 && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
1916 {
1917 /* Case
1918
1919 if (smaller > larger)
1920 {
1921 r' = MIN_EXPR (smaller, bound)
1922 }
1923 r = PHI <r', larger> --> to be turned to MAX_EXPR. */
1924 if (ass_code != MIN_EXPR)
1925 return false;
1926
1927 minmax = MAX_EXPR;
1928 if (operand_equal_for_phi_arg_p (op0, smaller)
1929 || (alt_smaller
1930 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1931 bound = op1;
1932 else if (operand_equal_for_phi_arg_p (op1, smaller)
1933 || (alt_smaller
1934 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
1935 bound = op0;
1936 else
1937 return false;
1938
1939 /* We need BOUND >= LARGER. */
1940 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1941 bound, larger)))
1942 return false;
1943 }
1944 else if (operand_equal_for_phi_arg_p (arg_true, smaller)
1945 || (alt_smaller
1946 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1947 {
1948 /* Case
1949
1950 if (smaller > larger)
1951 {
1952 r' = MAX_EXPR (larger, bound)
1953 }
1954 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
1955 if (ass_code != MAX_EXPR)
1956 return false;
1957
1958 minmax = MIN_EXPR;
1959 if (operand_equal_for_phi_arg_p (op0, larger))
1960 bound = op1;
1961 else if (operand_equal_for_phi_arg_p (op1, larger))
1962 bound = op0;
1963 else
1964 return false;
1965
1966 /* We need BOUND <= SMALLER. */
1967 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1968 bound, smaller)))
1969 return false;
1970 }
1971 else
1972 return false;
1973 }
1974
1975 /* Move the statement from the middle block. */
1976 gsi = gsi_last_bb (cond_bb);
1977 gsi_from = gsi_last_nondebug_bb (middle_bb);
1978 reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
1979 SSA_OP_DEF));
1980 gsi_move_before (&gsi_from, &gsi);
1981 }
1982
1983 /* Emit the statement to compute min/max. */
1984 gimple_seq stmts = NULL;
1985 tree phi_result = PHI_RESULT (phi);
1986 result = gimple_build (&stmts, minmax, TREE_TYPE (phi_result), arg0, arg1);
1987
1988 gsi = gsi_last_bb (cond_bb);
1989 gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
1990
1991 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1992
1993 return true;
1994 }
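
/* Editorial illustration (not part of the original source): a minimal
   example of the constant-adjustment case handled above, assuming the
   usual gimplification of the conditional into a PHI node.

     int f (int a)
     {
       return a < 5 ? a : 4;
     }

   Here SMALLER is a, LARGER is 5 and ALT_LARGER is 4 (a < 5 is
   equivalent to a <= 4), so the PHI <a, 4> is replaced by
   MIN_EXPR <a, 4>.  */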
1995
1996 /* Attempt to optimize (x <=> y) cmp 0 and similar comparisons.
1997 For strong ordering <=> try to match something like:
1998 <bb 2> : // cond3_bb (== cond2_bb)
1999 if (x_4(D) != y_5(D))
2000 goto <bb 3>; [INV]
2001 else
2002 goto <bb 6>; [INV]
2003
2004 <bb 3> : // cond_bb
2005 if (x_4(D) < y_5(D))
2006 goto <bb 6>; [INV]
2007 else
2008 goto <bb 4>; [INV]
2009
2010 <bb 4> : // middle_bb
2011
2012 <bb 6> : // phi_bb
2013 # iftmp.0_2 = PHI <1(4), 0(2), -1(3)>
2014 _1 = iftmp.0_2 == 0;
2015
2016 and for partial ordering <=> something like:
2017
2018 <bb 2> : // cond3_bb
2019 if (a_3(D) == b_5(D))
2020 goto <bb 6>; [50.00%]
2021 else
2022 goto <bb 3>; [50.00%]
2023
2024 <bb 3> [local count: 536870913]: // cond2_bb
2025 if (a_3(D) < b_5(D))
2026 goto <bb 6>; [50.00%]
2027 else
2028 goto <bb 4>; [50.00%]
2029
2030 <bb 4> [local count: 268435456]: // cond_bb
2031 if (a_3(D) > b_5(D))
2032 goto <bb 6>; [50.00%]
2033 else
2034 goto <bb 5>; [50.00%]
2035
2036 <bb 5> [local count: 134217728]: // middle_bb
2037
2038 <bb 6> [local count: 1073741824]: // phi_bb
2039 # SR.27_4 = PHI <0(2), -1(3), 1(4), 2(5)>
2040 _2 = SR.27_4 > 0; */
2041
2042 static bool
2043 spaceship_replacement (basic_block cond_bb, basic_block middle_bb,
2044 edge e0, edge e1, gphi *phi,
2045 tree arg0, tree arg1)
2046 {
2047 tree phires = PHI_RESULT (phi);
2048 if (!INTEGRAL_TYPE_P (TREE_TYPE (phires))
2049 || TYPE_UNSIGNED (TREE_TYPE (phires))
2050 || !tree_fits_shwi_p (arg0)
2051 || !tree_fits_shwi_p (arg1)
2052 || !IN_RANGE (tree_to_shwi (arg0), -1, 2)
2053 || !IN_RANGE (tree_to_shwi (arg1), -1, 2))
2054 return false;
2055
2056 basic_block phi_bb = gimple_bb (phi);
2057 gcc_assert (phi_bb == e0->dest && phi_bb == e1->dest);
2058 if (!IN_RANGE (EDGE_COUNT (phi_bb->preds), 3, 4))
2059 return false;
2060
2061 use_operand_p use_p;
2062 gimple *use_stmt;
2063 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phires))
2064 return false;
2065 if (!single_imm_use (phires, &use_p, &use_stmt))
2066 return false;
2067 enum tree_code cmp;
2068 tree lhs, rhs;
2069 gimple *orig_use_stmt = use_stmt;
2070 tree orig_use_lhs = NULL_TREE;
2071 int prec = TYPE_PRECISION (TREE_TYPE (phires));
2072 bool is_cast = false;
2073
2074 /* Deal with the case when match.pd has rewritten the (res & ~1) == 0
2075 into res <= 1 and has left a type-cast for signed types. */
2076 if (gimple_assign_cast_p (use_stmt))
2077 {
2078 orig_use_lhs = gimple_assign_lhs (use_stmt);
2079 /* match.pd would have only done this for a signed type,
2080 so the conversion must be to an unsigned one. */
2081 tree ty1 = TREE_TYPE (gimple_assign_rhs1 (use_stmt));
2082 tree ty2 = TREE_TYPE (orig_use_lhs);
2083
2084 if (!TYPE_UNSIGNED (ty2) || !INTEGRAL_TYPE_P (ty2))
2085 return false;
2086 if (TYPE_PRECISION (ty1) != TYPE_PRECISION (ty2))
2087 return false;
2088 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig_use_lhs))
2089 return false;
2090 if (EDGE_COUNT (phi_bb->preds) != 4)
2091 return false;
2092 if (!single_imm_use (orig_use_lhs, &use_p, &use_stmt))
2093 return false;
2094
2095 is_cast = true;
2096 }
2097 else if (is_gimple_assign (use_stmt)
2098 && gimple_assign_rhs_code (use_stmt) == BIT_AND_EXPR
2099 && TREE_CODE (gimple_assign_rhs2 (use_stmt)) == INTEGER_CST
2100 && (wi::to_wide (gimple_assign_rhs2 (use_stmt))
2101 == wi::shifted_mask (1, prec - 1, false, prec)))
2102 {
2103 /* For a partial_ordering result, operator>= with unspec as the second
2104 argument is (res & 1) == res, which match.pd folds into
2105 (res & ~1) == 0. */
2106 orig_use_lhs = gimple_assign_lhs (use_stmt);
2107 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig_use_lhs))
2108 return false;
2109 if (EDGE_COUNT (phi_bb->preds) != 4)
2110 return false;
2111 if (!single_imm_use (orig_use_lhs, &use_p, &use_stmt))
2112 return false;
2113 }
2114 if (gimple_code (use_stmt) == GIMPLE_COND)
2115 {
2116 cmp = gimple_cond_code (use_stmt);
2117 lhs = gimple_cond_lhs (use_stmt);
2118 rhs = gimple_cond_rhs (use_stmt);
2119 }
2120 else if (is_gimple_assign (use_stmt))
2121 {
2122 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
2123 {
2124 cmp = gimple_assign_rhs_code (use_stmt);
2125 lhs = gimple_assign_rhs1 (use_stmt);
2126 rhs = gimple_assign_rhs2 (use_stmt);
2127 }
2128 else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
2129 {
2130 tree cond = gimple_assign_rhs1 (use_stmt);
2131 if (!COMPARISON_CLASS_P (cond))
2132 return false;
2133 cmp = TREE_CODE (cond);
2134 lhs = TREE_OPERAND (cond, 0);
2135 rhs = TREE_OPERAND (cond, 1);
2136 }
2137 else
2138 return false;
2139 }
2140 else
2141 return false;
2142 switch (cmp)
2143 {
2144 case EQ_EXPR:
2145 case NE_EXPR:
2146 case LT_EXPR:
2147 case GT_EXPR:
2148 case LE_EXPR:
2149 case GE_EXPR:
2150 break;
2151 default:
2152 return false;
2153 }
2154 if (lhs != (orig_use_lhs ? orig_use_lhs : phires)
2155 || !tree_fits_shwi_p (rhs)
2156 || !IN_RANGE (tree_to_shwi (rhs), -1, 1))
2157 return false;
2158
2159 if (is_cast)
2160 {
2161 if (TREE_CODE (rhs) != INTEGER_CST)
2162 return false;
2163 /* As with -ffast-math, we assume the result 2 (unordered) to be
2164 impossible; canonicalize (unsigned) res <= 1U or
2165 (unsigned) res < 2U into res >= 0, and (unsigned) res > 1U
2166 or (unsigned) res >= 2U into res < 0. */
2167 switch (cmp)
2168 {
2169 case LE_EXPR:
2170 if (!integer_onep (rhs))
2171 return false;
2172 cmp = GE_EXPR;
2173 break;
2174 case LT_EXPR:
2175 if (wi::ne_p (wi::to_widest (rhs), 2))
2176 return false;
2177 cmp = GE_EXPR;
2178 break;
2179 case GT_EXPR:
2180 if (!integer_onep (rhs))
2181 return false;
2182 cmp = LT_EXPR;
2183 break;
2184 case GE_EXPR:
2185 if (wi::ne_p (wi::to_widest (rhs), 2))
2186 return false;
2187 cmp = LT_EXPR;
2188 break;
2189 default:
2190 return false;
2191 }
2192 rhs = build_zero_cst (TREE_TYPE (phires));
2193 }
2194 else if (orig_use_lhs)
2195 {
2196 if ((cmp != EQ_EXPR && cmp != NE_EXPR) || !integer_zerop (rhs))
2197 return false;
2198 /* As with -ffast-math, we assume the result 2 (unordered) to be
2199 impossible; canonicalize (res & ~1) == 0 into res >= 0
2200 and (res & ~1) != 0 into res < 0. */
2201 cmp = cmp == EQ_EXPR ? GE_EXPR : LT_EXPR;
2202 }
2203
2204 if (!empty_block_p (middle_bb))
2205 return false;
2206
2207 gcond *cond1 = as_a <gcond *> (last_stmt (cond_bb));
2208 enum tree_code cmp1 = gimple_cond_code (cond1);
2209 switch (cmp1)
2210 {
2211 case LT_EXPR:
2212 case LE_EXPR:
2213 case GT_EXPR:
2214 case GE_EXPR:
2215 break;
2216 default:
2217 return false;
2218 }
2219 tree lhs1 = gimple_cond_lhs (cond1);
2220 tree rhs1 = gimple_cond_rhs (cond1);
2221 /* The optimization may be unsafe due to NaNs. */
2222 if (HONOR_NANS (TREE_TYPE (lhs1)))
2223 return false;
2224 if (TREE_CODE (lhs1) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs1))
2225 return false;
2226 if (TREE_CODE (rhs1) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1))
2227 return false;
2228
2229 if (!single_pred_p (cond_bb) || !cond_only_block_p (cond_bb))
2230 return false;
2231
2232 basic_block cond2_bb = single_pred (cond_bb);
2233 if (EDGE_COUNT (cond2_bb->succs) != 2)
2234 return false;
2235 edge cond2_phi_edge;
2236 if (EDGE_SUCC (cond2_bb, 0)->dest == cond_bb)
2237 {
2238 if (EDGE_SUCC (cond2_bb, 1)->dest != phi_bb)
2239 return false;
2240 cond2_phi_edge = EDGE_SUCC (cond2_bb, 1);
2241 }
2242 else if (EDGE_SUCC (cond2_bb, 0)->dest != phi_bb)
2243 return false;
2244 else
2245 cond2_phi_edge = EDGE_SUCC (cond2_bb, 0);
2246 tree arg2 = gimple_phi_arg_def (phi, cond2_phi_edge->dest_idx);
2247 if (!tree_fits_shwi_p (arg2))
2248 return false;
2249 gimple *cond2 = last_stmt (cond2_bb);
2250 if (cond2 == NULL || gimple_code (cond2) != GIMPLE_COND)
2251 return false;
2252 enum tree_code cmp2 = gimple_cond_code (cond2);
2253 tree lhs2 = gimple_cond_lhs (cond2);
2254 tree rhs2 = gimple_cond_rhs (cond2);
2255 if (lhs2 == lhs1)
2256 {
2257 if (!operand_equal_p (rhs2, rhs1, 0))
2258 {
2259 if ((cmp2 == EQ_EXPR || cmp2 == NE_EXPR)
2260 && TREE_CODE (rhs1) == INTEGER_CST
2261 && TREE_CODE (rhs2) == INTEGER_CST)
2262 {
2263 /* For integers, we can have cond2 x == 5
2264 and cond1 x < 5, x <= 4, x <= 5, x < 6,
2265 x > 5, x >= 6, x >= 5 or x > 4. */
2266 if (tree_int_cst_lt (rhs1, rhs2))
2267 {
2268 if (wi::ne_p (wi::to_wide (rhs1) + 1, wi::to_wide (rhs2)))
2269 return false;
2270 if (cmp1 == LE_EXPR)
2271 cmp1 = LT_EXPR;
2272 else if (cmp1 == GT_EXPR)
2273 cmp1 = GE_EXPR;
2274 else
2275 return false;
2276 }
2277 else
2278 {
2279 gcc_checking_assert (tree_int_cst_lt (rhs2, rhs1));
2280 if (wi::ne_p (wi::to_wide (rhs2) + 1, wi::to_wide (rhs1)))
2281 return false;
2282 if (cmp1 == LT_EXPR)
2283 cmp1 = LE_EXPR;
2284 else if (cmp1 == GE_EXPR)
2285 cmp1 = GT_EXPR;
2286 else
2287 return false;
2288 }
2289 rhs1 = rhs2;
2290 }
2291 else
2292 return false;
2293 }
2294 }
2295 else if (lhs2 == rhs1)
2296 {
2297 if (rhs2 != lhs1)
2298 return false;
2299 }
2300 else
2301 return false;
2302
2303 tree arg3 = arg2;
2304 basic_block cond3_bb = cond2_bb;
2305 edge cond3_phi_edge = cond2_phi_edge;
2306 gimple *cond3 = cond2;
2307 enum tree_code cmp3 = cmp2;
2308 tree lhs3 = lhs2;
2309 tree rhs3 = rhs2;
2310 if (EDGE_COUNT (phi_bb->preds) == 4)
2311 {
2312 if (absu_hwi (tree_to_shwi (arg2)) != 1)
2313 return false;
2314 if (e1->flags & EDGE_TRUE_VALUE)
2315 {
2316 if (tree_to_shwi (arg0) != 2
2317 || absu_hwi (tree_to_shwi (arg1)) != 1
2318 || wi::to_widest (arg1) == wi::to_widest (arg2))
2319 return false;
2320 }
2321 else if (tree_to_shwi (arg1) != 2
2322 || absu_hwi (tree_to_shwi (arg0)) != 1
2323 || wi::to_widest (arg0) == wi::to_widest (arg1))
2324 return false;
2325 switch (cmp2)
2326 {
2327 case LT_EXPR:
2328 case LE_EXPR:
2329 case GT_EXPR:
2330 case GE_EXPR:
2331 break;
2332 default:
2333 return false;
2334 }
2335 /* if (x < y) goto phi_bb; else fallthru;
2336 if (x > y) goto phi_bb; else fallthru;
2337 bbx:;
2338 phi_bb:;
2339 is ok, but if x and y are swapped in one of the comparisons,
2340 or the comparisons are the same and operands not swapped,
2341 or the true and false edges are swapped, it is not. */
2342 if ((lhs2 == lhs1)
2343 ^ (((cond2_phi_edge->flags
2344 & ((cmp2 == LT_EXPR || cmp2 == LE_EXPR)
2345 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) != 0)
2346 != ((e1->flags
2347 & ((cmp1 == LT_EXPR || cmp1 == LE_EXPR)
2348 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) != 0)))
2349 return false;
2350 if (!single_pred_p (cond2_bb) || !cond_only_block_p (cond2_bb))
2351 return false;
2352 cond3_bb = single_pred (cond2_bb);
2353 if (EDGE_COUNT (cond2_bb->succs) != 2)
2354 return false;
2355 if (EDGE_SUCC (cond3_bb, 0)->dest == cond2_bb)
2356 {
2357 if (EDGE_SUCC (cond3_bb, 1)->dest != phi_bb)
2358 return false;
2359 cond3_phi_edge = EDGE_SUCC (cond3_bb, 1);
2360 }
2361 else if (EDGE_SUCC (cond3_bb, 0)->dest != phi_bb)
2362 return false;
2363 else
2364 cond3_phi_edge = EDGE_SUCC (cond3_bb, 0);
2365 arg3 = gimple_phi_arg_def (phi, cond3_phi_edge->dest_idx);
2366 cond3 = last_stmt (cond3_bb);
2367 if (cond3 == NULL || gimple_code (cond3) != GIMPLE_COND)
2368 return false;
2369 cmp3 = gimple_cond_code (cond3);
2370 lhs3 = gimple_cond_lhs (cond3);
2371 rhs3 = gimple_cond_rhs (cond3);
2372 if (lhs3 == lhs1)
2373 {
2374 if (!operand_equal_p (rhs3, rhs1, 0))
2375 return false;
2376 }
2377 else if (lhs3 == rhs1)
2378 {
2379 if (rhs3 != lhs1)
2380 return false;
2381 }
2382 else
2383 return false;
2384 }
2385 else if (absu_hwi (tree_to_shwi (arg0)) != 1
2386 || absu_hwi (tree_to_shwi (arg1)) != 1
2387 || wi::to_widest (arg0) == wi::to_widest (arg1))
2388 return false;
2389
2390 if (!integer_zerop (arg3) || (cmp3 != EQ_EXPR && cmp3 != NE_EXPR))
2391 return false;
2392 if ((cond3_phi_edge->flags & (cmp3 == EQ_EXPR
2393 ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE)) == 0)
2394 return false;
2395
2396 /* lhs1 one_cmp rhs1 results in phires of 1. */
2397 enum tree_code one_cmp;
2398 if ((cmp1 == LT_EXPR || cmp1 == LE_EXPR)
2399 ^ (!integer_onep ((e1->flags & EDGE_TRUE_VALUE) ? arg1 : arg0)))
2400 one_cmp = LT_EXPR;
2401 else
2402 one_cmp = GT_EXPR;
2403
2404 enum tree_code res_cmp;
2405 switch (cmp)
2406 {
2407 case EQ_EXPR:
2408 if (integer_zerop (rhs))
2409 res_cmp = EQ_EXPR;
2410 else if (integer_minus_onep (rhs))
2411 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2412 else if (integer_onep (rhs))
2413 res_cmp = one_cmp;
2414 else
2415 return false;
2416 break;
2417 case NE_EXPR:
2418 if (integer_zerop (rhs))
2419 res_cmp = NE_EXPR;
2420 else if (integer_minus_onep (rhs))
2421 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2422 else if (integer_onep (rhs))
2423 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2424 else
2425 return false;
2426 break;
2427 case LT_EXPR:
2428 if (integer_onep (rhs))
2429 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2430 else if (integer_zerop (rhs))
2431 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2432 else
2433 return false;
2434 break;
2435 case LE_EXPR:
2436 if (integer_zerop (rhs))
2437 res_cmp = one_cmp == LT_EXPR ? GE_EXPR : LE_EXPR;
2438 else if (integer_minus_onep (rhs))
2439 res_cmp = one_cmp == LT_EXPR ? GT_EXPR : LT_EXPR;
2440 else
2441 return false;
2442 break;
2443 case GT_EXPR:
2444 if (integer_minus_onep (rhs))
2445 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2446 else if (integer_zerop (rhs))
2447 res_cmp = one_cmp;
2448 else
2449 return false;
2450 break;
2451 case GE_EXPR:
2452 if (integer_zerop (rhs))
2453 res_cmp = one_cmp == LT_EXPR ? LE_EXPR : GE_EXPR;
2454 else if (integer_onep (rhs))
2455 res_cmp = one_cmp;
2456 else
2457 return false;
2458 break;
2459 default:
2460 gcc_unreachable ();
2461 }
2462
2463 if (gimple_code (use_stmt) == GIMPLE_COND)
2464 {
2465 gcond *use_cond = as_a <gcond *> (use_stmt);
2466 gimple_cond_set_code (use_cond, res_cmp);
2467 gimple_cond_set_lhs (use_cond, lhs1);
2468 gimple_cond_set_rhs (use_cond, rhs1);
2469 }
2470 else if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
2471 {
2472 gimple_assign_set_rhs_code (use_stmt, res_cmp);
2473 gimple_assign_set_rhs1 (use_stmt, lhs1);
2474 gimple_assign_set_rhs2 (use_stmt, rhs1);
2475 }
2476 else
2477 {
2478 tree cond = build2 (res_cmp, TREE_TYPE (gimple_assign_rhs1 (use_stmt)),
2479 lhs1, rhs1);
2480 gimple_assign_set_rhs1 (use_stmt, cond);
2481 }
2482 update_stmt (use_stmt);
2483
2484 if (MAY_HAVE_DEBUG_BIND_STMTS)
2485 {
2486 use_operand_p use_p;
2487 imm_use_iterator iter;
2488 bool has_debug_uses = false;
2489 bool has_cast_debug_uses = false;
2490 FOR_EACH_IMM_USE_FAST (use_p, iter, phires)
2491 {
2492 gimple *use_stmt = USE_STMT (use_p);
2493 if (orig_use_lhs && use_stmt == orig_use_stmt)
2494 continue;
2495 gcc_assert (is_gimple_debug (use_stmt));
2496 has_debug_uses = true;
2497 break;
2498 }
2499 if (orig_use_lhs)
2500 {
2501 if (!has_debug_uses || is_cast)
2502 FOR_EACH_IMM_USE_FAST (use_p, iter, orig_use_lhs)
2503 {
2504 gimple *use_stmt = USE_STMT (use_p);
2505 gcc_assert (is_gimple_debug (use_stmt));
2506 has_debug_uses = true;
2507 if (is_cast)
2508 has_cast_debug_uses = true;
2509 }
2510 gimple_stmt_iterator gsi = gsi_for_stmt (orig_use_stmt);
2511 tree zero = build_zero_cst (TREE_TYPE (orig_use_lhs));
2512 gimple_assign_set_rhs_with_ops (&gsi, INTEGER_CST, zero);
2513 update_stmt (orig_use_stmt);
2514 }
2515
2516 if (has_debug_uses)
2517 {
2518 /* If there are debug uses, emit something like:
2519 # DEBUG D#1 => i_2(D) > j_3(D) ? 1 : -1
2520 # DEBUG D#2 => i_2(D) == j_3(D) ? 0 : D#1
2521 where > stands for the comparison that yielded 1
2522 and replace debug uses of phi result with that D#2.
2523 Ignore the value of 2, because if NaNs aren't expected,
2524 all floating point numbers should be comparable. */
2525 gimple_stmt_iterator gsi = gsi_after_labels (gimple_bb (phi));
2526 tree type = TREE_TYPE (phires);
2527 tree temp1 = build_debug_expr_decl (type);
2528 tree t = build2 (one_cmp, boolean_type_node, lhs1, rhs2);
2529 t = build3 (COND_EXPR, type, t, build_one_cst (type),
2530 build_int_cst (type, -1));
2531 gimple *g = gimple_build_debug_bind (temp1, t, phi);
2532 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2533 tree temp2 = build_debug_expr_decl (type);
2534 t = build2 (EQ_EXPR, boolean_type_node, lhs1, rhs2);
2535 t = build3 (COND_EXPR, type, t, build_zero_cst (type), temp1);
2536 g = gimple_build_debug_bind (temp2, t, phi);
2537 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2538 replace_uses_by (phires, temp2);
2539 if (orig_use_lhs)
2540 {
2541 if (has_cast_debug_uses)
2542 {
2543 tree temp3 = make_node (DEBUG_EXPR_DECL);
2544 DECL_ARTIFICIAL (temp3) = 1;
2545 TREE_TYPE (temp3) = TREE_TYPE (orig_use_lhs);
2546 SET_DECL_MODE (temp3, TYPE_MODE (type));
2547 t = fold_convert (TREE_TYPE (temp3), temp2);
2548 g = gimple_build_debug_bind (temp3, t, phi);
2549 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2550 replace_uses_by (orig_use_lhs, temp3);
2551 }
2552 else
2553 replace_uses_by (orig_use_lhs, temp2);
2554 }
2555 }
2556 }
2557
2558 if (orig_use_lhs)
2559 {
2560 gimple_stmt_iterator gsi = gsi_for_stmt (orig_use_stmt);
2561 gsi_remove (&gsi, true);
2562 }
2563
2564 gimple_stmt_iterator psi = gsi_for_stmt (phi);
2565 remove_phi_node (&psi, true);
2566 statistics_counter_event (cfun, "spaceship replacement", 1);
2567
2568 return true;
2569 }
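
/* Editorial illustration (not part of the original source): a sketch of
   C++ source producing the strong-ordering shape matched above,
   assuming the front end lowers the <=> result into the integer PHI
   shown in the comment before spaceship_replacement.

     bool f (int x, int y)
     {
       return (x <=> y) == 0;
     }

   The PHI <1, 0, -1> feeding the == 0 test is removed and the use is
   rewritten into the direct comparison x == y.  */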
2570
2571 /* Optimize x ? __builtin_fun (x) : C, where C is __builtin_fun (0).
2572 Convert
2573
2574 <bb 2>
2575 if (b_4(D) != 0)
2576 goto <bb 3>
2577 else
2578 goto <bb 4>
2579
2580 <bb 3>
2581 _2 = (unsigned long) b_4(D);
2582 _9 = __builtin_popcountl (_2);
2583 OR
2584 _9 = __builtin_popcountl (b_4(D));
2585
2586 <bb 4>
2587 c_12 = PHI <0(2), _9(3)>
2588
2589 Into
2590 <bb 2>
2591 _2 = (unsigned long) b_4(D);
2592 _9 = __builtin_popcountl (_2);
2593 OR
2594 _9 = __builtin_popcountl (b_4(D));
2595
2596 <bb 4>
2597 c_12 = PHI <_9(2)>
2598
2599 Similarly for __builtin_clz or __builtin_ctz if
2600 C?Z_DEFINED_VALUE_AT_ZERO is 2, optab is present and
2601 instead of 0 above it uses the value from that macro. */
2602
2603 static bool
2604 cond_removal_in_builtin_zero_pattern (basic_block cond_bb,
2605 basic_block middle_bb,
2606 edge e1, edge e2, gphi *phi,
2607 tree arg0, tree arg1)
2608 {
2609 gimple *cond;
2610 gimple_stmt_iterator gsi, gsi_from;
2611 gimple *call;
2612 gimple *cast = NULL;
2613 tree lhs, arg;
2614
2615 /* Check that
2616 _2 = (unsigned long) b_4(D);
2617 _9 = __builtin_popcountl (_2);
2618 OR
2619 _9 = __builtin_popcountl (b_4(D));
2620 are the only stmts in the middle_bb. */
2621
2622 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
2623 if (gsi_end_p (gsi))
2624 return false;
2625 cast = gsi_stmt (gsi);
2626 gsi_next_nondebug (&gsi);
2627 if (!gsi_end_p (gsi))
2628 {
2629 call = gsi_stmt (gsi);
2630 gsi_next_nondebug (&gsi);
2631 if (!gsi_end_p (gsi))
2632 return false;
2633 }
2634 else
2635 {
2636 call = cast;
2637 cast = NULL;
2638 }
2639
2640 /* Check that we have a popcount/clz/ctz builtin. */
2641 if (!is_gimple_call (call) || gimple_call_num_args (call) != 1)
2642 return false;
2643
2644 arg = gimple_call_arg (call, 0);
2645 lhs = gimple_get_lhs (call);
2646
2647 if (lhs == NULL_TREE)
2648 return false;
2649
2650 combined_fn cfn = gimple_call_combined_fn (call);
2651 internal_fn ifn = IFN_LAST;
2652 int val = 0;
2653 switch (cfn)
2654 {
2655 case CFN_BUILT_IN_BSWAP16:
2656 case CFN_BUILT_IN_BSWAP32:
2657 case CFN_BUILT_IN_BSWAP64:
2658 case CFN_BUILT_IN_BSWAP128:
2659 CASE_CFN_FFS:
2660 CASE_CFN_PARITY:
2661 CASE_CFN_POPCOUNT:
2662 break;
2663 CASE_CFN_CLZ:
2664 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
2665 {
2666 tree type = TREE_TYPE (arg);
2667 if (direct_internal_fn_supported_p (IFN_CLZ, type, OPTIMIZE_FOR_BOTH)
2668 && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
2669 val) == 2)
2670 {
2671 ifn = IFN_CLZ;
2672 break;
2673 }
2674 }
2675 return false;
2676 CASE_CFN_CTZ:
2677 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
2678 {
2679 tree type = TREE_TYPE (arg);
2680 if (direct_internal_fn_supported_p (IFN_CTZ, type, OPTIMIZE_FOR_BOTH)
2681 && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
2682 val) == 2)
2683 {
2684 ifn = IFN_CTZ;
2685 break;
2686 }
2687 }
2688 return false;
2689 case CFN_BUILT_IN_CLRSB:
2690 val = TYPE_PRECISION (integer_type_node) - 1;
2691 break;
2692 case CFN_BUILT_IN_CLRSBL:
2693 val = TYPE_PRECISION (long_integer_type_node) - 1;
2694 break;
2695 case CFN_BUILT_IN_CLRSBLL:
2696 val = TYPE_PRECISION (long_long_integer_type_node) - 1;
2697 break;
2698 default:
2699 return false;
2700 }
2701
2702 if (cast)
2703 {
2704 /* We have a cast stmt feeding popcount/clz/ctz builtin. */
2705 /* Check that we have a cast prior to that. */
2706 if (gimple_code (cast) != GIMPLE_ASSIGN
2707 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (cast)))
2708 return false;
2709 /* Result of the cast stmt is the argument to the builtin. */
2710 if (arg != gimple_assign_lhs (cast))
2711 return false;
2712 arg = gimple_assign_rhs1 (cast);
2713 }
2714
2715 cond = last_stmt (cond_bb);
2716
2717 /* Cond_bb has a check for b_4 [!=|==] 0 before calling the popcount/clz/ctz
2718 builtin. */
2719 if (gimple_code (cond) != GIMPLE_COND
2720 || (gimple_cond_code (cond) != NE_EXPR
2721 && gimple_cond_code (cond) != EQ_EXPR)
2722 || !integer_zerop (gimple_cond_rhs (cond))
2723 || arg != gimple_cond_lhs (cond))
2724 return false;
2725
2726 /* Canonicalize. */
2727 if ((e2->flags & EDGE_TRUE_VALUE
2728 && gimple_cond_code (cond) == NE_EXPR)
2729 || (e1->flags & EDGE_TRUE_VALUE
2730 && gimple_cond_code (cond) == EQ_EXPR))
2731 {
2732 std::swap (arg0, arg1);
2733 std::swap (e1, e2);
2734 }
2735
2736 /* Check PHI arguments. */
2737 if (lhs != arg0
2738 || TREE_CODE (arg1) != INTEGER_CST
2739 || wi::to_wide (arg1) != val)
2740 return false;
2741
2742 /* And insert the popcount/clz/ctz builtin and cast stmt before the
2743 cond_bb. */
2744 gsi = gsi_last_bb (cond_bb);
2745 if (cast)
2746 {
2747 gsi_from = gsi_for_stmt (cast);
2748 gsi_move_before (&gsi_from, &gsi);
2749 reset_flow_sensitive_info (gimple_get_lhs (cast));
2750 }
2751 gsi_from = gsi_for_stmt (call);
2752 if (ifn == IFN_LAST || gimple_call_internal_p (call))
2753 gsi_move_before (&gsi_from, &gsi);
2754 else
2755 {
2756 /* For __builtin_c[lt]z* force .C[LT]Z ifn, because only
2757 the latter is well defined at zero. */
2758 call = gimple_build_call_internal (ifn, 1, gimple_call_arg (call, 0));
2759 gimple_call_set_lhs (call, lhs);
2760 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
2761 gsi_remove (&gsi_from, true);
2762 }
2763 reset_flow_sensitive_info (lhs);
2764
2765 /* Now update the PHI and remove unneeded bbs. */
2766 replace_phi_edge_with_variable (cond_bb, e2, phi, lhs);
2767 return true;
2768 }
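
/* Editorial illustration (not part of the original source): the kind of
   source matched by the pattern above.

     int f (unsigned long b)
     {
       return b ? __builtin_popcountl (b) : 0;
     }

   Since __builtin_popcountl (0) is 0, the guard is redundant; the call
   is moved in front of the condition and the PHI collapses to the
   call's result.  */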
2769
2770 /* Auxiliary functions to determine the set of memory accesses which
2771 can't trap because they are preceded by accesses to the same memory
2772 portion. We do that for MEM_REFs, so we only need to track
2773 the SSA_NAME of the pointer indirectly referenced. The algorithm
2774 simply is a walk over all instructions in dominator order. When
2775 we see a MEM_REF we determine if we've already seen the same
2776 ref anywhere up to the root of the dominator tree. If we have,
2777 the current access can't trap. If we don't see any dominating access,
2778 the current access might trap, but might also make later accesses
2779 non-trapping, so we remember it. We need to be careful with loads
2780 and stores: for instance a load might not trap, while a store would,
2781 so if we see a dominating read access this doesn't mean that a later
2782 write access would not trap. Hence we also need to differentiate the
2783 type of access(es) seen.
2784
2785 ??? We currently are very conservative and assume that a load might
2786 trap even if a store doesn't (write-only memory). This probably is
2787 overly conservative.
2788
2789 As a special case, for !TREE_ADDRESSABLE automatic variables we
2790 ignore whether an access is a load or a store, because the local
2791 stack is always writable. */
2792
2793 /* A hash-table of references (MEM_REF/ARRAY_REF/COMPONENT_REF), and in which
2794 basic block an *_REF through it was seen, which would constitute a
2795 no-trap region for later identical accesses.
2796
2797 Size is needed to support 2 MEM_REFs of different types, like
2798 MEM<double>(s_1) and MEM<long>(s_1), which would compare equal with
2799 OEP_ADDRESS_OF. */
2800 struct ref_to_bb
2801 {
2802 tree exp;
2803 HOST_WIDE_INT size;
2804 unsigned int phase;
2805 basic_block bb;
2806 };
2807
2808 /* Hashtable helpers. */
2809
2810 struct refs_hasher : free_ptr_hash<ref_to_bb>
2811 {
2812 static inline hashval_t hash (const ref_to_bb *);
2813 static inline bool equal (const ref_to_bb *, const ref_to_bb *);
2814 };
2815
2816 /* Used for quick clearing of the hash-table when we see calls.
2817 Hash entries with phase < nt_call_phase are invalid. */
2818 static unsigned int nt_call_phase;
2819
2820 /* The hash function. */
2821
2822 inline hashval_t
2823 refs_hasher::hash (const ref_to_bb *n)
2824 {
2825 inchash::hash hstate;
2826 inchash::add_expr (n->exp, hstate, OEP_ADDRESS_OF);
2827 hstate.add_hwi (n->size);
2828 return hstate.end ();
2829 }
2830
2831 /* The equality function of *P1 and *P2. */
2832
2833 inline bool
2834 refs_hasher::equal (const ref_to_bb *n1, const ref_to_bb *n2)
2835 {
2836 return operand_equal_p (n1->exp, n2->exp, OEP_ADDRESS_OF)
2837 && n1->size == n2->size;
2838 }
2839
2840 class nontrapping_dom_walker : public dom_walker
2841 {
2842 public:
2843 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
2844 : dom_walker (direction), m_nontrapping (ps), m_seen_refs (128)
2845 {}
2846
2847 virtual edge before_dom_children (basic_block);
2848 virtual void after_dom_children (basic_block);
2849
2850 private:
2851
2852 /* We see the expression EXP in basic block BB. If it's an interesting
2853 expression (a MEM_REF, ARRAY_REF or COMPONENT_REF) possibly insert the
2854 expression into the set NONTRAP or the hash table of seen expressions.
2855 STORE is true if this expression is on the LHS, otherwise it's on
2856 the RHS. */
2857 void add_or_mark_expr (basic_block, tree, bool);
2858
2859 hash_set<tree> *m_nontrapping;
2860
2861 /* The hash table for remembering what we've seen. */
2862 hash_table<refs_hasher> m_seen_refs;
2863 };
2864
2865 /* Called by walk_dominator_tree, when entering the block BB. */
2866 edge
2867 nontrapping_dom_walker::before_dom_children (basic_block bb)
2868 {
2869 edge e;
2870 edge_iterator ei;
2871 gimple_stmt_iterator gsi;
2872
2873 /* If we haven't seen all our predecessors, clear the hash-table. */
2874 FOR_EACH_EDGE (e, ei, bb->preds)
2875 if ((((size_t)e->src->aux) & 2) == 0)
2876 {
2877 nt_call_phase++;
2878 break;
2879 }
2880
2881 /* Mark this BB as being on the path to dominator root and as visited. */
2882 bb->aux = (void*)(1 | 2);
2883
2884 /* And walk the statements in order. */
2885 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2886 {
2887 gimple *stmt = gsi_stmt (gsi);
2888
2889 if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
2890 || (is_gimple_call (stmt)
2891 && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
2892 nt_call_phase++;
2893 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
2894 {
2895 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
2896 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
2897 }
2898 }
2899 return NULL;
2900 }
2901
2902 /* Called by walk_dominator_tree, when basic block BB is exited. */
2903 void
2904 nontrapping_dom_walker::after_dom_children (basic_block bb)
2905 {
2906 /* This BB isn't on the path to dominator root anymore. */
2907 bb->aux = (void*)2;
2908 }
2909
2910 /* We see the expression EXP in basic block BB. If it's an interesting
2911 expression of:
2912 1) MEM_REF
2913 2) ARRAY_REF
2914 3) COMPONENT_REF
2915 possibly insert the expression into the set NONTRAP or the hash table
2916 of seen expressions. STORE is true if this expression is on the LHS,
2917 otherwise it's on the RHS. */
2918 void
2919 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
2920 {
2921 HOST_WIDE_INT size;
2922
2923 if ((TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == ARRAY_REF
2924 || TREE_CODE (exp) == COMPONENT_REF)
2925 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
2926 {
2927 struct ref_to_bb map;
2928 ref_to_bb **slot;
2929 struct ref_to_bb *r2bb;
2930 basic_block found_bb = 0;
2931
2932 if (!store)
2933 {
2934 tree base = get_base_address (exp);
2935 /* Only record a LOAD of a local variable whose address is not taken,
2936 as the local stack is always writable. This allows cselim on a
2937 STORE with a dominating LOAD. */
2938 if (!auto_var_p (base) || TREE_ADDRESSABLE (base))
2939 return;
2940 }
2941
2942 /* Try to find the last seen *_REF, which can trap. */
2943 map.exp = exp;
2944 map.size = size;
2945 slot = m_seen_refs.find_slot (&map, INSERT);
2946 r2bb = *slot;
2947 if (r2bb && r2bb->phase >= nt_call_phase)
2948 found_bb = r2bb->bb;
2949
2950 /* If we've found a previously seen *_REF, _and_ it dominates EXP
2951 (it's in a basic block on the path from us to the dominator root),
2952 then EXP can't trap. */
2953 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
2954 {
2955 m_nontrapping->add (exp);
2956 }
2957 else
2958 {
2959 /* EXP might trap, so insert it into the hash table. */
2960 if (r2bb)
2961 {
2962 r2bb->phase = nt_call_phase;
2963 r2bb->bb = bb;
2964 }
2965 else
2966 {
2967 r2bb = XNEW (struct ref_to_bb);
2968 r2bb->phase = nt_call_phase;
2969 r2bb->bb = bb;
2970 r2bb->exp = exp;
2971 r2bb->size = size;
2972 *slot = r2bb;
2973 }
2974 }
2975 }
2976 }
2977
2978 /* This is the entry point for gathering non-trapping memory accesses.
2979 It does a dominator walk over the whole function and makes use of
2980 the bb->aux pointers. It returns a set of trees
2981 (the MEM_REFs themselves) which can't trap. */
2982 static hash_set<tree> *
2983 get_non_trapping (void)
2984 {
2985 nt_call_phase = 0;
2986 hash_set<tree> *nontrap = new hash_set<tree>;
2987
2988 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
2989 .walk (cfun->cfg->x_entry_block_ptr);
2990
2991 clear_aux_for_blocks ();
2992 return nontrap;
2993 }
2994
2995 /* Do the main work of conditional store replacement. We already know
2996 that the recognized pattern looks like so:
2997
2998 split:
2999 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
3000 MIDDLE_BB:
3001 something
3002 fallthrough (edge E0)
3003 JOIN_BB:
3004 some more
3005
3006 We check that MIDDLE_BB contains only one store, that that store
3007 doesn't trap (not via NOTRAP, but via checking if an access to the same
3008 memory location dominates us, or the store is to a local addressable
3009 object) and that the store has a "simple" RHS. */
3010
3011 static bool
3012 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
3013 edge e0, edge e1, hash_set<tree> *nontrap)
3014 {
3015 gimple *assign = last_and_only_stmt (middle_bb);
3016 tree lhs, rhs, name, name2;
3017 gphi *newphi;
3018 gassign *new_stmt;
3019 gimple_stmt_iterator gsi;
3020 location_t locus;
3021
3022 /* Check if middle_bb contains only one store. */
3023 if (!assign
3024 || !gimple_assign_single_p (assign)
3025 || gimple_has_volatile_ops (assign))
3026 return false;
3027
3028 /* And no PHI nodes, so all uses in the single stmt are also
3029 available at the place we insert to. */
3030 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
3031 return false;
3032
3033 locus = gimple_location (assign);
3034 lhs = gimple_assign_lhs (assign);
3035 rhs = gimple_assign_rhs1 (assign);
3036 if ((!REFERENCE_CLASS_P (lhs)
3037 && !DECL_P (lhs))
3038 || !is_gimple_reg_type (TREE_TYPE (lhs)))
3039 return false;
3040
3041 /* Prove that we can move the store down. We could also check
3042 TREE_THIS_NOTRAP here, but in that case we could also move stores
3043 whose value is not readily available, which we want to avoid. */
3044 if (!nontrap->contains (lhs))
3045 {
3046 /* If LHS is an access to a local variable without address-taken
3047 (or when we allow data races) and known not to trap, we could
3048 always safely move down the store. */
3049 tree base = get_base_address (lhs);
3050 if (!auto_var_p (base)
3051 || (TREE_ADDRESSABLE (base) && !flag_store_data_races)
3052 || tree_could_trap_p (lhs))
3053 return false;
3054 }
3055
3056 /* Now we've checked the constraints, so do the transformation:
3057 1) Remove the single store. */
3058 gsi = gsi_for_stmt (assign);
3059 unlink_stmt_vdef (assign);
3060 gsi_remove (&gsi, true);
3061 release_defs (assign);
3062
3063 /* Make both store and load use alias-set zero as we have to
3064 deal with the case of the store being a conditional change
3065 of the dynamic type. */
3066 lhs = unshare_expr (lhs);
3067 tree *basep = &lhs;
3068 while (handled_component_p (*basep))
3069 basep = &TREE_OPERAND (*basep, 0);
3070 if (TREE_CODE (*basep) == MEM_REF
3071 || TREE_CODE (*basep) == TARGET_MEM_REF)
3072 TREE_OPERAND (*basep, 1)
3073 = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
3074 else
3075 *basep = build2 (MEM_REF, TREE_TYPE (*basep),
3076 build_fold_addr_expr (*basep),
3077 build_zero_cst (ptr_type_node));
3078
3079 /* 2) Insert a load from the memory of the store to the temporary
3080 on the edge which did not contain the store. */
3081 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3082 new_stmt = gimple_build_assign (name, lhs);
3083 gimple_set_location (new_stmt, locus);
3084 lhs = unshare_expr (lhs);
3085 {
3086 /* Set the no-warning bit on the rhs of the load to avoid uninit
3087 warnings. */
3088 tree rhs1 = gimple_assign_rhs1 (new_stmt);
3089 suppress_warning (rhs1, OPT_Wuninitialized);
3090 }
3091 gsi_insert_on_edge (e1, new_stmt);
3092
3093 /* 3) Create a PHI node at the join block, with one argument
3094 holding the old RHS, and the other holding the temporary
3095 where we stored the old memory contents. */
3096 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3097 newphi = create_phi_node (name2, join_bb);
3098 add_phi_arg (newphi, rhs, e0, locus);
3099 add_phi_arg (newphi, name, e1, locus);
3100
3101 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
3102
3103 /* 4) Insert that PHI node. */
3104 gsi = gsi_after_labels (join_bb);
3105 if (gsi_end_p (gsi))
3106 {
3107 gsi = gsi_last_bb (join_bb);
3108 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
3109 }
3110 else
3111 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
3112
3113 if (dump_file && (dump_flags & TDF_DETAILS))
3114 {
3115 fprintf (dump_file, "\nConditional store replacement happened!");
3116 fprintf (dump_file, "\nReplaced the store with a load.");
3117 fprintf (dump_file, "\nInserted a new PHI statement in join block:\n");
3118 print_gimple_stmt (dump_file, new_stmt, 0, TDF_VOPS|TDF_MEMSYMS);
3119 }
3120 statistics_counter_event (cfun, "conditional store replacement", 1);
3121
3122 return true;
3123 }
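
/* Editorial illustration (not part of the original source): a sketch of
   source cond_store_replacement can handle, assuming the dominating
   unconditional store makes the conditional store provably
   non-trapping (see get_non_trapping above).

     void f (int *p, int flag)
     {
       *p = 0;
       if (flag)
         *p = 1;
     }

   The second store is sunk to the join block and made unconditional;
   a PHI selects between 1 and the value re-loaded from *p on the edge
   that skipped the store.  */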
3124
3125 /* Do the main work of if-then-else conditional store replacement. */
3126
3127 static bool
3128 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
3129 basic_block join_bb, gimple *then_assign,
3130 gimple *else_assign)
3131 {
3132 tree lhs_base, lhs, then_rhs, else_rhs, name;
3133 location_t then_locus, else_locus;
3134 gimple_stmt_iterator gsi;
3135 gphi *newphi;
3136 gassign *new_stmt;
3137
3138 if (then_assign == NULL
3139 || !gimple_assign_single_p (then_assign)
3140 || gimple_clobber_p (then_assign)
3141 || gimple_has_volatile_ops (then_assign)
3142 || else_assign == NULL
3143 || !gimple_assign_single_p (else_assign)
3144 || gimple_clobber_p (else_assign)
3145 || gimple_has_volatile_ops (else_assign))
3146 return false;
3147
3148 lhs = gimple_assign_lhs (then_assign);
3149 if (!is_gimple_reg_type (TREE_TYPE (lhs))
3150 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
3151 return false;
3152
3153 lhs_base = get_base_address (lhs);
3154 if (lhs_base == NULL_TREE
3155 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
3156 return false;
3157
3158 then_rhs = gimple_assign_rhs1 (then_assign);
3159 else_rhs = gimple_assign_rhs1 (else_assign);
3160 then_locus = gimple_location (then_assign);
3161 else_locus = gimple_location (else_assign);
3162
3163 /* Now we've checked the constraints, so do the transformation:
3164 1) Remove the stores. */
3165 gsi = gsi_for_stmt (then_assign);
3166 unlink_stmt_vdef (then_assign);
3167 gsi_remove (&gsi, true);
3168 release_defs (then_assign);
3169
3170 gsi = gsi_for_stmt (else_assign);
3171 unlink_stmt_vdef (else_assign);
3172 gsi_remove (&gsi, true);
3173 release_defs (else_assign);
3174
3175 /* 2) Create a PHI node at the join block, with one argument
3176 holding the RHS stored in THEN_BB and the other holding the
3177 RHS stored in ELSE_BB. */
3178 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
3179 newphi = create_phi_node (name, join_bb);
3180 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
3181 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
3182
3183 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
3184
3185 /* 3) Insert that PHI node. */
3186 gsi = gsi_after_labels (join_bb);
3187 if (gsi_end_p (gsi))
3188 {
3189 gsi = gsi_last_bb (join_bb);
3190 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
3191 }
3192 else
3193 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
3194
3195 statistics_counter_event (cfun, "if-then-else store replacement", 1);
3196
3197 return true;
3198 }
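
/* Editorial illustration (not part of the original source): the
   if-then-else form handled by cond_if_else_store_replacement_1.

     void f (int *p, int flag)
     {
       if (flag)
         *p = 1;
       else
         *p = 2;
     }

   Both conditional stores are removed; a PHI <1, 2> is created in the
   join block and followed by a single unconditional store to *p.  */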
3199
3200 /* Return the single store in BB with VDEF or NULL if there are
3201 other stores in the BB or loads following the store. */
3202
3203 static gimple *
3204 single_trailing_store_in_bb (basic_block bb, tree vdef)
3205 {
3206 if (SSA_NAME_IS_DEFAULT_DEF (vdef))
3207 return NULL;
3208 gimple *store = SSA_NAME_DEF_STMT (vdef);
3209 if (gimple_bb (store) != bb
3210 || gimple_code (store) == GIMPLE_PHI)
3211 return NULL;
3212
3213 /* Verify there is no other store in this BB. */
3214 if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
3215 && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
3216 && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
3217 return NULL;
3218
3219 /* Verify there is no load or store after the store. */
3220 use_operand_p use_p;
3221 imm_use_iterator imm_iter;
3222 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
3223 if (USE_STMT (use_p) != store
3224 && gimple_bb (USE_STMT (use_p)) == bb)
3225 return NULL;
3226
3227 return store;
3228 }
3229
3230 /* Conditional store replacement. We already know
3231 that the recognized pattern looks like so:
3232
3233 split:
3234 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
3235 THEN_BB:
3236 ...
3237 X = Y;
3238 ...
3239 goto JOIN_BB;
3240 ELSE_BB:
3241 ...
3242 X = Z;
3243 ...
3244 fallthrough (edge E0)
3245 JOIN_BB:
3246 some more
3247
3248 We check that it is safe to sink the store to JOIN_BB by verifying that
3249 there are no read-after-write or write-after-write dependencies in
3250 THEN_BB and ELSE_BB. */
3251
3252 static bool
3253 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
3254 basic_block join_bb)
3255 {
3256 vec<data_reference_p> then_datarefs, else_datarefs;
3257 vec<ddr_p> then_ddrs, else_ddrs;
3258 gimple *then_store, *else_store;
3259 bool found, ok = false, res;
3260 struct data_dependence_relation *ddr;
3261 data_reference_p then_dr, else_dr;
3262 int i, j;
3263 tree then_lhs, else_lhs;
3264 basic_block blocks[3];
3265
3266 /* Handle the case with a single store in THEN_BB and ELSE_BB. That is
3267 cheap enough to always handle, as it allows us to elide dependence
3268 checking. */
3269 gphi *vphi = NULL;
3270 for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
3271 gsi_next (&si))
3272 if (virtual_operand_p (gimple_phi_result (si.phi ())))
3273 {
3274 vphi = si.phi ();
3275 break;
3276 }
3277 if (!vphi)
3278 return false;
3279 tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
3280 tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
3281 gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
3282 if (then_assign)
3283 {
3284 gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
3285 if (else_assign)
3286 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
3287 then_assign, else_assign);
3288 }
3289
3290 /* If either vectorization or if-conversion is disabled then do
3291 not sink any stores. */
3292 if (param_max_stores_to_sink == 0
3293 || (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
3294 || !flag_tree_loop_if_convert)
3295 return false;
3296
3297 /* Find data references. */
3298 then_datarefs.create (1);
3299 else_datarefs.create (1);
3300 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
3301 == chrec_dont_know)
3302 || !then_datarefs.length ()
3303 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
3304 == chrec_dont_know)
3305 || !else_datarefs.length ())
3306 {
3307 free_data_refs (then_datarefs);
3308 free_data_refs (else_datarefs);
3309 return false;
3310 }
3311
3312 /* Find pairs of stores with equal LHS. */
3313 auto_vec<gimple *, 1> then_stores, else_stores;
3314 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
3315 {
3316 if (DR_IS_READ (then_dr))
3317 continue;
3318
3319 then_store = DR_STMT (then_dr);
3320 then_lhs = gimple_get_lhs (then_store);
3321 if (then_lhs == NULL_TREE)
3322 continue;
3323 found = false;
3324
3325 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
3326 {
3327 if (DR_IS_READ (else_dr))
3328 continue;
3329
3330 else_store = DR_STMT (else_dr);
3331 else_lhs = gimple_get_lhs (else_store);
3332 if (else_lhs == NULL_TREE)
3333 continue;
3334
3335 if (operand_equal_p (then_lhs, else_lhs, 0))
3336 {
3337 found = true;
3338 break;
3339 }
3340 }
3341
3342 if (!found)
3343 continue;
3344
3345 then_stores.safe_push (then_store);
3346 else_stores.safe_push (else_store);
3347 }
3348
3349 /* No pairs of stores found, or too many of them to sink. */
3350 if (!then_stores.length ()
3351 || then_stores.length () > (unsigned) param_max_stores_to_sink)
3352 {
3353 free_data_refs (then_datarefs);
3354 free_data_refs (else_datarefs);
3355 return false;
3356 }
3357
3358 /* Compute and check data dependencies in both basic blocks. */
3359 then_ddrs.create (1);
3360 else_ddrs.create (1);
3361 if (!compute_all_dependences (then_datarefs, &then_ddrs,
3362 vNULL, false)
3363 || !compute_all_dependences (else_datarefs, &else_ddrs,
3364 vNULL, false))
3365 {
3366 free_dependence_relations (then_ddrs);
3367 free_dependence_relations (else_ddrs);
3368 free_data_refs (then_datarefs);
3369 free_data_refs (else_datarefs);
3370 return false;
3371 }
3372 blocks[0] = then_bb;
3373 blocks[1] = else_bb;
3374 blocks[2] = join_bb;
3375 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
3376
3377 /* Check that there are no read-after-write or write-after-write dependencies
3378 in THEN_BB. */
3379 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
3380 {
3381 struct data_reference *dra = DDR_A (ddr);
3382 struct data_reference *drb = DDR_B (ddr);
3383
3384 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
3385 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
3386 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
3387 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
3388 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
3389 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
3390 {
3391 free_dependence_relations (then_ddrs);
3392 free_dependence_relations (else_ddrs);
3393 free_data_refs (then_datarefs);
3394 free_data_refs (else_datarefs);
3395 return false;
3396 }
3397 }
3398
3399 /* Check that there are no read-after-write or write-after-write dependencies
3400 in ELSE_BB. */
3401 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
3402 {
3403 struct data_reference *dra = DDR_A (ddr);
3404 struct data_reference *drb = DDR_B (ddr);
3405
3406 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
3407 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
3408 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
3409 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
3410 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
3411 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
3412 {
3413 free_dependence_relations (then_ddrs);
3414 free_dependence_relations (else_ddrs);
3415 free_data_refs (then_datarefs);
3416 free_data_refs (else_datarefs);
3417 return false;
3418 }
3419 }
3420
3421 /* Sink stores with same LHS. */
3422 FOR_EACH_VEC_ELT (then_stores, i, then_store)
3423 {
3424 else_store = else_stores[i];
3425 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
3426 then_store, else_store);
3427 ok = ok || res;
3428 }
3429
3430 free_dependence_relations (then_ddrs);
3431 free_dependence_relations (else_ddrs);
3432 free_data_refs (then_datarefs);
3433 free_data_refs (else_datarefs);
3434
3435 return ok;
3436 }
3437
3438 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
3439
3440 static bool
3441 local_mem_dependence (gimple *stmt, basic_block bb)
3442 {
3443 tree vuse = gimple_vuse (stmt);
3444 gimple *def;
3445
3446 if (!vuse)
3447 return false;
3448
3449 def = SSA_NAME_DEF_STMT (vuse);
3450 return (def && gimple_bb (def) == bb);
3451 }
3452
3453 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
3454 BB1 and BB2 are "then" and "else" blocks dependent on this test,
3455 and BB3 rejoins control flow following BB1 and BB2, look for
3456 opportunities to hoist loads as follows. If BB3 contains a PHI of
3457 two loads, one each occurring in BB1 and BB2, and the loads are
3458 provably of adjacent fields in the same structure, then move both
3459 loads into BB0. Of course this can only be done if there are no
3460 dependencies preventing such motion.
3461
3462 One of the hoisted loads will always be speculative, so the
3463 transformation is currently conservative:
3464
3465 - The fields must be strictly adjacent.
3466 - The two fields must occupy a single memory block that is
3467 guaranteed to not cross a page boundary.
3468
3469 The last is difficult to prove, as such memory blocks should be
3470 aligned on the minimum of the stack alignment boundary and the
3471 alignment guaranteed by heap allocation interfaces. Thus we rely
3472 on a parameter for the alignment value.
3473
3474 Provided a good value is used for the last case, the first
3475 restriction could possibly be relaxed. */
3476
3477 static void
3478 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
3479 basic_block bb2, basic_block bb3)
3480 {
3481 int param_align = param_l1_cache_line_size;
3482 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
3483 gphi_iterator gsi;
3484
3485 /* Walk the phis in bb3 looking for an opportunity. We are looking
3486 for phis of two SSA names, one each of which is defined in bb1 and
3487 bb2. */
3488 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
3489 {
3490 gphi *phi_stmt = gsi.phi ();
3491 gimple *def1, *def2;
3492 tree arg1, arg2, ref1, ref2, field1, field2;
3493 tree tree_offset1, tree_offset2, tree_size2, next;
3494 int offset1, offset2, size2;
3495 unsigned align1;
3496 gimple_stmt_iterator gsi2;
3497 basic_block bb_for_def1, bb_for_def2;
3498
3499 if (gimple_phi_num_args (phi_stmt) != 2
3500 || virtual_operand_p (gimple_phi_result (phi_stmt)))
3501 continue;
3502
3503 arg1 = gimple_phi_arg_def (phi_stmt, 0);
3504 arg2 = gimple_phi_arg_def (phi_stmt, 1);
3505
3506 if (TREE_CODE (arg1) != SSA_NAME
3507 || TREE_CODE (arg2) != SSA_NAME
3508 || SSA_NAME_IS_DEFAULT_DEF (arg1)
3509 || SSA_NAME_IS_DEFAULT_DEF (arg2))
3510 continue;
3511
3512 def1 = SSA_NAME_DEF_STMT (arg1);
3513 def2 = SSA_NAME_DEF_STMT (arg2);
3514
3515 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
3516 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
3517 continue;
3518
3519 /* Check the mode of the arguments to be sure a conditional move
3520 can be generated for it. */
3521 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
3522 == CODE_FOR_nothing)
3523 continue;
3524
3525 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
3526 if (!gimple_assign_single_p (def1)
3527 || !gimple_assign_single_p (def2)
3528 || gimple_has_volatile_ops (def1)
3529 || gimple_has_volatile_ops (def2))
3530 continue;
3531
3532 ref1 = gimple_assign_rhs1 (def1);
3533 ref2 = gimple_assign_rhs1 (def2);
3534
3535 if (TREE_CODE (ref1) != COMPONENT_REF
3536 || TREE_CODE (ref2) != COMPONENT_REF)
3537 continue;
3538
3539 /* The zeroth operand of the two component references must be
3540 identical. It is not sufficient to compare get_base_address of
3541 the two references, because this could allow for different
3542 elements of the same array in the two trees. It is not safe to
3543 assume that the existence of one array element implies the
3544 existence of a different one. */
3545 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
3546 continue;
3547
3548 field1 = TREE_OPERAND (ref1, 1);
3549 field2 = TREE_OPERAND (ref2, 1);
3550
3551 /* Check for field adjacency, and ensure field1 comes first. */
3552 for (next = DECL_CHAIN (field1);
3553 next && TREE_CODE (next) != FIELD_DECL;
3554 next = DECL_CHAIN (next))
3555 ;
3556
3557 if (next != field2)
3558 {
3559 for (next = DECL_CHAIN (field2);
3560 next && TREE_CODE (next) != FIELD_DECL;
3561 next = DECL_CHAIN (next))
3562 ;
3563
3564 if (next != field1)
3565 continue;
3566
3567 std::swap (field1, field2);
3568 std::swap (def1, def2);
3569 }
3570
3571 bb_for_def1 = gimple_bb (def1);
3572 bb_for_def2 = gimple_bb (def2);
3573
3574 /* Check for proper alignment of the first field. */
3575 tree_offset1 = bit_position (field1);
3576 tree_offset2 = bit_position (field2);
3577 tree_size2 = DECL_SIZE (field2);
3578
3579 if (!tree_fits_uhwi_p (tree_offset1)
3580 || !tree_fits_uhwi_p (tree_offset2)
3581 || !tree_fits_uhwi_p (tree_size2))
3582 continue;
3583
3584 offset1 = tree_to_uhwi (tree_offset1);
3585 offset2 = tree_to_uhwi (tree_offset2);
3586 size2 = tree_to_uhwi (tree_size2);
3587 align1 = DECL_ALIGN (field1) % param_align_bits;
3588
3589 if (offset1 % BITS_PER_UNIT != 0)
3590 continue;
3591
3592 /* For profitability, the two field references should fit within
3593 a single cache line. */
3594 if (align1 + offset2 - offset1 + size2 > param_align_bits)
3595 continue;
3596
3597 /* The two expressions cannot be dependent upon vdefs defined
3598 in bb1/bb2. */
3599 if (local_mem_dependence (def1, bb_for_def1)
3600 || local_mem_dependence (def2, bb_for_def2))
3601 continue;
3602
3603 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
3604 bb0. We hoist the first one first so that a cache miss is handled
3605 efficiently regardless of hardware cache-fill policy. */
3606 gsi2 = gsi_for_stmt (def1);
3607 gsi_move_to_bb_end (&gsi2, bb0);
3608 gsi2 = gsi_for_stmt (def2);
3609 gsi_move_to_bb_end (&gsi2, bb0);
3610 statistics_counter_event (cfun, "hoisted loads", 1);
3611
3612 if (dump_file && (dump_flags & TDF_DETAILS))
3613 {
3614 fprintf (dump_file,
3615 "\nHoisting adjacent loads from %d and %d into %d: \n",
3616 bb_for_def1->index, bb_for_def2->index, bb0->index);
3617 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
3618 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
3619 }
3620 }
3621 }
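
/* Editorial illustration (not part of the original source): the kind of
   source hoist_adjacent_loads targets, assuming the target provides a
   conditional move and both fields fit within one cache line.

     struct s { int a; int b; };

     int f (struct s *p, int flag)
     {
       return flag ? p->a : p->b;
     }

   Both loads p->a and p->b are hoisted into the condition block, so
   the remaining diamond can later be turned into a conditional move.  */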
3622
3623 /* Determine whether we should attempt to hoist adjacent loads out of
3624 diamond patterns in pass_phiopt. Always hoist loads if
3625 -fhoist-adjacent-loads is specified and the target machine has
3626 both a conditional move instruction and a defined cache line size. */
3627
3628 static bool
3629 gate_hoist_loads (void)
3630 {
3631 return (flag_hoist_adjacent_loads == 1
3632 && param_l1_cache_line_size
3633 && HAVE_conditional_move);
3634 }
3635
3636 /* This pass tries to replace an if-then-else block with an
3637 assignment. We have four kinds of transformations. Some of these
3638 transformations are also performed by the ifcvt RTL optimizer.
3639
3640 Conditional Replacement
3641 -----------------------
3642
3643 This transformation, implemented in match_simplify_replacement,
3644 replaces
3645
3646 bb0:
3647 if (cond) goto bb2; else goto bb1;
3648 bb1:
3649 bb2:
3650 x = PHI <0 (bb1), 1 (bb0), ...>;
3651
3652 with
3653
3654 bb0:
3655 x' = cond;
3656 goto bb2;
3657 bb2:
3658 x = PHI <x' (bb0), ...>;
3659
3660 We remove bb1 as it becomes unreachable. This occurs often due to
3661 gimplification of conditionals.
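
     As an illustrative source-level sketch (the function name and the use
     of plain int are placeholders, not taken from a real testcase), the
     gimplified form of

       int f (int a, int b) { return a > b ? 1 : 0; }

     has roughly this shape of PHI over 1 and 0, and the transformation
     collapses it to a single assignment of the condition.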
3662
3663 Value Replacement
3664 -----------------
3665
3666 This transformation, implemented in value_replacement, replaces
3667
3668 bb0:
3669 if (a != b) goto bb2; else goto bb1;
3670 bb1:
3671 bb2:
3672 x = PHI <a (bb1), b (bb0), ...>;
3673
3674 with
3675
3676 bb0:
3677 bb2:
3678 x = PHI <b (bb0), ...>;
3679
3680 This opportunity can sometimes occur as a result of other
3681 optimizations.
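
     A minimal source-level sketch (names and types are placeholders):

       int f (int a, int b) { return a != b ? b : a; }

     On the path where a == b the two arms are equal, so the PHI can be
     replaced by b unconditionally.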
3682
3683
3684 Another case caught by value replacement looks like this:
3685
3686 bb0:
3687 t1 = a == CONST;
3688 t2 = b > c;
3689 t3 = t1 & t2;
3690 if (t3 != 0) goto bb1; else goto bb2;
3691 bb1:
3692 bb2:
3693 x = PHI <CONST, a>
3694
3695 Gets replaced with:
3696 bb0:
3697 bb2:
3698 t1 = a == CONST;
3699 t2 = b > c;
3700 t3 = t1 & t2;
3701 x = a;
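
     A source-level sketch of this case (the constant 5 and the names are
     arbitrary stand-ins for CONST and the operands above):

       int f (int a, int b, int c) { return ((a == 5) & (b > c)) ? 5 : a; }

     On the path where the combined condition holds, a is known to equal
     the constant, so both arms yield the value of a and the PHI
     degenerates to x = a.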
3702
3703 ABS Replacement
3704 ---------------
3705
3706 This transformation, implemented in match_simplify_replacement, replaces
3707
3708 bb0:
3709 if (a >= 0) goto bb2; else goto bb1;
3710 bb1:
3711 x = -a;
3712 bb2:
3713 x = PHI <x (bb1), a (bb0), ...>;
3714
3715 with
3716
3717 bb0:
3718 x' = ABS_EXPR< a >;
3719 bb2:
3720 x = PHI <x' (bb0), ...>;
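
     Source-level sketch (illustrative only; name and type are placeholders):

       int f (int a) { return a >= 0 ? a : -a; }

     which is recognized by this pattern and rewritten to x' = ABS_EXPR <a>.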
3721
3722 MIN/MAX Replacement
3723 -------------------
3724
3725 This transformation, implemented in minmax_replacement, replaces
3726
3727 bb0:
3728 if (a <= b) goto bb2; else goto bb1;
3729 bb1:
3730 bb2:
3731 x = PHI <b (bb1), a (bb0), ...>;
3732
3733 with
3734
3735 bb0:
3736 x' = MIN_EXPR (a, b)
3737 bb2:
3738 x = PHI <x' (bb0), ...>;
3739
3740 A similar transformation is done for MAX_EXPR.
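
     Source-level sketches (illustrative; the names and the use of signed
     int are placeholders):

       int fmin (int a, int b) { return a <= b ? a : b; }
       int fmax (int a, int b) { return a >= b ? a : b; }

     The first corresponds to MIN_EXPR (a, b), the second to MAX_EXPR (a, b).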
3741
3742
3743 This pass also performs a fifth transformation of a slightly different
3744 flavor.
3745
3746 Factor conversion in COND_EXPR
3747 ------------------------------
3748
3749 This transformation factors the conversion out of COND_EXPR with
3750 factor_out_conditional_conversion.
3751
3752 For example:
3753 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3754 <bb 3>:
3755 tmp = (int) a;
3756 <bb 4>:
3757 tmp = PHI <tmp, CST>
3758
3759 Into:
3760 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3761 <bb 3>:
3762 <bb 4>:
3763 a = PHI <a, CST>
3764 tmp = (int) a;
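
     A source-level sketch (the type short and the constant 42 are
     stand-ins for the narrower type and CST above):

       int f (short a) { return a <= 42 ? (int) a : 42; }

     After factoring, the PHI is computed in the type of a and the single
     conversion to int is applied to its result.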
3765
3766 Adjacent Load Hoisting
3767 ----------------------
3768
3769 This transformation replaces
3770
3771 bb0:
3772 if (...) goto bb2; else goto bb1;
3773 bb1:
3774 x1 = (<expr>).field1;
3775 goto bb3;
3776 bb2:
3777 x2 = (<expr>).field2;
3778 bb3:
3779 # x = PHI <x1, x2>;
3780
3781 with
3782
3783 bb0:
3784 x1 = (<expr>).field1;
3785 x2 = (<expr>).field2;
3786 if (...) goto bb2; else goto bb1;
3787 bb1:
3788 goto bb3;
3789 bb2:
3790 bb3:
3791 # x = PHI <x1, x2>;
3792
3793 The purpose of this transformation is to enable generation of conditional
3794 move instructions such as Intel CMOV or PowerPC ISEL. Because one of
3795 the loads is speculative, the transformation is restricted to very
3796 specific cases to avoid introducing a page fault. We are looking for
3797 the common idiom:
3798
3799 if (...)
3800 x = y->left;
3801 else
3802 x = y->right;
3803
3804 where left and right are typically adjacent pointers in a tree structure. */
3805
3806 namespace {
3807
3808 const pass_data pass_data_phiopt =
3809 {
3810 GIMPLE_PASS, /* type */
3811 "phiopt", /* name */
3812 OPTGROUP_NONE, /* optinfo_flags */
3813 TV_TREE_PHIOPT, /* tv_id */
3814 ( PROP_cfg | PROP_ssa ), /* properties_required */
3815 0, /* properties_provided */
3816 0, /* properties_destroyed */
3817 0, /* todo_flags_start */
3818 0, /* todo_flags_finish */
3819 };
3820
3821 class pass_phiopt : public gimple_opt_pass
3822 {
3823 public:
3824 pass_phiopt (gcc::context *ctxt)
3825 : gimple_opt_pass (pass_data_phiopt, ctxt), early_p (false)
3826 {}
3827
3828 /* opt_pass methods: */
3829 opt_pass * clone () { return new pass_phiopt (m_ctxt); }
3830 void set_pass_param (unsigned n, bool param)
3831 {
3832 gcc_assert (n == 0);
3833 early_p = param;
3834 }
3835 virtual bool gate (function *) { return flag_ssa_phiopt; }
3836 virtual unsigned int execute (function *)
3837 {
3838 return tree_ssa_phiopt_worker (false,
3839 !early_p ? gate_hoist_loads () : false,
3840 early_p);
3841 }
3842
3843 private:
3844 bool early_p;
3845 }; // class pass_phiopt
3846
3847 } // anon namespace
3848
3849 gimple_opt_pass *
3850 make_pass_phiopt (gcc::context *ctxt)
3851 {
3852 return new pass_phiopt (ctxt);
3853 }
3854
3855 namespace {
3856
3857 const pass_data pass_data_cselim =
3858 {
3859 GIMPLE_PASS, /* type */
3860 "cselim", /* name */
3861 OPTGROUP_NONE, /* optinfo_flags */
3862 TV_TREE_PHIOPT, /* tv_id */
3863 ( PROP_cfg | PROP_ssa ), /* properties_required */
3864 0, /* properties_provided */
3865 0, /* properties_destroyed */
3866 0, /* todo_flags_start */
3867 0, /* todo_flags_finish */
3868 };
3869
3870 class pass_cselim : public gimple_opt_pass
3871 {
3872 public:
3873 pass_cselim (gcc::context *ctxt)
3874 : gimple_opt_pass (pass_data_cselim, ctxt)
3875 {}
3876
3877 /* opt_pass methods: */
3878 virtual bool gate (function *) { return flag_tree_cselim; }
3879 virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
3880
3881 }; // class pass_cselim
3882
3883 } // anon namespace
3884
3885 gimple_opt_pass *
3886 make_pass_cselim (gcc::context *ctxt)
3887 {
3888 return new pass_cselim (ctxt);
3889 }