1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22 /* This file contains the control flow optimizer. The main entry point is
23 cleanup_cfg. The following optimizations are performed:
24
25 - Unreachable blocks removal
26 - Edge forwarding (an edge to a forwarder block is forwarded to its
27 successor, as sketched in the note below. Simplification of the branch
28 instruction is performed by the underlying infrastructure, so the branch
29 can be converted to a simplejump or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
33
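/* Illustrative note (added; not part of the original sources): a
   "forwarder block" below is, roughly, a block containing nothing but an
   optional label, notes and a jump (or fallthru) to its single successor.
   Edge forwarding means that an edge entering such a block

         A: ... jump L1          L1: jump L2          L2: ...

   is redirected straight to L2; the forwarder becomes unreachable and is
   deleted later.  Cross jumping merges identical instruction tails of two
   predecessors of a common destination.  */
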
34 #include "config.h"
35 #include "system.h"
36 #include "rtl.h"
37 #include "hard-reg-set.h"
38 #include "basic-block.h"
39 #include "timevar.h"
40 #include "output.h"
41 #include "insn-config.h"
42 #include "flags.h"
43 #include "recog.h"
44 #include "toplev.h"
45 #include "cselib.h"
46
47 #include "obstack.h"
48
49 /* cleanup_cfg maintains the following flags for each basic block. */
50 enum bb_flags {
51 /* Set if life info needs to be recomputed for given BB. */
52 BB_UPDATE_LIFE = 1,
53 /* Set if BB is a forwarder block, to avoid too many
54 forwarder_block_p calls. */
55 BB_FORWARDER_BLOCK = 2
56 };
57
58 #define BB_FLAGS(bb) (enum bb_flags)(bb)->aux
59 #define BB_SET_FLAG(bb,flag) \
60 (bb)->aux = (void *) (long) ((enum bb_flags)(bb)->aux | (flag))
61 #define BB_CLEAR_FLAG(bb,flag) \
62 (bb)->aux = (void *) (long) ((enum bb_flags)(bb)->aux & ~(flag))
63
64 #define FORWARDER_BLOCK_P(bb) (BB_FLAGS(bb) & BB_FORWARDER_BLOCK)
65
66 static bool try_crossjump_to_edge PARAMS ((int, edge, edge));
67 static bool try_crossjump_bb PARAMS ((int, basic_block));
68 static bool outgoing_edges_match PARAMS ((int,
69 basic_block, basic_block));
70 static int flow_find_cross_jump PARAMS ((int, basic_block, basic_block,
71 rtx *, rtx *));
72 static bool insns_match_p PARAMS ((int, rtx, rtx));
73
74 static bool delete_unreachable_blocks PARAMS ((void));
75 static bool label_is_jump_target_p PARAMS ((rtx, rtx));
76 static bool tail_recursion_label_p PARAMS ((rtx));
77 static void merge_blocks_move_predecessor_nojumps PARAMS ((basic_block,
78 basic_block));
79 static void merge_blocks_move_successor_nojumps PARAMS ((basic_block,
80 basic_block));
81 static bool merge_blocks PARAMS ((edge,basic_block,basic_block,
82 int));
83 static bool try_optimize_cfg PARAMS ((int));
84 static bool try_simplify_condjump PARAMS ((basic_block));
85 static bool try_forward_edges PARAMS ((int, basic_block));
86 static edge thread_jump PARAMS ((int, edge, basic_block));
87 static bool mark_effect PARAMS ((rtx, bitmap));
88 static void notice_new_block PARAMS ((basic_block));
89 static void update_forwarder_flag PARAMS ((basic_block));
90 \f
91 /* Set flags for newly created block. */
92
93 static void
94 notice_new_block (bb)
95 basic_block bb;
96 {
97 if (!bb)
98 return;
99 BB_SET_FLAG (bb, BB_UPDATE_LIFE);
100 if (forwarder_block_p (bb))
101 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
102 }
103
104 /* Recompute forwarder flag after block has been modified. */
105
106 static void
107 update_forwarder_flag (bb)
108 basic_block bb;
109 {
110 if (forwarder_block_p (bb))
111 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
112 else
113 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
114 }
115 \f
116 /* Simplify a conditional jump around an unconditional jump.
117 Return true if something changed. */
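/* Schematically (added illustration, not actual RTL):

           if (cond) goto L1;              if (!cond) goto L2;
           goto L2;                  =>    L1: ...
       L1: ...

   The conditional branch is inverted to target the destination of the
   unconditional jump, the fallthru edge then reaches L1 directly, and the
   block that held "goto L2" is deleted.  */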
118
119 static bool
120 try_simplify_condjump (cbranch_block)
121 basic_block cbranch_block;
122 {
123 basic_block jump_block, jump_dest_block, cbranch_dest_block;
124 edge cbranch_jump_edge, cbranch_fallthru_edge;
125 rtx cbranch_insn;
126
127 /* Verify that there are exactly two successors. */
128 if (!cbranch_block->succ
129 || !cbranch_block->succ->succ_next
130 || cbranch_block->succ->succ_next->succ_next)
131 return false;
132
133 /* Verify that we've got a normal conditional branch at the end
134 of the block. */
135 cbranch_insn = cbranch_block->end;
136 if (!any_condjump_p (cbranch_insn))
137 return false;
138
139 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
140 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
141
142 /* The next block must not have multiple predecessors, must not
143 be the last block in the function, and must contain just the
144 unconditional jump. */
145 jump_block = cbranch_fallthru_edge->dest;
146 if (jump_block->pred->pred_next
147 || jump_block->index == n_basic_blocks - 1
148 || !FORWARDER_BLOCK_P (jump_block))
149 return false;
150 jump_dest_block = jump_block->succ->dest;
151
152 /* The conditional branch must target the block after the
153 unconditional branch. */
154 cbranch_dest_block = cbranch_jump_edge->dest;
155
156 if (!can_fallthru (jump_block, cbranch_dest_block))
157 return false;
158
159 /* Invert the conditional branch. */
160 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
161 return false;
162
163 if (rtl_dump_file)
164 fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
165 INSN_UID (cbranch_insn), INSN_UID (jump_block->end));
166
167 /* Success. Update the CFG to match. Note that after this point
168 the edge variable names appear backwards; the redirection is done
169 this way to preserve edge profile data. */
170 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
171 cbranch_dest_block);
172 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
173 jump_dest_block);
174 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
175 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
176
177 /* Delete the block with the unconditional jump, and clean up the mess. */
178 flow_delete_block (jump_block);
179 tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);
180
181 return true;
182 }
183 \f
184 /* Attempt to prove that the operation is a NOOP using CSElib, or mark its
185 effect on the register. Used by jump threading. */
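/* Added note: the return value is true when the effect could NOT be
   accounted for (the caller must give up on threading through this block),
   and false when it was handled -- a clobbered register, a provably no-op
   set, or a set whose destination register has been recorded in
   NONEQUAL.  */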
186 static bool
187 mark_effect (exp, nonequal)
188 rtx exp;
189 regset nonequal;
190 {
191 switch (GET_CODE (exp))
192 {
193 /* In case we do clobber the register, mark it as equal, as we know the
194 value is dead, so it doesn't have to match. */
195 case CLOBBER:
196 if (REG_P (XEXP (exp, 0)))
197 CLEAR_REGNO_REG_SET (nonequal, REGNO (XEXP (exp, 0)));
198 return false;
199 case SET:
200 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
201 return false;
202 if (GET_CODE (SET_DEST (exp)) != REG)
203 return true;
204 SET_REGNO_REG_SET (nonequal, REGNO (SET_DEST (exp)));
205 return false;
206 default:
207 return false;
208 }
209 }
210 /* Attempt to prove that the basic block B will have no side effects and
211 always continues along the same edge when reached via E. Return that edge
212 if it exists, NULL otherwise. */
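/* Illustrative example (added): if E->SRC ends with "if (x == 0) goto L1;"
   and B ends with "if (x != 0) goto L2;", and B computes nothing that is
   live afterwards, then along the edge E the outcome of B's comparison is
   already decided by the first comparison, so E can be redirected past B
   to the edge B would take.  */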
213
214 static edge
215 thread_jump (mode, e, b)
216 int mode;
217 edge e;
218 basic_block b;
219 {
220 rtx set1, set2, cond1, cond2, insn;
221 enum rtx_code code1, code2, reversed_code2;
222 bool reverse1 = false;
223 int i;
224 regset nonequal;
225 bool failed = false;
226
227 /* At the moment, we handle only conditional jumps, but later we may
228 want to extend this code to tablejumps and others. */
229 if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
230 return NULL;
231 if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
232 return NULL;
233
234 /* The second branch must end with an onlyjump, as we will eliminate the jump. */
235 if (!any_condjump_p (e->src->end) || !any_condjump_p (b->end)
236 || !onlyjump_p (b->end))
237 return NULL;
238
239 set1 = pc_set (e->src->end);
240 set2 = pc_set (b->end);
241 if (((e->flags & EDGE_FALLTHRU) != 0)
242 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
243 reverse1 = true;
244
245 cond1 = XEXP (SET_SRC (set1), 0);
246 cond2 = XEXP (SET_SRC (set2), 0);
247 if (reverse1)
248 code1 = reversed_comparison_code (cond1, b->end);
249 else
250 code1 = GET_CODE (cond1);
251
252 code2 = GET_CODE (cond2);
253 reversed_code2 = reversed_comparison_code (cond2, b->end);
254
255 if (!comparison_dominates_p (code1, code2)
256 && !comparison_dominates_p (code1, reversed_code2))
257 return NULL;
258
259 /* Ensure that the comparison operators are equivalent.
260 ??? This is far too pessimistic. We should allow swapped operands,
261 different CCmodes, or, for example, interval comparisons that
262 dominate even when the operands are not equivalent. */
263 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
264 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
265 return NULL;
266
267 /* Short circuit cases where block B contains some side effects, as we can't
268 safely bypass it. */
269 for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end);
270 insn = NEXT_INSN (insn))
271 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
272 return NULL;
273
274 cselib_init ();
275
276 /* First process all values computed in the source basic block. */
277 for (insn = NEXT_INSN (e->src->head); insn != NEXT_INSN (e->src->end);
278 insn = NEXT_INSN (insn))
279 if (INSN_P (insn))
280 cselib_process_insn (insn);
281
282 nonequal = BITMAP_XMALLOC ();
283 CLEAR_REG_SET (nonequal);
284 /* Now assume that we've continued along the edge E to B and continue
285 processing as if it were the same basic block.
286
287 Our goal is to prove that the whole block is a NOOP. */
288 for (insn = NEXT_INSN (b->head); insn != b->end && !failed;
289 insn = NEXT_INSN (insn))
290 {
291 if (INSN_P (insn))
292 {
293 rtx pat = PATTERN (insn);
294
295 if (GET_CODE (pat) == PARALLEL)
296 {
297 for (i = 0; i < XVECLEN (pat, 0); i++)
298 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
299 }
300 else
301 failed |= mark_effect (pat, nonequal);
302 }
303 cselib_process_insn (insn);
304 }
305
306 /* Later we should clear nonequal of dead registers. So far we don't
307 have life information in cfg_cleanup. */
308 if (failed)
309 goto failed_exit;
310
311 /* In case liveness information is available, we need to prove equivalence
312 only of the live values. */
313 if (mode & CLEANUP_UPDATE_LIFE)
314 AND_REG_SET (nonequal, b->global_live_at_end);
315
316 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
317
318 BITMAP_XFREE (nonequal);
319 cselib_finish ();
320 if ((comparison_dominates_p (code1, code2) != 0)
321 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
322 return BRANCH_EDGE (b);
323 else
324 return FALLTHRU_EDGE (b);
325
326 failed_exit:
327 BITMAP_XFREE (nonequal);
328 cselib_finish ();
329 return NULL;
330 }
331 \f
332 /* Attempt to forward edges leaving basic block B.
333 Return true if successful. */
334
335 static bool
336 try_forward_edges (mode, b)
337 basic_block b;
338 int mode;
339 {
340 bool changed = false;
341 edge e, next, threaded_edge;
342
343 for (e = b->succ; e ; e = next)
344 {
345 basic_block target, first;
346 int counter;
347 bool threaded = false;
348
349 next = e->succ_next;
350
351 /* Skip complex edges because we don't know how to update them.
352
353 Still handle fallthru edges, as we may succeed in forwarding a fallthru
354 edge to the same place as the branch edge of a conditional branch,
355 turning the conditional branch into an unconditional one. */
356 if (e->flags & EDGE_COMPLEX)
357 continue;
358
359 target = first = e->dest;
360 counter = 0;
361
362 while (counter < n_basic_blocks)
363 {
364 basic_block new_target = NULL;
365 bool new_target_threaded = false;
366
367 if (FORWARDER_BLOCK_P (target)
368 && target->succ->dest != EXIT_BLOCK_PTR)
369 {
370 /* Bypass trivial infinite loops. */
371 if (target == target->succ->dest)
372 counter = n_basic_blocks;
373 new_target = target->succ->dest;
374 }
375 /* Allow threading only over one edge at a time to simplify updating
376 of probabilities. */
377 else if ((mode & CLEANUP_THREADING) && !threaded)
378 {
379 threaded_edge = thread_jump (mode, e, target);
380 if (threaded_edge)
381 {
382 new_target = threaded_edge->dest;
383 new_target_threaded = true;
384 }
385 }
386 if (!new_target)
387 break;
388
389 /* Avoid killing loop pre-headers, as they are the place the loop
390 optimizer wants to hoist code to.
391
392 For fallthru forwarders, the LOOP_BEG note must appear between
393 the header of the block and the CODE_LABEL of the loop; for non-forwarders
394 it must appear before the JUMP_INSN. */
395 if (mode & CLEANUP_PRE_LOOP)
396 {
397 rtx insn = (target->succ->flags & EDGE_FALLTHRU
398 ? target->head : prev_nonnote_insn (target->end));
399
400 if (GET_CODE (insn) != NOTE)
401 insn = NEXT_INSN (insn);
402
403 for (;insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
404 insn = NEXT_INSN (insn))
405 if (GET_CODE (insn) == NOTE
406 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
407 break;
408
409 if (GET_CODE (insn) == NOTE)
410 break;
411 }
412 counter++;
413 target = new_target;
414 threaded |= new_target_threaded;
415 }
416
417 if (counter >= n_basic_blocks)
418 {
419 if (rtl_dump_file)
420 fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
421 target->index);
422 }
423 else if (target == first)
424 ; /* We didn't do anything. */
425 else
426 {
427 /* Save the values now, as the edge may get removed. */
428 gcov_type edge_count = e->count;
429 int edge_probability = e->probability;
430 int edge_frequency;
431
432 if (threaded)
433 {
434 notice_new_block (redirect_edge_and_branch_force (e, target));
435 if (rtl_dump_file)
436 fprintf (rtl_dump_file, "Conditionals threaded.\n");
437 }
438 else if (!redirect_edge_and_branch (e, target))
439 {
440 if (rtl_dump_file)
441 fprintf (rtl_dump_file, "Forwarding edge %i->%i to %i failed.\n",
442 b->index, e->dest->index, target->index);
443 continue;
444 }
445 /* We successfully forwarded the edge. Now update profile
446 data: for each edge we traversed in the chain, remove
447 the original edge's execution count. */
448 edge_frequency = ((edge_probability * b->frequency
449 + REG_BR_PROB_BASE / 2)
450 / REG_BR_PROB_BASE);
451
452 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
453 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
454 BB_SET_FLAG (b, BB_UPDATE_LIFE);
455
456 do
457 {
458 edge t;
459 first->count -= edge_count;
460 first->succ->count -= edge_count;
461 first->frequency -= edge_frequency;
462 if (first->succ->succ_next)
463 t = threaded_edge;
464 else
465 t = first->succ;
466 first = t->dest;
467 }
468 while (first != target);
469
470 changed = true;
471 }
472 }
473
474 return changed;
475 }
476 \f
477 /* Return true if LABEL is a target of JUMP_INSN. This applies only
478 to non-complex jumps. That is, direct unconditional, conditional,
479 and tablejumps, but not computed jumps or returns. It also does
480 not apply to the fallthru case of a conditional jump. */
481
482 static bool
483 label_is_jump_target_p (label, jump_insn)
484 rtx label, jump_insn;
485 {
486 rtx tmp = JUMP_LABEL (jump_insn);
487
488 if (label == tmp)
489 return true;
490
491 if (tmp != NULL_RTX
492 && (tmp = NEXT_INSN (tmp)) != NULL_RTX
493 && GET_CODE (tmp) == JUMP_INSN
494 && (tmp = PATTERN (tmp),
495 GET_CODE (tmp) == ADDR_VEC
496 || GET_CODE (tmp) == ADDR_DIFF_VEC))
497 {
498 rtvec vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);
499 int i, veclen = GET_NUM_ELEM (vec);
500
501 for (i = 0; i < veclen; ++i)
502 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
503 return true;
504 }
505
506 return false;
507 }
508
509 /* Return true if LABEL is used for tail recursion. */
510
511 static bool
512 tail_recursion_label_p (label)
513 rtx label;
514 {
515 rtx x;
516
517 for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
518 if (label == XEXP (x, 0))
519 return true;
520
521 return false;
522 }
523
524 /* Blocks A and B are to be merged into a single block. A has no incoming
525 fallthru edge, so it can be moved before B without adding or modifying
526 any jumps (aside from the jump from A to B). */
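/* Added sketch: because nothing falls through into A, A's insns can be
   lifted out of their place in the insn chain and spliced in immediately
   before B; the two blocks are then adjacent and merge_blocks_nomove
   combines them, after which A's jump to B is redundant.  */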
527
528 static void
529 merge_blocks_move_predecessor_nojumps (a, b)
530 basic_block a, b;
531 {
532 rtx barrier;
533 int index;
534
535 barrier = next_nonnote_insn (a->end);
536 if (GET_CODE (barrier) != BARRIER)
537 abort ();
538 delete_insn (barrier);
539
540 /* Move block and loop notes out of the chain so that we do not
541 disturb their order.
542
543 ??? A better solution would be to squeeze out all the non-nested notes
544 and adjust the block trees appropriately. Even better would be to have
545 a tighter connection between block trees and rtl so that this is not
546 necessary. */
547 if (squeeze_notes (&a->head, &a->end))
548 abort ();
549
550 /* Scramble the insn chain. */
551 if (a->end != PREV_INSN (b->head))
552 reorder_insns_nobb (a->head, a->end, PREV_INSN (b->head));
553 BB_SET_FLAG (a, BB_UPDATE_LIFE);
554
555 if (rtl_dump_file)
556 {
557 fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
558 a->index, b->index);
559 }
560
561 /* Swap the records for the two blocks around. Although we are deleting B,
562 A is now where B was and we want to compact the BB array from where
563 A used to be. */
564 BASIC_BLOCK (a->index) = b;
565 BASIC_BLOCK (b->index) = a;
566 index = a->index;
567 a->index = b->index;
568 b->index = index;
569
570 /* Now blocks A and B are contiguous. Merge them. */
571 merge_blocks_nomove (a, b);
572 }
573
574 /* Blocks A and B are to be merged into a single block. B has no outgoing
575 fallthru edge, so it can be moved after A without adding or modifying
576 any jumps (aside from the jump from A to B). */
577
578 static void
579 merge_blocks_move_successor_nojumps (a, b)
580 basic_block a, b;
581 {
582 rtx barrier, real_b_end;
583
584 real_b_end = b->end;
585 barrier = NEXT_INSN (b->end);
586
587 /* Recognize a jump table following block B. */
588 if (barrier
589 && GET_CODE (barrier) == CODE_LABEL
590 && NEXT_INSN (barrier)
591 && GET_CODE (NEXT_INSN (barrier)) == JUMP_INSN
592 && (GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_VEC
593 || GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_DIFF_VEC))
594 {
595 /* Temporarily add the table jump insn to b, so that it will also
596 be moved to the correct location. */
597 b->end = NEXT_INSN (barrier);
598 barrier = NEXT_INSN (b->end);
599 }
600
601 /* There had better have been a barrier there. Delete it. */
602 if (barrier && GET_CODE (barrier) == BARRIER)
603 delete_insn (barrier);
604
605 /* Move block and loop notes out of the chain so that we do not
606 disturb their order.
607
608 ??? A better solution would be to squeeze out all the non-nested notes
609 and adjust the block trees appropriately. Even better would be to have
610 a tighter connection between block trees and rtl so that this is not
611 necessary. */
612 if (squeeze_notes (&b->head, &b->end))
613 abort ();
614
615 /* Scramble the insn chain. */
616 reorder_insns_nobb (b->head, b->end, a->end);
617
618 /* Restore the real end of b. */
619 b->end = real_b_end;
620
621 /* Now blocks A and B are contiguous. Merge them. */
622 merge_blocks_nomove (a, b);
623 BB_SET_FLAG (a, BB_UPDATE_LIFE);
624
625 if (rtl_dump_file)
626 {
627 fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
628 b->index, a->index);
629 }
630 }
631
632 /* Attempt to merge basic blocks that are potentially non-adjacent.
633 Return true iff the attempt succeeded. */
634
635 static bool
636 merge_blocks (e, b, c, mode)
637 edge e;
638 basic_block b, c;
639 int mode;
640 {
641 /* If C has a tail recursion label, do not merge. There is no
642 edge recorded from the call_placeholder back to this label, as
643 that would make optimize_sibling_and_tail_recursive_calls more
644 complex for no gain. */
645 if ((mode & CLEANUP_PRE_SIBCALL)
646 && GET_CODE (c->head) == CODE_LABEL
647 && tail_recursion_label_p (c->head))
648 return false;
649
650 /* If B has a fallthru edge to C, no need to move anything. */
651 if (e->flags & EDGE_FALLTHRU)
652 {
653 /* We need to update liveness in case C already has broken liveness
654 or B ends with a conditional jump to the next instruction, which will
655 be removed. */
656 if ((BB_FLAGS (c) & BB_UPDATE_LIFE)
657 || GET_CODE (b->end) == JUMP_INSN)
658 BB_SET_FLAG (b, BB_UPDATE_LIFE);
659 merge_blocks_nomove (b, c);
660 update_forwarder_flag (b);
661
662 if (rtl_dump_file)
663 {
664 fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
665 b->index, c->index);
666 }
667
668 return true;
669 }
670 /* Otherwise we will need to move code around. Do that only if expensive
671 transformations are allowed. */
672 else if (mode & CLEANUP_EXPENSIVE)
673 {
674 edge tmp_edge, b_fallthru_edge;
675 bool c_has_outgoing_fallthru;
676 bool b_has_incoming_fallthru;
677
678 /* Avoid overactive code motion, as the forwarder blocks should be
679 eliminated by edge redirection instead. One exception might have
680 been if B is a forwarder block and C has no fallthru edge, but
681 that should be cleaned up by bb-reorder instead. */
682 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
683 return false;
684
685 /* We must make sure to not munge nesting of lexical blocks,
686 and loop notes. This is done by squeezing out all the notes
687 and leaving them there to lie. Not ideal, but functional. */
688
689 for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
690 if (tmp_edge->flags & EDGE_FALLTHRU)
691 break;
692 c_has_outgoing_fallthru = (tmp_edge != NULL);
693
694 for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
695 if (tmp_edge->flags & EDGE_FALLTHRU)
696 break;
697 b_has_incoming_fallthru = (tmp_edge != NULL);
698 b_fallthru_edge = tmp_edge;
699
700 /* Otherwise, we're going to try to move C after B. If C does
701 not have an outgoing fallthru, then it can be moved
702 immediately after B without introducing or modifying jumps. */
703 if (! c_has_outgoing_fallthru)
704 {
705 merge_blocks_move_successor_nojumps (b, c);
706 return true;
707 }
708
709 /* If B does not have an incoming fallthru, then it can be moved
710 immediately before C without introducing or modifying jumps.
711 C cannot be the first block, so we do not have to worry about
712 accessing a non-existent block. */
713
714 if (b_has_incoming_fallthru)
715 {
716 basic_block bb;
717 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
718 return false;
719 bb = force_nonfallthru (b_fallthru_edge);
720 if (bb)
721 notice_new_block (bb);
722 else
723 BB_SET_FLAG (b_fallthru_edge->src, BB_UPDATE_LIFE);
724 }
725 merge_blocks_move_predecessor_nojumps (b, c);
726 return true;
727 }
728 return false;
729 }
730 \f
731
732 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
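/* Added note: two insns can also match when their patterns differ
   textually but both carry matching REG_EQUAL/REG_EQUIV notes; in that
   case the SET sources are provisionally replaced by the note values and
   re-compared (see the handling below).  This helps take care of G++
   cleanups.  */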
733
734 static bool
735 insns_match_p (mode, i1, i2)
736 int mode ATTRIBUTE_UNUSED;
737 rtx i1, i2;
738 {
739 rtx p1, p2;
740
741 /* Verify that I1 and I2 are equivalent. */
742 if (GET_CODE (i1) != GET_CODE (i2))
743 return false;
744
745 p1 = PATTERN (i1);
746 p2 = PATTERN (i2);
747
748 if (GET_CODE (p1) != GET_CODE (p2))
749 return false;
750
751 /* If this is a CALL_INSN, compare register usage information.
752 If we don't check this on stack register machines, the two
753 CALL_INSNs might be merged leaving reg-stack.c with mismatching
754 numbers of stack registers in the same basic block.
755 If we don't check this on machines with delay slots, a delay slot may
756 be filled that clobbers a parameter expected by the subroutine.
757
758 ??? We take the simple route for now and assume that if they're
759 equal, they were constructed identically. */
760
761 if (GET_CODE (i1) == CALL_INSN
762 && !rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
763 CALL_INSN_FUNCTION_USAGE (i2)))
764 return false;
765
766 #ifdef STACK_REGS
767 /* If cross_jump_death_matters is not 0, the insn's mode
768 indicates whether or not the insn contains any stack-like
769 regs. */
770
771 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
772 {
773 /* If register stack conversion has already been done, then
774 death notes must also be compared before it is certain that
775 the two instruction streams match. */
776
777 rtx note;
778 HARD_REG_SET i1_regset, i2_regset;
779
780 CLEAR_HARD_REG_SET (i1_regset);
781 CLEAR_HARD_REG_SET (i2_regset);
782
783 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
784 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
785 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
786
787 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
788 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
789 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
790
791 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
792
793 return false;
794
795 done:
796 ;
797 }
798 #endif
799
800 if (reload_completed
801 ? ! rtx_renumbered_equal_p (p1, p2) : ! rtx_equal_p (p1, p2))
802 {
803 /* The following code helps take care of G++ cleanups. */
804 rtx equiv1 = find_reg_equal_equiv_note (i1);
805 rtx equiv2 = find_reg_equal_equiv_note (i2);
806
807 if (equiv1 && equiv2
808 /* If the equivalences are not to a constant, they may
809 reference pseudos that no longer exist, so we can't
810 use them. */
811 && (! reload_completed
812 || (CONSTANT_P (XEXP (equiv1, 0))
813 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
814 {
815 rtx s1 = single_set (i1);
816 rtx s2 = single_set (i2);
817 if (s1 != 0 && s2 != 0
818 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
819 {
820 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
821 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
822 if (! rtx_renumbered_equal_p (p1, p2))
823 cancel_changes (0);
824 else if (apply_change_group ())
825 return true;
826 }
827 }
828 return false;
829 }
830 return true;
831 }
832 \f
833 /* Look through the insns at the end of BB1 and BB2 and find the longest
834 sequence of equivalent insns. Store the first insns for that sequence
835 in *F1 and *F2 and return the sequence length.
836
837 To simplify callers of this function, if the blocks match exactly,
838 store the head of the blocks in *F1 and *F2. */
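/* Illustrative example (added, pseudo-RTL): given

         bb1: ... ; r3 = r1 + r2 ; [r4] = r3 ; jump L
         bb2: ... ; r3 = r1 + r2 ; [r4] = r3 ; jump L

   the two trailing computations match, so 2 is returned and *F1/*F2 point
   at the first insn of the matching tail in each block (the simple jumps
   at the ends are skipped rather than counted).  */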
839
840 static int
841 flow_find_cross_jump (mode, bb1, bb2, f1, f2)
842 int mode ATTRIBUTE_UNUSED;
843 basic_block bb1, bb2;
844 rtx *f1, *f2;
845 {
846 rtx i1, i2, last1, last2, afterlast1, afterlast2;
847 int ninsns = 0;
848
849 /* Skip simple jumps at the end of the blocks. Complex jumps still
850 need to be compared for equivalence, which we'll do below. */
851
852 i1 = bb1->end;
853 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
854 if (onlyjump_p (i1)
855 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
856 {
857 last1 = i1;
858 /* Count everything except an unconditional (or return) jump as an insn. */
859 if (!simplejump_p (i1) && !returnjump_p (i1))
860 ninsns++;
861 i1 = PREV_INSN (i1);
862 }
863 i2 = bb2->end;
864 if (onlyjump_p (i2)
865 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
866 {
867 last2 = i2;
868 i2 = PREV_INSN (i2);
869 }
870
871 while (true)
872 {
873 /* Ignore notes. */
874 while (!active_insn_p (i1) && i1 != bb1->head)
875 i1 = PREV_INSN (i1);
876 while (!active_insn_p (i2) && i2 != bb2->head)
877 i2 = PREV_INSN (i2);
878
879 if (i1 == bb1->head || i2 == bb2->head)
880 break;
881
882 if (!insns_match_p (mode, i1, i2))
883 break;
884
885 /* Don't begin a cross-jump with a USE or CLOBBER insn. */
886 if (active_insn_p (i1))
887 {
888 /* If the merged insns have different REG_EQUAL notes, then
889 remove them. */
890 rtx equiv1 = find_reg_equal_equiv_note (i1);
891 rtx equiv2 = find_reg_equal_equiv_note (i2);
892
893 if (equiv1 && !equiv2)
894 remove_note (i1, equiv1);
895 else if (!equiv1 && equiv2)
896 remove_note (i2, equiv2);
897 else if (equiv1 && equiv2
898 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
899 {
900 remove_note (i1, equiv1);
901 remove_note (i2, equiv2);
902 }
903
904 afterlast1 = last1, afterlast2 = last2;
905 last1 = i1, last2 = i2;
906 ninsns++;
907 }
908 i1 = PREV_INSN (i1);
909 i2 = PREV_INSN (i2);
910 }
911
912 #ifdef HAVE_cc0
913 if (ninsns)
914 {
915 /* Don't allow the insn after a compare to be shared by
916 cross-jumping unless the compare is also shared. */
917 if (reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
918 last1 = afterlast1, last2 = afterlast2, ninsns--;
919 }
920 #endif
921
922 /* Include preceding notes and labels in the cross-jump. One,
923 this may bring us to the head of the blocks as requested above.
924 Two, it keeps line number notes matched as closely as possible. */
925 if (ninsns)
926 {
927 while (last1 != bb1->head && !active_insn_p (PREV_INSN (last1)))
928 last1 = PREV_INSN (last1);
929 if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
930 last1 = PREV_INSN (last1);
931 while (last2 != bb2->head && !active_insn_p (PREV_INSN (last2)))
932 last2 = PREV_INSN (last2);
933 if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
934 last2 = PREV_INSN (last2);
935
936 *f1 = last1;
937 *f2 = last2;
938 }
939
940 return ninsns;
941 }
942
943 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
944 the branch instruction. This means that if we commonize the control
945 flow before the end of the basic block, the semantics remain unchanged.
946
947 We may assume that there exists one edge with a common destination. */
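/* Added note: for conditional jumps, a match is also accepted when the
   branch and fallthru destinations are swapped between BB1 and BB2; the
   comparison code of BB2 is then reversed (or its operands swapped)
   before comparing, and unless optimizing for size the branch
   probabilities must agree within REG_BR_PROB_BASE / 20 (5%).  */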
948
949 static bool
950 outgoing_edges_match (mode, bb1, bb2)
951 int mode;
952 basic_block bb1;
953 basic_block bb2;
954 {
955 int nehedges1 = 0, nehedges2 = 0;
956 edge fallthru1 = 0, fallthru2 = 0;
957 edge e1, e2;
958
959 /* If BB1 has only one successor, we may be looking at either an
960 unconditional jump, or a fake edge to exit. */
961 if (bb1->succ && !bb1->succ->succ_next)
962 {
963 if (! bb2->succ || bb2->succ->succ_next)
964 return false;
965 return insns_match_p (mode, bb1->end, bb2->end);
966 }
967
968 /* Match conditional jumps - this may get tricky when fallthru and branch
969 edges are crossed. */
970 if (bb1->succ
971 && bb1->succ->succ_next
972 && !bb1->succ->succ_next->succ_next
973 && any_condjump_p (bb1->end))
974 {
975 edge b1, f1, b2, f2;
976 bool reverse, match;
977 rtx set1, set2, cond1, cond2;
978 enum rtx_code code1, code2;
979
980 if (!bb2->succ
981 || !bb2->succ->succ_next
982 || bb2->succ->succ_next->succ_next
983 || !any_condjump_p (bb2->end))
984 return false;
985
986 b1 = BRANCH_EDGE (bb1);
987 b2 = BRANCH_EDGE (bb2);
988 f1 = FALLTHRU_EDGE (bb1);
989 f2 = FALLTHRU_EDGE (bb2);
990
991 /* Get around possible forwarders on fallthru edges. Other cases
992 should be optimized out already. */
993 if (FORWARDER_BLOCK_P (f1->dest))
994 f1 = f1->dest->succ;
995 if (FORWARDER_BLOCK_P (f2->dest))
996 f2 = f2->dest->succ;
997
998 /* To simplify use of this function, return false if there are
999 unneeded forwarder blocks. These will get eliminated later
1000 during cleanup_cfg. */
1001 if (FORWARDER_BLOCK_P (f1->dest)
1002 || FORWARDER_BLOCK_P (f2->dest)
1003 || FORWARDER_BLOCK_P (b1->dest)
1004 || FORWARDER_BLOCK_P (b2->dest))
1005 return false;
1006
1007 if (f1->dest == f2->dest && b1->dest == b2->dest)
1008 reverse = false;
1009 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1010 reverse = true;
1011 else
1012 return false;
1013
1014 set1 = pc_set (bb1->end);
1015 set2 = pc_set (bb2->end);
1016 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1017 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1018 reverse = !reverse;
1019
1020 cond1 = XEXP (SET_SRC (set1), 0);
1021 cond2 = XEXP (SET_SRC (set2), 0);
1022 code1 = GET_CODE (cond1);
1023 if (reverse)
1024 code2 = reversed_comparison_code (cond2, bb2->end);
1025 else
1026 code2 = GET_CODE (cond2);
1027 if (code2 == UNKNOWN)
1028 return false;
1029
1030 /* Verify codes and operands match. */
1031 match = ((code1 == code2
1032 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1033 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1034 || (code1 == swap_condition (code2)
1035 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1036 XEXP (cond2, 0))
1037 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1038 XEXP (cond2, 1))));
1039
1040 /* If we return true, we will join the blocks, which means that
1041 we will only have one branch prediction bit to work with. Thus
1042 we require the existing branches to have probabilities that are
1043 roughly similar. */
1044 /* ??? We should use bb->frequency to allow merging in infrequently
1045 executed blocks, but at the moment it is not available when
1046 cleanup_cfg is run. */
1047 if (match && !optimize_size)
1048 {
1049 rtx note1, note2;
1050 int prob1, prob2;
1051 note1 = find_reg_note (bb1->end, REG_BR_PROB, 0);
1052 note2 = find_reg_note (bb2->end, REG_BR_PROB, 0);
1053
1054 if (note1 && note2)
1055 {
1056 prob1 = INTVAL (XEXP (note1, 0));
1057 prob2 = INTVAL (XEXP (note2, 0));
1058 if (reverse)
1059 prob2 = REG_BR_PROB_BASE - prob2;
1060
1061 /* Fail if the difference in probabilities is
1062 greater than 5%. */
1063 if (abs (prob1 - prob2) > REG_BR_PROB_BASE / 20)
1064 return false;
1065 }
1066 else if (note1 || note2)
1067 return false;
1068 }
1069
1070 if (rtl_dump_file && match)
1071 fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
1072 bb1->index, bb2->index);
1073
1074 return match;
1075 }
1076
1077 /* Generic case - we are seeing a computed jump, table jump or trapping
1078 instruction. */
1079
1080 /* First ensure that the instructions match. There may be many outgoing
1081 edges, so this test is generally cheaper.
1082 ??? Currently tablejumps will never match, as they have
1083 different tables. */
1084 if (!insns_match_p (mode, bb1->end, bb2->end))
1085 return false;
1086
1087 /* Search the outgoing edges, ensure that the counts do match, find possible
1088 fallthru and exception handling edges since these need more
1089 validation. */
1090 for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
1091 e1 = e1->succ_next, e2 = e2->succ_next)
1092 {
1093 if (e1->flags & EDGE_EH)
1094 nehedges1++;
1095 if (e2->flags & EDGE_EH)
1096 nehedges2++;
1097 if (e1->flags & EDGE_FALLTHRU)
1098 fallthru1 = e1;
1099 if (e2->flags & EDGE_FALLTHRU)
1100 fallthru2 = e2;
1101 }
1102 /* If the numbers of edges of various types do not match, fail. */
1103 if (e1 || e2)
1104 return false;
1105 if (nehedges1 != nehedges2)
1106 return false;
1107 if ((fallthru1 != 0) != (fallthru2 != 0))
1108 return false;
1109
1110 /* Fallthru edges must be forwarded to the same destination. */
1111 if (fallthru1)
1112 {
1113 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1114 ? fallthru1->dest->succ->dest: fallthru1->dest);
1115 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1116 ? fallthru2->dest->succ->dest: fallthru2->dest);
1117 if (d1 != d2)
1118 return false;
1119 }
1120 /* In case we do have EH edges, ensure we are in the same region. */
1121 if (nehedges1)
1122 {
1123 rtx n1 = find_reg_note (bb1->end, REG_EH_REGION, 0);
1124 rtx n2 = find_reg_note (bb2->end, REG_EH_REGION, 0);
1125 if (XEXP (n1, 0) != XEXP (n2, 0))
1126 return false;
1127 }
1128 /* We don't need to match the rest of the edges, as the above checks should
1129 be enough to ensure that they are equivalent. */
1130 return true;
1131 }
1132
1133 /* E1 and E2 are edges with the same destination block. Search their
1134 predecessors for common code. If found, redirect control flow from
1135 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
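/* Schematic example (added, not actual RTL): if both predecessors end
   with the same tail of insns,

         src1: ... ; a ; b ; c ; jump L        src1: ... ; jump M
         src2: ... ; a ; b ; c ; jump L   =>   src2: ... ; M: a ; b ; c ; jump L

   the common tail is kept only in SRC2 (splitting SRC2 at the start of the
   tail when needed; the label M is hypothetical) and SRC1 is redirected to
   jump there.  */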
1136
1137 static bool
1138 try_crossjump_to_edge (mode, e1, e2)
1139 int mode;
1140 edge e1, e2;
1141 {
1142 int nmatch;
1143 basic_block src1 = e1->src, src2 = e2->src;
1144 basic_block redirect_to;
1145 rtx newpos1, newpos2;
1146 edge s;
1147 rtx last;
1148 rtx label;
1149 rtx note;
1150
1151 /* Search backward through forwarder blocks. We don't need to worry
1152 about multiple entry or chained forwarders, as they will be optimized
1153 away. We do this to look past the unconditional jump following a
1154 conditional jump that is required due to the current CFG shape. */
1155 if (src1->pred
1156 && !src1->pred->pred_next
1157 && FORWARDER_BLOCK_P (src1))
1158 {
1159 e1 = src1->pred;
1160 src1 = e1->src;
1161 }
1162 if (src2->pred
1163 && !src2->pred->pred_next
1164 && FORWARDER_BLOCK_P (src2))
1165 {
1166 e2 = src2->pred;
1167 src2 = e2->src;
1168 }
1169
1170 /* Nothing to do if we reach ENTRY, or a common source block. */
1171 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1172 return false;
1173 if (src1 == src2)
1174 return false;
1175
1176 /* Seeing more than one forwarder block would confuse us later... */
1177 if (FORWARDER_BLOCK_P (e1->dest)
1178 && FORWARDER_BLOCK_P (e1->dest->succ->dest))
1179 return false;
1180 if (FORWARDER_BLOCK_P (e2->dest)
1181 && FORWARDER_BLOCK_P (e2->dest->succ->dest))
1182 return false;
1183
1184 /* Likewise with dead code (possibly newly created by the other optimizations
1185 of cfg_cleanup). */
1186 if (!src1->pred || !src2->pred)
1187 return false;
1188
1189 /* Look for the common insn sequence, part the first ... */
1190 if (!outgoing_edges_match (mode, src1, src2))
1191 return false;
1192
1193 /* ... and part the second. */
1194 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1195 if (!nmatch)
1196 return false;
1197
1198 /* Avoid splitting if possible. */
1199 if (newpos2 == src2->head)
1200 redirect_to = src2;
1201 else
1202 {
1203 if (rtl_dump_file)
1204 fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
1205 src2->index, nmatch);
1206 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1207 }
1208
1209 if (rtl_dump_file)
1210 fprintf (rtl_dump_file,
1211 "Cross jumping from bb %i to bb %i; %i common insns\n",
1212 src1->index, src2->index, nmatch);
1213
1214 redirect_to->count += src1->count;
1215 redirect_to->frequency += src1->frequency;
1216
1217 /* Recompute the frequencies and counts of outgoing edges. */
1218 for (s = redirect_to->succ; s; s = s->succ_next)
1219 {
1220 edge s2;
1221 basic_block d = s->dest;
1222
1223 if (FORWARDER_BLOCK_P (d))
1224 d = d->succ->dest;
1225 for (s2 = src1->succ; ; s2 = s2->succ_next)
1226 {
1227 basic_block d2 = s2->dest;
1228 if (FORWARDER_BLOCK_P (d2))
1229 d2 = d2->succ->dest;
1230 if (d == d2)
1231 break;
1232 }
1233 s->count += s2->count;
1234
1235 /* Take care to update possible forwarder blocks. We verified
1236 that there is no more than one in the chain, so we can't run
1237 into an infinite loop. */
1238 if (FORWARDER_BLOCK_P (s->dest))
1239 {
1240 s->dest->succ->count += s2->count;
1241 s->dest->count += s2->count;
1242 s->dest->frequency += EDGE_FREQUENCY (s);
1243 }
1244 if (FORWARDER_BLOCK_P (s2->dest))
1245 {
1246 s2->dest->succ->count -= s2->count;
1247 s2->dest->count -= s2->count;
1248 s2->dest->frequency -= EDGE_FREQUENCY (s);
1249 }
1250 if (!redirect_to->frequency && !src1->frequency)
1251 s->probability = (s->probability + s2->probability) / 2;
1252 else
1253 s->probability =
1254 ((s->probability * redirect_to->frequency +
1255 s2->probability * src1->frequency)
1256 / (redirect_to->frequency + src1->frequency));
1257 }
1258
1259 note = find_reg_note (redirect_to->end, REG_BR_PROB, 0);
1260 if (note)
1261 XEXP (note, 0) = GEN_INT (BRANCH_EDGE (redirect_to)->probability);
1262
1263 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1264
1265 /* Skip possible basic block header. */
1266 if (GET_CODE (newpos1) == CODE_LABEL)
1267 newpos1 = NEXT_INSN (newpos1);
1268 if (GET_CODE (newpos1) == NOTE)
1269 newpos1 = NEXT_INSN (newpos1);
1270 last = src1->end;
1271
1272 /* Emit the jump insn. */
1273 label = block_label (redirect_to);
1274 emit_jump_insn_after (gen_jump (label), src1->end);
1275 JUMP_LABEL (src1->end) = label;
1276 LABEL_NUSES (label)++;
1277
1278 /* Delete the now unreachable instructions. */
1279 delete_insn_chain (newpos1, last);
1280
1281 /* Make sure there is a barrier after the new jump. */
1282 last = next_nonnote_insn (src1->end);
1283 if (!last || GET_CODE (last) != BARRIER)
1284 emit_barrier_after (src1->end);
1285
1286 /* Update CFG. */
1287 while (src1->succ)
1288 remove_edge (src1->succ);
1289 make_single_succ_edge (src1, redirect_to, 0);
1290
1291 BB_SET_FLAG (src1, BB_UPDATE_LIFE);
1292 update_forwarder_flag (src1);
1293
1294 return true;
1295 }
1296
1297 /* Search the predecessors of BB for common insn sequences. When found,
1298 share code between them by redirecting control flow. Return true if
1299 any changes were made. */
1300
1301 static bool
1302 try_crossjump_bb (mode, bb)
1303 int mode;
1304 basic_block bb;
1305 {
1306 edge e, e2, nexte2, nexte, fallthru;
1307 bool changed;
1308
1309 /* Nothing to do if there are not at least two incoming edges. */
1310 if (!bb->pred || !bb->pred->pred_next)
1311 return false;
1312
1313 /* It is always cheapest to redirect a block that ends in a branch to
1314 a block that falls through into BB, as that adds no branches to the
1315 program. We'll try that combination first. */
1316 for (fallthru = bb->pred; fallthru; fallthru = fallthru->pred_next)
1317 if (fallthru->flags & EDGE_FALLTHRU)
1318 break;
1319
1320 changed = false;
1321 for (e = bb->pred; e; e = nexte)
1322 {
1323 nexte = e->pred_next;
1324
1325 /* As noted above, first try with the fallthru predecessor. */
1326 if (fallthru)
1327 {
1328 /* Don't combine the fallthru edge into anything else.
1329 If there is a match, we'll do it the other way around. */
1330 if (e == fallthru)
1331 continue;
1332
1333 if (try_crossjump_to_edge (mode, e, fallthru))
1334 {
1335 changed = true;
1336 nexte = bb->pred;
1337 continue;
1338 }
1339 }
1340
1341 /* Non-obvious work limiting check: Recognize that we're going
1342 to call try_crossjump_bb on every basic block. So if we have
1343 two blocks with lots of outgoing edges (a switch) and they
1344 share lots of common destinations, then we would do the
1345 cross-jump check once for each common destination.
1346
1347 Now, if the blocks actually are cross-jump candidates, then
1348 all of their destinations will be shared. Which means that
1349 we only need to check them for cross-jump candidacy once. We
1350 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1351 choosing to do the check from the block for which the edge
1352 in question is the first successor of A. */
1353 if (e->src->succ != e)
1354 continue;
1355
1356 for (e2 = bb->pred; e2; e2 = nexte2)
1357 {
1358 nexte2 = e2->pred_next;
1359
1360 if (e2 == e)
1361 continue;
1362
1363 /* We've already checked the fallthru edge above. */
1364 if (e2 == fallthru)
1365 continue;
1366
1367 /* The "first successor" check above only prevents multiple
1368 checks of crossjump(A,B). In order to prevent redundant
1369 checks of crossjump(B,A), require that A be the block
1370 with the lowest index. */
1371 if (e->src->index > e2->src->index)
1372 continue;
1373
1374 if (try_crossjump_to_edge (mode, e, e2))
1375 {
1376 changed = true;
1377 nexte = bb->pred;
1378 break;
1379 }
1380 }
1381 }
1382
1383 return changed;
1384 }
1385
1386 /* Do simple CFG optimizations - basic block merging, simplification of jump
1387 instructions, etc. Return nonzero if changes were made. */
1388
1389 static bool
1390 try_optimize_cfg (mode)
1391 int mode;
1392 {
1393 int i;
1394 bool changed_overall = false;
1395 bool changed;
1396 int iterations = 0;
1397 sbitmap blocks;
1398
1399 if (mode & CLEANUP_CROSSJUMP)
1400 add_noreturn_fake_exit_edges ();
1401
1402 for (i = 0; i < n_basic_blocks; i++)
1403 update_forwarder_flag (BASIC_BLOCK (i));
1404
1405 /* Attempt to merge blocks as made possible by edge removal. If a block
1406 has only one successor, and the successor has only one predecessor,
1407 they may be combined. */
1408
1409 do
1410 {
1411 changed = false;
1412 iterations++;
1413
1414 if (rtl_dump_file)
1415 fprintf (rtl_dump_file, "\n\ntry_optimize_cfg iteration %i\n\n",
1416 iterations);
1417
1418 for (i = 0; i < n_basic_blocks;)
1419 {
1420 basic_block c, b = BASIC_BLOCK (i);
1421 edge s;
1422 bool changed_here = false;
1423
1424 /* Delete trivially dead basic blocks. */
1425 while (b->pred == NULL)
1426 {
1427 c = BASIC_BLOCK (b->index - 1);
1428 if (rtl_dump_file)
1429 fprintf (rtl_dump_file, "Deleting block %i.\n", b->index);
1430 flow_delete_block (b);
1431 changed = true;
1432 b = c;
1433 }
1434
1435 /* Remove code labels no longer used. Don't do this before
1436 CALL_PLACEHOLDER is removed, as some branches may be hidden
1437 within. */
1438 if (b->pred->pred_next == NULL
1439 && (b->pred->flags & EDGE_FALLTHRU)
1440 && !(b->pred->flags & EDGE_COMPLEX)
1441 && GET_CODE (b->head) == CODE_LABEL
1442 && (!(mode & CLEANUP_PRE_SIBCALL)
1443 || !tail_recursion_label_p (b->head))
1444 /* If the previous block ends with a branch to this block,
1445 we can't delete the label. Normally this is a condjump
1446 that is yet to be simplified, but if CASE_DROPS_THRU,
1447 this can be a tablejump with some element going to the
1448 same place as the default (fallthru). */
1449 && (b->pred->src == ENTRY_BLOCK_PTR
1450 || GET_CODE (b->pred->src->end) != JUMP_INSN
1451 || ! label_is_jump_target_p (b->head, b->pred->src->end)))
1452 {
1453 rtx label = b->head;
1454 b->head = NEXT_INSN (b->head);
1455 delete_insn_chain (label, label);
1456 if (rtl_dump_file)
1457 fprintf (rtl_dump_file, "Deleted label in block %i.\n",
1458 b->index);
1459 }
1460
1461 /* If we fall through an empty block, we can remove it. */
1462 if (b->pred->pred_next == NULL
1463 && (b->pred->flags & EDGE_FALLTHRU)
1464 && GET_CODE (b->head) != CODE_LABEL
1465 && FORWARDER_BLOCK_P (b)
1466 /* Note that forwarder_block_p true ensures that there
1467 is a successor for this block. */
1468 && (b->succ->flags & EDGE_FALLTHRU)
1469 && n_basic_blocks > 1)
1470 {
1471 if (rtl_dump_file)
1472 fprintf (rtl_dump_file, "Deleting fallthru block %i.\n",
1473 b->index);
1474 c = BASIC_BLOCK (b->index ? b->index - 1 : 1);
1475 redirect_edge_succ_nodup (b->pred, b->succ->dest);
1476 flow_delete_block (b);
1477 changed = true;
1478 b = c;
1479 }
1480
1481 /* Merge blocks. Loop because chains of blocks might be
1482 combinable. */
1483 while ((s = b->succ) != NULL
1484 && s->succ_next == NULL
1485 && !(s->flags & EDGE_COMPLEX)
1486 && (c = s->dest) != EXIT_BLOCK_PTR
1487 && c->pred->pred_next == NULL
1488 /* If the jump insn has side effects,
1489 we can't kill the edge. */
1490 && (GET_CODE (b->end) != JUMP_INSN
1491 || onlyjump_p (b->end))
1492 && merge_blocks (s, b, c, mode))
1493 changed_here = true;
1494
1495 /* Simplify branch over branch. */
1496 if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b))
1497 {
1498 BB_SET_FLAG (b, BB_UPDATE_LIFE);
1499 changed_here = true;
1500 }
1501
1502 /* If B has a single outgoing edge, but uses a non-trivial jump
1503 instruction without side-effects, we can either delete the
1504 jump entirely, or replace it with a simple unconditional jump.
1505 Use redirect_edge_and_branch to do the dirty work. */
1506 if (b->succ
1507 && ! b->succ->succ_next
1508 && b->succ->dest != EXIT_BLOCK_PTR
1509 && onlyjump_p (b->end)
1510 && redirect_edge_and_branch (b->succ, b->succ->dest))
1511 {
1512 BB_SET_FLAG (b, BB_UPDATE_LIFE);
1513 update_forwarder_flag (b);
1514 changed_here = true;
1515 }
1516
1517 /* Simplify branch to branch. */
1518 if (try_forward_edges (mode, b))
1519 changed_here = true;
1520
1521 /* Look for shared code between blocks. */
1522 if ((mode & CLEANUP_CROSSJUMP)
1523 && try_crossjump_bb (mode, b))
1524 changed_here = true;
1525
1526 /* Don't get confused by the index shift caused by deleting
1527 blocks. */
1528 if (!changed_here)
1529 i = b->index + 1;
1530 else
1531 changed = true;
1532 }
1533
1534 if ((mode & CLEANUP_CROSSJUMP)
1535 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
1536 changed = true;
1537
1538 #ifdef ENABLE_CHECKING
1539 if (changed)
1540 verify_flow_info ();
1541 #endif
1542
1543 changed_overall |= changed;
1544 }
1545 while (changed);
1546
1547 if (mode & CLEANUP_CROSSJUMP)
1548 remove_fake_edges ();
1549
1550 if ((mode & CLEANUP_UPDATE_LIFE) && changed_overall)
1551 {
1552 bool found = 0;
1553 blocks = sbitmap_alloc (n_basic_blocks);
1554 sbitmap_zero (blocks);
1555 for (i = 0; i < n_basic_blocks; i++)
1556 if (BB_FLAGS (BASIC_BLOCK (i)) & BB_UPDATE_LIFE)
1557 {
1558 found = 1;
1559 SET_BIT (blocks, i);
1560 }
1561 if (found)
1562 update_life_info (blocks, UPDATE_LIFE_GLOBAL,
1563 PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE
1564 | PROP_KILL_DEAD_CODE);
1565 sbitmap_free (blocks);
1566 }
1567 for (i = 0; i < n_basic_blocks; i++)
1568 BASIC_BLOCK (i)->aux = NULL;
1569
1570 return changed_overall;
1571 }
1572 \f
1573 /* Delete all unreachable basic blocks. */
1574
1575 static bool
1576 delete_unreachable_blocks ()
1577 {
1578 int i;
1579 bool changed = false;
1580
1581 find_unreachable_blocks ();
1582
1583 /* Delete all unreachable basic blocks. Count down so that we
1584 don't interfere with the block renumbering that happens in
1585 flow_delete_block. */
1586
1587 for (i = n_basic_blocks - 1; i >= 0; --i)
1588 {
1589 basic_block b = BASIC_BLOCK (i);
1590
1591 if (!(b->flags & BB_REACHABLE))
1592 flow_delete_block (b), changed = true;
1593 }
1594
1595 if (changed)
1596 tidy_fallthru_edges ();
1597 return changed;
1598 }
1599 \f
1600 /* Tidy the CFG by deleting unreachable code and whatnot. */
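/* Added usage note: MODE is a combination of the CLEANUP_* flags declared
   in basic-block.h.  A caller wanting the full treatment might, for
   illustration, invoke

         cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);

   to enable the expensive transformations and cross jumping in addition
   to the unconditional cleanups performed here.  */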
1601
1602 bool
1603 cleanup_cfg (mode)
1604 int mode;
1605 {
1606 bool changed = false;
1607
1608 timevar_push (TV_CLEANUP_CFG);
1609 changed = delete_unreachable_blocks ();
1610 if (try_optimize_cfg (mode))
1611 delete_unreachable_blocks (), changed = true;
1612
1613 /* Kill the data we won't maintain. */
1614 free_EXPR_LIST_list (&label_value_list);
1615 free_EXPR_LIST_list (&tail_recursion_label_list);
1616 timevar_pop (TV_CLEANUP_CFG);
1617
1618 return changed;
1619 }