1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22 /* This file contains the control flow optimizer.  The main entrypoint is
23 cleanup_cfg (usage sketch below).  The following optimizations are performed:
24
25 - Unreachable block removal
26 - Edge forwarding (an edge to a forwarder block is forwarded to its
27 successor.  Simplification of the branch instruction is performed by the
28 underlying infrastructure, so the branch can be converted to a simplejump
29 or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
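
/* A minimal usage sketch (editorial illustration, not part of the original
   file): callers elsewhere in the compiler pass a bitmask of the CLEANUP_*
   mode flags that are tested throughout this file.  A pass wanting the
   expensive cleanups plus cross jumping and jump threading might do
   something like

       bool changed = cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP
                                   | CLEANUP_THREADING);

   and use the boolean return value to learn whether anything changed.  */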
33
34 #include "config.h"
35 #include "system.h"
36 #include "rtl.h"
37 #include "hard-reg-set.h"
38 #include "basic-block.h"
39 #include "timevar.h"
40 #include "output.h"
41 #include "insn-config.h"
42 #include "flags.h"
43 #include "recog.h"
44 #include "toplev.h"
45 #include "cselib.h"
46 #include "tm_p.h"
47
48 #include "obstack.h"
49
50 /* cleanup_cfg maintains the following flags for each basic block. */
51
52 enum bb_flags
53 {
55 /* Set if life info needs to be recomputed for the given BB. */
55 BB_UPDATE_LIFE = 1,
56 /* Set if BB is a forwarder block, to avoid too many
57 forwarder_block_p calls. */
58 BB_FORWARDER_BLOCK = 2
59 };
60
61 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
62 #define BB_SET_FLAG(BB, FLAG) \
63 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
64 #define BB_CLEAR_FLAG(BB, FLAG) \
65 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
66
67 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
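
/* For illustration (editorial note): the flags are kept in the otherwise
   free bb->aux field and are touched only through the macros above, e.g.

       if (forwarder_block_p (bb))
         BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
       ...
       if (FORWARDER_BLOCK_P (bb))
         ...

   try_optimize_cfg clears bb->aux for every block before returning, so the
   flags are meaningful only while cleanup_cfg is running.  */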
68
69 static bool try_crossjump_to_edge PARAMS ((int, edge, edge));
70 static bool try_crossjump_bb PARAMS ((int, basic_block));
71 static bool outgoing_edges_match PARAMS ((int,
72 basic_block, basic_block));
73 static int flow_find_cross_jump PARAMS ((int, basic_block, basic_block,
74 rtx *, rtx *));
75 static bool insns_match_p PARAMS ((int, rtx, rtx));
76
77 static bool delete_unreachable_blocks PARAMS ((void));
78 static bool label_is_jump_target_p PARAMS ((rtx, rtx));
79 static bool tail_recursion_label_p PARAMS ((rtx));
80 static void merge_blocks_move_predecessor_nojumps PARAMS ((basic_block,
81 basic_block));
82 static void merge_blocks_move_successor_nojumps PARAMS ((basic_block,
83 basic_block));
84 static bool merge_blocks PARAMS ((edge,basic_block,basic_block,
85 int));
86 static bool try_optimize_cfg PARAMS ((int));
87 static bool try_simplify_condjump PARAMS ((basic_block));
88 static bool try_forward_edges PARAMS ((int, basic_block));
89 static edge thread_jump PARAMS ((int, edge, basic_block));
90 static bool mark_effect PARAMS ((rtx, bitmap));
91 static void notice_new_block PARAMS ((basic_block));
92 static void update_forwarder_flag PARAMS ((basic_block));
93 \f
94 /* Set flags for a newly created block. */
95
96 static void
97 notice_new_block (bb)
98 basic_block bb;
99 {
100 if (!bb)
101 return;
102
103 BB_SET_FLAG (bb, BB_UPDATE_LIFE);
104 if (forwarder_block_p (bb))
105 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
106 }
107
108 /* Recompute the forwarder flag after a block has been modified. */
109
110 static void
111 update_forwarder_flag (bb)
112 basic_block bb;
113 {
114 if (forwarder_block_p (bb))
115 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
116 else
117 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
118 }
119 \f
120 /* Simplify a conditional jump around an unconditional jump.
121 Return true if something changed. */
122
123 static bool
124 try_simplify_condjump (cbranch_block)
125 basic_block cbranch_block;
126 {
127 basic_block jump_block, jump_dest_block, cbranch_dest_block;
128 edge cbranch_jump_edge, cbranch_fallthru_edge;
129 rtx cbranch_insn;
130
131 /* Verify that there are exactly two successors. */
132 if (!cbranch_block->succ
133 || !cbranch_block->succ->succ_next
134 || cbranch_block->succ->succ_next->succ_next)
135 return false;
136
137 /* Verify that we've got a normal conditional branch at the end
138 of the block. */
139 cbranch_insn = cbranch_block->end;
140 if (!any_condjump_p (cbranch_insn))
141 return false;
142
143 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
144 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
145
146 /* The next block must not have multiple predecessors, must not
147 be the last block in the function, and must contain just the
148 unconditional jump. */
149 jump_block = cbranch_fallthru_edge->dest;
150 if (jump_block->pred->pred_next
151 || jump_block->index == n_basic_blocks - 1
152 || !FORWARDER_BLOCK_P (jump_block))
153 return false;
154 jump_dest_block = jump_block->succ->dest;
155
156 /* The conditional branch must target the block after the
157 unconditional branch. */
158 cbranch_dest_block = cbranch_jump_edge->dest;
159
160 if (!can_fallthru (jump_block, cbranch_dest_block))
161 return false;
162
163 /* Invert the conditional branch. */
164 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
165 return false;
166
167 if (rtl_dump_file)
168 fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
169 INSN_UID (cbranch_insn), INSN_UID (jump_block->end));
170
171 /* Success. Update the CFG to match. Note that after this point
172 the edge variable names appear backwards; the redirection is done
173 this way to preserve edge profile data. */
174 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
175 cbranch_dest_block);
176 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
177 jump_dest_block);
178 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
179 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
180
181 /* Delete the block with the unconditional jump, and clean up the mess. */
182 flow_delete_block (jump_block);
183 tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);
184
185 return true;
186 }
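
/* Illustrative sketch of the transformation performed by
   try_simplify_condjump above (editorial example; the labels are
   hypothetical).  Before:

       if (cond) goto L2;        ; conditional branch around the jump
       goto L1;                  ; forwarder block with the simplejump
     L2: ...

   After inverting the condition and redirecting both edges:

       if (!cond) goto L1;
     L2: ...

   and the forwarder block holding "goto L1" is deleted.  */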
187 \f
188 /* Attempt to prove that the operation is a NOOP using CSElib, or mark its
189 effect on the register; return true on failure.  Used by jump threading. */
190
191 static bool
192 mark_effect (exp, nonequal)
193 rtx exp;
194 regset nonequal;
195 {
196 int regno;
197 rtx dest;
198 switch (GET_CODE (exp))
199 {
200 /* In case we do clobber the register, mark it as equal, as we know the
201 value is dead so it does not have to match. */
202 case CLOBBER:
203 if (REG_P (XEXP (exp, 0)))
204 {
205 dest = XEXP (exp, 0);
206 regno = REGNO (dest);
207 CLEAR_REGNO_REG_SET (nonequal, regno);
208 if (regno < FIRST_PSEUDO_REGISTER)
209 {
210 int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
211 while (--n > 0)
212 CLEAR_REGNO_REG_SET (nonequal, regno + n);
213 }
214 }
215 return false;
216
217 case SET:
218 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
219 return false;
220 dest = SET_DEST (exp);
221 if (dest == pc_rtx)
222 return false;
223 if (!REG_P (dest))
224 return true;
225 regno = REGNO (dest);
226 SET_REGNO_REG_SET (nonequal, regno);
227 if (regno < FIRST_PSEUDO_REGISTER)
228 {
229 int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
230 while (--n > 0)
231 SET_REGNO_REG_SET (nonequal, regno + n);
232 }
233 return false;
234
235 default:
236 return false;
237 }
238 }
239 /* Attempt to prove that the basic block B will have no side effects and
240 always continues along the same edge if reached via E.  Return that edge
241 if it exists, NULL otherwise. */
242
243 static edge
244 thread_jump (mode, e, b)
245 int mode;
246 edge e;
247 basic_block b;
248 {
249 rtx set1, set2, cond1, cond2, insn;
250 enum rtx_code code1, code2, reversed_code2;
251 bool reverse1 = false;
252 int i;
253 regset nonequal;
254 bool failed = false;
255
256 /* At the moment, we handle only conditional jumps, but later we may
257 want to extend this code to tablejumps and others. */
258 if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
259 return NULL;
260 if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
261 return NULL;
262
263 /* Second branch must end with onlyjump, as we will eliminate the jump. */
264 if (!any_condjump_p (e->src->end) || !any_condjump_p (b->end)
265 || !onlyjump_p (b->end))
266 return NULL;
267
268 set1 = pc_set (e->src->end);
269 set2 = pc_set (b->end);
270 if (((e->flags & EDGE_FALLTHRU) != 0)
271 != (XEXP (SET_SRC (set1), 0) == pc_rtx))
272 reverse1 = true;
273
274 cond1 = XEXP (SET_SRC (set1), 0);
275 cond2 = XEXP (SET_SRC (set2), 0);
276 if (reverse1)
277 code1 = reversed_comparison_code (cond1, b->end);
278 else
279 code1 = GET_CODE (cond1);
280
281 code2 = GET_CODE (cond2);
282 reversed_code2 = reversed_comparison_code (cond2, b->end);
283
284 if (!comparison_dominates_p (code1, code2)
285 && !comparison_dominates_p (code1, reversed_code2))
286 return NULL;
287
288 /* Ensure that the comparison operators are equivalent.
289 ??? This is far too pessimistic.  We should allow swapped operands,
290 different CCmodes, or for example interval comparisons that
291 dominate even when operands are not equivalent. */
292 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
293 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
294 return NULL;
295
296 /* Short circuit cases where block B contains some side effects, as we can't
297 safely bypass it. */
298 for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end);
299 insn = NEXT_INSN (insn))
300 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
301 return NULL;
302
303 cselib_init ();
304
305 /* First process all values computed in the source basic block. */
306 for (insn = NEXT_INSN (e->src->head); insn != NEXT_INSN (e->src->end);
307 insn = NEXT_INSN (insn))
308 if (INSN_P (insn))
309 cselib_process_insn (insn);
310
311 nonequal = BITMAP_XMALLOC();
312 CLEAR_REG_SET (nonequal);
313
314 /* Now assume that we've continued along the edge E to B and continue
315 processing as if it were the same basic block.
316 Our goal is to prove that the whole block is a NOOP. */
317
318 for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end) && !failed;
319 insn = NEXT_INSN (insn))
320 {
321 if (INSN_P (insn))
322 {
323 rtx pat = PATTERN (insn);
324
325 if (GET_CODE (pat) == PARALLEL)
326 {
327 for (i = 0; i < XVECLEN (pat, 0); i++)
328 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
329 }
330 else
331 failed |= mark_effect (pat, nonequal);
332 }
333
334 cselib_process_insn (insn);
335 }
336
337 /* Later we should clear dead registers from NONEQUAL.  So far we don't
338 have life information in cfg_cleanup. */
339 if (failed)
340 goto failed_exit;
341
342 /* In case liveness information is available, we need to prove equivalence
343 only of the live values. */
344 if (mode & CLEANUP_UPDATE_LIFE)
345 AND_REG_SET (nonequal, b->global_live_at_end);
346
347 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
348
349 BITMAP_XFREE (nonequal);
350 cselib_finish ();
351 if ((comparison_dominates_p (code1, code2) != 0)
352 != (XEXP (SET_SRC (set2), 0) == pc_rtx))
353 return BRANCH_EDGE (b);
354 else
355 return FALLTHRU_EDGE (b);
356
357 failed_exit:
358 BITMAP_XFREE (nonequal);
359 cselib_finish ();
360 return NULL;
361 }
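
/* Illustrative sketch of what thread_jump proves (editorial example with
   hypothetical source-level variables).  Given

       if (a > 0) goto B;
       ...
     B: if (a >= 0) goto C; else goto D;

   the GT comparison guarding the edge into B dominates the GE comparison
   ending B; if B contains no side effects and cselib shows the operands are
   unchanged, the whole of B is a NOOP on this path, so the caller may
   redirect the incoming edge straight to C.  */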
362 \f
363 /* Attempt to forward edges leaving basic block B.
364 Return true if successful. */
365
366 static bool
367 try_forward_edges (mode, b)
368 basic_block b;
369 int mode;
370 {
371 bool changed = false;
372 edge e, next, threaded_edge;
373
374 for (e = b->succ; e; e = next)
375 {
376 basic_block target, first;
377 int counter;
378 bool threaded = false;
379
380 next = e->succ_next;
381
382 /* Skip complex edges because we don't know how to update them.
383
384 Still handle fallthru edges, as we may succeed in forwarding the fallthru
385 edge to the same place as the branch edge of a conditional branch and
386 turn the conditional branch into an unconditional one. */
387 if (e->flags & EDGE_COMPLEX)
388 continue;
389
390 target = first = e->dest;
391 counter = 0;
392
393 while (counter < n_basic_blocks)
394 {
395 basic_block new_target = NULL;
396 bool new_target_threaded = false;
397
398 if (FORWARDER_BLOCK_P (target)
399 && target->succ->dest != EXIT_BLOCK_PTR)
400 {
401 /* Bypass trivial infinite loops. */
402 if (target == target->succ->dest)
403 counter = n_basic_blocks;
404 new_target = target->succ->dest;
405 }
406
407 /* Allow threading over only one edge at a time to simplify updating
408 of probabilities. */
409 else if ((mode & CLEANUP_THREADING) && !threaded)
410 {
411 threaded_edge = thread_jump (mode, e, target);
412 if (threaded_edge)
413 {
414 new_target = threaded_edge->dest;
415 new_target_threaded = true;
416 }
417 }
418
419 if (!new_target)
420 break;
421
422 /* Avoid killing loop pre-headers, as they are where the loop
423 optimizer wants to hoist code to.
424
425 For fallthru forwarders, the LOOP_BEG note must appear between the
426 header of the block and the CODE_LABEL of the loop; for non-forwarders
427 it must appear before the JUMP_INSN. */
428 if (mode & CLEANUP_PRE_LOOP)
429 {
430 rtx insn = (target->succ->flags & EDGE_FALLTHRU
431 ? target->head : prev_nonnote_insn (target->end));
432
433 if (GET_CODE (insn) != NOTE)
434 insn = NEXT_INSN (insn);
435
436 for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
437 insn = NEXT_INSN (insn))
438 if (GET_CODE (insn) == NOTE
439 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
440 break;
441
442 if (GET_CODE (insn) == NOTE)
443 break;
444 }
445
446 counter++;
447 target = new_target;
448 threaded |= new_target_threaded;
449 }
450
451 if (counter >= n_basic_blocks)
452 {
453 if (rtl_dump_file)
454 fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
455 target->index);
456 }
457 else if (target == first)
458 ; /* We didn't do anything. */
459 else
460 {
461 /* Save the values now, as the edge may get removed. */
462 gcov_type edge_count = e->count;
463 int edge_probability = e->probability;
464 int edge_frequency;
465
466 /* Don't force if target is exit block. */
467 if (threaded && target != EXIT_BLOCK_PTR)
468 {
469 notice_new_block (redirect_edge_and_branch_force (e, target));
470 if (rtl_dump_file)
471 fprintf (rtl_dump_file, "Conditionals threaded.\n");
472 }
473 else if (!redirect_edge_and_branch (e, target))
474 {
475 if (rtl_dump_file)
476 fprintf (rtl_dump_file,
477 "Forwarding edge %i->%i to %i failed.\n",
478 b->index, e->dest->index, target->index);
479 continue;
480 }
481
482 /* We successfully forwarded the edge. Now update profile
483 data: for each edge we traversed in the chain, remove
484 the original edge's execution count. */
485 edge_frequency = ((edge_probability * b->frequency
486 + REG_BR_PROB_BASE / 2)
487 / REG_BR_PROB_BASE);
488
489 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
490 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
491 BB_SET_FLAG (b, BB_UPDATE_LIFE);
492
493 do
494 {
495 edge t;
496
497 first->count -= edge_count;
498 first->succ->count -= edge_count;
499 first->frequency -= edge_frequency;
500 if (first->succ->succ_next)
501 t = threaded_edge;
502 else
503 t = first->succ;
504
505 first = t->dest;
506 }
507 while (first != target);
508
509 changed = true;
510 }
511 }
512
513 return changed;
514 }
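
/* Worked example for the profile update above (editorial; assumes the
   usual REG_BR_PROB_BASE of 10000).  An edge with probability 2500 (25%)
   leaving a block with frequency 1000 gets

       edge_frequency = (2500 * 1000 + 5000) / 10000 = 250

   and that amount, together with the edge count, is subtracted from every
   forwarder block skipped over on the way to the final target.  */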
515 \f
516 /* Return true if LABEL is a target of JUMP_INSN. This applies only
517 to non-complex jumps. That is, direct unconditional, conditional,
518 and tablejumps, but not computed jumps or returns. It also does
519 not apply to the fallthru case of a conditional jump. */
520
521 static bool
522 label_is_jump_target_p (label, jump_insn)
523 rtx label, jump_insn;
524 {
525 rtx tmp = JUMP_LABEL (jump_insn);
526
527 if (label == tmp)
528 return true;
529
530 if (tmp != NULL_RTX
531 && (tmp = NEXT_INSN (tmp)) != NULL_RTX
532 && GET_CODE (tmp) == JUMP_INSN
533 && (tmp = PATTERN (tmp),
534 GET_CODE (tmp) == ADDR_VEC
535 || GET_CODE (tmp) == ADDR_DIFF_VEC))
536 {
537 rtvec vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);
538 int i, veclen = GET_NUM_ELEM (vec);
539
540 for (i = 0; i < veclen; ++i)
541 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
542 return true;
543 }
544
545 return false;
546 }
547
548 /* Return true if LABEL is used for tail recursion. */
549
550 static bool
551 tail_recursion_label_p (label)
552 rtx label;
553 {
554 rtx x;
555
556 for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
557 if (label == XEXP (x, 0))
558 return true;
559
560 return false;
561 }
562
563 /* Blocks A and B are to be merged into a single block. A has no incoming
564 fallthru edge, so it can be moved before B without adding or modifying
565 any jumps (aside from the jump from A to B). */
566
567 static void
568 merge_blocks_move_predecessor_nojumps (a, b)
569 basic_block a, b;
570 {
571 rtx barrier;
572 int index;
573
574 barrier = next_nonnote_insn (a->end);
575 if (GET_CODE (barrier) != BARRIER)
576 abort ();
577 delete_insn (barrier);
578
579 /* Move block and loop notes out of the chain so that we do not
580 disturb their order.
581
582 ??? A better solution would be to squeeze out all the non-nested notes
583 and adjust the block trees appropriately. Even better would be to have
584 a tighter connection between block trees and rtl so that this is not
585 necessary. */
586 if (squeeze_notes (&a->head, &a->end))
587 abort ();
588
589 /* Scramble the insn chain. */
590 if (a->end != PREV_INSN (b->head))
591 reorder_insns_nobb (a->head, a->end, PREV_INSN (b->head));
592 BB_SET_FLAG (a, BB_UPDATE_LIFE);
593
594 if (rtl_dump_file)
595 fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
596 a->index, b->index);
597
598 /* Swap the records for the two blocks around. Although we are deleting B,
599 A is now where B was and we want to compact the BB array from where
600 A used to be. */
601 BASIC_BLOCK (a->index) = b;
602 BASIC_BLOCK (b->index) = a;
603 index = a->index;
604 a->index = b->index;
605 b->index = index;
606
607 /* Now blocks A and B are contiguous. Merge them. */
608 merge_blocks_nomove (a, b);
609 }
610
611 /* Blocks A and B are to be merged into a single block. B has no outgoing
612 fallthru edge, so it can be moved after A without adding or modifying
613 any jumps (aside from the jump from A to B). */
614
615 static void
616 merge_blocks_move_successor_nojumps (a, b)
617 basic_block a, b;
618 {
619 rtx barrier, real_b_end;
620
621 real_b_end = b->end;
622 barrier = NEXT_INSN (b->end);
623
624 /* Recognize a jump table following block B. */
625 if (barrier
626 && GET_CODE (barrier) == CODE_LABEL
627 && NEXT_INSN (barrier)
628 && GET_CODE (NEXT_INSN (barrier)) == JUMP_INSN
629 && (GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_VEC
630 || GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_DIFF_VEC))
631 {
632 /* Temporarily add the table jump insn to b, so that it will also
633 be moved to the correct location. */
634 b->end = NEXT_INSN (barrier);
635 barrier = NEXT_INSN (b->end);
636 }
637
638 /* There had better have been a barrier there. Delete it. */
639 if (barrier && GET_CODE (barrier) == BARRIER)
640 delete_insn (barrier);
641
642 /* Move block and loop notes out of the chain so that we do not
643 disturb their order.
644
645 ??? A better solution would be to squeeze out all the non-nested notes
646 and adjust the block trees appropriately. Even better would be to have
647 a tighter connection between block trees and rtl so that this is not
648 necessary. */
649 if (squeeze_notes (&b->head, &b->end))
650 abort ();
651
652 /* Scramble the insn chain. */
653 reorder_insns_nobb (b->head, b->end, a->end);
654
655 /* Restore the real end of b. */
656 b->end = real_b_end;
657
658 /* Now blocks A and B are contiguous. Merge them. */
659 merge_blocks_nomove (a, b);
660 BB_SET_FLAG (a, BB_UPDATE_LIFE);
661
662 if (rtl_dump_file)
663 fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
664 b->index, a->index);
665 }
666
667 /* Attempt to merge basic blocks that are potentially non-adjacent.
668 Return true iff the attempt succeeded. */
669
670 static bool
671 merge_blocks (e, b, c, mode)
672 edge e;
673 basic_block b, c;
674 int mode;
675 {
676 /* If C has a tail recursion label, do not merge. There is no
677 edge recorded from the call_placeholder back to this label, as
678 that would make optimize_sibling_and_tail_recursive_calls more
679 complex for no gain. */
680 if ((mode & CLEANUP_PRE_SIBCALL)
681 && GET_CODE (c->head) == CODE_LABEL
682 && tail_recursion_label_p (c->head))
683 return false;
684
685 /* If B has a fallthru edge to C, no need to move anything. */
686 if (e->flags & EDGE_FALLTHRU)
687 {
688 /* We need to update liveness in case C already has broken liveness
689 or B ends with a conditional jump to the next instruction, which
690 will be removed. */
691 if ((BB_FLAGS (c) & BB_UPDATE_LIFE)
692 || GET_CODE (b->end) == JUMP_INSN)
693 BB_SET_FLAG (b, BB_UPDATE_LIFE);
694 merge_blocks_nomove (b, c);
695 update_forwarder_flag (b);
696
697 if (rtl_dump_file)
698 fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
699 b->index, c->index);
700
701 return true;
702 }
703
704 /* Otherwise we will need to move code around. Do that only if expensive
705 transformations are allowed. */
706 else if (mode & CLEANUP_EXPENSIVE)
707 {
708 edge tmp_edge, b_fallthru_edge;
709 bool c_has_outgoing_fallthru;
710 bool b_has_incoming_fallthru;
711
712 /* Avoid overactive code motion, as the forwarder blocks should be
713 eliminated by edge redirection instead. One exception might have
714 been if B is a forwarder block and C has no fallthru edge, but
715 that should be cleaned up by bb-reorder instead. */
716 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
717 return false;
718
719 /* We must make sure to not munge nesting of lexical blocks,
720 and loop notes. This is done by squeezing out all the notes
721 and leaving them there to lie. Not ideal, but functional. */
722
723 for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
724 if (tmp_edge->flags & EDGE_FALLTHRU)
725 break;
726
727 c_has_outgoing_fallthru = (tmp_edge != NULL);
728
729 for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
730 if (tmp_edge->flags & EDGE_FALLTHRU)
731 break;
732
733 b_has_incoming_fallthru = (tmp_edge != NULL);
734 b_fallthru_edge = tmp_edge;
735
736 /* Otherwise, we're going to try to move C after B. If C does
737 not have an outgoing fallthru, then it can be moved
738 immediately after B without introducing or modifying jumps. */
739 if (! c_has_outgoing_fallthru)
740 {
741 merge_blocks_move_successor_nojumps (b, c);
742 return true;
743 }
744
745 /* If B does not have an incoming fallthru, then it can be moved
746 immediately before C without introducing or modifying jumps.
747 C cannot be the first block, so we do not have to worry about
748 accessing a non-existent block. */
749
750 if (b_has_incoming_fallthru)
751 {
752 basic_block bb;
753
754 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
755 return false;
756 bb = force_nonfallthru (b_fallthru_edge);
757 if (bb)
758 notice_new_block (bb);
759 else
760 BB_SET_FLAG (b_fallthru_edge->src, BB_UPDATE_LIFE);
761 }
762
763 merge_blocks_move_predecessor_nojumps (b, c);
764 return true;
765 }
766
767 return false;
768 }
769 \f
770
771 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
772
773 static bool
774 insns_match_p (mode, i1, i2)
775 int mode ATTRIBUTE_UNUSED;
776 rtx i1, i2;
777 {
778 rtx p1, p2;
779
780 /* Verify that I1 and I2 are equivalent. */
781 if (GET_CODE (i1) != GET_CODE (i2))
782 return false;
783
784 p1 = PATTERN (i1);
785 p2 = PATTERN (i2);
786
787 if (GET_CODE (p1) != GET_CODE (p2))
788 return false;
789
790 /* If this is a CALL_INSN, compare register usage information.
791 If we don't check this on stack register machines, the two
792 CALL_INSNs might be merged leaving reg-stack.c with mismatching
793 numbers of stack registers in the same basic block.
794 If we don't check this on machines with delay slots, a delay slot may
795 be filled that clobbers a parameter expected by the subroutine.
796
797 ??? We take the simple route for now and assume that if they're
798 equal, they were constructed identically. */
799
800 if (GET_CODE (i1) == CALL_INSN
801 && !rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
802 CALL_INSN_FUNCTION_USAGE (i2)))
803 return false;
804
805 #ifdef STACK_REGS
806 /* If cross_jump_death_matters is not 0, the insn's mode
807 indicates whether or not the insn contains any stack-like
808 regs. */
809
810 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
811 {
812 /* If register stack conversion has already been done, then
813 death notes must also be compared before it is certain that
814 the two instruction streams match. */
815
816 rtx note;
817 HARD_REG_SET i1_regset, i2_regset;
818
819 CLEAR_HARD_REG_SET (i1_regset);
820 CLEAR_HARD_REG_SET (i2_regset);
821
822 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
823 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
824 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
825
826 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
827 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
828 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
829
830 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
831
832 return false;
833
834 done:
835 ;
836 }
837 #endif
838
839 if (reload_completed
840 ? ! rtx_renumbered_equal_p (p1, p2) : ! rtx_equal_p (p1, p2))
841 {
842 /* The following code helps take care of G++ cleanups. */
843 rtx equiv1 = find_reg_equal_equiv_note (i1);
844 rtx equiv2 = find_reg_equal_equiv_note (i2);
845
846 if (equiv1 && equiv2
847 /* If the equivalences are not to a constant, they may
848 reference pseudos that no longer exist, so we can't
849 use them. */
850 && (! reload_completed
851 || (CONSTANT_P (XEXP (equiv1, 0))
852 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
853 {
854 rtx s1 = single_set (i1);
855 rtx s2 = single_set (i2);
856 if (s1 != 0 && s2 != 0
857 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
858 {
859 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
860 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
861 if (! rtx_renumbered_equal_p (p1, p2))
862 cancel_changes (0);
863 else if (apply_change_group ())
864 return true;
865 }
866 }
867
868 return false;
869 }
870
871 return true;
872 }
873 \f
874 /* Look through the insns at the end of BB1 and BB2 and find the longest
875 sequence of equivalent instructions.  Store the first insns of that
876 sequence in *F1 and *F2 and return the sequence length.
877
878 To simplify callers of this function, if the blocks match exactly,
879 store the head of the blocks in *F1 and *F2. */
880
881 static int
882 flow_find_cross_jump (mode, bb1, bb2, f1, f2)
883 int mode ATTRIBUTE_UNUSED;
884 basic_block bb1, bb2;
885 rtx *f1, *f2;
886 {
887 rtx i1, i2, last1, last2, afterlast1, afterlast2;
888 int ninsns = 0;
889
890 /* Skip simple jumps at the end of the blocks. Complex jumps still
891 need to be compared for equivalence, which we'll do below. */
892
893 i1 = bb1->end;
894 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
895 if (onlyjump_p (i1)
896 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
897 {
898 last1 = i1;
899 i1 = PREV_INSN (i1);
900 }
901
902 i2 = bb2->end;
903 if (onlyjump_p (i2)
904 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
905 {
906 last2 = i2;
907 /* Count everything except an unconditional jump as an insn. */
908 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
909 ninsns++;
910 i2 = PREV_INSN (i2);
911 }
912
913 while (true)
914 {
915 /* Ignore notes. */
916 while (!active_insn_p (i1) && i1 != bb1->head)
917 i1 = PREV_INSN (i1);
918
919 while (!active_insn_p (i2) && i2 != bb2->head)
920 i2 = PREV_INSN (i2);
921
922 if (i1 == bb1->head || i2 == bb2->head)
923 break;
924
925 if (!insns_match_p (mode, i1, i2))
926 break;
927
928 /* Don't begin a cross-jump with a USE or CLOBBER insn. */
929 if (active_insn_p (i1))
930 {
931 /* If the merged insns have different REG_EQUAL notes, then
932 remove them. */
933 rtx equiv1 = find_reg_equal_equiv_note (i1);
934 rtx equiv2 = find_reg_equal_equiv_note (i2);
935
936 if (equiv1 && !equiv2)
937 remove_note (i1, equiv1);
938 else if (!equiv1 && equiv2)
939 remove_note (i2, equiv2);
940 else if (equiv1 && equiv2
941 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
942 {
943 remove_note (i1, equiv1);
944 remove_note (i2, equiv2);
945 }
946
947 afterlast1 = last1, afterlast2 = last2;
948 last1 = i1, last2 = i2;
949 ninsns++;
950 }
951
952 i1 = PREV_INSN (i1);
953 i2 = PREV_INSN (i2);
954 }
955
956 #ifdef HAVE_cc0
957 /* Don't allow the insn after a compare to be shared by
958 cross-jumping unless the compare is also shared. */
959 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
960 last1 = afterlast1, last2 = afterlast2, ninsns--;
961 #endif
962
963 /* Include preceding notes and labels in the cross-jump. One,
964 this may bring us to the head of the blocks as requested above.
965 Two, it keeps line number notes as matched as may be. */
966 if (ninsns)
967 {
968 while (last1 != bb1->head && !active_insn_p (PREV_INSN (last1)))
969 last1 = PREV_INSN (last1);
970
971 if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
972 last1 = PREV_INSN (last1);
973
974 while (last2 != bb2->head && !active_insn_p (PREV_INSN (last2)))
975 last2 = PREV_INSN (last2);
976
977 if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
978 last2 = PREV_INSN (last2);
979
980 *f1 = last1;
981 *f2 = last2;
982 }
983
984 return ninsns;
985 }
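
/* Illustrative sketch of the tail merging this supports (editorial example
   with hypothetical source).  In

       if (p) { x = f (); return x; }
       else   { x = g (); return x; }

   both predecessors of the exit end with the same "return x" sequence;
   flow_find_cross_jump measures that common tail, and try_crossjump_to_edge
   below then redirects one predecessor into the other so the shared insns
   are emitted only once.  */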
986
987 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
988 the branch instruction.  This means that if we commonize the control
989 flow before the end of the basic block, the semantics remain unchanged.
990
991 We may assume that there exists one edge with a common destination. */
992
993 static bool
994 outgoing_edges_match (mode, bb1, bb2)
995 int mode;
996 basic_block bb1;
997 basic_block bb2;
998 {
999 int nehedges1 = 0, nehedges2 = 0;
1000 edge fallthru1 = 0, fallthru2 = 0;
1001 edge e1, e2;
1002
1003 /* If BB1 has only one successor, we may be looking at either an
1004 unconditional jump, or a fake edge to exit. */
1005 if (bb1->succ && !bb1->succ->succ_next
1006 && !(bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)))
1007 return (bb2->succ && !bb2->succ->succ_next
1008 && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0);
1009
1010 /* Match conditional jumps - this may get tricky when fallthru and branch
1011 edges are crossed. */
1012 if (bb1->succ
1013 && bb1->succ->succ_next
1014 && !bb1->succ->succ_next->succ_next
1015 && any_condjump_p (bb1->end)
1016 && onlyjump_p (bb1->end))
1017 {
1018 edge b1, f1, b2, f2;
1019 bool reverse, match;
1020 rtx set1, set2, cond1, cond2;
1021 enum rtx_code code1, code2;
1022
1023 if (!bb2->succ
1024 || !bb2->succ->succ_next
1025 || bb2->succ->succ_next->succ_next
1026 || !any_condjump_p (bb2->end)
1027 || !onlyjump_p (bb2->end))
1028 return false;
1029
1030 b1 = BRANCH_EDGE (bb1);
1031 b2 = BRANCH_EDGE (bb2);
1032 f1 = FALLTHRU_EDGE (bb1);
1033 f2 = FALLTHRU_EDGE (bb2);
1034
1035 /* Get around possible forwarders on fallthru edges. Other cases
1036 should be optimized out already. */
1037 if (FORWARDER_BLOCK_P (f1->dest))
1038 f1 = f1->dest->succ;
1039
1040 if (FORWARDER_BLOCK_P (f2->dest))
1041 f2 = f2->dest->succ;
1042
1043 /* To simplify use of this function, return false if there are
1044 unneeded forwarder blocks. These will get eliminated later
1045 during cleanup_cfg. */
1046 if (FORWARDER_BLOCK_P (f1->dest)
1047 || FORWARDER_BLOCK_P (f2->dest)
1048 || FORWARDER_BLOCK_P (b1->dest)
1049 || FORWARDER_BLOCK_P (b2->dest))
1050 return false;
1051
1052 if (f1->dest == f2->dest && b1->dest == b2->dest)
1053 reverse = false;
1054 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1055 reverse = true;
1056 else
1057 return false;
1058
1059 set1 = pc_set (bb1->end);
1060 set2 = pc_set (bb2->end);
1061 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1062 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1063 reverse = !reverse;
1064
1065 cond1 = XEXP (SET_SRC (set1), 0);
1066 cond2 = XEXP (SET_SRC (set2), 0);
1067 code1 = GET_CODE (cond1);
1068 if (reverse)
1069 code2 = reversed_comparison_code (cond2, bb2->end);
1070 else
1071 code2 = GET_CODE (cond2);
1072
1073 if (code2 == UNKNOWN)
1074 return false;
1075
1076 /* Verify codes and operands match. */
1077 match = ((code1 == code2
1078 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1079 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1080 || (code1 == swap_condition (code2)
1081 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1082 XEXP (cond2, 0))
1083 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1084 XEXP (cond2, 1))));
1085
1086 /* If we return true, we will join the blocks. Which means that
1087 we will only have one branch prediction bit to work with. Thus
1088 we require the existing branches to have probabilities that are
1089 roughly similar. */
1090 /* ??? We should use bb->frequency to allow merging in infrequently
1091 executed blocks, but at the moment it is not available when
1092 cleanup_cfg is run. */
1093 if (match && !optimize_size)
1094 {
1095 rtx note1, note2;
1096 int prob1, prob2;
1097
1098 note1 = find_reg_note (bb1->end, REG_BR_PROB, 0);
1099 note2 = find_reg_note (bb2->end, REG_BR_PROB, 0);
1100
1101 if (note1 && note2)
1102 {
1103 prob1 = INTVAL (XEXP (note1, 0));
1104 prob2 = INTVAL (XEXP (note2, 0));
1105 if (reverse)
1106 prob2 = REG_BR_PROB_BASE - prob2;
1107
1108 /* Fail if the difference in probabilities is
1109 greater than 5%. */
1110 if (abs (prob1 - prob2) > REG_BR_PROB_BASE / 20)
1111 return false;
1112 }
1113
1114 else if (note1 || note2)
1115 return false;
1116 }
1117
1118 if (rtl_dump_file && match)
1119 fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
1120 bb1->index, bb2->index);
1121
1122 return match;
1123 }
1124
1125 /* Generic case - we are seeing a computed jump, table jump or trapping
1126 instruction. */
1127
1128 /* First ensure that the instructions match.  There may be many outgoing
1129 edges, so this test is generally cheaper.
1130 ??? Currently tablejumps will never match, as they have
1131 different tables. */
1132 if (!insns_match_p (mode, bb1->end, bb2->end))
1133 return false;
1134
1135 /* Search the outgoing edges, ensure that the counts do match, and find
1136 possible fallthru and exception handling edges, since these need more
1137 validation. */
1138 for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
1139 e1 = e1->succ_next, e2 = e2->succ_next)
1140 {
1141 if (e1->flags & EDGE_EH)
1142 nehedges1++;
1143
1144 if (e2->flags & EDGE_EH)
1145 nehedges2++;
1146
1147 if (e1->flags & EDGE_FALLTHRU)
1148 fallthru1 = e1;
1149 if (e2->flags & EDGE_FALLTHRU)
1150 fallthru2 = e2;
1151 }
1152
1153 /* If the number of edges of various types does not match, fail. */
1154 if (e1 || e2
1155 || nehedges1 != nehedges2
1156 || (fallthru1 != 0) != (fallthru2 != 0))
1157 return false;
1158
1159 /* Fallthru edges must be forwarded to the same destination. */
1160 if (fallthru1)
1161 {
1162 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1163 ? fallthru1->dest->succ->dest: fallthru1->dest);
1164 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1165 ? fallthru2->dest->succ->dest: fallthru2->dest);
1166
1167 if (d1 != d2)
1168 return false;
1169 }
1170
1171 /* In case we do have EH edges, ensure we are in the same region. */
1172 if (nehedges1)
1173 {
1174 rtx n1 = find_reg_note (bb1->end, REG_EH_REGION, 0);
1175 rtx n2 = find_reg_note (bb2->end, REG_EH_REGION, 0);
1176
1177 if (XEXP (n1, 0) != XEXP (n2, 0))
1178 return false;
1179 }
1180
1181 /* We don't need to match the rest of the edges, as the above checks
1182 should be enough to ensure that they are equivalent. */
1183 return true;
1184 }
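
/* Worked example for the branch probability check above (editorial;
   assumes the usual REG_BR_PROB_BASE of 10000, so the 5% limit is
   REG_BR_PROB_BASE / 20 = 500).  REG_BR_PROB notes of 7000 and 6400
   differ by 600 and make the match fail, while 7000 and 6600 differ by
   only 400 and are accepted.  */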
1185
1186 /* E1 and E2 are edges with the same destination block. Search their
1187 predecessors for common code. If found, redirect control flow from
1188 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
1189
1190 static bool
1191 try_crossjump_to_edge (mode, e1, e2)
1192 int mode;
1193 edge e1, e2;
1194 {
1195 int nmatch;
1196 basic_block src1 = e1->src, src2 = e2->src;
1197 basic_block redirect_to;
1198 rtx newpos1, newpos2;
1199 edge s;
1200 rtx last;
1201 rtx label;
1202 rtx note;
1203
1204 /* Search backward through forwarder blocks. We don't need to worry
1205 about multiple entry or chained forwarders, as they will be optimized
1206 away. We do this to look past the unconditional jump following a
1207 conditional jump that is required due to the current CFG shape. */
1208 if (src1->pred
1209 && !src1->pred->pred_next
1210 && FORWARDER_BLOCK_P (src1))
1211 e1 = src1->pred, src1 = e1->src;
1212
1213 if (src2->pred
1214 && !src2->pred->pred_next
1215 && FORWARDER_BLOCK_P (src2))
1216 e2 = src2->pred, src2 = e2->src;
1217
1218 /* Nothing to do if we reach ENTRY, or a common source block. */
1219 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1220 return false;
1221 if (src1 == src2)
1222 return false;
1223
1224 /* Seeing more than one forwarder block would confuse us later... */
1225 if (FORWARDER_BLOCK_P (e1->dest)
1226 && FORWARDER_BLOCK_P (e1->dest->succ->dest))
1227 return false;
1228
1229 if (FORWARDER_BLOCK_P (e2->dest)
1230 && FORWARDER_BLOCK_P (e2->dest->succ->dest))
1231 return false;
1232
1233 /* Likewise with dead code (possibly newly created by the other optimizations
1234 of cfg_cleanup). */
1235 if (!src1->pred || !src2->pred)
1236 return false;
1237
1238 /* Look for the common insn sequence, part the first ... */
1239 if (!outgoing_edges_match (mode, src1, src2))
1240 return false;
1241
1242 /* ... and part the second. */
1243 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1244 if (!nmatch)
1245 return false;
1246
1247 /* Avoid splitting if possible. */
1248 if (newpos2 == src2->head)
1249 redirect_to = src2;
1250 else
1251 {
1252 if (rtl_dump_file)
1253 fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
1254 src2->index, nmatch);
1255 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1256 }
1257
1258 if (rtl_dump_file)
1259 fprintf (rtl_dump_file,
1260 "Cross jumping from bb %i to bb %i; %i common insns\n",
1261 src1->index, src2->index, nmatch);
1262
1263 redirect_to->count += src1->count;
1264 redirect_to->frequency += src1->frequency;
1265
1266 /* Recompute the frequencies and counts of outgoing edges. */
1267 for (s = redirect_to->succ; s; s = s->succ_next)
1268 {
1269 edge s2;
1270 basic_block d = s->dest;
1271
1272 if (FORWARDER_BLOCK_P (d))
1273 d = d->succ->dest;
1274
1275 for (s2 = src1->succ; ; s2 = s2->succ_next)
1276 {
1277 basic_block d2 = s2->dest;
1278 if (FORWARDER_BLOCK_P (d2))
1279 d2 = d2->succ->dest;
1280 if (d == d2)
1281 break;
1282 }
1283
1284 s->count += s2->count;
1285
1286 /* Take care to update possible forwarder blocks. We verified
1287 that there is no more than one in the chain, so we can't run
1288 into an infinite loop. */
1289 if (FORWARDER_BLOCK_P (s->dest))
1290 {
1291 s->dest->succ->count += s2->count;
1292 s->dest->count += s2->count;
1293 s->dest->frequency += EDGE_FREQUENCY (s);
1294 }
1295
1296 if (FORWARDER_BLOCK_P (s2->dest))
1297 {
1298 s2->dest->succ->count -= s2->count;
1299 s2->dest->count -= s2->count;
1300 s2->dest->frequency -= EDGE_FREQUENCY (s);
1301 }
1302
1303 if (!redirect_to->frequency && !src1->frequency)
1304 s->probability = (s->probability + s2->probability) / 2;
1305 else
1306 s->probability
1307 = ((s->probability * redirect_to->frequency +
1308 s2->probability * src1->frequency)
1309 / (redirect_to->frequency + src1->frequency));
1310 }
1311
1312 note = find_reg_note (redirect_to->end, REG_BR_PROB, 0);
1313 if (note)
1314 XEXP (note, 0) = GEN_INT (BRANCH_EDGE (redirect_to)->probability);
1315
1316 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1317
1318 /* Skip possible basic block header. */
1319 if (GET_CODE (newpos1) == CODE_LABEL)
1320 newpos1 = NEXT_INSN (newpos1);
1321
1322 if (GET_CODE (newpos1) == NOTE)
1323 newpos1 = NEXT_INSN (newpos1);
1324 last = src1->end;
1325
1326 /* Emit the jump insn. */
1327 label = block_label (redirect_to);
1328 emit_jump_insn_after (gen_jump (label), src1->end);
1329 JUMP_LABEL (src1->end) = label;
1330 LABEL_NUSES (label)++;
1331
1332 /* Delete the now unreachable instructions. */
1333 delete_insn_chain (newpos1, last);
1334
1335 /* Make sure there is a barrier after the new jump. */
1336 last = next_nonnote_insn (src1->end);
1337 if (!last || GET_CODE (last) != BARRIER)
1338 emit_barrier_after (src1->end);
1339
1340 /* Update CFG. */
1341 while (src1->succ)
1342 remove_edge (src1->succ);
1343 make_single_succ_edge (src1, redirect_to, 0);
1344
1345 BB_SET_FLAG (src1, BB_UPDATE_LIFE);
1346 update_forwarder_flag (src1);
1347
1348 return true;
1349 }
1350
1351 /* Search the predecessors of BB for common insn sequences. When found,
1352 share code between them by redirecting control flow. Return true if
1353 any changes were made. */
1354
1355 static bool
1356 try_crossjump_bb (mode, bb)
1357 int mode;
1358 basic_block bb;
1359 {
1360 edge e, e2, nexte2, nexte, fallthru;
1361 bool changed;
1362
1363 /* Nothing to do if there are not at least two incoming edges. */
1364 if (!bb->pred || !bb->pred->pred_next)
1365 return false;
1366
1367 /* It is always cheapest to redirect a block that ends in a branch to
1368 a block that falls through into BB, as that adds no branches to the
1369 program. We'll try that combination first. */
1370 for (fallthru = bb->pred; fallthru; fallthru = fallthru->pred_next)
1371 if (fallthru->flags & EDGE_FALLTHRU)
1372 break;
1373
1374 changed = false;
1375 for (e = bb->pred; e; e = nexte)
1376 {
1377 nexte = e->pred_next;
1378
1379 /* As noted above, first try with the fallthru predecessor. */
1380 if (fallthru)
1381 {
1382 /* Don't combine the fallthru edge into anything else.
1383 If there is a match, we'll do it the other way around. */
1384 if (e == fallthru)
1385 continue;
1386
1387 if (try_crossjump_to_edge (mode, e, fallthru))
1388 {
1389 changed = true;
1390 nexte = bb->pred;
1391 continue;
1392 }
1393 }
1394
1395 /* Non-obvious work limiting check: Recognize that we're going
1396 to call try_crossjump_bb on every basic block. So if we have
1397 two blocks with lots of outgoing edges (a switch) and they
1398 share lots of common destinations, then we would do the
1399 cross-jump check once for each common destination.
1400
1401 Now, if the blocks actually are cross-jump candidates, then
1402 all of their destinations will be shared. Which means that
1403 we only need to check them for cross-jump candidacy once.  We
1404 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1405 choosing to do the check from the block for which the edge
1406 in question is the first successor of A. */
1407 if (e->src->succ != e)
1408 continue;
1409
1410 for (e2 = bb->pred; e2; e2 = nexte2)
1411 {
1412 nexte2 = e2->pred_next;
1413
1414 if (e2 == e)
1415 continue;
1416
1417 /* We've already checked the fallthru edge above. */
1418 if (e2 == fallthru)
1419 continue;
1420
1421 /* The "first successor" check above only prevents multiple
1422 checks of crossjump(A,B). In order to prevent redundant
1423 checks of crossjump(B,A), require that A be the block
1424 with the lowest index. */
1425 if (e->src->index > e2->src->index)
1426 continue;
1427
1428 if (try_crossjump_to_edge (mode, e, e2))
1429 {
1430 changed = true;
1431 nexte = bb->pred;
1432 break;
1433 }
1434 }
1435 }
1436
1437 return changed;
1438 }
1439
1440 /* Do simple CFG optimizations - basic block merging, simplification of
1441 jump instructions, etc.  Return nonzero if changes were made. */
1442
1443 static bool
1444 try_optimize_cfg (mode)
1445 int mode;
1446 {
1447 int i;
1448 bool changed_overall = false;
1449 bool changed;
1450 int iterations = 0;
1451 sbitmap blocks;
1452
1453 if (mode & CLEANUP_CROSSJUMP)
1454 add_noreturn_fake_exit_edges ();
1455
1456 for (i = 0; i < n_basic_blocks; i++)
1457 update_forwarder_flag (BASIC_BLOCK (i));
1458
1459 /* Attempt to merge blocks as made possible by edge removal. If a block
1460 has only one successor, and the successor has only one predecessor,
1461 they may be combined. */
1462 do
1463 {
1464 changed = false;
1465 iterations++;
1466
1467 if (rtl_dump_file)
1468 fprintf (rtl_dump_file, "\n\ntry_optimize_cfg iteration %i\n\n",
1469 iterations);
1470
1471 for (i = 0; i < n_basic_blocks;)
1472 {
1473 basic_block c, b = BASIC_BLOCK (i);
1474 edge s;
1475 bool changed_here = false;
1476
1477 /* Delete trivially dead basic blocks. */
1478 while (b->pred == NULL)
1479 {
1480 c = BASIC_BLOCK (b->index - 1);
1481 if (rtl_dump_file)
1482 fprintf (rtl_dump_file, "Deleting block %i.\n", b->index);
1483
1484 flow_delete_block (b);
1485 changed = true;
1486 b = c;
1487 }
1488
1489 /* Remove code labels no longer used. Don't do this before
1490 CALL_PLACEHOLDER is removed, as some branches may be hidden
1491 within. */
1492 if (b->pred->pred_next == NULL
1493 && (b->pred->flags & EDGE_FALLTHRU)
1494 && !(b->pred->flags & EDGE_COMPLEX)
1495 && GET_CODE (b->head) == CODE_LABEL
1496 && (!(mode & CLEANUP_PRE_SIBCALL)
1497 || !tail_recursion_label_p (b->head))
1498 /* If the previous block ends with a branch to this block,
1499 we can't delete the label. Normally this is a condjump
1500 that is yet to be simplified, but if CASE_DROPS_THRU,
1501 this can be a tablejump with some element going to the
1502 same place as the default (fallthru). */
1503 && (b->pred->src == ENTRY_BLOCK_PTR
1504 || GET_CODE (b->pred->src->end) != JUMP_INSN
1505 || ! label_is_jump_target_p (b->head, b->pred->src->end)))
1506 {
1507 rtx label = b->head;
1508
1509 b->head = NEXT_INSN (b->head);
1510 delete_insn_chain (label, label);
1511 if (rtl_dump_file)
1512 fprintf (rtl_dump_file, "Deleted label in block %i.\n",
1513 b->index);
1514 }
1515
1516 /* If we fall through an empty block, we can remove it. */
1517 if (b->pred->pred_next == NULL
1518 && (b->pred->flags & EDGE_FALLTHRU)
1519 && GET_CODE (b->head) != CODE_LABEL
1520 && FORWARDER_BLOCK_P (b)
1521 /* Note that forwarder_block_p true ensures that there
1522 is a successor for this block. */
1523 && (b->succ->flags & EDGE_FALLTHRU)
1524 && n_basic_blocks > 1)
1525 {
1526 if (rtl_dump_file)
1527 fprintf (rtl_dump_file, "Deleting fallthru block %i.\n",
1528 b->index);
1529
1530 c = BASIC_BLOCK (b->index ? b->index - 1 : 1);
1531 redirect_edge_succ_nodup (b->pred, b->succ->dest);
1532 flow_delete_block (b);
1533 changed = true;
1534 b = c;
1535 }
1536
1537 /* Merge blocks. Loop because chains of blocks might be
1538 combinable. */
1539 while ((s = b->succ) != NULL
1540 && s->succ_next == NULL
1541 && !(s->flags & EDGE_COMPLEX)
1542 && (c = s->dest) != EXIT_BLOCK_PTR
1543 && c->pred->pred_next == NULL
1544 /* If the jump insn has side effects,
1545 we can't kill the edge. */
1546 && (GET_CODE (b->end) != JUMP_INSN
1547 || onlyjump_p (b->end))
1548 && merge_blocks (s, b, c, mode))
1549 changed_here = true;
1550
1551 /* Simplify branch over branch. */
1552 if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b))
1553 {
1554 BB_SET_FLAG (b, BB_UPDATE_LIFE);
1555 changed_here = true;
1556 }
1557
1558 /* If B has a single outgoing edge, but uses a non-trivial jump
1559 instruction without side-effects, we can either delete the
1560 jump entirely, or replace it with a simple unconditional jump.
1561 Use redirect_edge_and_branch to do the dirty work. */
1562 if (b->succ
1563 && ! b->succ->succ_next
1564 && b->succ->dest != EXIT_BLOCK_PTR
1565 && onlyjump_p (b->end)
1566 && redirect_edge_and_branch (b->succ, b->succ->dest))
1567 {
1568 BB_SET_FLAG (b, BB_UPDATE_LIFE);
1569 update_forwarder_flag (b);
1570 changed_here = true;
1571 }
1572
1573 /* Simplify branch to branch. */
1574 if (try_forward_edges (mode, b))
1575 changed_here = true;
1576
1577 /* Look for shared code between blocks. */
1578 if ((mode & CLEANUP_CROSSJUMP)
1579 && try_crossjump_bb (mode, b))
1580 changed_here = true;
1581
1582 /* Don't get confused by the index shift caused by deleting
1583 blocks. */
1584 if (!changed_here)
1585 i = b->index + 1;
1586 else
1587 changed = true;
1588 }
1589
1590 if ((mode & CLEANUP_CROSSJUMP)
1591 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
1592 changed = true;
1593
1594 #ifdef ENABLE_CHECKING
1595 if (changed)
1596 verify_flow_info ();
1597 #endif
1598
1599 changed_overall |= changed;
1600 }
1601 while (changed);
1602
1603 if (mode & CLEANUP_CROSSJUMP)
1604 remove_fake_edges ();
1605
1606 if ((mode & CLEANUP_UPDATE_LIFE) && changed_overall)
1607 {
1608 bool found = 0;
1609
1610 blocks = sbitmap_alloc (n_basic_blocks);
1611 sbitmap_zero (blocks);
1612 for (i = 0; i < n_basic_blocks; i++)
1613 if (BB_FLAGS (BASIC_BLOCK (i)) & BB_UPDATE_LIFE)
1614 {
1615 found = 1;
1616 SET_BIT (blocks, i);
1617 }
1618
1619 if (found)
1620 update_life_info (blocks, UPDATE_LIFE_GLOBAL,
1621 PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE
1622 | PROP_KILL_DEAD_CODE);
1623 sbitmap_free (blocks);
1624 }
1625
1626 for (i = 0; i < n_basic_blocks; i++)
1627 BASIC_BLOCK (i)->aux = NULL;
1628
1629 return changed_overall;
1630 }
1631 \f
1632 /* Delete all unreachable basic blocks. */
1633
1634 static bool
1635 delete_unreachable_blocks ()
1636 {
1637 int i;
1638 bool changed = false;
1639
1640 find_unreachable_blocks ();
1641
1642 /* Delete all unreachable basic blocks. Count down so that we
1643 don't interfere with the block renumbering that happens in
1644 flow_delete_block. */
1645
1646 for (i = n_basic_blocks - 1; i >= 0; --i)
1647 {
1648 basic_block b = BASIC_BLOCK (i);
1649
1650 if (!(b->flags & BB_REACHABLE))
1651 flow_delete_block (b), changed = true;
1652 }
1653
1654 if (changed)
1655 tidy_fallthru_edges ();
1656 return changed;
1657 }
1658 \f
1659 /* Tidy the CFG by deleting unreachable code and whatnot. */
1660
1661 bool
1662 cleanup_cfg (mode)
1663 int mode;
1664 {
1665 bool changed = false;
1666
1667 timevar_push (TV_CLEANUP_CFG);
1668 changed = delete_unreachable_blocks ();
1669 if (try_optimize_cfg (mode))
1670 delete_unreachable_blocks (), changed = true;
1671
1672 /* Kill the data we won't maintain. */
1673 free_EXPR_LIST_list (&label_value_list);
1674 free_EXPR_LIST_list (&tail_recursion_label_list);
1675 timevar_pop (TV_CLEANUP_CFG);
1676
1677 return changed;
1678 }