/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is redirected to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure, so the branch can be converted to a
     simplejump or eliminated.)
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */

#include "config.h"
#include "system.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"

#include "obstack.h"

/* cleanup_cfg maintains the following flags for each basic block.  */

enum bb_flags
{
  /* Set if life info needs to be recomputed for the given BB.  */
  BB_UPDATE_LIFE = 1,

  /* Set if BB is a forwarder block, to avoid too many
     forwarder_block_p calls.  */
  BB_FORWARDER_BLOCK = 2
};

#define BB_FLAGS(bb) (enum bb_flags) (bb)->aux
#define BB_SET_FLAG(bb,flag) \
  (bb)->aux = (void *) (long) ((enum bb_flags) (bb)->aux | (flag))
#define BB_CLEAR_FLAG(bb,flag) \
  (bb)->aux = (void *) (long) ((enum bb_flags) (bb)->aux & ~(flag))

#define FORWARDER_BLOCK_P(bb) (BB_FLAGS (bb) & BB_FORWARDER_BLOCK)
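
/* The flags live in the otherwise unused aux field of each basic block,
   cast through long; try_optimize_cfg clears aux again before it
   returns.  */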

static bool try_crossjump_to_edge	PARAMS ((int, edge, edge));
static bool try_crossjump_bb		PARAMS ((int, basic_block));
static bool outgoing_edges_match	PARAMS ((basic_block, basic_block));
static int flow_find_cross_jump		PARAMS ((int, basic_block, basic_block,
						 rtx *, rtx *));

static bool delete_unreachable_blocks	PARAMS ((void));
static bool tail_recursion_label_p	PARAMS ((rtx));
static void merge_blocks_move_predecessor_nojumps PARAMS ((basic_block,
							   basic_block));
static void merge_blocks_move_successor_nojumps PARAMS ((basic_block,
							 basic_block));
static bool merge_blocks		PARAMS ((edge, basic_block,
						 basic_block, int));
static bool try_optimize_cfg		PARAMS ((int));
static bool try_simplify_condjump	PARAMS ((basic_block));
static bool try_forward_edges		PARAMS ((int, basic_block));
static void notice_new_block		PARAMS ((basic_block));
static void update_forwarder_flag	PARAMS ((basic_block));
\f
/* Set flags for a newly created block.  */

static void
notice_new_block (bb)
     basic_block bb;
{
  if (!bb)
    return;
  BB_SET_FLAG (bb, BB_UPDATE_LIFE);
  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
}

/* Recompute the forwarder flag after the block has been modified.  */

static void
update_forwarder_flag (bb)
     basic_block bb;
{
  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
  else
    BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
}
\f
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */

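/* A sketch of the transformation (the labels are illustrative):

       if (cc) goto A;          if (!cc) goto B;
       goto B;            ->  A: ...
     A: ...

   The forwarder block holding the "goto B" is deleted.  */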
static bool
try_simplify_condjump (cbranch_block)
     basic_block cbranch_block;
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (!cbranch_block->succ
      || !cbranch_block->succ->succ_next
      || cbranch_block->succ->succ_next->succ_next)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = cbranch_block->end;
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (jump_block->pred->pred_next
      || jump_block->index == n_basic_blocks - 1
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = jump_block->succ->dest;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (!can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (rtl_dump_file)
    fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (jump_block->end));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  flow_delete_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);

  return true;
}
\f
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */

static bool
try_forward_edges (mode, b)
     int mode;
     basic_block b;
{
  bool changed = false;
  edge e, next;

  for (e = b->succ; e; e = next)
    {
      basic_block target, first;
      int counter;

      next = e->succ_next;

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we may succeed in forwarding a
	 fallthru edge to the same place as the branch edge of a
	 conditional branch, turning the conditional branch into an
	 unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
	continue;

      target = first = e->dest;
      counter = 0;

      /* Look for the real destination of the jump.
	 Guard against looping forever in a degenerate empty loop by
	 counting up to n_basic_blocks.  */
      while (FORWARDER_BLOCK_P (target)
	     && target->succ->dest != EXIT_BLOCK_PTR
	     && counter < n_basic_blocks)
	{
	  /* Bypass trivial infinite loops.  */
	  if (target == target->succ->dest)
	    counter = n_basic_blocks;

	  /* Avoid killing loop pre-headers, as they are the place the
	     loop optimizer wants to hoist code to.

	     For fallthru forwarders, the LOOP_BEG note must appear between
	     the header of the block and the CODE_LABEL of the loop; for
	     non-forwarders it must appear before the JUMP_INSN.  */
	  if (mode & CLEANUP_PRE_LOOP)
	    {
	      rtx insn = (target->succ->flags & EDGE_FALLTHRU
			  ? target->head : prev_nonnote_insn (target->end));

	      if (GET_CODE (insn) != NOTE)
		insn = NEXT_INSN (insn);

	      for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
		   insn = NEXT_INSN (insn))
		if (GET_CODE (insn) == NOTE
		    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
		  break;

	      if (GET_CODE (insn) == NOTE)
		break;
	    }

	  target = target->succ->dest, counter++;
	}

      if (counter >= n_basic_blocks)
	{
	  if (rtl_dump_file)
	    fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  gcov_type edge_count = e->count;
	  int edge_probability = e->probability;

	  if (redirect_edge_and_branch (e, target))
	    {
	      /* We successfully forwarded the edge.  Now update profile
		 data: for each edge we traversed in the chain, remove
		 the original edge's execution count.  */
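	      /* EDGE_PROBABILITY is expressed in units of
		 REG_BR_PROB_BASE, so the edge's absolute frequency is
		 probability * frequency / REG_BR_PROB_BASE; adding
		 REG_BR_PROB_BASE / 2 before dividing rounds to nearest
		 instead of truncating.  */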
	      int edge_frequency = ((edge_probability * b->frequency
				     + REG_BR_PROB_BASE / 2)
				    / REG_BR_PROB_BASE);

	      if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
		BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
	      BB_SET_FLAG (b, BB_UPDATE_LIFE);

	      do
		{
		  first->count -= edge_count;
		  first->succ->count -= edge_count;
		  first->frequency -= edge_frequency;
		  first = first->succ->dest;
		}
	      while (first != target);

	      changed = true;
	    }
	  else
	    {
	      if (rtl_dump_file)
		fprintf (rtl_dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	    }
	}
    }

  return changed;
}
\f
/* Return true if LABEL is used for tail recursion.  */

static bool
tail_recursion_label_p (label)
     rtx label;
{
  rtx x;

  for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
    if (label == XEXP (x, 0))
      return true;

  return false;
}

/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (a, b)
     basic_block a, b;
{
  rtx barrier;
  int index;

  barrier = next_nonnote_insn (a->end);
  if (GET_CODE (barrier) != BARRIER)
    abort ();
  delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  if (squeeze_notes (&a->head, &a->end))
    abort ();

  /* Scramble the insn chain.  */
  if (a->end != PREV_INSN (b->head))
    reorder_insns_nobb (a->head, a->end, PREV_INSN (b->head));
  BB_SET_FLAG (a, BB_UPDATE_LIFE);

  if (rtl_dump_file)
    {
      fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
	       a->index, b->index);
    }

  /* Swap the records for the two blocks around.  Although we are deleting B,
     A is now where B was and we want to compact the BB array from where
     A used to be.  */
  BASIC_BLOCK (a->index) = b;
  BASIC_BLOCK (b->index) = a;
  index = a->index;
  a->index = b->index;
  b->index = index;

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks_nomove (a, b);
}

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (a, b)
     basic_block a, b;
{
  rtx barrier, real_b_end;

  real_b_end = b->end;
  barrier = NEXT_INSN (b->end);

  /* Recognize a jump table following block B.  */
  if (barrier
      && GET_CODE (barrier) == CODE_LABEL
      && NEXT_INSN (barrier)
      && GET_CODE (NEXT_INSN (barrier)) == JUMP_INSN
      && (GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_VEC
	  || GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_DIFF_VEC))
    {
      /* Temporarily add the table jump insn to B, so that it will also
	 be moved to the correct location.  */
      b->end = NEXT_INSN (barrier);
      barrier = NEXT_INSN (b->end);
    }

  /* There had better have been a barrier there.  Delete it.  */
  if (barrier && GET_CODE (barrier) == BARRIER)
    delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  if (squeeze_notes (&b->head, &b->end))
    abort ();

  /* Scramble the insn chain.  */
  reorder_insns_nobb (b->head, b->end, a->end);

  /* Restore the real end of B.  */
  b->end = real_b_end;

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks_nomove (a, b);
  BB_SET_FLAG (a, BB_UPDATE_LIFE);

  if (rtl_dump_file)
    {
      fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
	       b->index, a->index);
    }
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return true iff the attempt succeeded.  */

static bool
merge_blocks (e, b, c, mode)
     edge e;
     basic_block b, c;
     int mode;
{
  /* If C has a tail recursion label, do not merge.  There is no
     edge recorded from the call_placeholder back to this label, as
     that would make optimize_sibling_and_tail_recursive_calls more
     complex for no gain.  */
  if ((mode & CLEANUP_PRE_SIBCALL)
      && GET_CODE (c->head) == CODE_LABEL
      && tail_recursion_label_p (c->head))
    return false;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      /* We need to update liveness in case C already has broken liveness
	 or B ends with a conditional jump to the next instruction, which
	 will be removed.  */
      if ((BB_FLAGS (c) & BB_UPDATE_LIFE)
	  || GET_CODE (b->end) == JUMP_INSN)
	BB_SET_FLAG (b, BB_UPDATE_LIFE);
      merge_blocks_nomove (b, c);
      update_forwarder_flag (b);

      if (rtl_dump_file)
	{
	  fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
		   b->index, c->index);
	}

      return true;
    }
  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return false;

      /* We must make sure to not munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;
      c_has_outgoing_fallthru = (tmp_edge != NULL);

      for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;
      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return true;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
	    return false;
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	  else
	    BB_SET_FLAG (b_fallthru_edge->src, BB_UPDATE_LIFE);
	}
      merge_blocks_move_predecessor_nojumps (b, c);
      return true;
    }
  return false;
}
\f
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of equivalent insns.  Store the first insns of that sequence
   in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */

static int
flow_find_cross_jump (mode, bb1, bb2, f1, f2)
     int mode ATTRIBUTE_UNUSED;
     basic_block bb1, bb2;
     rtx *f1, *f2;
{
  rtx i1, i2, p1, p2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = bb1->end;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    i1 = PREV_INSN (i1);
  i2 = bb2->end;
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    i2 = PREV_INSN (i2);

  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
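
  /* LAST1/LAST2 point to the first insns of the best matched sequence
     found so far; AFTERLAST1/AFTERLAST2 remember their previous values
     so that the most recently matched pair can be dropped again if
     needed (see the cc0 handling below).  */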
  while (true)
    {
      /* Ignore notes.  */
      while ((GET_CODE (i1) == NOTE && i1 != bb1->head))
	i1 = PREV_INSN (i1);
      while ((GET_CODE (i2) == NOTE && i2 != bb2->head))
	i2 = PREV_INSN (i2);

      if (i1 == bb1->head || i2 == bb2->head)
	break;

      /* Verify that I1 and I2 are equivalent.  */

      if (GET_CODE (i1) != GET_CODE (i2))
	break;

      p1 = PATTERN (i1);
      p2 = PATTERN (i2);

      /* If this is a CALL_INSN, compare register usage information.
	 If we don't check this on stack register machines, the two
	 CALL_INSNs might be merged leaving reg-stack.c with mismatching
	 numbers of stack registers in the same basic block.
	 If we don't check this on machines with delay slots, a delay slot
	 may be filled that clobbers a parameter expected by the subroutine.

	 ??? We take the simple route for now and assume that if they're
	 equal, they were constructed identically.  */

      if (GET_CODE (i1) == CALL_INSN
	  && ! rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
			    CALL_INSN_FUNCTION_USAGE (i2)))
	break;

#ifdef STACK_REGS
      /* If the insn mentions stack-like hard regs after the register
	 stack conversion pass (CLEANUP_POST_REGSTACK), death notes must
	 be compared as well.  */

      if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
	{
	  /* If register stack conversion has already been done, then
	     death notes must also be compared before it is certain that
	     the two instruction streams match.  */

	  rtx note;
	  HARD_REG_SET i1_regset, i2_regset;

	  CLEAR_HARD_REG_SET (i1_regset);
	  CLEAR_HARD_REG_SET (i2_regset);

	  for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	    if (REG_NOTE_KIND (note) == REG_DEAD
		&& STACK_REG_P (XEXP (note, 0)))
	      SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

	  for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	    if (REG_NOTE_KIND (note) == REG_DEAD
		&& STACK_REG_P (XEXP (note, 0)))
	      SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

	  GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

	  break;

	done:
	  ;
	}
#endif

      if (GET_CODE (p1) != GET_CODE (p2))
	break;

      if (! rtx_renumbered_equal_p (p1, p2))
	{
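	  /* E.g. both insns may load the same constant through different
	     pseudo registers; if each carries a REG_EQUAL/REG_EQUIV note
	     for that constant, substituting the constant can make the
	     patterns match after all.  */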
	  /* The following code helps take care of G++ cleanups.  */
	  rtx equiv1 = find_reg_equal_equiv_note (i1);
	  rtx equiv2 = find_reg_equal_equiv_note (i2);

	  if (equiv1 && equiv2
	      /* If the equivalences are not to a constant, they may
		 reference pseudos that no longer exist, so we can't
		 use them.  */
	      && CONSTANT_P (XEXP (equiv1, 0))
	      && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
	    {
	      rtx s1 = single_set (i1);
	      rtx s2 = single_set (i2);
	      if (s1 != 0 && s2 != 0
		  && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
		{
		  validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
		  validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
		  if (! rtx_renumbered_equal_p (p1, p2))
		    cancel_changes (0);
		  else if (apply_change_group ())
		    goto win;
		}
	    }
	  break;
	}

    win:
      /* Don't begin a cross-jump with a USE or CLOBBER insn.  */
      if (GET_CODE (p1) != USE && GET_CODE (p1) != CLOBBER)
	{
	  /* If the merged insns have different REG_EQUAL notes, then
	     remove them.  */
	  rtx equiv1 = find_reg_equal_equiv_note (i1);
	  rtx equiv2 = find_reg_equal_equiv_note (i2);

	  if (equiv1 && !equiv2)
	    remove_note (i1, equiv1);
	  else if (!equiv1 && equiv2)
	    remove_note (i2, equiv2);
	  else if (equiv1 && equiv2
		   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
	    {
	      remove_note (i1, equiv1);
	      remove_note (i2, equiv2);
	    }

	  afterlast1 = last1, afterlast2 = last2;
	  last1 = i1, last2 = i2;
	  ninsns++;
	}
      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  if (ninsns)
    {
      /* Don't allow the insn after a compare to be shared by
	 cross-jumping unless the compare is also shared.  */
      if (reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
	last1 = afterlast1, last2 = afterlast2, ninsns--;
    }
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == NOTE)
	last1 = PREV_INSN (last1);
      if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
	last1 = PREV_INSN (last1);
      while (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == NOTE)
	last2 = PREV_INSN (last2);
      if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
	last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}

/* Return true iff outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */

static bool
outgoing_edges_match (bb1, bb2)
     basic_block bb1;
     basic_block bb2;
{
  /* If BB1 has only one successor, we must be looking at an unconditional
     jump.  Which, by the assumption above, means that we only need to check
     that BB2 has one successor.  */
  if (bb1->succ && !bb1->succ->succ_next)
    return (bb2->succ && !bb2->succ->succ_next);

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (bb1->succ
      && bb1->succ->succ_next
      && !bb1->succ->succ_next->succ_next
      && any_condjump_p (bb1->end))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (!bb2->succ
	  || !bb2->succ->succ_next
	  || bb2->succ->succ_next->succ_next
	  || !any_condjump_p (bb2->end))
	return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
	 should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
	f1 = f1->dest->succ;
      if (FORWARDER_BLOCK_P (f2->dest))
	f2 = f2->dest->succ;

      /* To simplify use of this function, return false if there are
	 unneeded forwarder blocks.  These will get eliminated later
	 during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
	  || FORWARDER_BLOCK_P (f2->dest)
	  || FORWARDER_BLOCK_P (b1->dest)
	  || FORWARDER_BLOCK_P (b2->dest))
	return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
	reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
	reverse = true;
      else
	return false;

      set1 = pc_set (bb1->end);
      set2 = pc_set (bb2->end);
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
	reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
	code2 = reversed_comparison_code (cond2, bb2->end);
      else
	code2 = GET_CODE (cond2);
      if (code2 == UNKNOWN)
	return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
	       || (code1 == swap_condition (code2)
		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
					      XEXP (cond2, 0))
		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
					      XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
	 we will only have one branch prediction bit to work with.  Thus
	 we require the existing branches to have probabilities that are
	 roughly similar.  */
      /* ??? We should use bb->frequency to allow merging in infrequently
	 executed blocks, but at the moment it is not available when
	 cleanup_cfg is run.  */
      if (match && !optimize_size)
	{
	  rtx note1, note2;
	  int prob1, prob2;

	  note1 = find_reg_note (bb1->end, REG_BR_PROB, 0);
	  note2 = find_reg_note (bb2->end, REG_BR_PROB, 0);

	  if (note1 && note2)
	    {
	      prob1 = INTVAL (XEXP (note1, 0));
	      prob2 = INTVAL (XEXP (note2, 0));
	      if (reverse)
		prob2 = REG_BR_PROB_BASE - prob2;

	      /* Fail if the difference in probabilities is
		 greater than 5%.  */
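	      /* (REG_BR_PROB_BASE / 20 is one twentieth of the
		 probability scale, i.e. 5%; e.g. branches taken 60% and
		 64% of the time still match, while 60% and 66% do
		 not.)  */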
	      if (abs (prob1 - prob2) > REG_BR_PROB_BASE / 20)
		return false;
	    }
	  else if (note1 || note2)
	    return false;
	}

      if (rtl_dump_file && match)
	fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
		 bb1->index, bb2->index);

      return match;
    }

  /* ??? We can handle computed jumps too.  This may be important for
     inlined functions containing switch statements.  Also jumps without
     fallthru edges can be handled by simply matching the whole insn.  */
  return false;
}

/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */

static bool
try_crossjump_to_edge (mode, e1, e2)
     int mode;
     edge e1, e2;
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to;
  rtx newpos1, newpos2;
  edge s;
  rtx last;
  rtx label;
  rtx note;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (src1->pred
      && !src1->pred->pred_next
      && FORWARDER_BLOCK_P (src1))
    {
      e1 = src1->pred;
      src1 = e1->src;
    }
  if (src2->pred
      && !src2->pred->pred_next
      && FORWARDER_BLOCK_P (src2))
    {
      e2 = src2->pred;
      src2 = e2->src;
    }

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (e1->dest->succ->dest))
    return false;
  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (e2->dest->succ->dest))
    return false;

  /* Likewise with dead code (possibly newly created by the other
     optimizations of cleanup_cfg).  */
  if (!src1->pred || !src2->pred)
    return false;

  /* Likewise with complex edges.
     ??? We should be able to handle most complex edges later with some
     care.  */
  if (e1->flags & EDGE_COMPLEX)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
  if (!nmatch)
    return false;

  /* Avoid splitting if possible.  */
  if (newpos2 == src2->head)
    redirect_to = src2;
  else
    {
      if (rtl_dump_file)
	fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
		 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (rtl_dump_file)
    fprintf (rtl_dump_file,
	     "Cross jumping from bb %i to bb %i; %i common insns\n",
	     src1->index, src2->index, nmatch);

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;

  /* Recompute the frequencies and counts of outgoing edges.  */
  for (s = redirect_to->succ; s; s = s->succ_next)
    {
      edge s2;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
	d = d->succ->dest;
      for (s2 = src1->succ; ; s2 = s2->succ_next)
	{
	  basic_block d2 = s2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = d2->succ->dest;
	  if (d == d2)
	    break;
	}
      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
	 that there is no more than one in the chain, so we can't run
	 into an infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
	{
	  s->dest->succ->count += s2->count;
	  s->dest->count += s2->count;
	  s->dest->frequency += EDGE_FREQUENCY (s);
	}
      if (FORWARDER_BLOCK_P (s2->dest))
	{
	  s2->dest->succ->count -= s2->count;
	  s2->dest->count -= s2->count;
	  s2->dest->frequency -= EDGE_FREQUENCY (s);
	}
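      /* Merge the branch probabilities, weighting each edge's
	 probability by its source block's frequency; if neither block
	 has frequency information, fall back to a simple average.  */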
      if (!redirect_to->frequency && !src1->frequency)
	s->probability = (s->probability + s2->probability) / 2;
      else
	s->probability
	  = ((s->probability * redirect_to->frequency
	      + s2->probability * src1->frequency)
	     / (redirect_to->frequency + src1->frequency));
    }

  note = find_reg_note (redirect_to->end, REG_BR_PROB, 0);
  if (note)
    XEXP (note, 0) = GEN_INT (BRANCH_EDGE (redirect_to)->probability);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (GET_CODE (newpos1) == CODE_LABEL)
    newpos1 = NEXT_INSN (newpos1);
  if (GET_CODE (newpos1) == NOTE)
    newpos1 = NEXT_INSN (newpos1);
  last = src1->end;

  /* Emit the jump insn.  */
  label = block_label (redirect_to);
  emit_jump_insn_after (gen_jump (label), src1->end);
  JUMP_LABEL (src1->end) = label;
  LABEL_NUSES (label)++;

  /* Delete the now unreachable instructions.  */
  delete_insn_chain (newpos1, last);

  /* Make sure there is a barrier after the new jump.  */
  last = next_nonnote_insn (src1->end);
  if (!last || GET_CODE (last) != BARRIER)
    emit_barrier_after (src1->end);

  /* Update CFG.  */
  while (src1->succ)
    remove_edge (src1->succ);
  make_single_succ_edge (src1, redirect_to, 0);

  BB_SET_FLAG (src1, BB_UPDATE_LIFE);
  update_forwarder_flag (src1);

  return true;
}

/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */

static bool
try_crossjump_bb (mode, bb)
     int mode;
     basic_block bb;
{
  edge e, e2, nexte2, nexte, fallthru;
  bool changed;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (!bb->pred || !bb->pred->pred_next)
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  for (fallthru = bb->pred; fallthru; fallthru = fallthru->pred_next)
    if (fallthru->flags & EDGE_FALLTHRU)
      break;

  changed = false;
  for (e = bb->pred; e; e = nexte)
    {
      nexte = e->pred_next;

      /* Elide complex edges now, as neither try_crossjump_to_edge
	 nor outgoing_edges_match can handle them.  */
      if (e->flags & EDGE_COMPLEX)
	continue;

      /* As noted above, first try with the fallthru predecessor.  */
      if (fallthru)
	{
	  /* Don't combine the fallthru edge into anything else.
	     If there is a match, we'll do it the other way around.  */
	  if (e == fallthru)
	    continue;

	  if (try_crossjump_to_edge (mode, e, fallthru))
	    {
	      changed = true;
	      nexte = bb->pred;
	      continue;
	    }
	}

      /* Non-obvious work limiting check: Recognize that we're going
	 to call try_crossjump_bb on every basic block.  So if we have
	 two blocks with lots of outgoing edges (a switch) and they
	 share lots of common destinations, then we would do the
	 cross-jump check once for each common destination.

	 Now, if the blocks actually are cross-jump candidates, then
	 all of their destinations will be shared.  Which means that
	 we only need to check them for cross-jump candidacy once.  We
	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
	 choosing to do the check only from the block for which the edge
	 in question is the first successor.  */
      if (e->src->succ != e)
	continue;

      for (e2 = bb->pred; e2; e2 = nexte2)
	{
	  nexte2 = e2->pred_next;

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* Again, neither try_crossjump_to_edge nor outgoing_edges_match
	     can handle complex edges.  */
	  if (e2->flags & EDGE_COMPLEX)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
	  if (e->src->index > e2->src->index)
	    continue;

	  if (try_crossjump_to_edge (mode, e, e2))
	    {
	      changed = true;
	      nexte = bb->pred;
	      break;
	    }
	}
    }

  return changed;
}

/* Do simple CFG optimizations - basic block merging, simplification of
   jump instructions, etc.  Return nonzero if changes were made.  */

static bool
try_optimize_cfg (mode)
     int mode;
{
  int i;
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  sbitmap blocks;

  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  for (i = 0; i < n_basic_blocks; i++)
    update_forwarder_flag (BASIC_BLOCK (i));

  /* Attempt to merge blocks as made possible by edge removal.  If a block
     has only one successor, and the successor has only one predecessor,
     they may be combined.  */

  do
    {
      changed = false;
      iterations++;

      if (rtl_dump_file)
	fprintf (rtl_dump_file, "\n\ntry_optimize_cfg iteration %i\n\n",
		 iterations);

      for (i = 0; i < n_basic_blocks;)
	{
	  basic_block c, b = BASIC_BLOCK (i);
	  edge s;
	  bool changed_here = false;

	  /* Delete trivially dead basic blocks.  */
	  while (b->pred == NULL)
	    {
	      c = BASIC_BLOCK (b->index - 1);
	      if (rtl_dump_file)
		fprintf (rtl_dump_file, "Deleting block %i.\n", b->index);
	      flow_delete_block (b);
	      changed = true;
	      b = c;
	    }

	  /* Remove code labels no longer used.  Don't do this before
	     CALL_PLACEHOLDER is removed, as some branches may be hidden
	     within.  */
	  if (b->pred->pred_next == NULL
	      && (b->pred->flags & EDGE_FALLTHRU)
	      && !(b->pred->flags & EDGE_COMPLEX)
	      && GET_CODE (b->head) == CODE_LABEL
	      && (!(mode & CLEANUP_PRE_SIBCALL)
		  || !tail_recursion_label_p (b->head))
	      /* If the previous block ends with a condjump jumping to the
		 next BB, we can't delete the label.  */
	      && (b->pred->src == ENTRY_BLOCK_PTR
		  || !reg_mentioned_p (b->head, b->pred->src->end)))
	    {
	      rtx label = b->head;
	      b->head = NEXT_INSN (b->head);
	      delete_insn_chain (label, label);
	      if (rtl_dump_file)
		fprintf (rtl_dump_file, "Deleted label in block %i.\n",
			 b->index);
	    }

	  /* If we fall through an empty block, we can remove it.  */
	  if (b->pred->pred_next == NULL
	      && (b->pred->flags & EDGE_FALLTHRU)
	      && GET_CODE (b->head) != CODE_LABEL
	      && FORWARDER_BLOCK_P (b)
	      /* Note that forwarder_block_p true ensures that there
		 is a successor for this block.  */
	      && (b->succ->flags & EDGE_FALLTHRU)
	      && n_basic_blocks > 1)
	    {
	      if (rtl_dump_file)
		fprintf (rtl_dump_file, "Deleting fallthru block %i.\n",
			 b->index);
	      c = BASIC_BLOCK (b->index ? b->index - 1 : 1);
	      redirect_edge_succ_nodup (b->pred, b->succ->dest);
	      flow_delete_block (b);
	      changed = true;
	      b = c;
	    }

	  /* Merge blocks.  Loop because chains of blocks might be
	     combinable.  */
	  while ((s = b->succ) != NULL
		 && s->succ_next == NULL
		 && !(s->flags & EDGE_COMPLEX)
		 && (c = s->dest) != EXIT_BLOCK_PTR
		 && c->pred->pred_next == NULL
		 /* If the jump insn has side effects,
		    we can't kill the edge.  */
		 && (GET_CODE (b->end) != JUMP_INSN
		     || onlyjump_p (b->end))
		 && merge_blocks (s, b, c, mode))
	    changed_here = true;

	  /* Simplify branch over branch.  */
	  if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b))
	    changed_here = true;

	  /* If B has a single outgoing edge, but uses a non-trivial jump
	     instruction without side-effects, we can either delete the
	     jump entirely, or replace it with a simple unconditional jump.
	     Use redirect_edge_and_branch to do the dirty work.  */
	  if (b->succ
	      && ! b->succ->succ_next
	      && b->succ->dest != EXIT_BLOCK_PTR
	      && onlyjump_p (b->end)
	      && redirect_edge_and_branch (b->succ, b->succ->dest))
	    {
	      BB_SET_FLAG (b, BB_UPDATE_LIFE);
	      update_forwarder_flag (b);
	      changed_here = true;
	    }

	  /* Simplify branch to branch.  */
	  if (try_forward_edges (mode, b))
	    changed_here = true;

	  /* Look for shared code between blocks.  */
	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, b))
	    changed_here = true;

	  /* Don't get confused by the index shift caused by deleting
	     blocks.  */
	  if (!changed_here)
	    i = b->index + 1;
	  else
	    changed = true;
	}

      if ((mode & CLEANUP_CROSSJUMP)
	  && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
	changed = true;

#ifdef ENABLE_CHECKING
      if (changed)
	verify_flow_info ();
#endif

      changed_overall |= changed;
    }
  while (changed);

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_edges ();

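  /* Life info was only marked dirty (BB_UPDATE_LIFE) as blocks changed;
     recompute it here in a single batch over exactly those blocks.  */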
  if ((mode & CLEANUP_UPDATE_LIFE) && changed_overall)
    {
      bool found = 0;

      blocks = sbitmap_alloc (n_basic_blocks);
      for (i = 0; i < n_basic_blocks; i++)
	if (BB_FLAGS (BASIC_BLOCK (i)) & BB_UPDATE_LIFE)
	  {
	    found = 1;
	    SET_BIT (blocks, i);
	  }
      if (found)
	update_life_info (blocks, UPDATE_LIFE_GLOBAL,
			  PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE
			  | PROP_KILL_DEAD_CODE);
      sbitmap_free (blocks);
    }
  for (i = 0; i < n_basic_blocks; i++)
    BASIC_BLOCK (i)->aux = NULL;

  return changed_overall;
}
\f
/* Delete all unreachable basic blocks.  */

static bool
delete_unreachable_blocks ()
{
  int i;
  bool changed = false;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  Count down so that we
     don't interfere with the block renumbering that happens in
     flow_delete_block.  */

  for (i = n_basic_blocks - 1; i >= 0; --i)
    {
      basic_block b = BASIC_BLOCK (i);

      if (!(b->flags & BB_REACHABLE))
	flow_delete_block (b), changed = true;
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
\f
/* Tidy the CFG by deleting unreachable code and whatnot.  */

bool
cleanup_cfg (mode)
     int mode;
{
  bool changed = false;

  timevar_push (TV_CLEANUP_CFG);
  changed = delete_unreachable_blocks ();
  if (try_optimize_cfg (mode))
    delete_unreachable_blocks (), changed = true;

  /* Kill the data we won't maintain.  */
  free_EXPR_LIST_list (&label_value_list);
  free_EXPR_LIST_list (&tail_recursion_label_list);
  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}