1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22 /* This file contains the control flow optimizer.  The main entry point is
23 cleanup_cfg.  The following optimizations are performed:
24
25 - Unreachable block removal
26 - Edge forwarding (an edge to a forwarder block is forwarded to its
27 successor.  Simplification of the branch instruction is performed by the
28 underlying infrastructure, so the branch can be converted to a simplejump
29 or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging.  */
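/* A rough usage sketch (not itself part of this file): callers invoke the
   entry point with a bitmask of CLEANUP_* mode flags, for example

       changed = cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);

   The CLEANUP_* flags tested throughout this file (CLEANUP_CROSSJUMP,
   CLEANUP_THREADING, CLEANUP_EXPENSIVE, ...) are declared elsewhere,
   in basic-block.h.  */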
33
34 #include "config.h"
35 #include "system.h"
36 #include "rtl.h"
37 #include "hard-reg-set.h"
38 #include "basic-block.h"
39 #include "timevar.h"
40 #include "output.h"
41 #include "insn-config.h"
42 #include "flags.h"
43 #include "recog.h"
44 #include "toplev.h"
45 #include "cselib.h"
46 #include "tm_p.h"
47 #include "target.h"
48
49 #include "obstack.h"
50
51 /* cleanup_cfg maintains the following flags for each basic block.  */
52
53 enum bb_flags
54 {
55 /* Set if BB is a forwarder block, to avoid repeated
56 forwarder_block_p calls.  */
57 BB_FORWARDER_BLOCK = 1
58 };
59
60 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
61 #define BB_SET_FLAG(BB, FLAG) \
62 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
63 #define BB_CLEAR_FLAG(BB, FLAG) \
64 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
65
66 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
67
68 static bool try_crossjump_to_edge PARAMS ((int, edge, edge));
69 static bool try_crossjump_bb PARAMS ((int, basic_block));
70 static bool outgoing_edges_match PARAMS ((int,
71 basic_block, basic_block));
72 static int flow_find_cross_jump PARAMS ((int, basic_block, basic_block,
73 rtx *, rtx *));
74 static bool insns_match_p PARAMS ((int, rtx, rtx));
75
76 static bool delete_unreachable_blocks PARAMS ((void));
77 static bool label_is_jump_target_p PARAMS ((rtx, rtx));
78 static bool tail_recursion_label_p PARAMS ((rtx));
79 static void merge_blocks_move_predecessor_nojumps PARAMS ((basic_block,
80 basic_block));
81 static void merge_blocks_move_successor_nojumps PARAMS ((basic_block,
82 basic_block));
83 static bool merge_blocks PARAMS ((edge,basic_block,basic_block,
84 int));
85 static bool try_optimize_cfg PARAMS ((int));
86 static bool try_simplify_condjump PARAMS ((basic_block));
87 static bool try_forward_edges PARAMS ((int, basic_block));
88 static edge thread_jump PARAMS ((int, edge, basic_block));
89 static bool mark_effect PARAMS ((rtx, bitmap));
90 static void notice_new_block PARAMS ((basic_block));
91 static void update_forwarder_flag PARAMS ((basic_block));
92 \f
93 /* Set flags for a newly created block.  */
94
95 static void
96 notice_new_block (bb)
97 basic_block bb;
98 {
99 if (!bb)
100 return;
101
102 if (forwarder_block_p (bb))
103 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
104 }
105
106 /* Recompute the forwarder flag after the block has been modified.  */
107
108 static void
109 update_forwarder_flag (bb)
110 basic_block bb;
111 {
112 if (forwarder_block_p (bb))
113 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
114 else
115 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
116 }
117 \f
118 /* Simplify a conditional jump around an unconditional jump.
119 Return true if something changed. */
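/* A sketch of the transformation, with L1/L2 as purely illustrative labels:

       if (cond) goto L1;               if (! cond) goto L2;
       goto L2;                  =>   L1:
     L1:                                 ...

   The block holding the unconditional jump (a forwarder block) is deleted
   and the former branch target L1 becomes the fallthru successor.  */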
120
121 static bool
122 try_simplify_condjump (cbranch_block)
123 basic_block cbranch_block;
124 {
125 basic_block jump_block, jump_dest_block, cbranch_dest_block;
126 edge cbranch_jump_edge, cbranch_fallthru_edge;
127 rtx cbranch_insn;
128
129 /* Verify that there are exactly two successors. */
130 if (!cbranch_block->succ
131 || !cbranch_block->succ->succ_next
132 || cbranch_block->succ->succ_next->succ_next)
133 return false;
134
135 /* Verify that we've got a normal conditional branch at the end
136 of the block. */
137 cbranch_insn = cbranch_block->end;
138 if (!any_condjump_p (cbranch_insn))
139 return false;
140
141 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
142 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
143
144 /* The next block must not have multiple predecessors, must not
145 be the last block in the function, and must contain just the
146 unconditional jump. */
147 jump_block = cbranch_fallthru_edge->dest;
148 if (jump_block->pred->pred_next
149 || jump_block->index == n_basic_blocks - 1
150 || !FORWARDER_BLOCK_P (jump_block))
151 return false;
152 jump_dest_block = jump_block->succ->dest;
153
154 /* The conditional branch must target the block after the
155 unconditional branch. */
156 cbranch_dest_block = cbranch_jump_edge->dest;
157
158 if (!can_fallthru (jump_block, cbranch_dest_block))
159 return false;
160
161 /* Invert the conditional branch. */
162 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
163 return false;
164
165 if (rtl_dump_file)
166 fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
167 INSN_UID (cbranch_insn), INSN_UID (jump_block->end));
168
169 /* Success. Update the CFG to match. Note that after this point
170 the edge variable names appear backwards; the redirection is done
171 this way to preserve edge profile data. */
172 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
173 cbranch_dest_block);
174 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
175 jump_dest_block);
176 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
177 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
178 update_br_prob_note (cbranch_block);
179
180 /* Delete the block with the unconditional jump, and clean up the mess. */
181 flow_delete_block (jump_block);
182 tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);
183
184 return true;
185 }
186 \f
187 /* Attempt to prove that the operation is a NOOP using CSElib, or mark its
188 effect on a register.  Used by jump threading.  */
189
190 static bool
191 mark_effect (exp, nonequal)
192 rtx exp;
193 regset nonequal;
194 {
195 int regno;
196 rtx dest;
197 switch (GET_CODE (exp))
198 {
199 /* In case we clobber the register, mark it as equal, as we know the
200 value is dead so it doesn't have to match.  */
201 case CLOBBER:
202 if (REG_P (XEXP (exp, 0)))
203 {
204 dest = XEXP (exp, 0);
205 regno = REGNO (dest);
206 CLEAR_REGNO_REG_SET (nonequal, regno);
207 if (regno < FIRST_PSEUDO_REGISTER)
208 {
209 int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
210 while (--n > 0)
211 CLEAR_REGNO_REG_SET (nonequal, regno + n);
212 }
213 }
214 return false;
215
216 case SET:
217 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
218 return false;
219 dest = SET_DEST (exp);
220 if (dest == pc_rtx)
221 return false;
222 if (!REG_P (dest))
223 return true;
224 regno = REGNO (dest);
225 SET_REGNO_REG_SET (nonequal, regno);
226 if (regno < FIRST_PSEUDO_REGISTER)
227 {
228 int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
229 while (--n > 0)
230 SET_REGNO_REG_SET (nonequal, regno + n);
231 }
232 return false;
233
234 default:
235 return false;
236 }
237 }
238 /* Attempt to prove that basic block B will have no side effects and
239 always continues along the same edge if reached via E.  Return that edge
240 if it exists, NULL otherwise.  */
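/* An illustrative example (the names are placeholders, not taken from the
   code below): suppose E->SRC ends with "if (x == 0) goto B" and we reach B
   along that taken edge, so x == 0 is known to hold.  If B contains no side
   effects and ends with "if (x == 0) goto L", the condition in B is already
   decided, so the caller may redirect E straight to L and bypass B
   entirely.  The function returns the edge of B that is known to be taken,
   or NULL if the outcome cannot be proven.  */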
241
242 static edge
243 thread_jump (mode, e, b)
244 int mode;
245 edge e;
246 basic_block b;
247 {
248 rtx set1, set2, cond1, cond2, insn;
249 enum rtx_code code1, code2, reversed_code2;
250 bool reverse1 = false;
251 int i;
252 regset nonequal;
253 bool failed = false;
254
255 /* At the moment, we handle only conditional jumps; later we may
256 want to extend this code to tablejumps and others.  */
257 if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
258 return NULL;
259 if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
260 return NULL;
261
262 /* The second branch must end with an onlyjump, as we will eliminate the jump.  */
263 if (!any_condjump_p (e->src->end) || !any_condjump_p (b->end)
264 || !onlyjump_p (b->end))
265 return NULL;
266
267 set1 = pc_set (e->src->end);
268 set2 = pc_set (b->end);
269 if (((e->flags & EDGE_FALLTHRU) != 0)
270 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
271 reverse1 = true;
272
273 cond1 = XEXP (SET_SRC (set1), 0);
274 cond2 = XEXP (SET_SRC (set2), 0);
275 if (reverse1)
276 code1 = reversed_comparison_code (cond1, e->src->end);
277 else
278 code1 = GET_CODE (cond1);
279
280 code2 = GET_CODE (cond2);
281 reversed_code2 = reversed_comparison_code (cond2, b->end);
282
283 if (!comparison_dominates_p (code1, code2)
284 && !comparison_dominates_p (code1, reversed_code2))
285 return NULL;
286
287 /* Ensure that the comparison operators are equivalent.
288 ??? This is far too pessimistic.  We should allow swapped operands,
289 different CCmodes, or for example interval comparisons that
290 dominate even when the operands are not equivalent.  */
291 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
292 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
293 return NULL;
294
295 /* Short circuit cases where block B contains some side effects, as we can't
296 safely bypass it. */
297 for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end);
298 insn = NEXT_INSN (insn))
299 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
300 return NULL;
301
302 cselib_init ();
303
304 /* First process all values computed in the source basic block. */
305 for (insn = NEXT_INSN (e->src->head); insn != NEXT_INSN (e->src->end);
306 insn = NEXT_INSN (insn))
307 if (INSN_P (insn))
308 cselib_process_insn (insn);
309
310 nonequal = BITMAP_XMALLOC();
311 CLEAR_REG_SET (nonequal);
312
313 /* Now assume that we've continued along edge E to B and continue
314 processing as if it were the same basic block.
315 Our goal is to prove that the whole block is a NOOP.  */
316
317 for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end) && !failed;
318 insn = NEXT_INSN (insn))
319 {
320 if (INSN_P (insn))
321 {
322 rtx pat = PATTERN (insn);
323
324 if (GET_CODE (pat) == PARALLEL)
325 {
326 for (i = 0; i < XVECLEN (pat, 0); i++)
327 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
328 }
329 else
330 failed |= mark_effect (pat, nonequal);
331 }
332
333 cselib_process_insn (insn);
334 }
335
336 /* Later we should clear dead registers from NONEQUAL.  So far we don't
337 have liveness information in cfg_cleanup.  */
338 if (failed)
339 goto failed_exit;
340
341 /* In case liveness information is available, we need to prove equivalence
342 only of the live values. */
343 if (mode & CLEANUP_UPDATE_LIFE)
344 AND_REG_SET (nonequal, b->global_live_at_end);
345
346 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
347
348 BITMAP_XFREE (nonequal);
349 cselib_finish ();
350 if ((comparison_dominates_p (code1, code2) != 0)
351 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
352 return BRANCH_EDGE (b);
353 else
354 return FALLTHRU_EDGE (b);
355
356 failed_exit:
357 BITMAP_XFREE (nonequal);
358 cselib_finish ();
359 return NULL;
360 }
361 \f
362 /* Attempt to forward edges leaving basic block B.
363 Return true if successful. */
364
365 static bool
366 try_forward_edges (mode, b)
367 basic_block b;
368 int mode;
369 {
370 bool changed = false;
371 edge e, next, *threaded_edges = NULL;
372
373 for (e = b->succ; e; e = next)
374 {
375 basic_block target, first;
376 int counter;
377 bool threaded = false;
378 int nthreaded_edges = 0;
379
380 next = e->succ_next;
381
382 /* Skip complex edges because we don't know how to update them.
383
384 Still handle fallthru edges, as we may succeed in forwarding the
385 fallthru edge to the same place as the branch edge of a conditional
386 branch and turn the conditional branch into an unconditional branch.  */
387 if (e->flags & EDGE_COMPLEX)
388 continue;
389
390 target = first = e->dest;
391 counter = 0;
392
393 while (counter < n_basic_blocks)
394 {
395 basic_block new_target = NULL;
396 bool new_target_threaded = false;
397
398 if (FORWARDER_BLOCK_P (target)
399 && target->succ->dest != EXIT_BLOCK_PTR)
400 {
401 /* Bypass trivial infinite loops. */
402 if (target == target->succ->dest)
403 counter = n_basic_blocks;
404 new_target = target->succ->dest;
405 }
406
407 /* Thread over only one edge at a time to simplify updating
408 of probabilities.  */
409 else if (mode & CLEANUP_THREADING)
410 {
411 edge t = thread_jump (mode, e, target);
412 if (t)
413 {
414 if (!threaded_edges)
415 threaded_edges = xmalloc (sizeof (*threaded_edges)
416 * n_basic_blocks);
417 else
418 {
419 int i;
420
421 /* Detect an infinite loop across blocks not
422 including the start block. */
423 for (i = 0; i < nthreaded_edges; ++i)
424 if (threaded_edges[i] == t)
425 break;
426 if (i < nthreaded_edges)
427 {
428 counter = n_basic_blocks;
429 break;
430 }
431 }
432
433 /* Detect an infinite loop across the start block. */
434 if (t->dest == b)
435 break;
436
437 if (nthreaded_edges >= n_basic_blocks)
438 abort ();
439 threaded_edges[nthreaded_edges++] = t;
440
441 new_target = t->dest;
442 new_target_threaded = true;
443 }
444 }
445
446 if (!new_target)
447 break;
448
449 /* Avoid killing loop pre-headers, as they are where the loop
450 optimizer wants to hoist code to.
451
452 For fallthru forwarders, the LOOP_BEG note must appear between
453 the header of the block and the CODE_LABEL of the loop; for non-forwarders
454 it must appear before the JUMP_INSN.  */
455 if (mode & CLEANUP_PRE_LOOP)
456 {
457 rtx insn = (target->succ->flags & EDGE_FALLTHRU
458 ? target->head : prev_nonnote_insn (target->end));
459
460 if (GET_CODE (insn) != NOTE)
461 insn = NEXT_INSN (insn);
462
463 for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
464 insn = NEXT_INSN (insn))
465 if (GET_CODE (insn) == NOTE
466 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
467 break;
468
469 if (GET_CODE (insn) == NOTE)
470 break;
471 }
472
473 counter++;
474 target = new_target;
475 threaded |= new_target_threaded;
476 }
477
478 if (counter >= n_basic_blocks)
479 {
480 if (rtl_dump_file)
481 fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
482 target->index);
483 }
484 else if (target == first)
485 ; /* We didn't do anything. */
486 else
487 {
488 /* Save the values now, as the edge may get removed. */
489 gcov_type edge_count = e->count;
490 int edge_probability = e->probability;
491 int edge_frequency;
492 int n = 0;
493
494 /* Don't force if target is exit block. */
495 if (threaded && target != EXIT_BLOCK_PTR)
496 {
497 notice_new_block (redirect_edge_and_branch_force (e, target));
498 if (rtl_dump_file)
499 fprintf (rtl_dump_file, "Conditionals threaded.\n");
500 }
501 else if (!redirect_edge_and_branch (e, target))
502 {
503 if (rtl_dump_file)
504 fprintf (rtl_dump_file,
505 "Forwarding edge %i->%i to %i failed.\n",
506 b->index, e->dest->index, target->index);
507 continue;
508 }
509
510 /* We successfully forwarded the edge. Now update profile
511 data: for each edge we traversed in the chain, remove
512 the original edge's execution count. */
513 edge_frequency = ((edge_probability * b->frequency
514 + REG_BR_PROB_BASE / 2)
515 / REG_BR_PROB_BASE);
516
517 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
518 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
519
520 do
521 {
522 edge t;
523
524 first->count -= edge_count;
525 if (first->count < 0)
526 first->count = 0;
527 first->frequency -= edge_frequency;
528 if (first->frequency < 0)
529 first->frequency = 0;
530 if (first->succ->succ_next)
531 {
532 edge e;
533 int prob;
534 if (n >= nthreaded_edges)
535 abort ();
536 t = threaded_edges [n++];
537 if (t->src != first)
538 abort ();
539 if (first->frequency)
540 prob = edge_frequency * REG_BR_PROB_BASE / first->frequency;
541 else
542 prob = 0;
543 if (prob > t->probability)
544 prob = t->probability;
545 t->probability -= prob;
546 prob = REG_BR_PROB_BASE - prob;
547 if (prob <= 0)
548 {
549 first->succ->probability = REG_BR_PROB_BASE;
550 first->succ->succ_next->probability = 0;
551 }
552 else
553 for (e = first->succ; e; e = e->succ_next)
554 e->probability = ((e->probability * REG_BR_PROB_BASE)
555 / (double) prob);
556 update_br_prob_note (first);
557 }
558 else
559 {
560 /* It is possible that as a result of
561 threading we've removed the edge, as it is
562 threaded to the fallthru edge.  Avoid
563 getting out of sync.  */
564 if (n < nthreaded_edges
565 && first == threaded_edges [n]->src)
566 n++;
567 t = first->succ;
568 }
569
570 t->count -= edge_count;
571 if (t->count < 0)
572 t->count = 0;
573 first = t->dest;
574 }
575 while (first != target);
576
577 changed = true;
578 }
579 }
580
581 if (threaded_edges)
582 free (threaded_edges);
583 return changed;
584 }
585 \f
586 /* Return true if LABEL is a target of JUMP_INSN. This applies only
587 to non-complex jumps. That is, direct unconditional, conditional,
588 and tablejumps, but not computed jumps or returns. It also does
589 not apply to the fallthru case of a conditional jump. */
590
591 static bool
592 label_is_jump_target_p (label, jump_insn)
593 rtx label, jump_insn;
594 {
595 rtx tmp = JUMP_LABEL (jump_insn);
596
597 if (label == tmp)
598 return true;
599
600 if (tmp != NULL_RTX
601 && (tmp = NEXT_INSN (tmp)) != NULL_RTX
602 && GET_CODE (tmp) == JUMP_INSN
603 && (tmp = PATTERN (tmp),
604 GET_CODE (tmp) == ADDR_VEC
605 || GET_CODE (tmp) == ADDR_DIFF_VEC))
606 {
607 rtvec vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);
608 int i, veclen = GET_NUM_ELEM (vec);
609
610 for (i = 0; i < veclen; ++i)
611 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
612 return true;
613 }
614
615 return false;
616 }
617
618 /* Return true if LABEL is used for tail recursion. */
619
620 static bool
621 tail_recursion_label_p (label)
622 rtx label;
623 {
624 rtx x;
625
626 for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
627 if (label == XEXP (x, 0))
628 return true;
629
630 return false;
631 }
632
633 /* Blocks A and B are to be merged into a single block. A has no incoming
634 fallthru edge, so it can be moved before B without adding or modifying
635 any jumps (aside from the jump from A to B). */
636
637 static void
638 merge_blocks_move_predecessor_nojumps (a, b)
639 basic_block a, b;
640 {
641 rtx barrier;
642 int index;
643
644 barrier = next_nonnote_insn (a->end);
645 if (GET_CODE (barrier) != BARRIER)
646 abort ();
647 delete_insn (barrier);
648
649 /* Move block and loop notes out of the chain so that we do not
650 disturb their order.
651
652 ??? A better solution would be to squeeze out all the non-nested notes
653 and adjust the block trees appropriately. Even better would be to have
654 a tighter connection between block trees and rtl so that this is not
655 necessary. */
656 if (squeeze_notes (&a->head, &a->end))
657 abort ();
658
659 /* Scramble the insn chain. */
660 if (a->end != PREV_INSN (b->head))
661 reorder_insns_nobb (a->head, a->end, PREV_INSN (b->head));
662 a->flags |= BB_DIRTY;
663
664 if (rtl_dump_file)
665 fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
666 a->index, b->index);
667
668 /* Swap the records for the two blocks around. Although we are deleting B,
669 A is now where B was and we want to compact the BB array from where
670 A used to be. */
671 BASIC_BLOCK (a->index) = b;
672 BASIC_BLOCK (b->index) = a;
673 index = a->index;
674 a->index = b->index;
675 b->index = index;
676
677 /* Now blocks A and B are contiguous. Merge them. */
678 merge_blocks_nomove (a, b);
679 }
680
681 /* Blocks A and B are to be merged into a single block. B has no outgoing
682 fallthru edge, so it can be moved after A without adding or modifying
683 any jumps (aside from the jump from A to B). */
684
685 static void
686 merge_blocks_move_successor_nojumps (a, b)
687 basic_block a, b;
688 {
689 rtx barrier, real_b_end;
690
691 real_b_end = b->end;
692 barrier = NEXT_INSN (b->end);
693
694 /* Recognize a jump table following block B. */
695 if (barrier
696 && GET_CODE (barrier) == CODE_LABEL
697 && NEXT_INSN (barrier)
698 && GET_CODE (NEXT_INSN (barrier)) == JUMP_INSN
699 && (GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_VEC
700 || GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_DIFF_VEC))
701 {
702 /* Temporarily add the table jump insn to b, so that it will also
703 be moved to the correct location. */
704 b->end = NEXT_INSN (barrier);
705 barrier = NEXT_INSN (b->end);
706 }
707
708 /* There had better have been a barrier there. Delete it. */
709 if (barrier && GET_CODE (barrier) == BARRIER)
710 delete_insn (barrier);
711
712 /* Move block and loop notes out of the chain so that we do not
713 disturb their order.
714
715 ??? A better solution would be to squeeze out all the non-nested notes
716 and adjust the block trees appropriately. Even better would be to have
717 a tighter connection between block trees and rtl so that this is not
718 necessary. */
719 if (squeeze_notes (&b->head, &b->end))
720 abort ();
721
722 /* Scramble the insn chain. */
723 reorder_insns_nobb (b->head, b->end, a->end);
724
725 /* Restore the real end of b. */
726 b->end = real_b_end;
727
728 /* Now blocks A and B are contiguous. Merge them. */
729 merge_blocks_nomove (a, b);
730
731 if (rtl_dump_file)
732 fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
733 b->index, a->index);
734 }
735
736 /* Attempt to merge basic blocks that are potentially non-adjacent.
737 Return true iff the attempt succeeded. */
738
739 static bool
740 merge_blocks (e, b, c, mode)
741 edge e;
742 basic_block b, c;
743 int mode;
744 {
745 /* If C has a tail recursion label, do not merge. There is no
746 edge recorded from the call_placeholder back to this label, as
747 that would make optimize_sibling_and_tail_recursive_calls more
748 complex for no gain. */
749 if ((mode & CLEANUP_PRE_SIBCALL)
750 && GET_CODE (c->head) == CODE_LABEL
751 && tail_recursion_label_p (c->head))
752 return false;
753
754 /* If B has a fallthru edge to C, no need to move anything. */
755 if (e->flags & EDGE_FALLTHRU)
756 {
757 int b_index = b->index, c_index = c->index;
758 merge_blocks_nomove (b, c);
759 update_forwarder_flag (b);
760
761 if (rtl_dump_file)
762 fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
763 b_index, c_index);
764
765 return true;
766 }
767
768 /* Otherwise we will need to move code around. Do that only if expensive
769 transformations are allowed. */
770 else if (mode & CLEANUP_EXPENSIVE)
771 {
772 edge tmp_edge, b_fallthru_edge;
773 bool c_has_outgoing_fallthru;
774 bool b_has_incoming_fallthru;
775
776 /* Avoid overactive code motion, as the forwarder blocks should be
777 eliminated by edge redirection instead. One exception might have
778 been if B is a forwarder block and C has no fallthru edge, but
779 that should be cleaned up by bb-reorder instead. */
780 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
781 return false;
782
783 /* We must make sure to not munge nesting of lexical blocks,
784 and loop notes. This is done by squeezing out all the notes
785 and leaving them there to lie. Not ideal, but functional. */
786
787 for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
788 if (tmp_edge->flags & EDGE_FALLTHRU)
789 break;
790
791 c_has_outgoing_fallthru = (tmp_edge != NULL);
792
793 for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
794 if (tmp_edge->flags & EDGE_FALLTHRU)
795 break;
796
797 b_has_incoming_fallthru = (tmp_edge != NULL);
798 b_fallthru_edge = tmp_edge;
799
800 /* Otherwise, we're going to try to move C after B. If C does
801 not have an outgoing fallthru, then it can be moved
802 immediately after B without introducing or modifying jumps. */
803 if (! c_has_outgoing_fallthru)
804 {
805 merge_blocks_move_successor_nojumps (b, c);
806 return true;
807 }
808
809 /* If B does not have an incoming fallthru, then it can be moved
810 immediately before C without introducing or modifying jumps.
811 C cannot be the first block, so we do not have to worry about
812 accessing a non-existent block. */
813
814 if (b_has_incoming_fallthru)
815 {
816 basic_block bb;
817
818 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
819 return false;
820 bb = force_nonfallthru (b_fallthru_edge);
821 if (bb)
822 notice_new_block (bb);
823 }
824
825 merge_blocks_move_predecessor_nojumps (b, c);
826 return true;
827 }
828
829 return false;
830 }
831 \f
832
833 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
834
835 static bool
836 insns_match_p (mode, i1, i2)
837 int mode ATTRIBUTE_UNUSED;
838 rtx i1, i2;
839 {
840 rtx p1, p2;
841
842 /* Verify that I1 and I2 are equivalent. */
843 if (GET_CODE (i1) != GET_CODE (i2))
844 return false;
845
846 p1 = PATTERN (i1);
847 p2 = PATTERN (i2);
848
849 if (GET_CODE (p1) != GET_CODE (p2))
850 return false;
851
852 /* If this is a CALL_INSN, compare register usage information.
853 If we don't check this on stack register machines, the two
854 CALL_INSNs might be merged leaving reg-stack.c with mismatching
855 numbers of stack registers in the same basic block.
856 If we don't check this on machines with delay slots, a delay slot may
857 be filled that clobbers a parameter expected by the subroutine.
858
859 ??? We take the simple route for now and assume that if they're
860 equal, they were constructed identically. */
861
862 if (GET_CODE (i1) == CALL_INSN
863 && !rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
864 CALL_INSN_FUNCTION_USAGE (i2)))
865 return false;
866
867 #ifdef STACK_REGS
868 /* If cross_jump_death_matters is not 0, the insn's mode
869 indicates whether or not the insn contains any stack-like
870 regs. */
871
872 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
873 {
874 /* If register stack conversion has already been done, then
875 death notes must also be compared before it is certain that
876 the two instruction streams match. */
877
878 rtx note;
879 HARD_REG_SET i1_regset, i2_regset;
880
881 CLEAR_HARD_REG_SET (i1_regset);
882 CLEAR_HARD_REG_SET (i2_regset);
883
884 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
885 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
886 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
887
888 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
889 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
890 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
891
892 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
893
894 return false;
895
896 done:
897 ;
898 }
899 #endif
900
901 if (reload_completed
902 ? ! rtx_renumbered_equal_p (p1, p2) : ! rtx_equal_p (p1, p2))
903 {
904 /* The following code helps take care of G++ cleanups. */
905 rtx equiv1 = find_reg_equal_equiv_note (i1);
906 rtx equiv2 = find_reg_equal_equiv_note (i2);
907
908 if (equiv1 && equiv2
909 /* If the equivalences are not to a constant, they may
910 reference pseudos that no longer exist, so we can't
911 use them. */
912 && (! reload_completed
913 || (CONSTANT_P (XEXP (equiv1, 0))
914 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
915 {
916 rtx s1 = single_set (i1);
917 rtx s2 = single_set (i2);
918 if (s1 != 0 && s2 != 0
919 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
920 {
921 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
922 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
923 if (! rtx_renumbered_equal_p (p1, p2))
924 cancel_changes (0);
925 else if (apply_change_group ())
926 return true;
927 }
928 }
929
930 return false;
931 }
932
933 return true;
934 }
935 \f
936 /* Look through the insns at the end of BB1 and BB2 and find the longest
937 sequence of equivalent insns.  Store the first insns of that sequence
938 in *F1 and *F2 and return the sequence length.
939
940 To simplify callers of this function, if the blocks match exactly,
941 store the head of the blocks in *F1 and *F2. */
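/* For example (an informal sketch): if the last three active insns of BB1
   and BB2 are pairwise equivalent, *F1 and *F2 are set to the first insn of
   each matched run and 3 is returned.  try_crossjump_to_edge below then
   splits one block before that point and redirects the other block's tail
   to the shared copy.  */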
942
943 static int
944 flow_find_cross_jump (mode, bb1, bb2, f1, f2)
945 int mode ATTRIBUTE_UNUSED;
946 basic_block bb1, bb2;
947 rtx *f1, *f2;
948 {
949 rtx i1, i2, last1, last2, afterlast1, afterlast2;
950 int ninsns = 0;
951
952 /* Skip simple jumps at the end of the blocks. Complex jumps still
953 need to be compared for equivalence, which we'll do below. */
954
955 i1 = bb1->end;
956 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
957 if (onlyjump_p (i1)
958 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
959 {
960 last1 = i1;
961 i1 = PREV_INSN (i1);
962 }
963
964 i2 = bb2->end;
965 if (onlyjump_p (i2)
966 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
967 {
968 last2 = i2;
969 /* Count everything except an unconditional jump as an insn.  */
970 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
971 ninsns++;
972 i2 = PREV_INSN (i2);
973 }
974
975 while (true)
976 {
977 /* Ignore notes. */
978 while (!active_insn_p (i1) && i1 != bb1->head)
979 i1 = PREV_INSN (i1);
980
981 while (!active_insn_p (i2) && i2 != bb2->head)
982 i2 = PREV_INSN (i2);
983
984 if (i1 == bb1->head || i2 == bb2->head)
985 break;
986
987 if (!insns_match_p (mode, i1, i2))
988 break;
989
990 /* Don't begin a cross-jump with a USE or CLOBBER insn. */
991 if (active_insn_p (i1))
992 {
993 /* If the merged insns have different REG_EQUAL notes, then
994 remove them. */
995 rtx equiv1 = find_reg_equal_equiv_note (i1);
996 rtx equiv2 = find_reg_equal_equiv_note (i2);
997
998 if (equiv1 && !equiv2)
999 remove_note (i1, equiv1);
1000 else if (!equiv1 && equiv2)
1001 remove_note (i2, equiv2);
1002 else if (equiv1 && equiv2
1003 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1004 {
1005 remove_note (i1, equiv1);
1006 remove_note (i2, equiv2);
1007 }
1008
1009 afterlast1 = last1, afterlast2 = last2;
1010 last1 = i1, last2 = i2;
1011 ninsns++;
1012 }
1013
1014 i1 = PREV_INSN (i1);
1015 i2 = PREV_INSN (i2);
1016 }
1017
1018 #ifdef HAVE_cc0
1019 /* Don't allow the insn after a compare to be shared by
1020 cross-jumping unless the compare is also shared. */
1021 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1022 last1 = afterlast1, last2 = afterlast2, ninsns--;
1023 #endif
1024
1025 /* Include preceding notes and labels in the cross-jump. One,
1026 this may bring us to the head of the blocks as requested above.
1027 Two, it keeps line number notes as matched as may be. */
1028 if (ninsns)
1029 {
1030 while (last1 != bb1->head && !active_insn_p (PREV_INSN (last1)))
1031 last1 = PREV_INSN (last1);
1032
1033 if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
1034 last1 = PREV_INSN (last1);
1035
1036 while (last2 != bb2->head && !active_insn_p (PREV_INSN (last2)))
1037 last2 = PREV_INSN (last2);
1038
1039 if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
1040 last2 = PREV_INSN (last2);
1041
1042 *f1 = last1;
1043 *f2 = last2;
1044 }
1045
1046 return ninsns;
1047 }
1048
1049 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1050 the branch instruction.  This means that if we commonize the control
1051 flow before the end of the basic block, the semantics remain unchanged.
1052
1053 We may assume that there exists one edge with a common destination.  */
1054
1055 static bool
1056 outgoing_edges_match (mode, bb1, bb2)
1057 int mode;
1058 basic_block bb1;
1059 basic_block bb2;
1060 {
1061 int nehedges1 = 0, nehedges2 = 0;
1062 edge fallthru1 = 0, fallthru2 = 0;
1063 edge e1, e2;
1064
1065 /* If BB1 has only one successor, we may be looking at either an
1066 unconditional jump, or a fake edge to exit. */
1067 if (bb1->succ && !bb1->succ->succ_next
1068 && !(bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)))
1069 return (bb2->succ && !bb2->succ->succ_next
1070 && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0);
1071
1072 /* Match conditional jumps - this may get tricky when fallthru and branch
1073 edges are crossed. */
1074 if (bb1->succ
1075 && bb1->succ->succ_next
1076 && !bb1->succ->succ_next->succ_next
1077 && any_condjump_p (bb1->end)
1078 && onlyjump_p (bb1->end))
1079 {
1080 edge b1, f1, b2, f2;
1081 bool reverse, match;
1082 rtx set1, set2, cond1, cond2;
1083 enum rtx_code code1, code2;
1084
1085 if (!bb2->succ
1086 || !bb2->succ->succ_next
1087 || bb2->succ->succ_next->succ_next
1088 || !any_condjump_p (bb2->end)
1089 || !onlyjump_p (bb2->end))
1090 return false;
1091
1092 b1 = BRANCH_EDGE (bb1);
1093 b2 = BRANCH_EDGE (bb2);
1094 f1 = FALLTHRU_EDGE (bb1);
1095 f2 = FALLTHRU_EDGE (bb2);
1096
1097 /* Get around possible forwarders on fallthru edges. Other cases
1098 should be optimized out already. */
1099 if (FORWARDER_BLOCK_P (f1->dest))
1100 f1 = f1->dest->succ;
1101
1102 if (FORWARDER_BLOCK_P (f2->dest))
1103 f2 = f2->dest->succ;
1104
1105 /* To simplify use of this function, return false if there are
1106 unneeded forwarder blocks. These will get eliminated later
1107 during cleanup_cfg. */
1108 if (FORWARDER_BLOCK_P (f1->dest)
1109 || FORWARDER_BLOCK_P (f2->dest)
1110 || FORWARDER_BLOCK_P (b1->dest)
1111 || FORWARDER_BLOCK_P (b2->dest))
1112 return false;
1113
1114 if (f1->dest == f2->dest && b1->dest == b2->dest)
1115 reverse = false;
1116 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1117 reverse = true;
1118 else
1119 return false;
1120
1121 set1 = pc_set (bb1->end);
1122 set2 = pc_set (bb2->end);
1123 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1124 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1125 reverse = !reverse;
1126
1127 cond1 = XEXP (SET_SRC (set1), 0);
1128 cond2 = XEXP (SET_SRC (set2), 0);
1129 code1 = GET_CODE (cond1);
1130 if (reverse)
1131 code2 = reversed_comparison_code (cond2, bb2->end);
1132 else
1133 code2 = GET_CODE (cond2);
1134
1135 if (code2 == UNKNOWN)
1136 return false;
1137
1138 /* Verify codes and operands match. */
1139 match = ((code1 == code2
1140 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1141 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1142 || (code1 == swap_condition (code2)
1143 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1144 XEXP (cond2, 0))
1145 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1146 XEXP (cond2, 1))));
1147
1148 /* If we return true, we will join the blocks. Which means that
1149 we will only have one branch prediction bit to work with. Thus
1150 we require the existing branches to have probabilities that are
1151 roughly similar. */
1152 if (match
1153 && !optimize_size
1154 && bb1->frequency > BB_FREQ_MAX / 1000
1155 && bb2->frequency > BB_FREQ_MAX / 1000)
1156 {
1157 int prob2;
1158
1159 if (b1->dest == b2->dest)
1160 prob2 = b2->probability;
1161 else
1162 /* Do not use f2 probability as f2 may be forwarded. */
1163 prob2 = REG_BR_PROB_BASE - b2->probability;
1164
1165 /* Fail if the difference in probabilities is
1166 greater than 5%. */
1167 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 20)
1168 {
1169 if (rtl_dump_file)
1170 fprintf (rtl_dump_file,
1171 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1172 bb1->index, bb2->index, b1->probability, prob2);
1173
1174 return false;
1175 }
1176 }
1177
1178 if (rtl_dump_file && match)
1179 fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
1180 bb1->index, bb2->index);
1181
1182 return match;
1183 }
1184
1185 /* Generic case - we are seeing a computed jump, table jump or trapping
1186 instruction.  */
1187
1188 /* First ensure that the instructions match.  There may be many outgoing
1189 edges, so this test is generally cheaper.
1190 ??? Currently tablejumps will never match, as they have
1191 different tables.  */
1192 if (!insns_match_p (mode, bb1->end, bb2->end))
1193 return false;
1194
1195 /* Search the outgoing edges, ensure that the counts match, and find possible
1196 fallthru and exception handling edges, since these need more
1197 validation.  */
1198 for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
1199 e1 = e1->succ_next, e2 = e2->succ_next)
1200 {
1201 if (e1->flags & EDGE_EH)
1202 nehedges1++;
1203
1204 if (e2->flags & EDGE_EH)
1205 nehedges2++;
1206
1207 if (e1->flags & EDGE_FALLTHRU)
1208 fallthru1 = e1;
1209 if (e2->flags & EDGE_FALLTHRU)
1210 fallthru2 = e2;
1211 }
1212
1213 /* If the numbers of edges of the various types do not match, fail.  */
1214 if (e1 || e2
1215 || nehedges1 != nehedges2
1216 || (fallthru1 != 0) != (fallthru2 != 0))
1217 return false;
1218
1219 /* fallthru edges must be forwarded to the same destination. */
1220 if (fallthru1)
1221 {
1222 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1223 ? fallthru1->dest->succ->dest: fallthru1->dest);
1224 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1225 ? fallthru2->dest->succ->dest: fallthru2->dest);
1226
1227 if (d1 != d2)
1228 return false;
1229 }
1230
1231 /* In case we do have EH edges, ensure we are in the same region. */
1232 if (nehedges1)
1233 {
1234 rtx n1 = find_reg_note (bb1->end, REG_EH_REGION, 0);
1235 rtx n2 = find_reg_note (bb2->end, REG_EH_REGION, 0);
1236
1237 if (XEXP (n1, 0) != XEXP (n2, 0))
1238 return false;
1239 }
1240
1241 /* We don't need to match the rest of the edges, as the above checks should
1242 be enough to ensure that they are equivalent.  */
1243 return true;
1244 }
1245
1246 /* E1 and E2 are edges with the same destination block. Search their
1247 predecessors for common code. If found, redirect control flow from
1248 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
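/* In rough terms: when the blocks feeding E1 and E2 end with the same insn
   sequence, the copy in E1->SRC is deleted and replaced by an unconditional
   jump into E2->SRC (splitting E2->SRC first if the match does not start at
   its head), so the common tail is kept only once.  Execution counts and
   frequencies of the surviving copy are adjusted to account for the
   redirected flow.  */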
1249
1250 static bool
1251 try_crossjump_to_edge (mode, e1, e2)
1252 int mode;
1253 edge e1, e2;
1254 {
1255 int nmatch;
1256 basic_block src1 = e1->src, src2 = e2->src;
1257 basic_block redirect_to;
1258 rtx newpos1, newpos2;
1259 edge s;
1260 rtx last;
1261 rtx label;
1262
1263 /* Search backward through forwarder blocks. We don't need to worry
1264 about multiple entry or chained forwarders, as they will be optimized
1265 away. We do this to look past the unconditional jump following a
1266 conditional jump that is required due to the current CFG shape. */
1267 if (src1->pred
1268 && !src1->pred->pred_next
1269 && FORWARDER_BLOCK_P (src1))
1270 e1 = src1->pred, src1 = e1->src;
1271
1272 if (src2->pred
1273 && !src2->pred->pred_next
1274 && FORWARDER_BLOCK_P (src2))
1275 e2 = src2->pred, src2 = e2->src;
1276
1277 /* Nothing to do if we reach ENTRY, or a common source block. */
1278 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1279 return false;
1280 if (src1 == src2)
1281 return false;
1282
1283 /* Seeing more than one forwarder block would confuse us later...  */
1284 if (FORWARDER_BLOCK_P (e1->dest)
1285 && FORWARDER_BLOCK_P (e1->dest->succ->dest))
1286 return false;
1287
1288 if (FORWARDER_BLOCK_P (e2->dest)
1289 && FORWARDER_BLOCK_P (e2->dest->succ->dest))
1290 return false;
1291
1292 /* Likewise with dead code (possibly newly created by the other optimizations
1293 of cfg_cleanup). */
1294 if (!src1->pred || !src2->pred)
1295 return false;
1296
1297 /* Look for the common insn sequence, part the first ... */
1298 if (!outgoing_edges_match (mode, src1, src2))
1299 return false;
1300
1301 /* ... and part the second. */
1302 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1303 if (!nmatch)
1304 return false;
1305
1306 /* Avoid splitting if possible. */
1307 if (newpos2 == src2->head)
1308 redirect_to = src2;
1309 else
1310 {
1311 if (rtl_dump_file)
1312 fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
1313 src2->index, nmatch);
1314 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1315 }
1316
1317 if (rtl_dump_file)
1318 fprintf (rtl_dump_file,
1319 "Cross jumping from bb %i to bb %i; %i common insns\n",
1320 src1->index, src2->index, nmatch);
1321
1322 redirect_to->count += src1->count;
1323 redirect_to->frequency += src1->frequency;
1324
1325 /* Recompute the frequencies and counts of outgoing edges. */
1326 for (s = redirect_to->succ; s; s = s->succ_next)
1327 {
1328 edge s2;
1329 basic_block d = s->dest;
1330
1331 if (FORWARDER_BLOCK_P (d))
1332 d = d->succ->dest;
1333
1334 for (s2 = src1->succ; ; s2 = s2->succ_next)
1335 {
1336 basic_block d2 = s2->dest;
1337 if (FORWARDER_BLOCK_P (d2))
1338 d2 = d2->succ->dest;
1339 if (d == d2)
1340 break;
1341 }
1342
1343 s->count += s2->count;
1344
1345 /* Take care to update possible forwarder blocks.  We verified
1346 that there is no more than one in the chain, so we can't run
1347 into an infinite loop.  */
1348 if (FORWARDER_BLOCK_P (s->dest))
1349 {
1350 s->dest->succ->count += s2->count;
1351 s->dest->count += s2->count;
1352 s->dest->frequency += EDGE_FREQUENCY (s);
1353 }
1354
1355 if (FORWARDER_BLOCK_P (s2->dest))
1356 {
1357 s2->dest->succ->count -= s2->count;
1358 if (s2->dest->succ->count < 0)
1359 s2->dest->succ->count = 0;
1360 s2->dest->count -= s2->count;
1361 s2->dest->frequency -= EDGE_FREQUENCY (s);
1362 if (s2->dest->frequency < 0)
1363 s2->dest->frequency = 0;
1364 if (s2->dest->count < 0)
1365 s2->dest->count = 0;
1366 }
1367
1368 if (!redirect_to->frequency && !src1->frequency)
1369 s->probability = (s->probability + s2->probability) / 2;
1370 else
1371 s->probability
1372 = ((s->probability * redirect_to->frequency +
1373 s2->probability * src1->frequency)
1374 / (redirect_to->frequency + src1->frequency));
1375 }
1376
1377 update_br_prob_note (redirect_to);
1378
1379 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1380
1381 /* Skip possible basic block header. */
1382 if (GET_CODE (newpos1) == CODE_LABEL)
1383 newpos1 = NEXT_INSN (newpos1);
1384
1385 if (GET_CODE (newpos1) == NOTE)
1386 newpos1 = NEXT_INSN (newpos1);
1387 last = src1->end;
1388
1389 /* Emit the jump insn. */
1390 label = block_label (redirect_to);
1391 emit_jump_insn_after (gen_jump (label), src1->end);
1392 JUMP_LABEL (src1->end) = label;
1393 LABEL_NUSES (label)++;
1394
1395 /* Delete the now unreachable instructions. */
1396 delete_insn_chain (newpos1, last);
1397
1398 /* Make sure there is a barrier after the new jump. */
1399 last = next_nonnote_insn (src1->end);
1400 if (!last || GET_CODE (last) != BARRIER)
1401 emit_barrier_after (src1->end);
1402
1403 /* Update CFG. */
1404 while (src1->succ)
1405 remove_edge (src1->succ);
1406 make_single_succ_edge (src1, redirect_to, 0);
1407
1408 update_forwarder_flag (src1);
1409
1410 return true;
1411 }
1412
1413 /* Search the predecessors of BB for common insn sequences.  When found,
1414 share code between them by redirecting control flow.  Return true if
1415 any changes were made.  */
1416
1417 static bool
1418 try_crossjump_bb (mode, bb)
1419 int mode;
1420 basic_block bb;
1421 {
1422 edge e, e2, nexte2, nexte, fallthru;
1423 bool changed;
1424
1425 /* Nothing to do if there are not at least two incoming edges.  */
1426 if (!bb->pred || !bb->pred->pred_next)
1427 return false;
1428
1429 /* It is always cheapest to redirect a block that ends in a branch to
1430 a block that falls through into BB, as that adds no branches to the
1431 program. We'll try that combination first. */
1432 for (fallthru = bb->pred; fallthru; fallthru = fallthru->pred_next)
1433 if (fallthru->flags & EDGE_FALLTHRU)
1434 break;
1435
1436 changed = false;
1437 for (e = bb->pred; e; e = nexte)
1438 {
1439 nexte = e->pred_next;
1440
1441 /* As noted above, first try with the fallthru predecessor. */
1442 if (fallthru)
1443 {
1444 /* Don't combine the fallthru edge into anything else.
1445 If there is a match, we'll do it the other way around. */
1446 if (e == fallthru)
1447 continue;
1448
1449 if (try_crossjump_to_edge (mode, e, fallthru))
1450 {
1451 changed = true;
1452 nexte = bb->pred;
1453 continue;
1454 }
1455 }
1456
1457 /* Non-obvious work limiting check: Recognize that we're going
1458 to call try_crossjump_bb on every basic block. So if we have
1459 two blocks with lots of outgoing edges (a switch) and they
1460 share lots of common destinations, then we would do the
1461 cross-jump check once for each common destination.
1462
1463 Now, if the blocks actually are cross-jump candidates, then
1464 all of their destinations will be shared. Which means that
1465 we only need check them for cross-jump candidacy once. We
1466 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1467 choosing to do the check from the block for which the edge
1468 in question is the first successor of A. */
1469 if (e->src->succ != e)
1470 continue;
1471
1472 for (e2 = bb->pred; e2; e2 = nexte2)
1473 {
1474 nexte2 = e2->pred_next;
1475
1476 if (e2 == e)
1477 continue;
1478
1479 /* We've already checked the fallthru edge above. */
1480 if (e2 == fallthru)
1481 continue;
1482
1483 /* The "first successor" check above only prevents multiple
1484 checks of crossjump(A,B). In order to prevent redundant
1485 checks of crossjump(B,A), require that A be the block
1486 with the lowest index. */
1487 if (e->src->index > e2->src->index)
1488 continue;
1489
1490 if (try_crossjump_to_edge (mode, e, e2))
1491 {
1492 changed = true;
1493 nexte = bb->pred;
1494 break;
1495 }
1496 }
1497 }
1498
1499 return changed;
1500 }
1501
1502 /* Do simple CFG optimizations - basic block merging, simplification of jump
1503 instructions, etc.  Return nonzero if changes were made.  */
1504
1505 static bool
1506 try_optimize_cfg (mode)
1507 int mode;
1508 {
1509 int i;
1510 bool changed_overall = false;
1511 bool changed;
1512 int iterations = 0;
1513 sbitmap blocks;
1514
1515 if (mode & CLEANUP_CROSSJUMP)
1516 add_noreturn_fake_exit_edges ();
1517
1518 for (i = 0; i < n_basic_blocks; i++)
1519 update_forwarder_flag (BASIC_BLOCK (i));
1520
1521 if (mode & CLEANUP_UPDATE_LIFE)
1522 clear_bb_flags ();
1523
1524 if (! (* targetm.cannot_modify_jumps_p) ())
1525 {
1526 /* Attempt to merge blocks as made possible by edge removal. If
1527 a block has only one successor, and the successor has only
1528 one predecessor, they may be combined. */
1529 do
1530 {
1531 changed = false;
1532 iterations++;
1533
1534 if (rtl_dump_file)
1535 fprintf (rtl_dump_file,
1536 "\n\ntry_optimize_cfg iteration %i\n\n",
1537 iterations);
1538
1539 for (i = 0; i < n_basic_blocks;)
1540 {
1541 basic_block c, b = BASIC_BLOCK (i);
1542 edge s;
1543 bool changed_here = false;
1544
1545 /* Delete trivially dead basic blocks. */
1546 while (b->pred == NULL)
1547 {
1548 c = BASIC_BLOCK (b->index - 1);
1549 if (rtl_dump_file)
1550 fprintf (rtl_dump_file, "Deleting block %i.\n",
1551 b->index);
1552
1553 flow_delete_block (b);
1554 changed = true;
1555 b = c;
1556 }
1557
1558 /* Remove code labels no longer used. Don't do this
1559 before CALL_PLACEHOLDER is removed, as some branches
1560 may be hidden within. */
1561 if (b->pred->pred_next == NULL
1562 && (b->pred->flags & EDGE_FALLTHRU)
1563 && !(b->pred->flags & EDGE_COMPLEX)
1564 && GET_CODE (b->head) == CODE_LABEL
1565 && (!(mode & CLEANUP_PRE_SIBCALL)
1566 || !tail_recursion_label_p (b->head))
1567 /* If the previous block ends with a branch to this
1568 block, we can't delete the label. Normally this
1569 is a condjump that is yet to be simplified, but
1570 if CASE_DROPS_THRU, this can be a tablejump with
1571 some element going to the same place as the
1572 default (fallthru). */
1573 && (b->pred->src == ENTRY_BLOCK_PTR
1574 || GET_CODE (b->pred->src->end) != JUMP_INSN
1575 || ! label_is_jump_target_p (b->head,
1576 b->pred->src->end)))
1577 {
1578 rtx label = b->head;
1579
1580 b->head = NEXT_INSN (b->head);
1581 delete_insn_chain (label, label);
1582 if (rtl_dump_file)
1583 fprintf (rtl_dump_file, "Deleted label in block %i.\n",
1584 b->index);
1585 }
1586
1587 /* If we fall through an empty block, we can remove it. */
1588 if (b->pred->pred_next == NULL
1589 && (b->pred->flags & EDGE_FALLTHRU)
1590 && GET_CODE (b->head) != CODE_LABEL
1591 && FORWARDER_BLOCK_P (b)
1592 /* Note that forwarder_block_p true ensures that
1593 there is a successor for this block. */
1594 && (b->succ->flags & EDGE_FALLTHRU)
1595 && n_basic_blocks > 1)
1596 {
1597 if (rtl_dump_file)
1598 fprintf (rtl_dump_file,
1599 "Deleting fallthru block %i.\n",
1600 b->index);
1601
1602 c = BASIC_BLOCK (b->index ? b->index - 1 : 1);
1603 redirect_edge_succ_nodup (b->pred, b->succ->dest);
1604 flow_delete_block (b);
1605 changed = true;
1606 b = c;
1607 }
1608
1609 /* Merge blocks.  Loop because chains of blocks might be
1610 combinable.  */
1611 while ((s = b->succ) != NULL
1612 && s->succ_next == NULL
1613 && !(s->flags & EDGE_COMPLEX)
1614 && (c = s->dest) != EXIT_BLOCK_PTR
1615 && c->pred->pred_next == NULL
1616 /* If the jump insn has side effects,
1617 we can't kill the edge. */
1618 && (GET_CODE (b->end) != JUMP_INSN
1619 || onlyjump_p (b->end))
1620 && merge_blocks (s, b, c, mode))
1621 changed_here = true;
1622
1623 /* Simplify branch over branch. */
1624 if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b))
1625 changed_here = true;
1626
1627 /* If B has a single outgoing edge, but uses a
1628 non-trivial jump instruction without side-effects, we
1629 can either delete the jump entirely, or replace it
1630 with a simple unconditional jump. Use
1631 redirect_edge_and_branch to do the dirty work. */
1632 if (b->succ
1633 && ! b->succ->succ_next
1634 && b->succ->dest != EXIT_BLOCK_PTR
1635 && onlyjump_p (b->end)
1636 && redirect_edge_and_branch (b->succ, b->succ->dest))
1637 {
1638 update_forwarder_flag (b);
1639 changed_here = true;
1640 }
1641
1642 /* Simplify branch to branch. */
1643 if (try_forward_edges (mode, b))
1644 changed_here = true;
1645
1646 /* Look for shared code between blocks. */
1647 if ((mode & CLEANUP_CROSSJUMP)
1648 && try_crossjump_bb (mode, b))
1649 changed_here = true;
1650
1651 /* Don't get confused by the index shift caused by
1652 deleting blocks. */
1653 if (!changed_here)
1654 i = b->index + 1;
1655 else
1656 changed = true;
1657 }
1658
1659 if ((mode & CLEANUP_CROSSJUMP)
1660 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
1661 changed = true;
1662
1663 #ifdef ENABLE_CHECKING
1664 if (changed)
1665 verify_flow_info ();
1666 #endif
1667
1668 changed_overall |= changed;
1669 }
1670 while (changed);
1671 }
1672
1673 if (mode & CLEANUP_CROSSJUMP)
1674 remove_fake_edges ();
1675
1676 if ((mode & CLEANUP_UPDATE_LIFE) && changed_overall)
1677 update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL,
1678 PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE
1679 | PROP_KILL_DEAD_CODE | PROP_LOG_LINKS);
1680
1681 for (i = 0; i < n_basic_blocks; i++)
1682 BASIC_BLOCK (i)->aux = NULL;
1683
1684 return changed_overall;
1685 }
1686 \f
1687 /* Delete all unreachable basic blocks. */
1688
1689 static bool
1690 delete_unreachable_blocks ()
1691 {
1692 int i;
1693 bool changed = false;
1694
1695 find_unreachable_blocks ();
1696
1697 /* Delete all unreachable basic blocks. Count down so that we
1698 don't interfere with the block renumbering that happens in
1699 flow_delete_block. */
1700
1701 for (i = n_basic_blocks - 1; i >= 0; --i)
1702 {
1703 basic_block b = BASIC_BLOCK (i);
1704
1705 if (!(b->flags & BB_REACHABLE))
1706 flow_delete_block (b), changed = true;
1707 }
1708
1709 if (changed)
1710 tidy_fallthru_edges ();
1711 return changed;
1712 }
1713 \f
1714 /* Tidy the CFG by deleting unreachable code and whatnot. */
1715
1716 bool
1717 cleanup_cfg (mode)
1718 int mode;
1719 {
1720 bool changed = false;
1721
1722 timevar_push (TV_CLEANUP_CFG);
1723 changed = delete_unreachable_blocks ();
1724 if (try_optimize_cfg (mode))
1725 delete_unreachable_blocks (), changed = true;
1726
1727 /* Kill the data we won't maintain. */
1728 free_EXPR_LIST_list (&label_value_list);
1729 free_EXPR_LIST_list (&tail_recursion_label_list);
1730 timevar_pop (TV_CLEANUP_CFG);
1731
1732 return changed;
1733 }