gcc.gnu.org Git - gcc.git/blob - gcc/cfgcleanup.c
1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22 /* This file contains the control flow optimizer. The main entry point is
23 cleanup_cfg. The following optimizations are performed:
24
25 - Unreachable blocks removal
26 - Edge forwarding (an edge to a forwarder block is forwarded to its
27 successor. Simplification of the branch instruction is performed by the
28 underlying infrastructure so the branch can be converted to a simplejump
29 or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
33
34 #include "config.h"
35 #include "system.h"
36 #include "coretypes.h"
37 #include "tm.h"
38 #include "rtl.h"
39 #include "hard-reg-set.h"
40 #include "basic-block.h"
41 #include "timevar.h"
42 #include "output.h"
43 #include "insn-config.h"
44 #include "flags.h"
45 #include "recog.h"
46 #include "toplev.h"
47 #include "cselib.h"
48 #include "params.h"
49 #include "tm_p.h"
50 #include "target.h"
51 #include "regs.h"
52 #include "expr.h"
53
54 /* cleanup_cfg maintains the following flags for each basic block. */
55
56 enum bb_flags
57 {
58 /* Set if BB is a forwarder block, to avoid too many
59 forwarder_block_p calls. */
60 BB_FORWARDER_BLOCK = 1,
61 BB_NONTHREADABLE_BLOCK = 2
62 };
63
64 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
65 #define BB_SET_FLAG(BB, FLAG) \
66 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
67 #define BB_CLEAR_FLAG(BB, FLAG) \
68 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
69
70 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
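/* The forwarder status of each block is cached in these aux-based flags so
   that it can be queried cheaply with FORWARDER_BLOCK_P (bb) instead of
   re-running forwarder_block_p on every test; notice_new_block and
   update_forwarder_flag below keep the cached bit up to date.  */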
71
72 /* Set to true when we are running the first pass of the try_optimize_cfg loop. */
73 static bool first_pass;
74 static bool try_crossjump_to_edge (int, edge, edge);
75 static bool try_crossjump_bb (int, basic_block);
76 static bool outgoing_edges_match (int, basic_block, basic_block);
77 static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
78 static bool insns_match_p (int, rtx, rtx);
79
80 static bool tail_recursion_label_p (rtx);
81 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
82 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
83 static bool try_optimize_cfg (int);
84 static bool try_simplify_condjump (basic_block);
85 static bool try_forward_edges (int, basic_block);
86 static edge thread_jump (int, edge, basic_block);
87 static bool mark_effect (rtx, bitmap);
88 static void notice_new_block (basic_block);
89 static void update_forwarder_flag (basic_block);
90 static int mentions_nonequal_regs (rtx *, void *);
91 static void merge_memattrs (rtx, rtx);
92 \f
93 /* Set flags for newly created block. */
94
95 static void
96 notice_new_block (basic_block bb)
97 {
98 if (!bb)
99 return;
100
101 if (forwarder_block_p (bb))
102 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
103 }
104
105 /* Recompute forwarder flag after block has been modified. */
106
107 static void
108 update_forwarder_flag (basic_block bb)
109 {
110 if (forwarder_block_p (bb))
111 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
112 else
113 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
114 }
115 \f
116 /* Simplify a conditional jump around an unconditional jump.
117 Return true if something changed. */
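/* For illustration, the shape handled here is roughly:

       if (cond) goto A;            if (!cond) goto B;
       goto B;               ==>  A: ...
    A: ...

   The conditional branch is inverted so that it targets the unconditional
   jump's destination, and the block holding the now-redundant jump is
   deleted, letting execution fall through into A.  */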
118
119 static bool
120 try_simplify_condjump (basic_block cbranch_block)
121 {
122 basic_block jump_block, jump_dest_block, cbranch_dest_block;
123 edge cbranch_jump_edge, cbranch_fallthru_edge;
124 rtx cbranch_insn;
125 rtx insn, next;
126 rtx end;
127
128 /* Verify that there are exactly two successors. */
129 if (!cbranch_block->succ
130 || !cbranch_block->succ->succ_next
131 || cbranch_block->succ->succ_next->succ_next)
132 return false;
133
134 /* Verify that we've got a normal conditional branch at the end
135 of the block. */
136 cbranch_insn = BB_END (cbranch_block);
137 if (!any_condjump_p (cbranch_insn))
138 return false;
139
140 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
141 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
142
143 /* The next block must not have multiple predecessors, must not
144 be the last block in the function, and must contain just the
145 unconditional jump. */
146 jump_block = cbranch_fallthru_edge->dest;
147 if (jump_block->pred->pred_next
148 || jump_block->next_bb == EXIT_BLOCK_PTR
149 || !FORWARDER_BLOCK_P (jump_block))
150 return false;
151 jump_dest_block = jump_block->succ->dest;
152
153 /* The conditional branch must target the block after the
154 unconditional branch. */
155 cbranch_dest_block = cbranch_jump_edge->dest;
156
157 if (!can_fallthru (jump_block, cbranch_dest_block))
158 return false;
159
160 /* Invert the conditional branch. */
161 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
162 return false;
163
164 if (dump_file)
165 fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
166 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
167
168 /* Success. Update the CFG to match. Note that after this point
169 the edge variable names appear backwards; the redirection is done
170 this way to preserve edge profile data. */
171 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
172 cbranch_dest_block);
173 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
174 jump_dest_block);
175 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
176 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
177 update_br_prob_note (cbranch_block);
178
179 end = BB_END (jump_block);
180 /* Deleting a block may produce an unreachable-code warning even when we are
181 not deleting anything live. Suppress it by moving all the line number
182 notes out of the block. */
183 for (insn = BB_HEAD (jump_block); insn != NEXT_INSN (BB_END (jump_block));
184 insn = next)
185 {
186 next = NEXT_INSN (insn);
187 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
188 {
189 if (insn == BB_END (jump_block))
190 {
191 BB_END (jump_block) = PREV_INSN (insn);
192 if (insn == end)
193 break;
194 }
195 reorder_insns_nobb (insn, insn, end);
196 end = insn;
197 }
198 }
199 /* Delete the block with the unconditional jump, and clean up the mess. */
200 delete_basic_block (jump_block);
201 tidy_fallthru_edge (cbranch_jump_edge);
202
203 return true;
204 }
205 \f
206 /* Attempt to prove that the operation is a NOOP using CSElib, or mark its
207 effect on a register. Used by jump threading. */
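/* For example, a "(set (reg:SI 65) (reg:SI 66))" whose source and destination
   are not proved equal by cselib marks register 65 in NONEQUAL, while a
   "(clobber (reg:SI 65))" clears it again because the old value is dead; a
   set to anything other than a register or PC makes the walk give up
   (return true).  */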
208
209 static bool
210 mark_effect (rtx exp, regset nonequal)
211 {
212 int regno;
213 rtx dest;
214 switch (GET_CODE (exp))
215 {
216 /* In case we clobber the register, mark it as equal, as we know the
217 value is dead so it doesn't have to match. */
218 case CLOBBER:
219 if (REG_P (XEXP (exp, 0)))
220 {
221 dest = XEXP (exp, 0);
222 regno = REGNO (dest);
223 CLEAR_REGNO_REG_SET (nonequal, regno);
224 if (regno < FIRST_PSEUDO_REGISTER)
225 {
226 int n = hard_regno_nregs[regno][GET_MODE (dest)];
227 while (--n > 0)
228 CLEAR_REGNO_REG_SET (nonequal, regno + n);
229 }
230 }
231 return false;
232
233 case SET:
234 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
235 return false;
236 dest = SET_DEST (exp);
237 if (dest == pc_rtx)
238 return false;
239 if (!REG_P (dest))
240 return true;
241 regno = REGNO (dest);
242 SET_REGNO_REG_SET (nonequal, regno);
243 if (regno < FIRST_PSEUDO_REGISTER)
244 {
245 int n = hard_regno_nregs[regno][GET_MODE (dest)];
246 while (--n > 0)
247 SET_REGNO_REG_SET (nonequal, regno + n);
248 }
249 return false;
250
251 default:
252 return false;
253 }
254 }
255
256 /* Return nonzero if X is a register set in regset DATA.
257 Called via for_each_rtx. */
258 static int
259 mentions_nonequal_regs (rtx *x, void *data)
260 {
261 regset nonequal = (regset) data;
262 if (REG_P (*x))
263 {
264 int regno;
265
266 regno = REGNO (*x);
267 if (REGNO_REG_SET_P (nonequal, regno))
268 return 1;
269 if (regno < FIRST_PSEUDO_REGISTER)
270 {
271 int n = hard_regno_nregs[regno][GET_MODE (*x)];
272 while (--n > 0)
273 if (REGNO_REG_SET_P (nonequal, regno + n))
274 return 1;
275 }
276 }
277 return 0;
278 }
279 /* Attempt to prove that the basic block B will have no side effects and
280 always continues along the same edge if reached via E. Return that edge
281 if it exists, NULL otherwise. */
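/* For illustration: if edge E is taken from E->SRC only when "a == 0" holds,
   block B contains no side effects, and B ends with
   "if (a == 0) goto X; else goto Y;", then the outcome of B's branch is
   already known on entry via E, so E can be redirected straight to X.  */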
282
283 static edge
284 thread_jump (int mode, edge e, basic_block b)
285 {
286 rtx set1, set2, cond1, cond2, insn;
287 enum rtx_code code1, code2, reversed_code2;
288 bool reverse1 = false;
289 int i;
290 regset nonequal;
291 bool failed = false;
292
293 if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
294 return NULL;
295
296 /* At the moment we handle only conditional jumps, but later we may
297 want to extend this code to tablejumps and others. */
298 if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
299 return NULL;
300 if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
301 {
302 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
303 return NULL;
304 }
305
306 /* The second branch must end with an onlyjump, as we will eliminate the jump. */
307 if (!any_condjump_p (BB_END (e->src)))
308 return NULL;
309
310 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
311 {
312 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
313 return NULL;
314 }
315
316 set1 = pc_set (BB_END (e->src));
317 set2 = pc_set (BB_END (b));
318 if (((e->flags & EDGE_FALLTHRU) != 0)
319 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
320 reverse1 = true;
321
322 cond1 = XEXP (SET_SRC (set1), 0);
323 cond2 = XEXP (SET_SRC (set2), 0);
324 if (reverse1)
325 code1 = reversed_comparison_code (cond1, BB_END (e->src));
326 else
327 code1 = GET_CODE (cond1);
328
329 code2 = GET_CODE (cond2);
330 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
331
332 if (!comparison_dominates_p (code1, code2)
333 && !comparison_dominates_p (code1, reversed_code2))
334 return NULL;
335
336 /* Ensure that the comparison operators are equivalent.
337 ??? This is far too pessimistic. We should allow swapped operands,
338 different CCmodes, or for example interval comparisons that
339 dominate even when the operands are not equivalent. */
340 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
341 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
342 return NULL;
343
344 /* Short circuit cases where block B contains some side effects, as we can't
345 safely bypass it. */
346 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
347 insn = NEXT_INSN (insn))
348 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
349 {
350 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
351 return NULL;
352 }
353
354 cselib_init (false);
355
356 /* First process all values computed in the source basic block. */
357 for (insn = NEXT_INSN (BB_HEAD (e->src)); insn != NEXT_INSN (BB_END (e->src));
358 insn = NEXT_INSN (insn))
359 if (INSN_P (insn))
360 cselib_process_insn (insn);
361
362 nonequal = BITMAP_XMALLOC();
363 CLEAR_REG_SET (nonequal);
364
365 /* Now assume that we have continued along the edge E to B and continue
366 processing as if it were the same basic block.
367 Our goal is to prove that the whole block is a NOOP. */
368
369 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b)) && !failed;
370 insn = NEXT_INSN (insn))
371 {
372 if (INSN_P (insn))
373 {
374 rtx pat = PATTERN (insn);
375
376 if (GET_CODE (pat) == PARALLEL)
377 {
378 for (i = 0; i < XVECLEN (pat, 0); i++)
379 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
380 }
381 else
382 failed |= mark_effect (pat, nonequal);
383 }
384
385 cselib_process_insn (insn);
386 }
387
388 /* Later we should clear dead registers from NONEQUAL. So far we don't
389 have liveness information in cfg_cleanup. */
390 if (failed)
391 {
392 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
393 goto failed_exit;
394 }
395
396 /* cond2 must not mention any register whose value is not known to be
397 equal to that in the former block. */
398 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
399 goto failed_exit;
400
401 /* In case liveness information is available, we need to prove equivalence
402 only of the live values. */
403 if (mode & CLEANUP_UPDATE_LIFE)
404 AND_REG_SET (nonequal, b->global_live_at_end);
405
406 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
407
408 BITMAP_XFREE (nonequal);
409 cselib_finish ();
410 if ((comparison_dominates_p (code1, code2) != 0)
411 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
412 return BRANCH_EDGE (b);
413 else
414 return FALLTHRU_EDGE (b);
415
416 failed_exit:
417 BITMAP_XFREE (nonequal);
418 cselib_finish ();
419 return NULL;
420 }
421 \f
422 /* Attempt to forward edges leaving basic block B.
423 Return true if successful. */
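/* For illustration, a chain

       B -> F1 -> F2 -> D

   where F1 and F2 are forwarder blocks (containing nothing but an
   unconditional jump) is collapsed so that B's edge points directly at D;
   with CLEANUP_THREADING the walk may also step across blocks that
   thread_jump proves to be no-ops.  */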
424
425 static bool
426 try_forward_edges (int mode, basic_block b)
427 {
428 bool changed = false;
429 edge e, next, *threaded_edges = NULL;
430
431 for (e = b->succ; e; e = next)
432 {
433 basic_block target, first;
434 int counter;
435 bool threaded = false;
436 int nthreaded_edges = 0;
437 bool may_thread = first_pass | (b->flags & BB_DIRTY);
438
439 next = e->succ_next;
440
441 /* Skip complex edges because we don't know how to update them.
442
443 Still handle fallthru edges, as we may succeed in forwarding the
444 fallthru edge to the same place as the branch edge of a conditional
445 branch and turn the conditional branch into an unconditional branch. */
446 if (e->flags & EDGE_COMPLEX)
447 continue;
448
449 target = first = e->dest;
450 counter = 0;
451
452 while (counter < n_basic_blocks)
453 {
454 basic_block new_target = NULL;
455 bool new_target_threaded = false;
456 may_thread |= target->flags & BB_DIRTY;
457
458 if (FORWARDER_BLOCK_P (target)
459 && target->succ->dest != EXIT_BLOCK_PTR)
460 {
461 /* Bypass trivial infinite loops. */
462 if (target == target->succ->dest)
463 counter = n_basic_blocks;
464 new_target = target->succ->dest;
465 }
466
467 /* Thread only over one edge at a time, to simplify updating
468 of probabilities. */
469 else if ((mode & CLEANUP_THREADING) && may_thread)
470 {
471 edge t = thread_jump (mode, e, target);
472 if (t)
473 {
474 if (!threaded_edges)
475 threaded_edges = xmalloc (sizeof (*threaded_edges)
476 * n_basic_blocks);
477 else
478 {
479 int i;
480
481 /* Detect an infinite loop across blocks not
482 including the start block. */
483 for (i = 0; i < nthreaded_edges; ++i)
484 if (threaded_edges[i] == t)
485 break;
486 if (i < nthreaded_edges)
487 {
488 counter = n_basic_blocks;
489 break;
490 }
491 }
492
493 /* Detect an infinite loop across the start block. */
494 if (t->dest == b)
495 break;
496
497 if (nthreaded_edges >= n_basic_blocks)
498 abort ();
499 threaded_edges[nthreaded_edges++] = t;
500
501 new_target = t->dest;
502 new_target_threaded = true;
503 }
504 }
505
506 if (!new_target)
507 break;
508
509 /* Avoid killing loop pre-headers, as they are where the loop
510 optimizer wants to hoist code to.
511
512 For fallthru forwarders, the LOOP_BEG note must appear between
513 the header of the block and the CODE_LABEL of the loop; for
514 non-forwarders it must appear before the JUMP_INSN. */
515 if ((mode & CLEANUP_PRE_LOOP) && optimize)
516 {
517 rtx insn = (target->succ->flags & EDGE_FALLTHRU
518 ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));
519
520 if (GET_CODE (insn) != NOTE)
521 insn = NEXT_INSN (insn);
522
523 for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
524 insn = NEXT_INSN (insn))
525 if (GET_CODE (insn) == NOTE
526 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
527 break;
528
529 if (GET_CODE (insn) == NOTE)
530 break;
531
532 /* Do not clean up branches to just past the end of a loop
533 at this time; it can mess up the loop optimizer's
534 recognition of some patterns. */
535
536 insn = PREV_INSN (BB_HEAD (target));
537 if (insn && GET_CODE (insn) == NOTE
538 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
539 break;
540 }
541
542 counter++;
543 target = new_target;
544 threaded |= new_target_threaded;
545 }
546
547 if (counter >= n_basic_blocks)
548 {
549 if (dump_file)
550 fprintf (dump_file, "Infinite loop in BB %i.\n",
551 target->index);
552 }
553 else if (target == first)
554 ; /* We didn't do anything. */
555 else
556 {
557 /* Save the values now, as the edge may get removed. */
558 gcov_type edge_count = e->count;
559 int edge_probability = e->probability;
560 int edge_frequency;
561 int n = 0;
562
563 /* Don't force if target is exit block. */
564 if (threaded && target != EXIT_BLOCK_PTR)
565 {
566 notice_new_block (redirect_edge_and_branch_force (e, target));
567 if (dump_file)
568 fprintf (dump_file, "Conditionals threaded.\n");
569 }
570 else if (!redirect_edge_and_branch (e, target))
571 {
572 if (dump_file)
573 fprintf (dump_file,
574 "Forwarding edge %i->%i to %i failed.\n",
575 b->index, e->dest->index, target->index);
576 continue;
577 }
578
579 /* We successfully forwarded the edge. Now update profile
580 data: for each edge we traversed in the chain, remove
581 the original edge's execution count. */
582 edge_frequency = ((edge_probability * b->frequency
583 + REG_BR_PROB_BASE / 2)
584 / REG_BR_PROB_BASE);
585
586 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
587 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
588
589 do
590 {
591 edge t;
592
593 first->count -= edge_count;
594 if (first->count < 0)
595 first->count = 0;
596 first->frequency -= edge_frequency;
597 if (first->frequency < 0)
598 first->frequency = 0;
599 if (first->succ->succ_next)
600 {
601 edge e;
602 int prob;
603 if (n >= nthreaded_edges)
604 abort ();
605 t = threaded_edges [n++];
606 if (t->src != first)
607 abort ();
608 if (first->frequency)
609 prob = edge_frequency * REG_BR_PROB_BASE / first->frequency;
610 else
611 prob = 0;
612 if (prob > t->probability)
613 prob = t->probability;
614 t->probability -= prob;
615 prob = REG_BR_PROB_BASE - prob;
616 if (prob <= 0)
617 {
618 first->succ->probability = REG_BR_PROB_BASE;
619 first->succ->succ_next->probability = 0;
620 }
621 else
622 for (e = first->succ; e; e = e->succ_next)
623 e->probability = ((e->probability * REG_BR_PROB_BASE)
624 / (double) prob);
625 update_br_prob_note (first);
626 }
627 else
628 {
629 /* It is possible that, as a result of
630 threading, we've removed the edge because it
631 was threaded to the fallthru edge. Avoid
632 getting out of sync. */
633 if (n < nthreaded_edges
634 && first == threaded_edges [n]->src)
635 n++;
636 t = first->succ;
637 }
638
639 t->count -= edge_count;
640 if (t->count < 0)
641 t->count = 0;
642 first = t->dest;
643 }
644 while (first != target);
645
646 changed = true;
647 }
648 }
649
650 if (threaded_edges)
651 free (threaded_edges);
652 return changed;
653 }
654 \f
655 /* Return true if LABEL is used for tail recursion. */
656
657 static bool
658 tail_recursion_label_p (rtx label)
659 {
660 rtx x;
661
662 for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
663 if (label == XEXP (x, 0))
664 return true;
665
666 return false;
667 }
668
669 /* Blocks A and B are to be merged into a single block. A has no incoming
670 fallthru edge, so it can be moved before B without adding or modifying
671 any jumps (aside from the jump from A to B). */
672
673 static void
674 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
675 {
676 rtx barrier;
677
678 barrier = next_nonnote_insn (BB_END (a));
679 if (GET_CODE (barrier) != BARRIER)
680 abort ();
681 delete_insn (barrier);
682
683 /* Move block and loop notes out of the chain so that we do not
684 disturb their order.
685
686 ??? A better solution would be to squeeze out all the non-nested notes
687 and adjust the block trees appropriately. Even better would be to have
688 a tighter connection between block trees and rtl so that this is not
689 necessary. */
690 if (squeeze_notes (&BB_HEAD (a), &BB_END (a)))
691 abort ();
692
693 /* Scramble the insn chain. */
694 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
695 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
696 a->flags |= BB_DIRTY;
697
698 if (dump_file)
699 fprintf (dump_file, "Moved block %d before %d and merged.\n",
700 a->index, b->index);
701
702 /* Swap the records for the two blocks around. */
703
704 unlink_block (a);
705 link_block (a, b->prev_bb);
706
707 /* Now blocks A and B are contiguous. Merge them. */
708 merge_blocks (a, b);
709 }
710
711 /* Blocks A and B are to be merged into a single block. B has no outgoing
712 fallthru edge, so it can be moved after A without adding or modifying
713 any jumps (aside from the jump from A to B). */
714
715 static void
716 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
717 {
718 rtx barrier, real_b_end;
719 rtx label, table;
720
721 real_b_end = BB_END (b);
722
723 /* If there is a jump table following block B, temporarily add the jump table
724 to block B so that it will also be moved to the correct location. */
725 if (tablejump_p (BB_END (b), &label, &table)
726 && prev_active_insn (label) == BB_END (b))
727 {
728 BB_END (b) = table;
729 }
730
731 /* There had better have been a barrier there. Delete it. */
732 barrier = NEXT_INSN (BB_END (b));
733 if (barrier && GET_CODE (barrier) == BARRIER)
734 delete_insn (barrier);
735
736 /* Move block and loop notes out of the chain so that we do not
737 disturb their order.
738
739 ??? A better solution would be to squeeze out all the non-nested notes
740 and adjust the block trees appropriately. Even better would be to have
741 a tighter connection between block trees and rtl so that this is not
742 necessary. */
743 if (squeeze_notes (&BB_HEAD (b), &BB_END (b)))
744 abort ();
745
746 /* Scramble the insn chain. */
747 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
748
749 /* Restore the real end of b. */
750 BB_END (b) = real_b_end;
751
752 if (dump_file)
753 fprintf (dump_file, "Moved block %d after %d and merged.\n",
754 b->index, a->index);
755
756 /* Now blocks A and B are contiguous. Merge them. */
757 merge_blocks (a, b);
758 }
759
760 /* Attempt to merge basic blocks that are potentially non-adjacent.
761 Return NULL iff the attempt failed; otherwise return the basic block
762 where cleanup_cfg should continue. Because the merging commonly
763 moves a basic block away or introduces another optimization
764 possibility, return the basic block just before B so cleanup_cfg
765 doesn't need to iterate.
766
767 It might be a good idea to return the basic block before C in the case
768 where C has been moved after B and originally appeared earlier in the
769 insn sequence, but we have no information available about the
770 relative ordering of these two. Hopefully it is not too common. */
771
772 static basic_block
773 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
774 {
775 basic_block next;
776 /* If C has a tail recursion label, do not merge. There is no
777 edge recorded from the call_placeholder back to this label, as
778 that would make optimize_sibling_and_tail_recursive_calls more
779 complex for no gain. */
780 if ((mode & CLEANUP_PRE_SIBCALL)
781 && GET_CODE (BB_HEAD (c)) == CODE_LABEL
782 && tail_recursion_label_p (BB_HEAD (c)))
783 return NULL;
784
785 /* If B has a fallthru edge to C, no need to move anything. */
786 if (e->flags & EDGE_FALLTHRU)
787 {
788 int b_index = b->index, c_index = c->index;
789 merge_blocks (b, c);
790 update_forwarder_flag (b);
791
792 if (dump_file)
793 fprintf (dump_file, "Merged %d and %d without moving.\n",
794 b_index, c_index);
795
796 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
797 }
798
799 /* Otherwise we will need to move code around. Do that only if expensive
800 transformations are allowed. */
801 else if (mode & CLEANUP_EXPENSIVE)
802 {
803 edge tmp_edge, b_fallthru_edge;
804 bool c_has_outgoing_fallthru;
805 bool b_has_incoming_fallthru;
806
807 /* Avoid overactive code motion, as the forwarder blocks should be
808 eliminated by edge redirection instead. One exception might be
809 if B is a forwarder block and C has no fallthru edge, but
810 that should be cleaned up by bb-reorder instead. */
811 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
812 return NULL;
813
814 /* We must make sure not to munge the nesting of lexical blocks
815 and loop notes. This is done by squeezing out all the notes
816 and leaving them there to lie. Not ideal, but functional. */
817
818 for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
819 if (tmp_edge->flags & EDGE_FALLTHRU)
820 break;
821
822 c_has_outgoing_fallthru = (tmp_edge != NULL);
823
824 for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
825 if (tmp_edge->flags & EDGE_FALLTHRU)
826 break;
827
828 b_has_incoming_fallthru = (tmp_edge != NULL);
829 b_fallthru_edge = tmp_edge;
830 next = b->prev_bb;
831 if (next == c)
832 next = next->prev_bb;
833
834 /* Otherwise, we're going to try to move C after B. If C does
835 not have an outgoing fallthru, then it can be moved
836 immediately after B without introducing or modifying jumps. */
837 if (! c_has_outgoing_fallthru)
838 {
839 merge_blocks_move_successor_nojumps (b, c);
840 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
841 }
842
843 /* If B does not have an incoming fallthru, then it can be moved
844 immediately before C without introducing or modifying jumps.
845 C cannot be the first block, so we do not have to worry about
846 accessing a non-existent block. */
847
848 if (b_has_incoming_fallthru)
849 {
850 basic_block bb;
851
852 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
853 return NULL;
854 bb = force_nonfallthru (b_fallthru_edge);
855 if (bb)
856 notice_new_block (bb);
857 }
858
859 merge_blocks_move_predecessor_nojumps (b, c);
860 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
861 }
862
863 return NULL;
864 }
865 \f
866
867 /* Remove from the MEM expressions X and Y any memory attributes
868 that are not equal. */
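/* For example, when X and Y are corresponding MEMs whose alias sets differ,
   both alias sets are reset to 0; mismatching MEM_EXPR/MEM_OFFSET data is
   dropped, the size is widened to the larger of the two and the alignment
   lowered to the smaller, which stays conservatively valid for both.  */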
869
870 void
871 merge_memattrs (rtx x, rtx y)
872 {
873 int i;
874 int j;
875 enum rtx_code code;
876 const char *fmt;
877
878 if (x == y)
879 return;
880 if (x == 0 || y == 0)
881 return;
882
883 code = GET_CODE (x);
884
885 if (code != GET_CODE (y))
886 return;
887
888 if (GET_MODE (x) != GET_MODE (y))
889 return;
890
891 if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
892 {
893 if (! MEM_ATTRS (x))
894 MEM_ATTRS (y) = 0;
895 else if (! MEM_ATTRS (y))
896 MEM_ATTRS (x) = 0;
897 else
898 {
899 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
900 {
901 set_mem_alias_set (x, 0);
902 set_mem_alias_set (y, 0);
903 }
904
905 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
906 {
907 set_mem_expr (x, 0);
908 set_mem_expr (y, 0);
909 set_mem_offset (x, 0);
910 set_mem_offset (y, 0);
911 }
912 else if (MEM_OFFSET (x) != MEM_OFFSET (y))
913 {
914 set_mem_offset (x, 0);
915 set_mem_offset (y, 0);
916 }
917
918 set_mem_size (x, MAX (MEM_SIZE (x), MEM_SIZE (y)));
919 set_mem_size (y, MEM_SIZE (x));
920
921 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
922 set_mem_align (y, MEM_ALIGN (x));
923 }
924 }
925
926 fmt = GET_RTX_FORMAT (code);
927 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
928 {
929 switch (fmt[i])
930 {
931 case 'E':
932 /* Two vectors must have the same length. */
933 if (XVECLEN (x, i) != XVECLEN (y, i))
934 return;
935
936 for (j = 0; j < XVECLEN (x, i); j++)
937 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
938
939 break;
940
941 case 'e':
942 merge_memattrs (XEXP (x, i), XEXP (y, i));
943 }
944 }
945 return;
946 }
947
948
949 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
950
951 static bool
952 insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
953 {
954 rtx p1, p2;
955
956 /* Verify that I1 and I2 are equivalent. */
957 if (GET_CODE (i1) != GET_CODE (i2))
958 return false;
959
960 p1 = PATTERN (i1);
961 p2 = PATTERN (i2);
962
963 if (GET_CODE (p1) != GET_CODE (p2))
964 return false;
965
966 /* If this is a CALL_INSN, compare register usage information.
967 If we don't check this on stack register machines, the two
968 CALL_INSNs might be merged leaving reg-stack.c with mismatching
969 numbers of stack registers in the same basic block.
970 If we don't check this on machines with delay slots, a delay slot may
971 be filled that clobbers a parameter expected by the subroutine.
972
973 ??? We take the simple route for now and assume that if they're
974 equal, they were constructed identically. */
975
976 if (GET_CODE (i1) == CALL_INSN
977 && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
978 CALL_INSN_FUNCTION_USAGE (i2))
979 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
980 return false;
981
982 #ifdef STACK_REGS
983 /* If cross_jump_death_matters is not 0, the insn's mode
984 indicates whether or not the insn contains any stack-like
985 regs. */
986
987 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
988 {
989 /* If register stack conversion has already been done, then
990 death notes must also be compared before it is certain that
991 the two instruction streams match. */
992
993 rtx note;
994 HARD_REG_SET i1_regset, i2_regset;
995
996 CLEAR_HARD_REG_SET (i1_regset);
997 CLEAR_HARD_REG_SET (i2_regset);
998
999 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1000 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1001 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1002
1003 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1004 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1005 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1006
1007 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
1008
1009 return false;
1010
1011 done:
1012 ;
1013 }
1014 #endif
1015
1016 if (reload_completed
1017 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1018 return true;
1019
1020 /* Do not do EQUIV substitution after reload. First, we're undoing the
1021 work of reload_cse. Second, we may be undoing the work of the post-
1022 reload splitting pass. */
1023 /* ??? Possibly add a new phase switch variable that can be used by
1024 targets to disallow the troublesome insns after splitting. */
1025 if (!reload_completed)
1026 {
1027 /* The following code helps take care of G++ cleanups. */
1028 rtx equiv1 = find_reg_equal_equiv_note (i1);
1029 rtx equiv2 = find_reg_equal_equiv_note (i2);
1030
1031 if (equiv1 && equiv2
1032 /* If the equivalences are not to a constant, they may
1033 reference pseudos that no longer exist, so we can't
1034 use them. */
1035 && (! reload_completed
1036 || (CONSTANT_P (XEXP (equiv1, 0))
1037 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
1038 {
1039 rtx s1 = single_set (i1);
1040 rtx s2 = single_set (i2);
1041 if (s1 != 0 && s2 != 0
1042 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
1043 {
1044 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
1045 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
1046 if (! rtx_renumbered_equal_p (p1, p2))
1047 cancel_changes (0);
1048 else if (apply_change_group ())
1049 return true;
1050 }
1051 }
1052 }
1053
1054 return false;
1055 }
1056 \f
1057 /* Look through the insns at the end of BB1 and BB2 and find the longest
1058 sequence of equivalent insns. Store the first insns of that sequence
1059 in *F1 and *F2 and return the sequence length.
1060
1061 To simplify callers of this function, if the blocks match exactly,
1062 store the heads of the blocks in *F1 and *F2. */
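/* For illustration, if BB1 ends with "r3 = r1 + r2; mem[r4] = r3; jump L"
   and BB2 ends with the same two computation/store insns followed by its
   own jump to L, the match has length 2 and *F1/*F2 point at the
   "r3 = r1 + r2" insns of the respective blocks (simple jumps themselves
   are not counted).  */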
1063
1064 static int
1065 flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
1066 basic_block bb2, rtx *f1, rtx *f2)
1067 {
1068 rtx i1, i2, last1, last2, afterlast1, afterlast2;
1069 int ninsns = 0;
1070
1071 /* Skip simple jumps at the end of the blocks. Complex jumps still
1072 need to be compared for equivalence, which we'll do below. */
1073
1074 i1 = BB_END (bb1);
1075 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1076 if (onlyjump_p (i1)
1077 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1078 {
1079 last1 = i1;
1080 i1 = PREV_INSN (i1);
1081 }
1082
1083 i2 = BB_END (bb2);
1084 if (onlyjump_p (i2)
1085 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1086 {
1087 last2 = i2;
1088 /* Count everything except an unconditional jump as an insn. */
1089 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1090 ninsns++;
1091 i2 = PREV_INSN (i2);
1092 }
1093
1094 while (true)
1095 {
1096 /* Ignore notes. */
1097 while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
1098 i1 = PREV_INSN (i1);
1099
1100 while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
1101 i2 = PREV_INSN (i2);
1102
1103 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1104 break;
1105
1106 if (!insns_match_p (mode, i1, i2))
1107 break;
1108
1109 merge_memattrs (i1, i2);
1110
1111 /* Don't begin a cross-jump with a NOTE insn. */
1112 if (INSN_P (i1))
1113 {
1114 /* If the merged insns have different REG_EQUAL notes, then
1115 remove them. */
1116 rtx equiv1 = find_reg_equal_equiv_note (i1);
1117 rtx equiv2 = find_reg_equal_equiv_note (i2);
1118
1119 if (equiv1 && !equiv2)
1120 remove_note (i1, equiv1);
1121 else if (!equiv1 && equiv2)
1122 remove_note (i2, equiv2);
1123 else if (equiv1 && equiv2
1124 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1125 {
1126 remove_note (i1, equiv1);
1127 remove_note (i2, equiv2);
1128 }
1129
1130 afterlast1 = last1, afterlast2 = last2;
1131 last1 = i1, last2 = i2;
1132 ninsns++;
1133 }
1134
1135 i1 = PREV_INSN (i1);
1136 i2 = PREV_INSN (i2);
1137 }
1138
1139 #ifdef HAVE_cc0
1140 /* Don't allow the insn after a compare to be shared by
1141 cross-jumping unless the compare is also shared. */
1142 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1143 last1 = afterlast1, last2 = afterlast2, ninsns--;
1144 #endif
1145
1146 /* Include preceding notes and labels in the cross-jump. One,
1147 this may bring us to the head of the blocks as requested above.
1148 Two, it keeps line number notes as well matched as possible. */
1149 if (ninsns)
1150 {
1151 while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
1152 last1 = PREV_INSN (last1);
1153
1154 if (last1 != BB_HEAD (bb1) && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
1155 last1 = PREV_INSN (last1);
1156
1157 while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
1158 last2 = PREV_INSN (last2);
1159
1160 if (last2 != BB_HEAD (bb2) && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
1161 last2 = PREV_INSN (last2);
1162
1163 *f1 = last1;
1164 *f2 = last2;
1165 }
1166
1167 return ninsns;
1168 }
1169
1170 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1171 the branch instruction. This means that if we commonize the control
1172 flow before the end of the basic block, the semantics remain unchanged.
1173
1174 We may assume that there exists one edge with a common destination. */
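/* For illustration, two blocks ending in "if (x < 0) goto A;" with fallthru
   to B match when their branch and fallthru destinations pair up (possibly
   through forwarder blocks, or swapped with a reversed condition); two
   blocks that each have a single ordinary successor also match.  A
   conditional jump never matches a tablejump.  */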
1175
1176 static bool
1177 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1178 {
1179 int nehedges1 = 0, nehedges2 = 0;
1180 edge fallthru1 = 0, fallthru2 = 0;
1181 edge e1, e2;
1182
1183 /* If BB1 has only one successor, we may be looking at either an
1184 unconditional jump, or a fake edge to exit. */
1185 if (bb1->succ && !bb1->succ->succ_next
1186 && (bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1187 && (GET_CODE (BB_END (bb1)) != JUMP_INSN || simplejump_p (BB_END (bb1))))
1188 return (bb2->succ && !bb2->succ->succ_next
1189 && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1190 && (GET_CODE (BB_END (bb2)) != JUMP_INSN || simplejump_p (BB_END (bb2))));
1191
1192 /* Match conditional jumps - this may get tricky when fallthru and branch
1193 edges are crossed. */
1194 if (bb1->succ
1195 && bb1->succ->succ_next
1196 && !bb1->succ->succ_next->succ_next
1197 && any_condjump_p (BB_END (bb1))
1198 && onlyjump_p (BB_END (bb1)))
1199 {
1200 edge b1, f1, b2, f2;
1201 bool reverse, match;
1202 rtx set1, set2, cond1, cond2;
1203 enum rtx_code code1, code2;
1204
1205 if (!bb2->succ
1206 || !bb2->succ->succ_next
1207 || bb2->succ->succ_next->succ_next
1208 || !any_condjump_p (BB_END (bb2))
1209 || !onlyjump_p (BB_END (bb2)))
1210 return false;
1211
1212 b1 = BRANCH_EDGE (bb1);
1213 b2 = BRANCH_EDGE (bb2);
1214 f1 = FALLTHRU_EDGE (bb1);
1215 f2 = FALLTHRU_EDGE (bb2);
1216
1217 /* Get around possible forwarders on fallthru edges. Other cases
1218 should be optimized out already. */
1219 if (FORWARDER_BLOCK_P (f1->dest))
1220 f1 = f1->dest->succ;
1221
1222 if (FORWARDER_BLOCK_P (f2->dest))
1223 f2 = f2->dest->succ;
1224
1225 /* To simplify use of this function, return false if there are
1226 unneeded forwarder blocks. These will get eliminated later
1227 during cleanup_cfg. */
1228 if (FORWARDER_BLOCK_P (f1->dest)
1229 || FORWARDER_BLOCK_P (f2->dest)
1230 || FORWARDER_BLOCK_P (b1->dest)
1231 || FORWARDER_BLOCK_P (b2->dest))
1232 return false;
1233
1234 if (f1->dest == f2->dest && b1->dest == b2->dest)
1235 reverse = false;
1236 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1237 reverse = true;
1238 else
1239 return false;
1240
1241 set1 = pc_set (BB_END (bb1));
1242 set2 = pc_set (BB_END (bb2));
1243 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1244 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1245 reverse = !reverse;
1246
1247 cond1 = XEXP (SET_SRC (set1), 0);
1248 cond2 = XEXP (SET_SRC (set2), 0);
1249 code1 = GET_CODE (cond1);
1250 if (reverse)
1251 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1252 else
1253 code2 = GET_CODE (cond2);
1254
1255 if (code2 == UNKNOWN)
1256 return false;
1257
1258 /* Verify codes and operands match. */
1259 match = ((code1 == code2
1260 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1261 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1262 || (code1 == swap_condition (code2)
1263 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1264 XEXP (cond2, 0))
1265 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1266 XEXP (cond2, 1))));
1267
1268 /* If we return true, we will join the blocks, which means that
1269 we will only have one branch prediction bit to work with. Thus
1270 we require the existing branches to have probabilities that are
1271 roughly similar. */
1272 if (match
1273 && !optimize_size
1274 && maybe_hot_bb_p (bb1)
1275 && maybe_hot_bb_p (bb2))
1276 {
1277 int prob2;
1278
1279 if (b1->dest == b2->dest)
1280 prob2 = b2->probability;
1281 else
1282 /* Do not use f2 probability as f2 may be forwarded. */
1283 prob2 = REG_BR_PROB_BASE - b2->probability;
1284
1285 /* Fail if the difference in probabilities is greater than 50%.
1286 This rules out two well-predicted branches with opposite
1287 outcomes. */
1288 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1289 {
1290 if (dump_file)
1291 fprintf (dump_file,
1292 "Outcomes of branch in bb %i and %i differs to much (%i %i)\n",
1293 bb1->index, bb2->index, b1->probability, prob2);
1294
1295 return false;
1296 }
1297 }
1298
1299 if (dump_file && match)
1300 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1301 bb1->index, bb2->index);
1302
1303 return match;
1304 }
1305
1306 /* Generic case - we are seeing a computed jump, table jump or trapping
1307 instruction. */
1308
1309 #ifndef CASE_DROPS_THROUGH
1310 /* Check whether there are tablejumps at the end of BB1 and BB2.
1311 Return true if they are identical. */
1312 {
1313 rtx label1, label2;
1314 rtx table1, table2;
1315
1316 if (tablejump_p (BB_END (bb1), &label1, &table1)
1317 && tablejump_p (BB_END (bb2), &label2, &table2)
1318 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1319 {
1320 /* The labels should never be the same rtx. If they really are the same,
1321 the jump tables are the same too. So disable crossjumping of blocks BB1
1322 and BB2 because when deleting the common insns at the end of BB1
1323 by delete_block () the jump table would be deleted too. */
1324 /* If LABEL2 is referenced in BB1->END, do not do anything
1325 because we would lose information when replacing
1326 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
1327 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1328 {
1329 /* Set IDENTICAL to true when the tables are identical. */
1330 bool identical = false;
1331 rtx p1, p2;
1332
1333 p1 = PATTERN (table1);
1334 p2 = PATTERN (table2);
1335 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1336 {
1337 identical = true;
1338 }
1339 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1340 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1341 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1342 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1343 {
1344 int i;
1345
1346 identical = true;
1347 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1348 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1349 identical = false;
1350 }
1351
1352 if (identical)
1353 {
1354 replace_label_data rr;
1355 bool match;
1356
1357 /* Temporarily replace references to LABEL1 with LABEL2
1358 in BB1->END so that we can compare the instructions. */
1359 rr.r1 = label1;
1360 rr.r2 = label2;
1361 rr.update_label_nuses = false;
1362 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1363
1364 match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1365 if (dump_file && match)
1366 fprintf (dump_file,
1367 "Tablejumps in bb %i and %i match.\n",
1368 bb1->index, bb2->index);
1369
1370 /* Restore the original label in BB1->END because when deleting
1371 a block whose end is a tablejump, the tablejump referenced
1372 from the instruction is deleted too. */
1373 rr.r1 = label2;
1374 rr.r2 = label1;
1375 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1376
1377 return match;
1378 }
1379 }
1380 return false;
1381 }
1382 }
1383 #endif
1384
1385 /* First ensure that the instructions match. There may be many outgoing
1386 edges so this test is generally cheaper. */
1387 if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
1388 return false;
1389
1390 /* Search the outgoing edges, ensure that the counts match, and find
1391 possible fallthru and exception handling edges, since these need more
1392 validation. */
1393 for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
1394 e1 = e1->succ_next, e2 = e2->succ_next)
1395 {
1396 if (e1->flags & EDGE_EH)
1397 nehedges1++;
1398
1399 if (e2->flags & EDGE_EH)
1400 nehedges2++;
1401
1402 if (e1->flags & EDGE_FALLTHRU)
1403 fallthru1 = e1;
1404 if (e2->flags & EDGE_FALLTHRU)
1405 fallthru2 = e2;
1406 }
1407
1408 /* If number of edges of various types does not match, fail. */
1409 if (e1 || e2
1410 || nehedges1 != nehedges2
1411 || (fallthru1 != 0) != (fallthru2 != 0))
1412 return false;
1413
1414 /* fallthru edges must be forwarded to the same destination. */
1415 if (fallthru1)
1416 {
1417 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1418 ? fallthru1->dest->succ->dest: fallthru1->dest);
1419 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1420 ? fallthru2->dest->succ->dest: fallthru2->dest);
1421
1422 if (d1 != d2)
1423 return false;
1424 }
1425
1426 /* Ensure the same EH region. */
1427 {
1428 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1429 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1430
1431 if (!n1 && n2)
1432 return false;
1433
1434 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1435 return false;
1436 }
1437
1438 /* We don't need to match the rest of the edges as the above checks
1439 should be enough to ensure that they are equivalent. */
1440 return true;
1441 }
1442
1443 /* E1 and E2 are edges with the same destination block. Search their
1444 predecessors for common code. If found, redirect control flow from
1445 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
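/* For illustration: when two predecessors of a join point end with the same
   tail of insns, the copy in E1->SRC is removed and control is redirected
   into E2->SRC at the start of the shared tail (splitting E2->SRC if the
   match does not cover the whole block), so the common code is kept only
   once.  */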
1446
1447 static bool
1448 try_crossjump_to_edge (int mode, edge e1, edge e2)
1449 {
1450 int nmatch;
1451 basic_block src1 = e1->src, src2 = e2->src;
1452 basic_block redirect_to, redirect_from, to_remove;
1453 rtx newpos1, newpos2;
1454 edge s;
1455
1456 /* Search backward through forwarder blocks. We don't need to worry
1457 about multiple entry or chained forwarders, as they will be optimized
1458 away. We do this to look past the unconditional jump following a
1459 conditional jump that is required due to the current CFG shape. */
1460 if (src1->pred
1461 && !src1->pred->pred_next
1462 && FORWARDER_BLOCK_P (src1))
1463 e1 = src1->pred, src1 = e1->src;
1464
1465 if (src2->pred
1466 && !src2->pred->pred_next
1467 && FORWARDER_BLOCK_P (src2))
1468 e2 = src2->pred, src2 = e2->src;
1469
1470 /* Nothing to do if we reach ENTRY, or a common source block. */
1471 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1472 return false;
1473 if (src1 == src2)
1474 return false;
1475
1476 /* Seeing more than one forwarder block would confuse us later... */
1477 if (FORWARDER_BLOCK_P (e1->dest)
1478 && FORWARDER_BLOCK_P (e1->dest->succ->dest))
1479 return false;
1480
1481 if (FORWARDER_BLOCK_P (e2->dest)
1482 && FORWARDER_BLOCK_P (e2->dest->succ->dest))
1483 return false;
1484
1485 /* Likewise with dead code (possibly newly created by the other optimizations
1486 of cfg_cleanup). */
1487 if (!src1->pred || !src2->pred)
1488 return false;
1489
1490 /* Look for the common insn sequence, part the first ... */
1491 if (!outgoing_edges_match (mode, src1, src2))
1492 return false;
1493
1494 /* ... and part the second. */
1495 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1496 if (!nmatch)
1497 return false;
1498
1499 #ifndef CASE_DROPS_THROUGH
1500 /* Here we know that the insns at the end of SRC1 which are common with SRC2
1501 will be deleted.
1502 If we have tablejumps at the end of SRC1 and SRC2,
1503 they have already been compared for equivalence in outgoing_edges_match (),
1504 so replace the references to TABLE1 with references to TABLE2. */
1505 {
1506 rtx label1, label2;
1507 rtx table1, table2;
1508
1509 if (tablejump_p (BB_END (src1), &label1, &table1)
1510 && tablejump_p (BB_END (src2), &label2, &table2)
1511 && label1 != label2)
1512 {
1513 replace_label_data rr;
1514 rtx insn;
1515
1516 /* Replace references to LABEL1 with LABEL2. */
1517 rr.r1 = label1;
1518 rr.r2 = label2;
1519 rr.update_label_nuses = true;
1520 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1521 {
1522 /* Do not replace the label in SRC1->END because when deleting
1523 a block whose end is a tablejump, the tablejump referenced
1524 from the instruction is deleted too. */
1525 if (insn != BB_END (src1))
1526 for_each_rtx (&insn, replace_label, &rr);
1527 }
1528 }
1529 }
1530 #endif
1531
1532 /* Avoid splitting if possible. */
1533 if (newpos2 == BB_HEAD (src2))
1534 redirect_to = src2;
1535 else
1536 {
1537 if (dump_file)
1538 fprintf (dump_file, "Splitting bb %i before %i insns\n",
1539 src2->index, nmatch);
1540 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1541 }
1542
1543 if (dump_file)
1544 fprintf (dump_file,
1545 "Cross jumping from bb %i to bb %i; %i common insns\n",
1546 src1->index, src2->index, nmatch);
1547
1548 redirect_to->count += src1->count;
1549 redirect_to->frequency += src1->frequency;
1550 /* We may have some registers visible through the block. */
1551 redirect_to->flags |= BB_DIRTY;
1552
1553 /* Recompute the frequencies and counts of outgoing edges. */
1554 for (s = redirect_to->succ; s; s = s->succ_next)
1555 {
1556 edge s2;
1557 basic_block d = s->dest;
1558
1559 if (FORWARDER_BLOCK_P (d))
1560 d = d->succ->dest;
1561
1562 for (s2 = src1->succ; ; s2 = s2->succ_next)
1563 {
1564 basic_block d2 = s2->dest;
1565 if (FORWARDER_BLOCK_P (d2))
1566 d2 = d2->succ->dest;
1567 if (d == d2)
1568 break;
1569 }
1570
1571 s->count += s2->count;
1572
1573 /* Take care to update possible forwarder blocks. We verified
1574 that there is no more than one in the chain, so we can't run
1575 into an infinite loop. */
1576 if (FORWARDER_BLOCK_P (s->dest))
1577 {
1578 s->dest->succ->count += s2->count;
1579 s->dest->count += s2->count;
1580 s->dest->frequency += EDGE_FREQUENCY (s);
1581 }
1582
1583 if (FORWARDER_BLOCK_P (s2->dest))
1584 {
1585 s2->dest->succ->count -= s2->count;
1586 if (s2->dest->succ->count < 0)
1587 s2->dest->succ->count = 0;
1588 s2->dest->count -= s2->count;
1589 s2->dest->frequency -= EDGE_FREQUENCY (s);
1590 if (s2->dest->frequency < 0)
1591 s2->dest->frequency = 0;
1592 if (s2->dest->count < 0)
1593 s2->dest->count = 0;
1594 }
1595
1596 if (!redirect_to->frequency && !src1->frequency)
1597 s->probability = (s->probability + s2->probability) / 2;
1598 else
1599 s->probability
1600 = ((s->probability * redirect_to->frequency +
1601 s2->probability * src1->frequency)
1602 / (redirect_to->frequency + src1->frequency));
1603 }
1604
1605 update_br_prob_note (redirect_to);
1606
1607 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1608
1609 /* Skip possible basic block header. */
1610 if (GET_CODE (newpos1) == CODE_LABEL)
1611 newpos1 = NEXT_INSN (newpos1);
1612
1613 if (GET_CODE (newpos1) == NOTE)
1614 newpos1 = NEXT_INSN (newpos1);
1615
1616 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1617 to_remove = redirect_from->succ->dest;
1618
1619 redirect_edge_and_branch_force (redirect_from->succ, redirect_to);
1620 delete_basic_block (to_remove);
1621
1622 update_forwarder_flag (redirect_from);
1623
1624 return true;
1625 }
1626
1627 /* Search the predecessors of BB for common insn sequences. When found,
1628 share code between them by redirecting control flow. Return true if
1629 any changes made. */
1630
1631 static bool
1632 try_crossjump_bb (int mode, basic_block bb)
1633 {
1634 edge e, e2, nexte2, nexte, fallthru;
1635 bool changed;
1636 int n = 0, max;
1637
1638 /* Nothing to do if there are not at least two incoming edges. */
1639 if (!bb->pred || !bb->pred->pred_next)
1640 return false;
1641
1642 /* It is always cheapest to redirect a block that ends in a branch to
1643 a block that falls through into BB, as that adds no branches to the
1644 program. We'll try that combination first. */
1645 fallthru = NULL;
1646 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1647 for (e = bb->pred; e ; e = e->pred_next, n++)
1648 {
1649 if (e->flags & EDGE_FALLTHRU)
1650 fallthru = e;
1651 if (n > max)
1652 return false;
1653 }
1654
1655 changed = false;
1656 for (e = bb->pred; e; e = nexte)
1657 {
1658 nexte = e->pred_next;
1659
1660 /* As noted above, first try with the fallthru predecessor. */
1661 if (fallthru)
1662 {
1663 /* Don't combine the fallthru edge into anything else.
1664 If there is a match, we'll do it the other way around. */
1665 if (e == fallthru)
1666 continue;
1667 /* If nothing changed since the last attempt, there is nothing
1668 we can do. */
1669 if (!first_pass
1670 && (!(e->src->flags & BB_DIRTY)
1671 && !(fallthru->src->flags & BB_DIRTY)))
1672 continue;
1673
1674 if (try_crossjump_to_edge (mode, e, fallthru))
1675 {
1676 changed = true;
1677 nexte = bb->pred;
1678 continue;
1679 }
1680 }
1681
1682 /* Non-obvious work limiting check: Recognize that we're going
1683 to call try_crossjump_bb on every basic block. So if we have
1684 two blocks with lots of outgoing edges (a switch) and they
1685 share lots of common destinations, then we would do the
1686 cross-jump check once for each common destination.
1687
1688 Now, if the blocks actually are cross-jump candidates, then
1689 all of their destinations will be shared. Which means that
1690 we only need check them for cross-jump candidacy once. We
1691 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1692 choosing to do the check from the block for which the edge
1693 in question is the first successor of A. */
1694 if (e->src->succ != e)
1695 continue;
1696
1697 for (e2 = bb->pred; e2; e2 = nexte2)
1698 {
1699 nexte2 = e2->pred_next;
1700
1701 if (e2 == e)
1702 continue;
1703
1704 /* We've already checked the fallthru edge above. */
1705 if (e2 == fallthru)
1706 continue;
1707
1708 /* The "first successor" check above only prevents multiple
1709 checks of crossjump(A,B). In order to prevent redundant
1710 checks of crossjump(B,A), require that A be the block
1711 with the lowest index. */
1712 if (e->src->index > e2->src->index)
1713 continue;
1714
1715 /* If nothing changed since the last attempt, there is nothing
1716 we can do. */
1717 if (!first_pass
1718 && (!(e->src->flags & BB_DIRTY)
1719 && !(e2->src->flags & BB_DIRTY)))
1720 continue;
1721
1722 if (try_crossjump_to_edge (mode, e, e2))
1723 {
1724 changed = true;
1725 nexte = bb->pred;
1726 break;
1727 }
1728 }
1729 }
1730
1731 return changed;
1732 }
1733
1734 /* Do simple CFG optimizations - basic block merging, simplification of jump
1735 instructions, etc. Return nonzero if changes were made. */
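/* The individual transformations below each record whether they changed
   anything; the walk over all basic blocks is repeated until an entire
   iteration makes no further progress.  */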
1736
1737 static bool
1738 try_optimize_cfg (int mode)
1739 {
1740 bool changed_overall = false;
1741 bool changed;
1742 int iterations = 0;
1743 basic_block bb, b, next;
1744
1745 if (mode & CLEANUP_CROSSJUMP)
1746 add_noreturn_fake_exit_edges ();
1747
1748 FOR_EACH_BB (bb)
1749 update_forwarder_flag (bb);
1750
1751 if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1752 clear_bb_flags ();
1753
1754 if (! targetm.cannot_modify_jumps_p ())
1755 {
1756 first_pass = true;
1757 /* Attempt to merge blocks as made possible by edge removal. If
1758 a block has only one successor, and the successor has only
1759 one predecessor, they may be combined. */
1760 do
1761 {
1762 changed = false;
1763 iterations++;
1764
1765 if (dump_file)
1766 fprintf (dump_file,
1767 "\n\ntry_optimize_cfg iteration %i\n\n",
1768 iterations);
1769
1770 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1771 {
1772 basic_block c;
1773 edge s;
1774 bool changed_here = false;
1775
1776 /* Delete trivially dead basic blocks. */
1777 while (b->pred == NULL)
1778 {
1779 c = b->prev_bb;
1780 if (dump_file)
1781 fprintf (dump_file, "Deleting block %i.\n",
1782 b->index);
1783
1784 delete_basic_block (b);
1785 if (!(mode & CLEANUP_CFGLAYOUT))
1786 changed = true;
1787 b = c;
1788 }
1789
1790 /* Remove code labels no longer used. Don't do this
1791 before CALL_PLACEHOLDER is removed, as some branches
1792 may be hidden within. */
1793 if (b->pred->pred_next == NULL
1794 && (b->pred->flags & EDGE_FALLTHRU)
1795 && !(b->pred->flags & EDGE_COMPLEX)
1796 && GET_CODE (BB_HEAD (b)) == CODE_LABEL
1797 && (!(mode & CLEANUP_PRE_SIBCALL)
1798 || !tail_recursion_label_p (BB_HEAD (b)))
1799 /* If the previous block ends with a branch to this
1800 block, we can't delete the label. Normally this
1801 is a condjump that is yet to be simplified, but
1802 if CASE_DROPS_THRU, this can be a tablejump with
1803 some element going to the same place as the
1804 default (fallthru). */
1805 && (b->pred->src == ENTRY_BLOCK_PTR
1806 || GET_CODE (BB_END (b->pred->src)) != JUMP_INSN
1807 || ! label_is_jump_target_p (BB_HEAD (b),
1808 BB_END (b->pred->src))))
1809 {
1810 rtx label = BB_HEAD (b);
1811
1812 delete_insn_chain (label, label);
1813 /* In case the label is undeletable, move it after the
1814 BASIC_BLOCK note. */
1815 if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
1816 {
1817 rtx bb_note = NEXT_INSN (BB_HEAD (b));
1818
1819 reorder_insns_nobb (label, label, bb_note);
1820 BB_HEAD (b) = bb_note;
1821 }
1822 if (dump_file)
1823 fprintf (dump_file, "Deleted label in block %i.\n",
1824 b->index);
1825 }
1826
1827 /* If we fall through an empty block, we can remove it. */
1828 if (!(mode & CLEANUP_CFGLAYOUT)
1829 && b->pred->pred_next == NULL
1830 && (b->pred->flags & EDGE_FALLTHRU)
1831 && GET_CODE (BB_HEAD (b)) != CODE_LABEL
1832 && FORWARDER_BLOCK_P (b)
1833 /* Note that forwarder_block_p being true ensures that
1834 there is a successor for this block. */
1835 && (b->succ->flags & EDGE_FALLTHRU)
1836 && n_basic_blocks > 1)
1837 {
1838 if (dump_file)
1839 fprintf (dump_file,
1840 "Deleting fallthru block %i.\n",
1841 b->index);
1842
1843 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
1844 redirect_edge_succ_nodup (b->pred, b->succ->dest);
1845 delete_basic_block (b);
1846 changed = true;
1847 b = c;
1848 }
1849
1850 if ((s = b->succ) != NULL
1851 && s->succ_next == NULL
1852 && !(s->flags & EDGE_COMPLEX)
1853 && (c = s->dest) != EXIT_BLOCK_PTR
1854 && c->pred->pred_next == NULL
1855 && b != c)
1856 {
1857 /* When not in cfg_layout mode, use code aware of reordering
1858 INSNs. This code possibly creates new basic blocks, so it
1859 does not fit the merge_blocks interface and is kept here in
1860 the hope that it will become useless once more of the compiler
1861 is transformed to use cfg_layout mode. */
1862
1863 if ((mode & CLEANUP_CFGLAYOUT)
1864 && can_merge_blocks_p (b, c))
1865 {
1866 merge_blocks (b, c);
1867 update_forwarder_flag (b);
1868 changed_here = true;
1869 }
1870 else if (!(mode & CLEANUP_CFGLAYOUT)
1871 /* If the jump insn has side effects,
1872 we can't kill the edge. */
1873 && (GET_CODE (BB_END (b)) != JUMP_INSN
1874 || (reload_completed
1875 ? simplejump_p (BB_END (b))
1876 : onlyjump_p (BB_END (b))))
1877 && (next = merge_blocks_move (s, b, c, mode)))
1878 {
1879 b = next;
1880 changed_here = true;
1881 }
1882 }
1883
1884 /* Simplify branch over branch. */
1885 if ((mode & CLEANUP_EXPENSIVE)
1886 && !(mode & CLEANUP_CFGLAYOUT)
1887 && try_simplify_condjump (b))
1888 changed_here = true;
1889
1890 /* If B has a single outgoing edge, but uses a
1891 non-trivial jump instruction without side-effects, we
1892 can either delete the jump entirely, or replace it
1893 with a simple unconditional jump. */
1894 if (b->succ
1895 && ! b->succ->succ_next
1896 && b->succ->dest != EXIT_BLOCK_PTR
1897 && onlyjump_p (BB_END (b))
1898 && try_redirect_by_replacing_jump (b->succ, b->succ->dest,
1899 (mode & CLEANUP_CFGLAYOUT) != 0))
1900 {
1901 update_forwarder_flag (b);
1902 changed_here = true;
1903 }
1904
1905 /* Simplify branch to branch. */
1906 if (try_forward_edges (mode, b))
1907 changed_here = true;
1908
1909 /* Look for shared code between blocks. */
1910 if ((mode & CLEANUP_CROSSJUMP)
1911 && try_crossjump_bb (mode, b))
1912 changed_here = true;
1913
1914 /* Don't get confused by the index shift caused by
1915 deleting blocks. */
1916 if (!changed_here)
1917 b = b->next_bb;
1918 else
1919 changed = true;
1920 }
1921
1922 if ((mode & CLEANUP_CROSSJUMP)
1923 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
1924 changed = true;
1925
1926 #ifdef ENABLE_CHECKING
1927 if (changed)
1928 verify_flow_info ();
1929 #endif
1930
1931 changed_overall |= changed;
1932 first_pass = false;
1933 }
1934 while (changed);
1935 }
1936
1937 if (mode & CLEANUP_CROSSJUMP)
1938 remove_fake_edges ();
1939
1940 clear_aux_for_blocks ();
1941
1942 return changed_overall;
1943 }
1944 \f
1945 /* Delete all unreachable basic blocks. */
1946
1947 bool
1948 delete_unreachable_blocks (void)
1949 {
1950 bool changed = false;
1951 basic_block b, next_bb;
1952
1953 find_unreachable_blocks ();
1954
1955 /* Delete all unreachable basic blocks. */
1956
1957 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
1958 {
1959 next_bb = b->next_bb;
1960
1961 if (!(b->flags & BB_REACHABLE))
1962 {
1963 delete_basic_block (b);
1964 changed = true;
1965 }
1966 }
1967
1968 if (changed)
1969 tidy_fallthru_edges ();
1970 return changed;
1971 }
1972 \f
1973 /* Tidy the CFG by deleting unreachable code and whatnot. */
1974
1975 bool
1976 cleanup_cfg (int mode)
1977 {
1978 bool changed = false;
1979
1980 timevar_push (TV_CLEANUP_CFG);
1981 if (delete_unreachable_blocks ())
1982 {
1983 changed = true;
1984 /* We've possibly created trivially dead code. Clean it up right
1985 now to introduce more opportunities for try_optimize_cfg. */
1986 if (!(mode & (CLEANUP_NO_INSN_DEL
1987 | CLEANUP_UPDATE_LIFE | CLEANUP_PRE_SIBCALL))
1988 && !reload_completed)
1989 delete_trivially_dead_insns (get_insns(), max_reg_num ());
1990 }
1991
1992 compact_blocks ();
1993
1994 while (try_optimize_cfg (mode))
1995 {
1996 delete_unreachable_blocks (), changed = true;
1997 if (mode & CLEANUP_UPDATE_LIFE)
1998 {
1999 /* Cleaning up the CFG introduces more opportunities for dead code
2000 removal, which in turn may introduce more opportunities for
2001 cleaning up the CFG. */
2002 if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
2003 PROP_DEATH_NOTES
2004 | PROP_SCAN_DEAD_CODE
2005 | PROP_KILL_DEAD_CODE
2006 | ((mode & CLEANUP_LOG_LINKS)
2007 ? PROP_LOG_LINKS : 0)))
2008 break;
2009 }
2010 else if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_PRE_SIBCALL))
2011 && (mode & CLEANUP_EXPENSIVE)
2012 && !reload_completed)
2013 {
2014 if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
2015 break;
2016 }
2017 else
2018 break;
2019 delete_dead_jumptables ();
2020 }
2021
2022 /* Kill the data we won't maintain. */
2023 free_EXPR_LIST_list (&label_value_list);
2024 timevar_pop (TV_CLEANUP_CFG);
2025
2026 return changed;
2027 }