/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
\f
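/* Overview: this pass schedules instructions over extended basic blocks
   (EBBs): chains of basic blocks linked by fallthrough edges, where each
   block after the first can only be entered by falling through from its
   predecessor (its head is not a label).  The chains are built in
   schedule_ebbs () below, and each one is handed to the haifa scheduler
   (haifa-sched.c) as a single scheduling region.  */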
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "cfglayout.h"
#include "params.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"
\f
/* The number of insns scheduled so far.  */
static int sched_n_insns;

/* The number of insns to be scheduled in total.  */
static int n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;
/* Sets of basic blocks that are ebb heads or tails, respectively.  */
static bitmap_head ebb_head, ebb_tail;

/* Last basic block in current ebb.  */
static basic_block last_bb;

/* Implementations of the sched_info functions for ebb scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx, rtx);
static int schedule_more_p (void);
static const char *ebb_print_insn (rtx, int);
static int rank (rtx, rtx);
static int contributes_to_priority (rtx, rtx);
static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx, rtx);
static basic_block schedule_ebb (rtx, rtx);

static void add_remove_insn (rtx, int);
static void add_block1 (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx);
static void fix_recovery_cfg (int, int, int);

#ifdef ENABLE_CHECKING
static int ebb_head_or_leaf_p (basic_block, int);
#endif

/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_n_insns < n_insns;
}

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */

static void
init_ready_list (void)
{
  int n = 0;
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx insn;

  sched_n_insns = 0;

#if 0
  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_dependencies ();
#endif

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == n_insns);
}

/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx insn, rtx last)
{
  sched_n_insns++;

  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      edge_iterator ei;
      basic_block bb;

      /* An obscure special case: a partially dead instruction has been
         scheduled after the last control flow instruction.  In this case
         we can create a new basic block.  It is always exactly one basic
         block, last in the sequence.  */

      FOR_EACH_EDGE (e, ei, last_bb->succs)
        if (e->flags & EDGE_FALLTHRU)
          break;

#ifdef ENABLE_CHECKING
      gcc_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_assert (BLOCK_FOR_INSN (insn) == last_bb
                  && !RECOVERY_BLOCK (insn)
                  && BB_HEAD (last_bb) != insn
                  && BB_END (last_bb) == insn);

      {
        rtx x;

        x = NEXT_INSN (insn);
        if (e)
          gcc_assert (NOTE_P (x) || LABEL_P (x));
        else
          gcc_assert (BARRIER_P (x));
      }
#endif

      if (e)
        {
          bb = split_edge (e);
          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
        }
      else
        bb = create_basic_block (insn, NULL, last_bb);

      /* split_edge () creates BB before E->DEST.  Keep in mind that this
         operation extends the scheduling region to the end of BB.  Hence,
         we need to shift NEXT_TAIL so that haifa-sched.c won't go outside
         the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append the new block to the current ebb; add_block () ends up in
         add_block1 () below, which updates last_bb.  */
      add_block (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}
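
/* An example of the special case handled above: when the jump ending the
   last block is scheduled before some instructions of its own block,
   those instructions end up after the jump and need a block of their
   own.  Depending on whether a fallthrough successor exists, that block
   is either split off the fallthrough edge or created after the
   barrier.  */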

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (rtx insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  sprintf (tmp, "%4d", INSN_UID (insn));
  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx insn1, rtx insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

  if (bb1->count > bb2->count
      || bb1->frequency > bb2->frequency)
    return -1;
  if (bb1->count < bb2->count
      || bb1->frequency < bb2->frequency)
    return 1;
  return 0;
}
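
/* Note that rank () acts only as a tie-breaker: rank_for_schedule () in
   haifa-sched.c consults it after the insns' priorities have compared
   equal, so its effect is to prefer insns from the more frequently
   executed block.  */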

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
contributes_to_priority (rtx next ATTRIBUTE_UNUSED,
                         rtx insn ATTRIBUTE_UNUSED)
{
  return 1;
}
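
/* Counting every backward dependence is the simplest possible choice
   here; sched-rgn.c, by contrast, only lets dependencies between insns
   in the same block contribute to priority.  */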

/* INSN is a JUMP_INSN, COND_SET is the set of registers that are
   conditionally set before INSN.  Store the set of registers that
   must be considered as used by this jump in USED and that of
   registers that must be considered as set in SET.  */

static void
compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used,
                               regset set)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, b->succs)
    if (e->flags & EDGE_FALLTHRU)
      /* The jump may be a by-product of a branch that has been merged
         into the main codepath after being conditionalized.  Therefore
         it may guard the fallthrough block from using a value that has
         been conditionally overwritten in the main codepath.  So we
         consider that the jump restores the value of the main codepath.  */
      bitmap_and (set, glat_start[e->dest->index], cond_set);
    else
      bitmap_ior_into (used, glat_start[e->dest->index]);
}

/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

static struct sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  contributes_to_priority,
  compute_jump_reg_dependencies,

  NULL, NULL,
  NULL, NULL,
  0, 1, 0,

  add_remove_insn,
  begin_schedule_ready,
  add_block1,
  advance_target_bb,
  fix_recovery_cfg,
#ifdef ENABLE_CHECKING
  ebb_head_or_leaf_p,
#endif
  /* We need DETACH_LIFE_INFO to be able to create new basic blocks.
     See begin_schedule_ready ().  */
  SCHED_EBB | USE_GLAT | DETACH_LIFE_INFO
};
\f
/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
       the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses the list (given by LAST_BLOCK) of already processed
   blocks in EBB.  The list is formed in add_deps_for_risky_insns ().  */

static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  rtx back_link;
  basic_block bb, earliest_block = NULL;

  for (back_link = LOG_LINKS (load_insn);
       back_link;
       back_link = XEXP (back_link, 1))
    {
      rtx insn1 = XEXP (back_link, 0);

      if (GET_MODE (back_link) == VOIDmode)
        {
          /* Found a DEF-USE dependence (insn1, load_insn).  */
          rtx fore_link;

          for (fore_link = INSN_DEPEND (insn1);
               fore_link;
               fore_link = XEXP (fore_link, 1))
            {
              rtx insn2 = XEXP (fore_link, 0);
              basic_block insn2_block = BLOCK_FOR_INSN (insn2);

              if (GET_MODE (fore_link) == VOIDmode)
                {
                  if (earliest_block != NULL
                      && earliest_block->index < insn2_block->index)
                    continue;

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 is not guaranteed to be a 1 base reg load.  */
                    continue;

                  for (bb = last_block; bb; bb = bb->aux)
                    if (insn2_block == bb)
                      break;

                  if (!bb)
                    /* insn2 is the similar load.  */
                    earliest_block = insn2_block;
                }
            }
        }
    }

  return earliest_block;
}
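
/* A hypothetical sketch (simplified RTL) of what the function above
   looks for:

       insn1:      r1 = r2 + 8       ; defines the common base register
       insn2:      r3 = [r1]         ; load in an earlier block of the EBB
       load_insn:  r4 = [r1 + 4]     ; candidate for speculative motion

   Both loads have a def-use dependence on insn1 and use the single base
   register r1, so their addresses differ only by a constant; if moving
   load_insn into insn2's block could trap, insn2 would have trapped
   there already.  */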

/* The following function adds dependencies between jumps and risky
   insns in given ebb.  */

static void
add_deps_for_risky_insns (rtx head, rtx tail)
{
  rtx insn, prev;
  int class;
  rtx last_jump = NULL_RTX;
  rtx next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (control_flow_insn_p (insn))
      {
        bb = BLOCK_FOR_INSN (insn);
        bb->aux = last_block;
        last_block = bb;
        last_jump = insn;
      }
    else if (INSN_P (insn) && last_jump != NULL_RTX)
      {
        class = haifa_classify_insn (insn);
        prev = last_jump;
        switch (class)
          {
          case PFREE_CANDIDATE:
            if (flag_schedule_speculative_load)
              {
                bb = earliest_block_with_similiar_load (last_block, insn);
                if (bb)
                  {
                    bb = bb->aux;
                    if (!bb)
                      break;
                    prev = BB_END (bb);
                  }
              }
            /* Fall through.  */
          case TRAP_RISKY:
          case IRISKY:
          case PRISKY_CANDIDATE:
            /* ??? We could implement better checking of PRISKY_CANDIDATEs,
               analogous to sched-rgn.c.  */
            /* We cannot change the mode of the backward dependency
               because REG_DEP_ANTI has the lowest rank.  */
            if (! sched_insns_conditions_mutex_p (insn, prev))
              {
                if (!(current_sched_info->flags & DO_SPECULATION))
                  {
                    enum DEPS_ADJUST_RESULT res;

                    res = add_or_update_back_dep (insn, prev,
                                                  REG_DEP_ANTI, DEP_ANTI);

                    if (res == DEP_CREATED)
                      add_forw_dep (insn, LOG_LINKS (insn));
                    else
                      gcc_assert (res != DEP_CHANGED);
                  }
                else
                  add_or_update_back_forw_dep (insn, prev, REG_DEP_ANTI,
                                               set_dep_weak (DEP_ANTI,
                                                             BEGIN_CONTROL,
                                                             MAX_DEP_WEAK));
              }

            break;

          default:
            break;
          }
      }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}
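
/* The anti dependencies added above keep a risky insn from being hoisted
   over the jump that guards it: without DO_SPECULATION the dependence is
   hard, while with DO_SPECULATION it is created with BEGIN_CONTROL
   weakness, so the scheduler may still move the insn as a
   control-speculative one and generate recovery code for it.  */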

/* Schedule a single extended basic block, defined by the boundaries HEAD
   and TAIL.  */

static basic_block
schedule_ebb (rtx head, rtx tail)
{
  basic_block first_bb, target_bb;
  struct deps tmp_deps;

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute LOG_LINKS.  */
      init_deps (&tmp_deps);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      /* Compute INSN_DEPEND.  */
      compute_forward_dependences (head, tail);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they are always single-block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  if (write_symbols != NO_DEBUG)
    {
      save_line_notes (first_bb->index, head, tail);
      rm_line_notes (head, tail);
    }

  /* rm_other_notes only removes notes which are _inside_ the
     block---that is, it won't remove notes before the first real insn
     or after the last real insn of the block.  So if the first insn
     has a REG_SAVE_NOTE which would otherwise be emitted before the
     insn, it is redundant with the note before the start of the
     block, and so we have to take it out.  */
  if (INSN_P (head))
    {
      rtx note;

      for (note = REG_NOTES (head); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
          remove_note (head, note);
    }

  /* Remove remaining note insns from the block, save them in
     note_list.  These notes are restored at the end of
     schedule_block ().  */
  rm_other_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  current_sched_info->queue_must_finish_empty = 1;

  target_bb = first_bb;
  schedule_block (&target_bb, n_insns);

  /* We might pack all instructions into fewer blocks,
     so we may have made some of them empty.  Can't assert (b == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (sched_n_insns == n_insns);
  head = current_sched_info->head;
  tail = current_sched_info->tail;

  if (write_symbols != NO_DEBUG)
    restore_line_notes (head, tail);

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
                  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return last_bb;
}

/* The one entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx tail;
  sbitmap large_region_blocks, blocks;
  int any_large_regions;

  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == NUM_FIXED_BLOCKS)
    return;

  /* We need current_sched_info in init_dependency_caches, which is
     invoked via sched_init.  */
  current_sched_info = &ebb_sched_info;

  sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and the ebb_head/ebb_tail markers.  */
  bitmap_initialize (&dont_calc_deps, 0);
  bitmap_clear (&dont_calc_deps);
  bitmap_initialize (&ebb_head, 0);
  bitmap_clear (&ebb_head);
  bitmap_initialize (&ebb_tail, 0);
  bitmap_clear (&ebb_tail);

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB (bb)
    {
      rtx head = BB_HEAD (bb);

      for (;;)
        {
          edge e;
          edge_iterator ei;
          tail = BB_END (bb);
          if (bb->next_bb == EXIT_BLOCK_PTR
              || LABEL_P (BB_HEAD (bb->next_bb)))
            break;
          FOR_EACH_EDGE (e, ei, bb->succs)
            if ((e->flags & EDGE_FALLTHRU) != 0)
              break;
          if (! e)
            break;
          if (e->probability <= probability_cutoff)
            break;
          bb = bb->next_bb;
        }

      /* We should fix the rest of the code not to get confused by
         a note or two.  */
      while (head != tail)
        {
          if (NOTE_P (head))
            head = NEXT_INSN (head);
          else if (NOTE_P (tail))
            tail = PREV_INSN (tail);
          else if (LABEL_P (head))
            head = NEXT_INSN (head);
          else
            break;
        }

      bitmap_set_bit (&ebb_head, BLOCK_NUM (head));
      bb = schedule_ebb (head, tail);
      bitmap_set_bit (&ebb_tail, bb->index);
    }
  bitmap_clear (&dont_calc_deps);

  gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO);
  /* We can create new basic blocks during scheduling, and
     attach_life_info () will create regsets for them
     (along with attaching existing info back).  */
  attach_life_info ();

  /* Update register live information.  */
  allocate_reg_life_data ();

  any_large_regions = 0;
  large_region_blocks = sbitmap_alloc (last_basic_block);
  sbitmap_zero (large_region_blocks);
  FOR_EACH_BB (bb)
    SET_BIT (large_region_blocks, bb->index);

  blocks = sbitmap_alloc (last_basic_block);
  sbitmap_zero (blocks);

  /* Update life information.  For regions consisting of multiple blocks
     we've possibly done interblock scheduling that affects global liveness.
     For regions consisting of single blocks we need to do only local
     liveness.  */
  FOR_EACH_BB (bb)
    {
      int bbi;

      bbi = bb->index;

      if (!bitmap_bit_p (&ebb_head, bbi)
          || !bitmap_bit_p (&ebb_tail, bbi)
          /* New blocks (e.g. recovery blocks) should be processed
             as parts of large regions.  */
          || !glat_start[bbi])
        any_large_regions = 1;
      else
        {
          SET_BIT (blocks, bbi);
          RESET_BIT (large_region_blocks, bbi);
        }
    }

  update_life_info (blocks, UPDATE_LIFE_LOCAL, 0);
  sbitmap_free (blocks);

  if (any_large_regions)
    {
      update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL, 0);

#ifdef ENABLE_CHECKING
      /* !!! We can't check reg_live_info here because destination
         registers of COND_EXEC's may be dead before scheduling (while
         they should be alive).  Don't know why.  */
      /*check_reg_live (true);*/
#endif
    }
  sbitmap_free (large_region_blocks);

  bitmap_clear (&ebb_head);
  bitmap_clear (&ebb_tail);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes (get_insns ());

  if (write_symbols != NO_DEBUG)
    rm_redundant_line_notes ();

  sched_finish ();
}
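
/* The remaining functions are the sched_info hooks that keep this file's
   bookkeeping (n_insns, last_bb, dont_calc_deps) in sync while the haifa
   scheduler adds or removes insns, creates recovery blocks, or moves
   jumps between blocks.  */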

/* INSN has been added to/removed from current ebb.  */
static void
add_remove_insn (rtx insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    n_insns++;
  else
    n_insns--;
}

/* BB was added to ebb after AFTER.  */
static void
add_block1 (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERS, therefore they
     always form a single-block EBB, and therefore we can use
     rec->index to identify such EBBs.  */
  if (after == EXIT_BLOCK_PTR)
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}

/* Return the next block in the ebb chain.  For parameter meaning please
   refer to sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
          && control_flow_insn_p (insn)
          && !RECOVERY_BLOCK (insn)
          && !RECOVERY_BLOCK (BB_END (bb)))
        {
          gcc_assert (!control_flow_insn_p (BB_END (bb))
                      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
          return bb;
        }
      else
        return NULL;
    }
  else if (bb != last_bb)
    return bb->next_bb;
  else
    gcc_unreachable ();
}

/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi, int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK (jump_bbi);
}

#ifdef ENABLE_CHECKING
/* Return nonzero if BB is the first or last (depending on LEAF_P) block
   in the current ebb.  For more information please refer to
   sched-int.h: struct sched_info: region_head_or_leaf_p.  */
static int
ebb_head_or_leaf_p (basic_block bb, int leaf_p)
{
  if (!leaf_p)
    return bitmap_bit_p (&ebb_head, bb->index);
  else
    return bitmap_bit_p (&ebb_tail, bb->index);
}
#endif /* ENABLE_CHECKING */