/* Instruction scheduling pass.
   Copyright (C) 1992-2018 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "cfghooks.h"
#include "df.h"
#include "profile.h"
#include "insn-attr.h"
#include "params.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "sched-int.h"

\f
#ifdef INSN_SCHEDULING

/* The number of insns to be scheduled in total.  */
static int rgn_n_insns;

/* The number of insns scheduled so far.  */
static int sched_rgn_n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;

/* Last basic block in current ebb.  */
static basic_block last_bb;

/* Implementations of the sched_info functions for region scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx_insn *);
static int schedule_more_p (void);
static const char *ebb_print_insn (const rtx_insn *, int);
static int rank (rtx_insn *, rtx_insn *);
static int ebb_contributes_to_priority (rtx_insn *, rtx_insn *);
static basic_block earliest_block_with_similar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx_insn *, rtx_insn *);
static void debug_ebb_dependencies (rtx_insn *, rtx_insn *);

static void ebb_add_remove_insn (rtx_insn *, int);
static void ebb_add_block (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx_insn *);
static void ebb_fix_recovery_cfg (int, int, int);

/* Allocate memory and store the state of the frontend.  Return the allocated
   memory.  */
static void *
save_ebb_state (void)
{
  int *p = XNEW (int);
  *p = sched_rgn_n_insns;
  return p;
}

/* Restore the state of the frontend from P_, then free it.  */
static void
restore_ebb_state (void *p_)
{
  int *p = (int *) p_;
  sched_rgn_n_insns = *p;
  free (p_);
}
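
/* (Both hooks above are installed in ebb_sched_info below; they let
   haifa-sched.c checkpoint and later restore this frontend's only
   mutable state, the count of insns scheduled so far, for instance
   when it backtracks on a scheduling decision.)  */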

/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_rgn_n_insns < rgn_n_insns;
}

/* Print dependency information about ebb between HEAD and TAIL.  */
static void
debug_ebb_dependencies (rtx_insn *head, rtx_insn *tail)
{
  fprintf (sched_dump,
           ";; --------------- forward dependences: ------------ \n");

  fprintf (sched_dump, "\n;; --- EBB Dependences --- from bb%d to bb%d \n",
           BLOCK_NUM (head), BLOCK_NUM (tail));

  debug_dependencies (head, tail);
}

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */

static void
init_ready_list (void)
{
  int n = 0;
  rtx_insn *prev_head = current_sched_info->prev_head;
  rtx_insn *next_tail = current_sched_info->next_tail;
  rtx_insn *insn;

  sched_rgn_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == rgn_n_insns);
}

/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx_insn *insn ATTRIBUTE_UNUSED)
{
  sched_rgn_n_insns++;
}

/* INSN is being moved to its place in the schedule, after LAST.  */
static void
begin_move_insn (rtx_insn *insn, rtx_insn *last)
{
  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case, where we have a partially dead
         instruction scheduled after the last control-flow instruction.
         In this case we can create a new basic block.  It is always
         exactly one basic block, the last in the sequence.  */
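      /* For example: if a partially dead insn B was scheduled after the
         jump that ends LAST_BB, B can no longer stay in LAST_BB and is
         placed in the single newly created block that follows it.  */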

      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
                           && !IS_SPECULATION_CHECK_P (insn)
                           && BB_HEAD (last_bb) != insn
                           && BB_END (last_bb) == insn);

      {
        rtx_insn *x = NEXT_INSN (insn);
        if (e)
          gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
        else
          gcc_checking_assert (BARRIER_P (x));
      }

      if (e)
        {
          bb = split_edge (e);
          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
        }
      else
        {
          /* Create an empty unreachable block after the INSN.  */
          rtx_insn *next = NEXT_INSN (insn);
          if (next && BARRIER_P (next))
            next = NEXT_INSN (next);
          bb = create_basic_block (next, NULL_RTX, last_bb);
        }

      /* split_edge () creates BB before E->DEST.  Keep in mind that
         this operation extends the scheduling region to the end of BB.
         Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out
         of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (const rtx_insn *insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  /* '+' before insn means it is a new cycle start.  */
  if (GET_MODE (insn) == TImode)
    sprintf (tmp, "+ %4d", INSN_UID (insn));
  else
    sprintf (tmp, "  %4d", INSN_UID (insn));

  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx_insn *insn1, rtx_insn *insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

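  /* Prefer the insn from the more frequently executed block.  */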
  if (bb1->count > bb2->count)
    return -1;
  if (bb1->count < bb2->count)
    return 1;
  return 0;
}

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
ebb_contributes_to_priority (rtx_insn *next ATTRIBUTE_UNUSED,
                             rtx_insn *insn ATTRIBUTE_UNUSED)
{
  return 1;
}

/* INSN is a JUMP_INSN.  Store the set of registers that
   must be considered as used by this jump in USED.  */

void
ebb_compute_jump_reg_dependencies (rtx insn, regset used)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, b->succs)
    if ((e->flags & EDGE_FALLTHRU) == 0)
      bitmap_ior_into (used, df_get_live_in (e->dest));
}

/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

static struct common_sched_info_def ebb_common_sched_info;

static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    1, 0, 0
  };

static struct haifa_sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  ebb_contributes_to_priority,
  NULL, /* insn_finishes_block_p */

  NULL, NULL,
  NULL, NULL,
  1, 0,

  ebb_add_remove_insn,
  begin_schedule_ready,
  begin_move_insn,
  advance_target_bb,

  save_ebb_state,
  restore_ebb_state,

  SCHED_EBB
  /* We can create new blocks in begin_schedule_ready ().  */
  | NEW_BBS
};
\f
/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
       the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses a list (given by LAST_BLOCK) of already processed
   blocks in EBB.  The list is formed in `add_deps_for_risky_insns'.  */
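
/* For instance (an illustrative sketch):

       insn1:      r1 = r2 + r3
       load2:      r4 = [r1]       (in an earlier block of the ebb)
       load_insn:  r5 = [r1 + 8]   (candidate for speculative motion)

   Both loads use the single base register r1 defined by insn1, so their
   addresses differ only by a constant, and any exception load_insn could
   raise would already have been raised by load2.  */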

static basic_block
earliest_block_with_similar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx_insn *insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
        /* Found a DEF-USE dependence (insn1, load_insn).  */
        {
          sd_iterator_def fore_sd_it;
          dep_t fore_dep;

          FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
            {
              rtx_insn *insn2 = DEP_CON (fore_dep);
              basic_block insn2_block = BLOCK_FOR_INSN (insn2);

              if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
                {
                  if (earliest_block != NULL
                      && earliest_block->index < insn2_block->index)
                    continue;

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 not guaranteed to be a 1 base reg load.  */
                    continue;

                  for (bb = last_block; bb; bb = (basic_block) bb->aux)
                    if (insn2_block == bb)
                      break;

                  if (!bb)
                    /* insn2 is the similar load.  */
                    earliest_block = insn2_block;
                }
            }
        }
    }

  return earliest_block;
}

/* The following function adds dependencies between jumps and risky
   insns in given ebb.  */

static void
add_deps_for_risky_insns (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn, *prev;
  int classification;
  rtx_insn *last_jump = NULL;
  rtx_insn *next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      add_delay_dependencies (insn);
      if (control_flow_insn_p (insn))
        {
          bb = BLOCK_FOR_INSN (insn);
          bb->aux = last_block;
          last_block = bb;
          /* Ensure blocks stay in the same order.  */
          if (last_jump)
            add_dependence (insn, last_jump, REG_DEP_ANTI);
          last_jump = insn;
        }
      else if (INSN_P (insn) && last_jump != NULL_RTX)
        {
          classification = haifa_classify_insn (insn);
          prev = last_jump;

          switch (classification)
            {
            case PFREE_CANDIDATE:
              if (flag_schedule_speculative_load)
                {
                  bb = earliest_block_with_similar_load (last_block, insn);
                  if (bb)
                    {
                      bb = (basic_block) bb->aux;
                      if (!bb)
                        break;
                      prev = BB_END (bb);
                    }
                }
              /* Fall through.  */
            case TRAP_RISKY:
            case IRISKY:
            case PRISKY_CANDIDATE:
              /* ??? We could implement better checking of PRISKY_CANDIDATEs
                 analogous to sched-rgn.c.  */
              /* We cannot change the mode of the backward
                 dependency because REG_DEP_ANTI has the lowest
                 rank.  */
              if (! sched_insns_conditions_mutex_p (insn, prev))
                {
                  if ((current_sched_info->flags & DO_SPECULATION)
                      && (spec_info->mask & BEGIN_CONTROL))
                    {
                      dep_def _dep, *dep = &_dep;

                      init_dep (dep, prev, insn, REG_DEP_ANTI);

                      if (current_sched_info->flags & USE_DEPS_LIST)
                        {
                          DEP_STATUS (dep) = set_dep_weak (DEP_ANTI,
                                                           BEGIN_CONTROL,
                                                           MAX_DEP_WEAK);
                        }
                      sd_add_or_update_dep (dep, false);
                    }
                  else
                    add_dependence (insn, prev, REG_DEP_CONTROL);
                }

              break;

            default:
              break;
            }
        }
    }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}

/* Schedule a single extended basic block, defined by the boundaries
   HEAD and TAIL.

   We change our expectations about scheduler behavior depending on
   whether MODULO_SCHEDULING is true.  If it is, we expect that the
   caller has already called set_modulo_params and created delay pairs
   as appropriate.  If the modulo schedule failed, we return NULL.  */

basic_block
schedule_ebb (rtx_insn *head, rtx_insn *tail, bool modulo_scheduling)
{
  basic_block first_bb, target_bb;
  struct deps_desc tmp_deps;
  bool success;

  /* Blah.  We should fix the rest of the code not to get confused by
     a note or two.  */
  while (head != tail)
    {
      if (NOTE_P (head) || DEBUG_INSN_P (head))
        head = NEXT_INSN (head);
      else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
        tail = PREV_INSN (tail);
      else if (LABEL_P (head))
        head = NEXT_INSN (head);
      else
        break;
    }

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps, false);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they are always single block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  remove_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
  success = schedule_block (&target_bb, NULL);
  gcc_assert (success || modulo_scheduling);

  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might pack all instructions into fewer blocks,
     so we may have made some of them empty.  Can't assert (b == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (modulo_scheduling || sched_rgn_n_insns == rgn_n_insns);

  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
              || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
                  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return success ? last_bb : NULL;
}
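
/* An illustrative sketch of a modulo-scheduling caller of schedule_ebb;
   argument lists are elided here, and the real set_modulo_params and
   delay-pair interface live in haifa-sched.c:

       schedule_ebbs_init ();
       set_modulo_params (...);
       ... create delay pairs for the loop being pipelined ...
       if (schedule_ebb (head, tail, true) == NULL)
         ... the modulo schedule failed; fall back ...
       schedule_ebbs_finish ();  */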

/* Perform initializations before running schedule_ebbs or a single
   schedule_ebb.  */
void
schedule_ebbs_init (void)
{
  /* Setup infos.  */
  {
    memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
            sizeof (ebb_common_sched_info));

    ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
    ebb_common_sched_info.add_block = ebb_add_block;
    ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;

    common_sched_info = &ebb_common_sched_info;
    sched_deps_info = &ebb_sched_deps_info;
    current_sched_info = &ebb_sched_info;
  }

  haifa_sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
  bitmap_initialize (&dont_calc_deps, 0);
  bitmap_clear (&dont_calc_deps);
}

/* Perform cleanups after scheduling using schedule_ebbs or schedule_ebb.  */
void
schedule_ebbs_finish (void)
{
  bitmap_clear (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  haifa_sched_finish ();
}

/* The main entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx_insn *tail;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return;

  if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
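  /* The params above are percentages; rescale the cutoff to
     REG_BR_PROB_BASE units so it can be compared against edge
     probabilities below.  */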
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  schedule_ebbs_init ();

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *head = BB_HEAD (bb);

      if (bb->flags & BB_DISABLE_SCHEDULE)
        continue;

      for (;;)
        {
          edge e;
          tail = BB_END (bb);
          if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
              || LABEL_P (BB_HEAD (bb->next_bb)))
            break;
          e = find_fallthru_edge (bb->succs);
          if (! e)
            break;
          if (e->probability.initialized_p ()
              && e->probability.to_reg_br_prob_base () <= probability_cutoff)
            break;
          if (e->dest->flags & BB_DISABLE_SCHEDULE)
            break;
          bb = bb->next_bb;
        }

      bb = schedule_ebb (head, tail, false);
    }
  schedule_ebbs_finish ();
}

/* INSN has been added to/removed from current ebb.  */
static void
ebb_add_remove_insn (rtx_insn *insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;
}

/* BB was added to ebb after AFTER.  */
static void
ebb_add_block (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERs,
     therefore, they always form a single-block EBB,
     therefore, we can use rec->index to identify such EBBs.  */
  if (after == EXIT_BLOCK_PTR_FOR_FN (cfun))
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}

/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx_insn *insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
          && control_flow_insn_p (insn)
          /* We handle interblock movement of the speculation check
             or over a speculation check in
             haifa-sched.c: move_block_after_check ().  */
          && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
          && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
        {
          /* Assert that we don't move jumps across blocks.  */
          gcc_assert (!control_flow_insn_p (BB_END (bb))
                      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
          return bb;
        }
      else
        return 0;
    }
  else
    /* Return next nonempty block.  */
    {
      do
        {
          gcc_assert (bb != last_bb);

          bb = bb->next_bb;
        }
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}

/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
                      int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK_FOR_FN (cfun, jump_bbi);
}

#endif /* INSN_SCHEDULING */