/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block upon interblock motion, ties broken by
   4.  prefer useful upon speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, or finally
   7.  choose the insn which has the most insns dependent on it.
   8.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_backward_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS backward
   dependences.  LOG_LINKS are translated into INSN_DEPEND forward
   dependences for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live registers constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */
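
/* As a standalone illustration of the two phases just described (a
   minimal sketch using toy arrays, not GCC's rtl or h_i_d data; the
   DAG below is made up for the example), the backward pass computes
   each insn's priority as its longest path to the end, and the
   forward pass repeatedly issues the highest-priority ready insn.  */
#if 0
#include <stdio.h>

#define N 4

/* dep[i][j] != 0 means insn j depends on insn i.  */
static const int dep[N][N] = {
  {0, 1, 1, 0},   /* insn 0 feeds insns 1 and 2.  */
  {0, 0, 0, 1},   /* insn 1 feeds insn 3.  */
  {0, 0, 0, 1},   /* insn 2 feeds insn 3.  */
  {0, 0, 0, 0}
};

int
main (void)
{
  int priority[N], scheduled[N] = {0, 0, 0, 0};
  int i, j, cycle;

  /* Backward pass: priority = length of the longest dependence chain
     from the insn to the end of the block (unit latencies assumed).  */
  for (i = N - 1; i >= 0; i--)
    {
      priority[i] = 0;
      for (j = 0; j < N; j++)
        if (dep[i][j] && priority[j] + 1 > priority[i])
          priority[i] = priority[j] + 1;
    }

  /* Forward pass: each cycle, issue the highest-priority insn whose
     predecessors have all been scheduled.  */
  for (cycle = 0; cycle < N; cycle++)
    {
      int best = -1;

      for (i = 0; i < N; i++)
        {
          int ready = !scheduled[i];

          for (j = 0; j < N; j++)
            if (dep[j][i] && !scheduled[j])
              ready = 0;
          if (ready && (best < 0 || priority[i] > priority[best]))
            best = i;
        }
      scheduled[best] = 1;
      printf ("cycle %d: insn %d (priority %d)\n",
              cycle, best, priority[best]);
    }
  return 0;
}
#endif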
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"
#include "params.h"
8c660648
JL
148#ifdef INSN_SCHEDULING
149
8c660648
JL
150/* issue_rate is the number of insns that can be scheduled in the same
151 machine cycle. It can be defined in the config/mach/mach.h file,
152 otherwise we set it to 1. */
153
154static int issue_rate;
155
/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -dSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

static int sched_verbose_param = 0;
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dRS).  */
FILE *sched_dump = 0;

/* Highest uid before scheduling.  */
static int old_max_uid;

/* fix_sched_param() is called from toplev.c upon detection
   of the -fsched-verbose=N option.  */

void
fix_sched_param (const char *param, const char *val)
{
  if (!strcmp (param, "verbose"))
    sched_verbose_param = atoi (val);
  else
    warning (0, "fix_sched_param: unknown param: %s", param);
}

struct haifa_insn_data *h_i_d;

#define LINE_NOTE(INSN) (h_i_d[INSN_UID (INSN)].line_note)
#define INSN_TICK(INSN) (h_i_d[INSN_UID (INSN)].tick)
#define INTER_TICK(INSN) (h_i_d[INSN_UID (INSN)].inter_tick)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)
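/* For example, with max_insn_queue_index == 7 (an 8-slot queue),
   INVALID_TICK is -8 and MIN_TICK is -7, so every valid tick is
   strictly greater than INVALID_TICK.  */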

/* Issue points are used to distinguish between instructions in max_issue ().
   For now, all instructions are equally good.  */
#define ISSUE_POINTS(INSN) 1

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
static spec_info_t spec_info;

/* True if a recovery block was added during scheduling of the current
   block.  Used to determine if we need to fix INSN_TICKs.  */
static bool added_recovery_block_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Pointers to GLAT data.  See init_glat for more information.  */
regset *glat_start, *glat_end;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx *bb_header = 0;

/* Number of basic_blocks.  */
static int old_last_basic_block;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) are the insns in the INSN_DEPEND of the unscheduled
   insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */

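/* Pictorially, the legal transitions are:

     Pending --(deps satisfied)--> Ready or Queued
     Queued  --(time passes)-----> Ready
     Ready   --(best choice)-----> Scheduled  */
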
/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c
   from the maximal time of functional unit reservations and of getting
   a result.  This is the longest time an insn may be queued.  */

static rtx *insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
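
/* A standalone sketch of the wrap-around arithmetic above (the 8-slot
   queue size is an assumption for the example).  Because
   max_insn_queue_index is a power of two minus one, masking with it is
   equivalent to taking the index modulo the queue size, without a
   division.  */
#if 0
#include <stdio.h>

#define MAX_INSN_QUEUE_INDEX 7  /* 8-slot queue: a power of two minus one.  */
#define NEXT_Q(X) (((X) + 1) & MAX_INSN_QUEUE_INDEX)
#define NEXT_Q_AFTER(X, C) (((X) + (C)) & MAX_INSN_QUEUE_INDEX)

int
main (void)
{
  int q_ptr = 6;

  /* One step from slot 6 is slot 7; three steps wrap around to slot 1.  */
  printf ("%d %d\n", NEXT_Q (q_ptr), NEXT_Q_AFTER (q_ptr, 3));
  return 0;
}
#endif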

#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
   the queue nor in the ready list.
   QUEUE_READY     - INSN is in the ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (h_i_d[INSN_UID (INSN)].queue_index)

/* The following variable value describes all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable value is the size of memory representing all
   current and future reservations of the processor units.  */
static size_t dfa_state_size;

/* The following array is used to find the best insn from the ready list
   when the automaton pipeline interface is used.  */
static char *ready_try;

/* Describe the ready list of the scheduler.
   VEC holds space enough for all insns in the current region.  VECLEN
   says how many exactly.
   FIRST is the index of the element with the highest priority; i.e. the
   last one in the ready list, since elements are ordered by ascending
   priority.
   N_READY determines how many insns are on the ready list.  */

struct ready_list
{
  rtx *vec;
  int veclen;
  int first;
  int n_ready;
};

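/* A concrete snapshot (numbers assumed for illustration): with
   veclen == 8, first == 7 and n_ready == 3, the ready insns occupy
   vec[5..7]; vec[7] holds the highest-priority insn and vec[5] -- the
   position ready_lastpos computes below -- the lowest.  */
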
/* The pointer to the ready list.  */
static struct ready_list *readyp;

/* Scheduling clock.  */
static int clock_var;

/* Number of instructions in current scheduling region.  */
static int rgn_n_insns;

static int may_trap_exp (rtx, int);

/* Nonzero iff the address is composed of at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x) \
  (REG_P (x) \
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
        || (GET_CODE (x) == LO_SUM)) \
       && (CONSTANT_P (XEXP (x, 0)) \
           || CONSTANT_P (XEXP (x, 1)))))

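/* E.g. (reg R), (plus (reg R) (const_int 4)) and
   (lo_sum (reg R) (symbol_ref S)) all satisfy the predicate, while
   (plus (reg R1) (reg R2)) does not.  */
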
/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */

static int
may_trap_exp (rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
        return TRAP_RISKY;
      else
        return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory: a volatile load.  */
      if (MEM_VOLATILE_P (x))
        return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
        return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
        return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
        return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        {
          if (fmt[i] == 'e')
            {
              int tmp_class = may_trap_exp (XEXP (x, i), is_store);
              insn_class = WORST_CLASS (insn_class, tmp_class);
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = 0; j < XVECLEN (x, i); j++)
                {
                  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
                  insn_class = WORST_CLASS (insn_class, tmp_class);
                  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
                    break;
                }
            }
          if (insn_class == TRAP_RISKY || insn_class == IRISKY)
            break;
        }
      return insn_class;
    }
}

/* Classifies insn for the purpose of verifying that it can be
   moved speculatively, by examining its pattern, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: load that needs to be checked for
   being either PFREE or PRISKY.  */

int
haifa_classify_insn (rtx insn)
{
  rtx pat = PATTERN (insn);
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i, len = XVECLEN (pat, 0);

      for (i = len - 1; i >= 0; i--)
        {
          code = GET_CODE (XVECEXP (pat, 0, i));
          switch (code)
            {
            case CLOBBER:
              /* Test if it is a 'store'.  */
              tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
              break;
            case SET:
              /* Test if it is a store.  */
              tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
              if (tmp_class == TRAP_RISKY)
                break;
              /* Test if it is a load.  */
              tmp_class
                = WORST_CLASS (tmp_class,
                               may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
                                             0));
              break;
            case COND_EXEC:
            case TRAP_IF:
              tmp_class = TRAP_RISKY;
              break;
            default:
              ;
            }
          insn_class = WORST_CLASS (insn_class, tmp_class);
          if (insn_class == TRAP_RISKY || insn_class == IRISKY)
            break;
        }
    }
  else
    {
      code = GET_CODE (pat);
      switch (code)
        {
        case CLOBBER:
          /* Test if it is a 'store'.  */
          tmp_class = may_trap_exp (XEXP (pat, 0), 1);
          break;
        case SET:
          /* Test if it is a store.  */
          tmp_class = may_trap_exp (SET_DEST (pat), 1);
          if (tmp_class == TRAP_RISKY)
            break;
          /* Test if it is a load.  */
          tmp_class =
            WORST_CLASS (tmp_class,
                         may_trap_exp (SET_SRC (pat), 0));
          break;
        case COND_EXEC:
        case TRAP_IF:
          tmp_class = TRAP_RISKY;
          break;
        default:;
        }
      insn_class = tmp_class;
    }

  return insn_class;
}

/* Forward declarations.  */

HAIFA_INLINE static int insn_cost1 (rtx, enum reg_note, rtx, rtx);
static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int);
static int schedule_insn (rtx);
static int find_set_reg_weight (rtx);
static void find_insn_reg_weight (basic_block);
static void find_insn_reg_weight1 (rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);

/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between three types of notes:

   (1) LINE_NUMBER notes, generated and used for debugging.  Here,
   before scheduling a region, a pointer to the LINE_NUMBER note is
   added to the insn following it (in save_line_notes()), and the note
   is removed (in rm_line_notes() and unlink_line_notes()).  After
   scheduling the region, this pointer is used for regeneration of
   the LINE_NUMBER note (in restore_line_notes()).

   (2) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (3) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */

static rtx unlink_other_notes (rtx, rtx);
static rtx unlink_line_notes (rtx, rtx);
static void reemit_notes (rtx);

static rtx *ready_lastpos (struct ready_list *);
static void ready_add (struct ready_list *, rtx, bool);
static void ready_sort (struct ready_list *);
static rtx ready_remove_first (struct ready_list *);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

static void debug_ready_list (struct ready_list *);

static void move_insn (rtx);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx ready_element (struct ready_list *, int);
static rtx ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);
static int max_issue (struct ready_list *, int *, int);

static rtx choose_ready (struct ready_list *);

static void fix_inter_tick (rtx, rtx);
static int fix_tick_ready (rtx);
static void change_queue_index (rtx, int);
static void resolve_dep (rtx, rtx);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void extend_ready (int);
static void extend_global (rtx);
static void extend_all (rtx);
static void init_h_i_d (rtx);
static void generate_recovery_code (rtx);
static void process_insn_depend_be_in_spec (rtx, rtx, ds_t);
static void begin_speculative_block (rtx);
static void add_to_speculative_block (rtx);
static dw_t dep_weak (ds_t);
static edge find_fallthru_edge (basic_block);
static void init_before_recovery (void);
static basic_block create_recovery_block (void);
static void create_check_block_twin (rtx, bool);
static void fix_recovery_deps (basic_block);
static void associate_line_notes_with_blocks (basic_block);
static void change_pattern (rtx, rtx);
static int speculate_insn (rtx, ds_t, rtx *);
static void dump_new_block_header (int, basic_block, rtx, rtx);
static void restore_bb_notes (basic_block);
static void extend_bb (basic_block);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
static void move_succs (VEC(edge,gc) **, basic_block);
static void init_glat (void);
static void init_glat1 (basic_block);
static void attach_life_info1 (basic_block);
static void free_glat (void);
static void sched_remove_insn (rtx);
static void clear_priorities (rtx);
static void add_jump_dependencies (rtx, rtx);
static rtx bb_note (basic_block);
static void calc_priorities (rtx);
#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
static void check_sched_flags (void);
#endif

#endif /* INSN_SCHEDULING */
\f
/* Point to state used for the current scheduling pass.  */
struct sched_info *current_sched_info;
\f
#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Working copy of frontend's sched_info variable.  */
static struct sched_info current_sched_info_var;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;

/* Compute cost of executing INSN given the dependence LINK on the insn USED.
   This is the number of cycles between instruction issue and
   instruction results.  */

HAIFA_INLINE int
insn_cost (rtx insn, rtx link, rtx used)
{
  return insn_cost1 (insn, used ? REG_NOTE_KIND (link) : REG_NOTE_MAX,
                     link, used);
}

/* Compute cost of executing INSN given the dependence on the insn USED.
   If LINK is not NULL, then its REG_NOTE_KIND is used as a dependence type.
   Otherwise, dependence between INSN and USED is assumed to be of type
   DEP_TYPE.  This function was introduced as a workaround for the
   targetm.adjust_cost hook.
   This is the number of cycles between instruction issue and
   instruction results.  */

HAIFA_INLINE static int
insn_cost1 (rtx insn, enum reg_note dep_type, rtx link, rtx used)
{
  int cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
         understand.  We can't pass these directly to
         result_ready_cost or insn_default_latency because it will
         trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
        {
          INSN_COST (insn) = 0;
          return 0;
        }
      else
        {
          cost = insn_default_latency (insn);
          if (cost < 0)
            cost = 0;

          INSN_COST (insn) = cost;
        }
    }

  /* In this case estimate cost without caring how insn is used.  */
  if (used == 0)
    return cost;

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  */
  if (recog_memoized (used) < 0)
    cost = 0;
  else
    {
      gcc_assert (!link || dep_type == REG_NOTE_KIND (link));

      if (INSN_CODE (insn) >= 0)
        {
          if (dep_type == REG_DEP_ANTI)
            cost = 0;
          else if (dep_type == REG_DEP_OUTPUT)
            {
              cost = (insn_default_latency (insn)
                      - insn_default_latency (used));
              if (cost <= 0)
                cost = 1;
            }
          else if (bypass_p (insn))
            cost = insn_latency (insn, used);
        }

      if (targetm.sched.adjust_cost_2)
        cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost);
      else
        {
          gcc_assert (link);
          if (targetm.sched.adjust_cost)
            cost = targetm.sched.adjust_cost (used, link, insn, cost);
        }

      if (cost < 0)
        cost = 0;
    }

  return cost;
}

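/* For instance (latencies assumed purely for illustration): if INSN has
   a default latency of 3 cycles and USED of 1 cycle, then before any
   target adjust_cost hook runs, a true dependence costs 3 cycles, an
   anti dependence costs 0, and an output dependence costs
   3 - 1 = 2 cycles (the difference is clamped to a minimum of 1).  */
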
/* Compute the priority number for INSN.  */

static int
priority (rtx insn)
{
  rtx link;

  if (! INSN_P (insn))
    return 0;

  if (! INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = 0;

      if (INSN_DEPEND (insn) == 0)
        this_priority = insn_cost (insn, 0, 0);
      else
        {
          rtx prev_first, twin;
          basic_block rec;

          /* For recovery check instructions we calculate priority slightly
             differently than for normal instructions.  Instead of walking
             through the INSN_DEPEND (check) list, we walk through the
             INSN_DEPEND list of each instruction in the corresponding
             recovery block.  */

          rec = RECOVERY_BLOCK (insn);
          if (!rec || rec == EXIT_BLOCK_PTR)
            {
              prev_first = PREV_INSN (insn);
              twin = insn;
            }
          else
            {
              prev_first = NEXT_INSN (BB_HEAD (rec));
              twin = PREV_INSN (BB_END (rec));
            }

          do
            {
              for (link = INSN_DEPEND (twin); link; link = XEXP (link, 1))
                {
                  rtx next;
                  int next_priority;

                  next = XEXP (link, 0);

                  if (BLOCK_FOR_INSN (next) != rec)
                    {
                      /* Critical path is meaningful in block boundaries
                         only.  */
                      if (! (*current_sched_info->contributes_to_priority)
                          (next, insn)
                          /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
                             then speculative instructions will less likely be
                             scheduled.  That is because the priority of
                             their producers will increase, and, thus, the
                             producers will more likely be scheduled, thus,
                             resolving the dependence.  */
                          || ((current_sched_info->flags & DO_SPECULATION)
                              && (DEP_STATUS (link) & SPECULATIVE)
                              && !(spec_info->flags
                                   & COUNT_SPEC_IN_CRITICAL_PATH)))
                        continue;

                      next_priority = insn_cost1 (insn,
                                                  twin == insn ?
                                                  REG_NOTE_KIND (link) :
                                                  REG_DEP_ANTI,
                                                  twin == insn ? link : 0,
                                                  next) + priority (next);

                      if (next_priority > this_priority)
                        this_priority = next_priority;
                    }
                }

              twin = PREV_INSN (twin);
            }
          while (twin != prev_first);
        }
      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_KNOWN (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}
\f
/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)                                             \
       swap_sort (READY, N_READY);                                   \
     else if ((N_READY) > 2)                                         \
       qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }    \
while (0)

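/* A standalone sketch of the sorting convention used below (it orders
   plain ints by a made-up priority, not insns).  Like rank_for_schedule,
   the comparator returns a positive value when its first argument is
   preferred, so after qsort the most-preferred element sits last --
   exactly where the ready list keeps its highest-priority insn.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

/* Positive when *x is preferred (larger), pushing preferred elements
   toward the end of the array.  */
static int
compare (const void *x, const void *y)
{
  return *(const int *) x - *(const int *) y;
}

int
main (void)
{
  int a[] = {3, 1, 2};
  int i;

  qsort (a, 3, sizeof (int), compare);
  for (i = 0; i < 3; i++)
    printf ("%d ", a[i]);   /* Prints "1 2 3": the best element is last.  */
  return 0;
}
#endif
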
/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx tmp = *(const rtx *) y;
  rtx tmp2 = *(const rtx *) x;
  rtx link;
  int tmp_class, tmp2_class, depend_count1, depend_count2;
  int val, priority_val, weight_val, info_val;

  /* The insn in a schedule group should be issued first.  */
  if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return SCHED_GROUP_P (tmp2) ? 1 : -1;

  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (priority_val)
    return priority_val;

  /* Prefer speculative insn with greater dependencies weakness.  */
  if (spec_info)
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
      if (ds1)
        dw1 = dep_weak (ds1);
      else
        dw1 = NO_DEP_WEAK;

      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
      if (ds2)
        dw2 = dep_weak (ds2);
      else
        dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
        return dw;
    }

  /* Prefer an insn with smaller contribution to registers-pressure.  */
  if (!reload_completed
      && (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
    return weight_val;

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (info_val)
    return info_val;

  /* Compare insns based on their relation to the last-scheduled-insn.  */
  if (INSN_P (last_scheduled_insn))
    {
      /* Classify the instructions into three classes:
         1) Data dependent on last scheduled insn.
         2) Anti/Output dependent on last scheduled insn.
         3) Independent of last scheduled insn, or has latency of one.
         Choose the insn from the highest numbered class if different.  */
      link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1)
        tmp_class = 3;
      else if (REG_NOTE_KIND (link) == 0)       /* Data dependence.  */
        tmp_class = 1;
      else
        tmp_class = 2;

      link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1)
        tmp2_class = 3;
      else if (REG_NOTE_KIND (link) == 0)       /* Data dependence.  */
        tmp2_class = 1;
      else
        tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
        return val;
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */
  depend_count1 = 0;
  for (link = INSN_DEPEND (tmp); link; link = XEXP (link, 1))
    depend_count1++;

  depend_count2 = 0;
  for (link = INSN_DEPEND (tmp2); link; link = XEXP (link, 1))
    depend_count2++;

  val = depend_count2 - depend_count1;
  if (val)
    return val;

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}

/* Resort the array A in which only element at index N may be out of order.  */

HAIFA_INLINE static void
swap_sort (rtx *a, int n)
{
  rtx insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}

/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve insns
   chain for debugging purposes.  */

HAIFA_INLINE static void
queue_insn (rtx insn, int n_cycles)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);

  gcc_assert (n_cycles <= max_insn_queue_index);

  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
               (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
    }

  QUEUE_INDEX (insn) = next_q;
}

/* Remove INSN from queue.  */
static void
queue_remove (rtx insn)
{
  gcc_assert (QUEUE_INDEX (insn) >= 0);
  remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
  q_size--;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
}

/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

HAIFA_INLINE static rtx *
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}

/* Add an element INSN to the ready list so that it ends up with the
   lowest/highest priority depending on FIRST_P.  */

HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx insn, bool first_p)
{
  if (!first_p)
    {
      if (ready->first == ready->n_ready)
        {
          memmove (ready->vec + ready->veclen - ready->n_ready,
                   ready_lastpos (ready),
                   ready->n_ready * sizeof (rtx));
          ready->first = ready->veclen - 1;
        }
      ready->vec[ready->first - ready->n_ready] = insn;
    }
  else
    {
      if (ready->first == ready->veclen - 1)
        {
          if (ready->n_ready)
            /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
            memmove (ready->vec + ready->veclen - ready->n_ready - 1,
                     ready_lastpos (ready),
                     ready->n_ready * sizeof (rtx));
          ready->first = ready->veclen - 2;
        }
      ready->vec[++(ready->first)] = insn;
    }

  ready->n_ready++;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
  QUEUE_INDEX (insn) = QUEUE_READY;
}

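/* In other words (a reading of the two branches above): with FIRST_P
   false the new insn is stored one slot below the current block of
   ready insns, at the lowest-priority end; with FIRST_P true it is
   appended above vec[first], at the highest-priority end.  The memmove
   calls merely re-position the whole block inside the vector once it
   has drifted against either edge.  */
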
/* Remove the element with the highest priority from the ready list and
   return it.  */

HAIFA_INLINE static rtx
ready_remove_first (struct ready_list *ready)
{
  rtx t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;

  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}

/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we will try to choose a ready insn that
   permits starting the maximum number of insns on the same cycle.  */

/* Return a pointer to the element INDEX from the ready list.  INDEX for
   insn with the highest priority is 0, and the lowest priority has
   N_READY - 1.  */

HAIFA_INLINE static rtx
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}

/* Remove the element INDEX from the ready list and return it.  INDEX
   for insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */

HAIFA_INLINE static rtx
ready_remove (struct ready_list *ready, int index)
{
  rtx t;
  int i;

  if (index == 0)
    return ready_remove_first (ready);
  gcc_assert (ready->n_ready && index < ready->n_ready);
  t = ready->vec[ready->first - index];
  ready->n_ready--;
  for (i = index; i < ready->n_ready; i++)
    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
  QUEUE_INDEX (t) = QUEUE_NOWHERE;
  return t;
}

/* Remove INSN from the ready list.  */
static void
ready_remove_insn (rtx insn)
{
  int i;

  for (i = 0; i < readyp->n_ready; i++)
    if (ready_element (readyp, i) == insn)
      {
        ready_remove (readyp, i);
        return;
      }
  gcc_unreachable ();
}

/* Sort the ready list READY by ascending priority, using the SCHED_SORT
   macro.  */

HAIFA_INLINE static void
ready_sort (struct ready_list *ready)
{
  rtx *first = ready_lastpos (ready);
  SCHED_SORT (first, ready->n_ready);
}

/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */

HAIFA_INLINE static void
adjust_priority (rtx prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}

/* Advance time by one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (curr_state,
                      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (curr_state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (curr_state,
                      targetm.sched.dfa_post_cycle_insn ());
}

/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  The function returns the necessary cycle advance
   after issuing the insn (it is not zero for insns in a schedule
   group).  */

static int
schedule_insn (rtx insn)
{
  rtx link;
  int advance = 0;

  if (sched_verbose >= 1)
    {
      char buf[2048];

      print_insn (buf, insn, 0);
      buf[40] = 0;
      fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);

      if (recog_memoized (insn) < 0)
        fprintf (sched_dump, "nothing");
      else
        print_reservation (sched_dump, insn);
      fputc ('\n', sched_dump);
    }

  /* The instruction being scheduled should have all its dependencies
     resolved and should have been removed from the ready list.  */
  gcc_assert (INSN_DEP_COUNT (insn) == 0);
  gcc_assert (!LOG_LINKS (insn));
  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);

  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  /* Now we can free the RESOLVED_DEPS list.  */
  if (current_sched_info->flags & USE_DEPS_LIST)
    free_DEPS_LIST_list (&RESOLVED_DEPS (insn));
  else
    free_INSN_LIST_list (&RESOLVED_DEPS (insn));

  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if the following flag is set.  */
    gcc_assert (flag_sched_stalled_insns);

  /* ??? Probably, if INSN is scheduled prematurely, we should leave
     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
  INSN_TICK (insn) = clock_var;

  /* Update dependent instructions.  */
  for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
    {
      rtx next = XEXP (link, 0);

      resolve_dep (next, insn);

      if (!RECOVERY_BLOCK (insn)
          || RECOVERY_BLOCK (insn) == EXIT_BLOCK_PTR)
        {
          int effective_cost;

          effective_cost = try_ready (next);

          if (effective_cost >= 0
              && SCHED_GROUP_P (next)
              && advance < effective_cost)
            advance = effective_cost;
        }
      else
        /* Check always has only one forward dependence (to the first insn in
           the recovery block), therefore, this will be executed only once.  */
        {
          gcc_assert (XEXP (link, 1) == 0);
          fix_recovery_deps (RECOVERY_BLOCK (insn));
        }
    }

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    {
      if (reload_completed)
        PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock_var;
    }

  return advance;
}

/* Functions for handling of notes.  */

/* Delete notes beginning with INSN and put them in the chain
   of notes ended by NOTE_LIST.
   Returns the insn following the notes.  */

static rtx
unlink_other_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_NOT_BB_P (insn))
    {
      rtx next = NEXT_INSN (insn);
      /* Delete the note from its current position.  */
      if (prev)
        NEXT_INSN (prev) = next;
      if (next)
        PREV_INSN (next) = prev;

      /* See sched_analyze to see how these are handled.  */
      if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
          && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
        {
          /* Insert the note at the end of the notes list.  */
          PREV_INSN (insn) = note_list;
          if (note_list)
            NEXT_INSN (note_list) = insn;
          note_list = insn;
        }

      insn = next;
    }
  return insn;
}

/* Delete line notes beginning with INSN.  Record line-number notes so
   they can be reused.  Returns the insn following the notes.  */

static rtx
unlink_line_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_P (insn))
    {
      rtx next = NEXT_INSN (insn);

      if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
        {
          /* Delete the note from its current position.  */
          if (prev)
            NEXT_INSN (prev) = next;
          if (next)
            PREV_INSN (next) = prev;

          /* Record line-number notes so they can be reused.  */
          LINE_NOTE (insn) = insn;
        }
      else
        prev = insn;

      insn = next;
    }
  return insn;
}

/* Return the head and tail pointers of the ebb starting at BEG and
   ending at END.  */

void
get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
{
  rtx beg_head = BB_HEAD (beg);
  rtx beg_tail = BB_END (beg);
  rtx end_head = BB_HEAD (end);
  rtx end_tail = BB_END (end);

  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic block.  */

  if (LABEL_P (beg_head))
    beg_head = NEXT_INSN (beg_head);

  while (beg_head != beg_tail)
    if (NOTE_P (beg_head))
      beg_head = NEXT_INSN (beg_head);
    else
      break;

  *headp = beg_head;

  if (beg == end)
    end_head = beg_head;
  else if (LABEL_P (end_head))
    end_head = NEXT_INSN (end_head);

  while (end_head != end_tail)
    if (NOTE_P (end_tail))
      end_tail = PREV_INSN (end_tail);
    else
      break;

  *tailp = end_tail;
}

/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */

int
no_real_insns_p (rtx head, rtx tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head))
        return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}

/* Delete line notes from one block.  Save them so they can be later restored
   (in restore_line_notes).  HEAD and TAIL are the boundaries of the
   block in which notes should be processed.  */

void
rm_line_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
         This is needed to keep the debugger from
         getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
        {
          prev = insn;
          insn = unlink_line_notes (insn, next_tail);

          gcc_assert (prev != tail && prev != head && insn != next_tail);
        }
    }
}

/* Save line number notes for each insn in block B.  HEAD and TAIL are
   the boundaries of the block in which notes should be processed.  */

void
save_line_notes (int b, rtx head, rtx tail)
{
  rtx next_tail;

  /* We must use the true line number for the first insn in the block
     that was computed and saved at the start of this pass.  We can't
     use the current line number, because scheduling of the previous
     block may have changed the current line number.  */

  rtx line = line_note_head[b];
  rtx insn;

  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
    else
      LINE_NOTE (insn) = line;
}

/* After a block was scheduled, insert line notes into the insns list.
   HEAD and TAIL are the boundaries of the block in which notes should
   be processed.  */

void
restore_line_notes (rtx head, rtx tail)
{
  rtx line, note, prev, new;
  int added_notes = 0;
  rtx next_tail, insn;

  head = head;
  next_tail = NEXT_INSN (tail);

  /* Determine the current line-number.  We want to know the current
     line number of the first insn of the block here, in case it is
     different from the true line number that was saved earlier.  If
     different, then we need a line number note before the first insn
     of this block.  If it happens to be the same, then we don't want to
     emit another line number note here.  */
  for (line = head; line; line = PREV_INSN (line))
    if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
      break;

  /* Walk the insns keeping track of the current line-number and inserting
     the line-number notes as needed.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
  /* This used to emit line number notes before every non-deleted note.
     However, this confuses a debugger, because line notes not separated
     by real instructions all end up at the same address.  I can find no
     use for line number notes before other notes, so none are emitted.  */
    else if (!NOTE_P (insn)
             && INSN_UID (insn) < old_max_uid
             && (note = LINE_NOTE (insn)) != 0
             && note != line
             && (line == 0
#ifdef USE_MAPPED_LOCATION
                 || NOTE_SOURCE_LOCATION (note) != NOTE_SOURCE_LOCATION (line)
#else
                 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
                 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)
#endif
                 ))
      {
        line = note;
        prev = PREV_INSN (insn);
        if (LINE_NOTE (note))
          {
            /* Re-use the original line-number note.  */
            LINE_NOTE (note) = 0;
            PREV_INSN (note) = prev;
            NEXT_INSN (prev) = note;
            PREV_INSN (insn) = note;
            NEXT_INSN (note) = insn;
            set_block_for_insn (note, BLOCK_FOR_INSN (insn));
          }
        else
          {
            added_notes++;
            new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
#ifndef USE_MAPPED_LOCATION
            NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
#endif
          }
      }
  if (sched_verbose && added_notes)
    fprintf (sched_dump, ";; added %d line-number notes\n", added_notes);
}

/* After scheduling the function, delete redundant line notes from the
   insns list.  */

void
rm_redundant_line_notes (void)
{
  rtx line = 0;
  rtx insn = get_insns ();
  int active_insn = 0;
  int notes = 0;

  /* Walk the insns deleting redundant line-number notes.  Many of these
     are already present.  The remainder tend to occur at basic
     block boundaries.  */
  for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      {
        /* If there are no active insns following, INSN is redundant.  */
        if (active_insn == 0)
          {
            notes++;
            SET_INSN_DELETED (insn);
          }
        /* If the line number is unchanged, LINE is redundant.  */
        else if (line
#ifdef USE_MAPPED_LOCATION
                 && NOTE_SOURCE_LOCATION (line) == NOTE_SOURCE_LOCATION (insn)
#else
                 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
                 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn)
#endif
                 )
          {
            notes++;
            SET_INSN_DELETED (line);
            line = insn;
          }
        else
          line = insn;
        active_insn = 0;
      }
    else if (!((NOTE_P (insn)
                && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
               || (NONJUMP_INSN_P (insn)
                   && (GET_CODE (PATTERN (insn)) == USE
                       || GET_CODE (PATTERN (insn)) == CLOBBER))))
      active_insn++;

  if (sched_verbose && notes)
    fprintf (sched_dump, ";; deleted %d line-number notes\n", notes);
}

/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */

void
rm_other_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  note_list = 0;
  if (head == tail && (! INSN_P (head)))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
         This is needed to keep the debugger from
         getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
        {
          prev = insn;

          insn = unlink_other_notes (insn, next_tail);

          gcc_assert (prev != tail && prev != head && insn != next_tail);
        }
    }
}

/* Functions for computation of registers live/usage info.  */

/* This function looks for a new register being defined.
   If the destination register is already used by the source,
   a new register is not needed.  */

static int
find_set_reg_weight (rtx x)
{
  if (GET_CODE (x) == CLOBBER
      && register_operand (SET_DEST (x), VOIDmode))
    return 1;
  if (GET_CODE (x) == SET
      && register_operand (SET_DEST (x), VOIDmode))
    {
      if (REG_P (SET_DEST (x)))
        {
          if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
            return 1;
          else
            return 0;
        }
      return 1;
    }
  return 0;
}

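/* For example, (set (reg A) (reg B)) weighs 1 -- a new register is
   born -- while (set (reg A) (plus (reg A) (const_int 1))) weighs 0,
   because the destination already lives in the source.  */
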
c46a37c4 1587/* Calculate INSN_REG_WEIGHT for all insns of a block. */
8c660648
JL
1588
1589static void
496d7bb0 1590find_insn_reg_weight (basic_block bb)
8c660648
JL
1591{
1592 rtx insn, next_tail, head, tail;
8c660648 1593
496d7bb0 1594 get_ebb_head_tail (bb, bb, &head, &tail);
8c660648
JL
1595 next_tail = NEXT_INSN (tail);
1596
1597 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
496d7bb0
MK
1598 find_insn_reg_weight1 (insn);
1599}

/* Calculate INSN_REG_WEIGHT for a single instruction.
   Separated from find_insn_reg_weight because of the need
   to initialize new instructions in generate_recovery_code.  */
static void
find_insn_reg_weight1 (rtx insn)
{
  int reg_weight = 0;
  rtx x;

  /* Handle register life information.  */
  if (! INSN_P (insn))
    return;

  /* Increment weight for each register born here.  */
  x = PATTERN (insn);
  reg_weight += find_set_reg_weight (x);
  if (GET_CODE (x) == PARALLEL)
    {
      int j;
      for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
	{
	  x = XVECEXP (PATTERN (insn), 0, j);
	  reg_weight += find_set_reg_weight (x);
	}
    }
  /* Decrement weight for each register that dies here.  */
  for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
    {
      if (REG_NOTE_KIND (x) == REG_DEAD
	  || REG_NOTE_KIND (x) == REG_UNUSED)
	reg_weight--;
    }

  INSN_REG_WEIGHT (insn) = reg_weight;
}
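
/* Worked example (hypothetical insn, for illustration only): for

     (set (reg A) (plus (reg B) (reg C)))

   carrying REG_DEAD notes for B and C, find_set_reg_weight contributes
   +1 for the register born in the SET, and the two death notes subtract
   2, so INSN_REG_WEIGHT == -1, i.e. the insn reduces register pressure.  */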

/* Move insns that became ready to fire from queue to ready list.  */

static void
queue_to_ready (struct ready_list *ready)
{
  rtx insn;
  rtx link;

  q_ptr = NEXT_Q (q_ptr);

  /* Add all pending insns that can be scheduled without stalls to the
     ready list.  */
  for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
    {
      insn = XEXP (link, 0);
      q_size -= 1;

      if (sched_verbose >= 2)
	fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
		 (*current_sched_info->print_insn) (insn, 0));

      ready_add (ready, insn, false);
      if (sched_verbose >= 2)
	fprintf (sched_dump, "moving to ready without stalls\n");
    }
  free_INSN_LIST_list (&insn_queue[q_ptr]);

  /* If there are no ready insns, stall until one is ready and add all
     of the pending insns at that point to the ready list.  */
  if (ready->n_ready == 0)
    {
      int stalls;

      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
	{
	  if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	    {
	      for (; link; link = XEXP (link, 1))
		{
		  insn = XEXP (link, 0);
		  q_size -= 1;

		  if (sched_verbose >= 2)
		    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
			     (*current_sched_info->print_insn) (insn, 0));

		  ready_add (ready, insn, false);
		  if (sched_verbose >= 2)
		    fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
		}
	      free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);

	      advance_one_cycle ();

	      break;
	    }

	  advance_one_cycle ();
	}

      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
      clock_var += stalls;
    }
}
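
/* A minimal standalone sketch (not GCC code) of the circular "stall
   bucket" queue that insn_queue/q_ptr implement above, assuming the
   index wraps modulo a power-of-two bucket count in the spirit of
   NEXT_Q/NEXT_Q_AFTER.  All names below (QSIZE, sketch_queue,
   sketch_advance) are hypothetical.  */
#if 0
#include <stdio.h>

#define QSIZE 8				/* Number of buckets; a power of two.  */

static int bucket_head;			/* Analogue of q_ptr.  */
static int buckets[QSIZE][16];		/* Items queued per stall count.  */
static int bucket_len[QSIZE];

/* Queue X to mature after DELAY cycles (analogue of queue_insn).  */
static void
sketch_queue (int x, int delay)
{
  int i = (bucket_head + delay) & (QSIZE - 1);
  buckets[i][bucket_len[i]++] = x;
}

/* Advance one cycle and drain the bucket that just matured
   (analogue of the first loop of queue_to_ready).  */
static void
sketch_advance (void)
{
  bucket_head = (bucket_head + 1) & (QSIZE - 1);
  while (bucket_len[bucket_head] > 0)
    printf ("ready: %d\n", buckets[bucket_head][--bucket_len[bucket_head]]);
}

int
main (void)
{
  sketch_queue (1, 1);
  sketch_queue (2, 3);
  sketch_advance ();	/* Prints "ready: 1".  */
  sketch_advance ();	/* Nothing matures this cycle.  */
  sketch_advance ();	/* Prints "ready: 2".  */
  return 0;
}
#endif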

/* Used by early_queue_to_ready.  Determines whether it is "ok" to
   prematurely move INSN from the queue to the ready list.  Currently,
   if a target defines the hook 'is_costly_dependence', this function
   uses the hook to check whether there exist any dependences which are
   considered costly by the target, between INSN and other insns that
   have already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead (or in
   addition) depending on user flags and target hooks.)  */

static bool
ok_for_early_queue_removal (rtx insn)
{
  int n_cycles;
  rtx prev_insn = last_scheduled_insn;

  if (targetm.sched.is_costly_dependence)
    {
      for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
	{
	  for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
	    {
	      rtx dep_link = 0;
	      int dep_cost;

	      if (!NOTE_P (prev_insn))
		{
		  dep_link = find_insn_list (insn, INSN_DEPEND (prev_insn));
		  if (dep_link)
		    {
		      dep_cost = insn_cost (prev_insn, dep_link, insn);
		      if (targetm.sched.is_costly_dependence (prev_insn, insn,
				dep_link, dep_cost,
				flag_sched_stalled_insns_dep - n_cycles))
			return false;
		    }
		}

	      if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
		break;
	    }

	  if (!prev_insn)
	    break;
	  prev_insn = PREV_INSN (prev_insn);
	}
    }

  return true;
}

/* Remove insns from the queue, before they become "ready" with respect
   to FU latency considerations.  */

static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx next_link;
  rtx prev_link;
  bool move_to_ready;
  int cost;
  state_t temp_state = alloca (dfa_state_size);
  int stalls;
  int insns_removed = 0;

  /*
     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
     function:

     X == 0: There is no limit on how many queued insns can be removed
	     prematurely.  (flag_sched_stalled_insns = -1).

     X >= 1: Only X queued insns can be removed prematurely in each
	     invocation.  (flag_sched_stalled_insns = X).

     Otherwise: Early queue removal is disabled.
	     (flag_sched_stalled_insns = 0)
  */
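
  /* For example, "-fsched-stalled-insns=2" permits at most two queued
     insns to be promoted per invocation, while "-fsched-stalled-insns=0"
     (flag_sched_stalled_insns == -1 internally) removes the limit;
     omitting the option leaves flag_sched_stalled_insns == 0 and
     disables early removal altogether.  */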

  if (! flag_sched_stalled_insns)
    return 0;

  for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
    {
      if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	{
	  if (sched_verbose > 6)
	    fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);

	  prev_link = 0;
	  while (link)
	    {
	      next_link = XEXP (link, 1);
	      insn = XEXP (link, 0);
	      if (insn && sched_verbose > 6)
		print_rtl_single (sched_dump, insn);

	      memcpy (temp_state, state, dfa_state_size);
	      if (recog_memoized (insn) < 0)
		/* Non-negative to indicate that it's not ready,
		   to avoid an infinite Q->R->Q->R...  */
		cost = 0;
	      else
		cost = state_transition (temp_state, insn);

	      if (sched_verbose >= 6)
		fprintf (sched_dump, "transition cost = %d\n", cost);

	      move_to_ready = false;
	      if (cost < 0)
		{
		  move_to_ready = ok_for_early_queue_removal (insn);
		  if (move_to_ready == true)
		    {
		      /* Move from Q to R.  */
		      q_size -= 1;
		      ready_add (ready, insn, false);

		      if (prev_link)
			XEXP (prev_link, 1) = next_link;
		      else
			insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;

		      free_INSN_LIST_node (link);

		      if (sched_verbose >= 2)
			fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
				 (*current_sched_info->print_insn) (insn, 0));

		      insns_removed++;
		      if (insns_removed == flag_sched_stalled_insns)
			/* Remove no more than flag_sched_stalled_insns insns
			   from Q at a time.  */
			return insns_removed;
		    }
		}

	      if (move_to_ready == false)
		prev_link = link;

	      link = next_link;
	    } /* while link */
	} /* if link */

    } /* for stalls.. */

  return insns_removed;
}

/* Print the ready list for debugging purposes.  Callable from debugger.  */

static void
debug_ready_list (struct ready_list *ready)
{
  rtx *p;
  int i;

  if (ready->n_ready == 0)
    {
      fprintf (sched_dump, "\n");
      return;
    }

  p = ready_lastpos (ready);
  for (i = 0; i < ready->n_ready; i++)
    fprintf (sched_dump, " %s", (*current_sched_info->print_insn) (p[i], 0));
  fprintf (sched_dump, "\n");
}

/* Search INSN for REG_SAVE_NOTE note pairs for
   NOTE_INSN_EH_REGION_{BEG,END}; and convert them back into
   NOTEs.  The REG_SAVE_NOTE note following the first one contains the
   saved value for NOTE_BLOCK_NUMBER which is useful for
   NOTE_INSN_EH_REGION_{BEG,END} NOTEs.  */

static void
reemit_notes (rtx insn)
{
  rtx note, last = insn;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
      if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
	{
	  enum insn_note note_type = INTVAL (XEXP (note, 0));

	  last = emit_note_before (note_type, last);
	  remove_note (insn, note);
	}
    }
}

/* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
static void
move_insn (rtx insn)
{
  rtx last = last_scheduled_insn;

  if (PREV_INSN (insn) != last)
    {
      basic_block bb;
      rtx note;
      int jump_p = 0;

      bb = BLOCK_FOR_INSN (insn);

      /* BB_HEAD is either LABEL or NOTE.  */
      gcc_assert (BB_HEAD (bb) != insn);

      if (BB_END (bb) == insn)
	/* If this is last instruction in BB, move end marker one
	   instruction up.  */
	{
	  /* Jumps are always placed at the end of basic block.  */
	  jump_p = control_flow_insn_p (insn);

	  gcc_assert (!jump_p
		      || ((current_sched_info->flags & SCHED_RGN)
			  && RECOVERY_BLOCK (insn)
			  && RECOVERY_BLOCK (insn) != EXIT_BLOCK_PTR)
		      || (current_sched_info->flags & SCHED_EBB));

	  gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);

	  BB_END (bb) = PREV_INSN (insn);
	}

      gcc_assert (BB_END (bb) != last);

      if (jump_p)
	/* We move the block note along with jump.  */
	{
	  /* NT is needed for assertion below.  */
	  rtx nt = current_sched_info->next_tail;

	  note = NEXT_INSN (insn);
	  while (NOTE_NOT_BB_P (note) && note != nt)
	    note = NEXT_INSN (note);

	  if (note != nt
	      && (LABEL_P (note)
		  || BARRIER_P (note)))
	    note = NEXT_INSN (note);

	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
	}
      else
	note = insn;

      NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
      PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);

      NEXT_INSN (note) = NEXT_INSN (last);
      PREV_INSN (NEXT_INSN (last)) = note;

      NEXT_INSN (last) = insn;
      PREV_INSN (insn) = last;

      bb = BLOCK_FOR_INSN (last);

      if (jump_p)
	{
	  fix_jump_move (insn);

	  if (BLOCK_FOR_INSN (insn) != bb)
	    move_block_after_check (insn);

	  gcc_assert (BB_END (bb) == last);
	}

      set_block_for_insn (insn, bb);

      /* Update BB_END, if needed.  */
      if (BB_END (bb) == last)
	BB_END (bb) = insn;
    }

  reemit_notes (insn);

  SCHED_GROUP_P (insn) = 0;
}

/* The following structure describes an entry of the stack of choices.  */
struct choice_entry
{
  /* Ordinal number of the issued insn in the ready queue.  */
  int index;
  /* The number of the remaining insns whose issue we should try.  */
  int rest;
  /* The number of issued essential insns.  */
  int n;
  /* State after issuing the insn.  */
  state_t state;
};

/* The following array is used to implement a stack of choices used in
   function max_issue.  */
static struct choice_entry *choice_stack;

/* The following variable value is the number of essential insns issued on
   the current cycle.  An insn is an essential one if it changes the
   processor's state.  */
static int cycle_issued_insns;

/* The following variable value is the maximal number of tries of issuing
   insns for the first cycle multipass insn scheduling.  We define
   this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
   need this constraint if all real insns (with non-negative codes)
   had reservations because in this case the algorithm complexity is
   O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
   might be incomplete and such insns might occur.  For such
   descriptions, the complexity of the algorithm (without the constraint)
   could reach DFA_LOOKAHEAD ** N, where N is the queue length.  */
static int max_lookahead_tries;
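
/* For instance (illustrative numbers): choose_ready below computes this
   as 100 * lookahead**issue_rate, so a dfa lookahead of 4 with an issue
   rate of 2 yields max_lookahead_tries == 100 * 4 * 4 == 1600.  */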

/* The following value is the value of the hook
   `first_cycle_multipass_dfa_lookahead' at the last call of
   `max_issue'.  */
static int cached_first_cycle_multipass_dfa_lookahead = 0;

/* The following value is the value of `issue_rate' at the last call of
   `sched_init'.  */
static int cached_issue_rate = 0;

/* The following function returns the maximal (or close to maximal) number
   of insns which can be issued on the same cycle, one of which is the
   insn with the best rank (the first insn in READY).  To do this, the
   function tries different samples of ready insns.  READY is the
   current queue `ready'.  Global array READY_TRY reflects what insns
   are already issued in this try.  MAX_POINTS is the sum of points of
   all instructions in READY.  The function stops immediately if it has
   reached a solution in which all instructions can be issued.  INDEX
   will contain the index of the best insn in READY.  The following
   function is used only for first cycle multipass scheduling.  */
static int
max_issue (struct ready_list *ready, int *index, int max_points)
{
  int n, i, all, n_ready, best, delay, tries_num, points = -1;
  struct choice_entry *top;
  rtx insn;

  best = 0;
  memcpy (choice_stack->state, curr_state, dfa_state_size);
  top = choice_stack;
  top->rest = cached_first_cycle_multipass_dfa_lookahead;
  top->n = 0;
  n_ready = ready->n_ready;
  for (all = i = 0; i < n_ready; i++)
    if (!ready_try [i])
      all++;
  i = 0;
  tries_num = 0;
  for (;;)
    {
      if (top->rest == 0 || i >= n_ready)
	{
	  if (top == choice_stack)
	    break;
	  if (best < top - choice_stack && ready_try [0])
	    {
	      best = top - choice_stack;
	      *index = choice_stack [1].index;
	      points = top->n;
	      if (top->n == max_points || best == all)
		break;
	    }
	  i = top->index;
	  ready_try [i] = 0;
	  top--;
	  memcpy (curr_state, top->state, dfa_state_size);
	}
      else if (!ready_try [i])
	{
	  tries_num++;
	  if (tries_num > max_lookahead_tries)
	    break;
	  insn = ready_element (ready, i);
	  delay = state_transition (curr_state, insn);
	  if (delay < 0)
	    {
	      if (state_dead_lock_p (curr_state))
		top->rest = 0;
	      else
		top->rest--;
	      n = top->n;
	      if (memcmp (top->state, curr_state, dfa_state_size) != 0)
		n += ISSUE_POINTS (insn);
	      top++;
	      top->rest = cached_first_cycle_multipass_dfa_lookahead;
	      top->index = i;
	      top->n = n;
	      memcpy (top->state, curr_state, dfa_state_size);
	      ready_try [i] = 1;
	      i = -1;
	    }
	}
      i++;
    }
  while (top != choice_stack)
    {
      ready_try [top->index] = 0;
      top--;
    }
  memcpy (curr_state, choice_stack->state, dfa_state_size);

  if (sched_verbose >= 4)
    fprintf (sched_dump, ";;\t\tChosen insn : %s; points: %d/%d\n",
	     (*current_sched_info->print_insn) (ready_element (ready, *index),
						0),
	     points, max_points);

  return best;
}
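
/* Illustrative trace (hypothetical insns): with READY == {A, B, C} and
   all ISSUE_POINTS equal to 1, the search above first tries issuing A,
   then tries B and C on top of it, backtracking through CHOICE_STACK
   whenever state_transition rejects a candidate or the per-level budget
   (top->rest) is exhausted; if {A, C} is the deepest feasible stack that
   still contains the best-ranked insn, max_issue returns 2 and *INDEX
   selects A.  */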

/* The following function chooses an insn from READY and modifies
   *N_READY and READY.  The following function is used only for first
   cycle multipass scheduling.  */

static rtx
choose_ready (struct ready_list *ready)
{
  int lookahead = 0;

  if (targetm.sched.first_cycle_multipass_dfa_lookahead)
    lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
  if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
    return ready_remove_first (ready);
  else
    {
      /* Try to choose the better insn.  */
      int index = 0, i, n;
      rtx insn;
      int more_issue, max_points, try_data = 1, try_control = 1;

      if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
	{
	  cached_first_cycle_multipass_dfa_lookahead = lookahead;
	  max_lookahead_tries = 100;
	  for (i = 0; i < issue_rate; i++)
	    max_lookahead_tries *= lookahead;
	}
      insn = ready_element (ready, 0);
      if (INSN_CODE (insn) < 0)
	return ready_remove_first (ready);

      if (spec_info
	  && spec_info->flags & (PREFER_NON_DATA_SPEC
				 | PREFER_NON_CONTROL_SPEC))
	{
	  for (i = 0, n = ready->n_ready; i < n; i++)
	    {
	      rtx x;
	      ds_t s;

	      x = ready_element (ready, i);
	      s = TODO_SPEC (x);

	      if (spec_info->flags & PREFER_NON_DATA_SPEC
		  && !(s & DATA_SPEC))
		{
		  try_data = 0;
		  if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
		      || !try_control)
		    break;
		}

	      if (spec_info->flags & PREFER_NON_CONTROL_SPEC
		  && !(s & CONTROL_SPEC))
		{
		  try_control = 0;
		  if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
		    break;
		}
	    }
	}

      if ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
	  || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
	  || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
	      && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
	      (insn)))
	/* Discard speculative instruction that stands first in the ready
	   list.  */
	{
	  change_queue_index (insn, 1);
	  return 0;
	}

      max_points = ISSUE_POINTS (insn);
      more_issue = issue_rate - cycle_issued_insns - 1;

      for (i = 1; i < ready->n_ready; i++)
	{
	  insn = ready_element (ready, i);
	  ready_try [i]
	    = (INSN_CODE (insn) < 0
	       || (!try_data && (TODO_SPEC (insn) & DATA_SPEC))
	       || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
	       || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
		   && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
		   (insn)));

	  if (!ready_try [i] && more_issue-- > 0)
	    max_points += ISSUE_POINTS (insn);
	}

      if (max_issue (ready, &index, max_points) == 0)
	return ready_remove_first (ready);
      else
	return ready_remove (ready, index);
    }
}

/* Use forward list scheduling to rearrange insns of block pointed to by
   TARGET_BB, possibly bringing insns from subsequent blocks in the same
   region.  */

void
schedule_block (basic_block *target_bb, int rgn_n_insns1)
{
  struct ready_list ready;
  int i, first_cycle_insn_p;
  int can_issue_more;
  state_t temp_state = NULL;  /* It is used for multipass scheduling.  */
  int sort_p, advance, start_clock_var;

  /* Head/tail info for this block.  */
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx head = NEXT_INSN (prev_head);
  rtx tail = PREV_INSN (next_tail);

  /* We used to have code to avoid getting parameters moved from hard
     argument registers into pseudos.

     However, it was removed when it proved to be of marginal benefit
     and caused problems because schedule_block and compute_forward_dependences
     had different notions of what the "head" insn was.  */

  gcc_assert (head != tail || INSN_P (head));

  added_recovery_block_p = false;

  /* Debug info.  */
  if (sched_verbose)
    dump_new_block_header (0, *target_bb, head, tail);

  state_reset (curr_state);

  /* Allocate the ready list.  */
  readyp = &ready;
  ready.vec = NULL;
  ready_try = NULL;
  choice_stack = NULL;

  rgn_n_insns = -1;
  extend_ready (rgn_n_insns1 + 1);

  ready.first = ready.veclen - 1;
  ready.n_ready = 0;

  /* It is used for first cycle multipass scheduling.  */
  temp_state = alloca (dfa_state_size);

  if (targetm.sched.md_init)
    targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);

  /* We start inserting insns after PREV_HEAD.  */
  last_scheduled_insn = prev_head;

  gcc_assert (NOTE_P (last_scheduled_insn)
	      && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);

  /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the
     queue.  */
  q_ptr = 0;
  q_size = 0;

  insn_queue = alloca ((max_insn_queue_index + 1) * sizeof (rtx));
  memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));

  /* Start just before the beginning of time.  */
  clock_var = -1;

  /* We need the queue and ready lists and clock_var to be initialized
     in try_ready () (which is called through init_ready_list ()).  */
  (*current_sched_info->init_ready_list) ();

  /* Now we can restore basic block notes and maintain precise cfg.  */
  restore_bb_notes (*target_bb);

  last_clock_var = -1;

  advance = 0;

  sort_p = TRUE;
  /* Loop until all the insns in BB are scheduled.  */
  while ((*current_sched_info->schedule_more_p) ())
    {
      do
	{
	  start_clock_var = clock_var;

	  clock_var++;

	  advance_one_cycle ();

	  /* Add to the ready list all pending insns that can be issued now.
	     If there are no ready insns, increment clock until one
	     is ready and add all pending insns at that point to the ready
	     list.  */
	  queue_to_ready (&ready);

	  gcc_assert (ready.n_ready);

	  if (sched_verbose >= 2)
	    {
	      fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
	      debug_ready_list (&ready);
	    }
	  advance -= clock_var - start_clock_var;
	}
      while (advance > 0);

      if (sort_p)
	{
	  /* Sort the ready list based on priority.  */
	  ready_sort (&ready);

	  if (sched_verbose >= 2)
	    {
	      fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
	      debug_ready_list (&ready);
	    }
	}

      /* Allow the target to reorder the list, typically for
	 better instruction bundling.  */
      if (sort_p && targetm.sched.reorder
	  && (ready.n_ready == 0
	      || !SCHED_GROUP_P (ready_element (&ready, 0))))
	can_issue_more =
	  targetm.sched.reorder (sched_dump, sched_verbose,
				 ready_lastpos (&ready),
				 &ready.n_ready, clock_var);
      else
	can_issue_more = issue_rate;

      first_cycle_insn_p = 1;
      cycle_issued_insns = 0;
      for (;;)
	{
	  rtx insn;
	  int cost;
	  bool asm_p = false;

	  if (sched_verbose >= 2)
	    {
	      fprintf (sched_dump, ";;\tReady list (t = %3d): ",
		       clock_var);
	      debug_ready_list (&ready);
	    }

	  if (ready.n_ready == 0
	      && can_issue_more
	      && reload_completed)
	    {
	      /* Allow scheduling insns directly from the queue in case
		 there's nothing better to do (ready list is empty) but
		 there are still vacant dispatch slots in the current cycle.  */
	      if (sched_verbose >= 6)
		fprintf (sched_dump, ";;\t\tSecond chance\n");
	      memcpy (temp_state, curr_state, dfa_state_size);
	      if (early_queue_to_ready (temp_state, &ready))
		ready_sort (&ready);
	    }

	  if (ready.n_ready == 0 || !can_issue_more
	      || state_dead_lock_p (curr_state)
	      || !(*current_sched_info->schedule_more_p) ())
	    break;

	  /* Select and remove the insn from the ready list.  */
	  if (sort_p)
	    {
	      insn = choose_ready (&ready);
	      if (!insn)
		continue;
	    }
	  else
	    insn = ready_remove_first (&ready);

	  if (targetm.sched.dfa_new_cycle
	      && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
					      insn, last_clock_var,
					      clock_var, &sort_p))
	    /* SORT_P is used by the target to override sorting
	       of the ready list.  This is needed when the target
	       has modified its internal structures expecting that
	       the insn will be issued next.  As we need the insn
	       to have the highest priority (so it will be returned by
	       the ready_remove_first call above), we invoke
	       ready_add (&ready, insn, true).
	       But, still, there is one issue: INSN can be later
	       discarded by the scheduler's front end through
	       current_sched_info->can_schedule_ready_p, hence, won't
	       be issued next.  */
	    {
	      ready_add (&ready, insn, true);
	      break;
	    }

	  sort_p = TRUE;
	  memcpy (temp_state, curr_state, dfa_state_size);
	  if (recog_memoized (insn) < 0)
	    {
	      asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
		       || asm_noperands (PATTERN (insn)) >= 0);
	      if (!first_cycle_insn_p && asm_p)
		/* This is an asm insn that we tried to issue on a cycle
		   other than the first.  Issue it on the next cycle.  */
		cost = 1;
	      else
		/* A USE insn, or something else we don't need to
		   understand.  We can't pass these directly to
		   state_transition because it will trigger a
		   fatal error for unrecognizable insns.  */
		cost = 0;
	    }
	  else
	    {
	      cost = state_transition (temp_state, insn);
	      if (cost < 0)
		cost = 0;
	      else if (cost == 0)
		cost = 1;
	    }

	  if (cost >= 1)
	    {
	      queue_insn (insn, cost);
	      if (SCHED_GROUP_P (insn))
		{
		  advance = cost;
		  break;
		}

	      continue;
	    }

	  if (current_sched_info->can_schedule_ready_p
	      && ! (*current_sched_info->can_schedule_ready_p) (insn))
	    /* We normally get here only if we don't want to move
	       insn from the split block.  */
	    {
	      TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
	      continue;
	    }

	  /* The DECISION is made.  */

	  if (TODO_SPEC (insn) & SPECULATIVE)
	    generate_recovery_code (insn);

	  if (control_flow_insn_p (last_scheduled_insn)
	      /* This is used to switch basic blocks by request
		 from the scheduler front-end (actually, sched-ebb.c only).
		 This is used to process blocks with a single fallthru
		 edge.  If the succeeding block has a jump, that jump would
		 try to move to the end of the current bb, thus corrupting
		 the CFG.  */
	      || current_sched_info->advance_target_bb (*target_bb, insn))
	    {
	      *target_bb = current_sched_info->advance_target_bb
		(*target_bb, 0);

	      if (sched_verbose)
		{
		  rtx x;

		  x = next_real_insn (last_scheduled_insn);
		  gcc_assert (x);
		  dump_new_block_header (1, *target_bb, x, tail);
		}

	      last_scheduled_insn = bb_note (*target_bb);
	    }

	  /* Update counters, etc. in the scheduler's front end.  */
	  (*current_sched_info->begin_schedule_ready) (insn,
						       last_scheduled_insn);

	  move_insn (insn);
	  last_scheduled_insn = insn;

	  if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
	    {
	      cycle_issued_insns++;
	      memcpy (curr_state, temp_state, dfa_state_size);
	    }

	  if (targetm.sched.variable_issue)
	    can_issue_more =
	      targetm.sched.variable_issue (sched_dump, sched_verbose,
					    insn, can_issue_more);
	  /* A naked CLOBBER or USE generates no instruction, so do
	     not count them against the issue rate.  */
	  else if (GET_CODE (PATTERN (insn)) != USE
		   && GET_CODE (PATTERN (insn)) != CLOBBER)
	    can_issue_more--;

	  advance = schedule_insn (insn);

	  /* After issuing an asm insn we should start a new cycle.  */
	  if (advance == 0 && asm_p)
	    advance = 1;
	  if (advance != 0)
	    break;

	  first_cycle_insn_p = 0;

	  /* Sort the ready list based on priority.  This must be
	     redone here, as schedule_insn may have readied additional
	     insns that will not be sorted correctly.  */
	  if (ready.n_ready > 0)
	    ready_sort (&ready);

	  if (targetm.sched.reorder2
	      && (ready.n_ready == 0
		  || !SCHED_GROUP_P (ready_element (&ready, 0))))
	    {
	      can_issue_more =
		targetm.sched.reorder2 (sched_dump, sched_verbose,
					ready.n_ready
					? ready_lastpos (&ready) : NULL,
					&ready.n_ready, clock_var);
	    }
	}
    }

  /* Debug info.  */
  if (sched_verbose)
    {
      fprintf (sched_dump, ";;\tReady list (final): ");
      debug_ready_list (&ready);
    }

  if (current_sched_info->queue_must_finish_empty)
    /* Sanity check -- queue must be empty now.  Meaningless if region has
       multiple bbs.  */
    gcc_assert (!q_size && !ready.n_ready);
  else
    {
      /* We must maintain QUEUE_INDEX between blocks in region.  */
      for (i = ready.n_ready - 1; i >= 0; i--)
	{
	  rtx x;

	  x = ready_element (&ready, i);
	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
	  TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
	}

      if (q_size)
	for (i = 0; i <= max_insn_queue_index; i++)
	  {
	    rtx link;
	    for (link = insn_queue[i]; link; link = XEXP (link, 1))
	      {
		rtx x;

		x = XEXP (link, 0);
		QUEUE_INDEX (x) = QUEUE_NOWHERE;
		TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
	      }
	    free_INSN_LIST_list (&insn_queue[i]);
	  }
    }

  if (!current_sched_info->queue_must_finish_empty
      || added_recovery_block_p)
    {
      /* INSN_TICK (minimum clock tick at which the insn becomes
	 ready) may not be correct for the insn in the subsequent
	 blocks of the region.  We should use a correct value of
	 `clock_var' or modify INSN_TICK.  It is better to keep
	 clock_var value equal to 0 at the start of a basic block.
	 Therefore we modify INSN_TICK here.  */
      fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
    }

#ifdef ENABLE_CHECKING
  /* After the reload the ia64 backend doesn't maintain BB_END, so
     if we want to check anything, better do it now.
     And it already clobbered previously scheduled code.  */
  if (reload_completed)
    check_cfg (BB_HEAD (BLOCK_FOR_INSN (prev_head)), 0);
#endif

  if (targetm.sched.md_finish)
    targetm.sched.md_finish (sched_dump, sched_verbose);

  /* Update head/tail boundaries.  */
  head = NEXT_INSN (prev_head);
  tail = last_scheduled_insn;

  /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
     previously found among the insns.  Insert them at the beginning
     of the insns.  */
  if (note_list != 0)
    {
      basic_block head_bb = BLOCK_FOR_INSN (head);
      rtx note_head = note_list;

      while (PREV_INSN (note_head))
	{
	  set_block_for_insn (note_head, head_bb);
	  note_head = PREV_INSN (note_head);
	}
      /* In the above cycle we've missed this note.  */
      set_block_for_insn (note_head, head_bb);

      PREV_INSN (note_head) = PREV_INSN (head);
      NEXT_INSN (PREV_INSN (head)) = note_head;
      PREV_INSN (head) = note_list;
      NEXT_INSN (note_list) = head;
      head = note_head;
    }

  /* Debugging.  */
  if (sched_verbose)
    {
      fprintf (sched_dump, ";; total time = %d\n;; new head = %d\n",
	       clock_var, INSN_UID (head));
      fprintf (sched_dump, ";; new tail = %d\n\n",
	       INSN_UID (tail));
    }

  current_sched_info->head = head;
  current_sched_info->tail = tail;

  free (ready.vec);

  free (ready_try);
  for (i = 0; i <= rgn_n_insns; i++)
    free (choice_stack [i].state);
  free (choice_stack);
}

/* Set_priorities: compute priority of each insn in the block.  */

int
set_priorities (rtx head, rtx tail)
{
  rtx insn;
  int n_insn;
  int sched_max_insns_priority =
	current_sched_info->sched_max_insns_priority;
  rtx prev_head;

  if (head == tail && (! INSN_P (head)))
    return 0;

  n_insn = 0;

  prev_head = PREV_INSN (head);
  for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
    {
      if (!INSN_P (insn))
	continue;

      n_insn++;
      (void) priority (insn);

      if (INSN_PRIORITY_KNOWN (insn))
	sched_max_insns_priority =
	  MAX (sched_max_insns_priority, INSN_PRIORITY (insn));
    }

  current_sched_info->sched_max_insns_priority = sched_max_insns_priority;

  return n_insn;
}

/* Next LUID to assign to an instruction.  */
static int luid;

/* Initialize some global state for the scheduler.  */

void
sched_init (void)
{
  basic_block b;
  rtx insn;
  int i;

  /* Switch to working copy of sched_info.  */
  memcpy (&current_sched_info_var, current_sched_info,
	  sizeof (current_sched_info_var));
  current_sched_info = &current_sched_info_var;

  /* Disable speculative loads in their presence if cc0 defined.  */
#ifdef HAVE_cc0
  flag_schedule_speculative_load = 0;
#endif

  /* Set dump and sched_verbose for the desired debugging output.  If no
     dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
     For -fsched-verbose=N, N>=10, print everything to stderr.  */
  sched_verbose = sched_verbose_param;
  if (sched_verbose_param == 0 && dump_file)
    sched_verbose = 1;
  sched_dump = ((sched_verbose_param >= 10 || !dump_file)
		? stderr : dump_file);

  /* Initialize SPEC_INFO.  */
  if (targetm.sched.set_sched_flags)
    {
      spec_info = &spec_info_var;
      targetm.sched.set_sched_flags (spec_info);
      if (current_sched_info->flags & DO_SPECULATION)
	spec_info->weakness_cutoff =
	  (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
      else
	/* So we won't read anything accidentally.  */
	spec_info = 0;
#ifdef ENABLE_CHECKING
      check_sched_flags ();
#endif
    }
  else
    /* So we won't read anything accidentally.  */
    spec_info = 0;

  /* Initialize issue_rate.  */
  if (targetm.sched.issue_rate)
    issue_rate = targetm.sched.issue_rate ();
  else
    issue_rate = 1;

  if (cached_issue_rate != issue_rate)
    {
      cached_issue_rate = issue_rate;
      /* To invalidate max_lookahead_tries:  */
      cached_first_cycle_multipass_dfa_lookahead = 0;
    }

  old_max_uid = 0;
  h_i_d = 0;
  extend_h_i_d ();

  for (i = 0; i < old_max_uid; i++)
    {
      h_i_d[i].cost = -1;
      h_i_d[i].todo_spec = HARD_DEP;
      h_i_d[i].queue_index = QUEUE_NOWHERE;
      h_i_d[i].tick = INVALID_TICK;
      h_i_d[i].inter_tick = INVALID_TICK;
    }

  if (targetm.sched.init_dfa_pre_cycle_insn)
    targetm.sched.init_dfa_pre_cycle_insn ();

  if (targetm.sched.init_dfa_post_cycle_insn)
    targetm.sched.init_dfa_post_cycle_insn ();

  dfa_start ();
  dfa_state_size = state_size ();
  curr_state = xmalloc (dfa_state_size);

  h_i_d[0].luid = 0;
  luid = 1;
  FOR_EACH_BB (b)
    for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
      {
	INSN_LUID (insn) = luid;

	/* Increment the next luid, unless this is a note.  We don't
	   really need separate IDs for notes and we don't want to
	   schedule differently depending on whether or not there are
	   line-number notes, i.e., depending on whether or not we're
	   generating debugging information.  */
	if (!NOTE_P (insn))
	  ++luid;

	if (insn == BB_END (b))
	  break;
      }

  init_dependency_caches (luid);

  init_alias_analysis ();

  line_note_head = 0;
  old_last_basic_block = 0;
  glat_start = 0;
  glat_end = 0;
  extend_bb (0);

  if (current_sched_info->flags & USE_GLAT)
    init_glat ();

  /* Compute INSN_REG_WEIGHT for all blocks.  We must do this before
     removing death notes.  */
  FOR_EACH_BB_REVERSE (b)
    find_insn_reg_weight (b);

  if (targetm.sched.md_init_global)
    targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);

  nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
  before_recovery = 0;

#ifdef ENABLE_CHECKING
  /* This is used preferably for finding bugs in check_cfg () itself.  */
  check_cfg (0, 0);
#endif
}

/* Free global data used during insn scheduling.  */

void
sched_finish (void)
{
  free (h_i_d);
  free (curr_state);
  dfa_finish ();
  free_dependency_caches ();
  end_alias_analysis ();
  free (line_note_head);
  free_glat ();

  if (targetm.sched.md_finish_global)
    targetm.sched.md_finish_global (sched_dump, sched_verbose);

  if (spec_info && spec_info->dump)
    {
      char c = reload_completed ? 'a' : 'b';

      fprintf (spec_info->dump,
	       ";; %s:\n", current_function_name ());

      fprintf (spec_info->dump,
	       ";; Procedure %cr-begin-data-spec motions == %d\n",
	       c, nr_begin_data);
      fprintf (spec_info->dump,
	       ";; Procedure %cr-be-in-data-spec motions == %d\n",
	       c, nr_be_in_data);
      fprintf (spec_info->dump,
	       ";; Procedure %cr-begin-control-spec motions == %d\n",
	       c, nr_begin_control);
      fprintf (spec_info->dump,
	       ";; Procedure %cr-be-in-control-spec motions == %d\n",
	       c, nr_be_in_control);
    }

#ifdef ENABLE_CHECKING
  /* After reload ia64 backend clobbers CFG, so can't check anything.  */
  if (!reload_completed)
    check_cfg (0, 0);
#endif

  current_sched_info = NULL;
}

/* Fix INSN_TICKs of the instructions in the current block as well as
   INSN_TICKs of their dependents.
   HEAD and TAIL are the beginning and the end of the current scheduled
   block.  */
static void
fix_inter_tick (rtx head, rtx tail)
{
  /* Set of instructions with corrected INSN_TICK.  */
  bitmap_head processed;
  int next_clock = clock_var + 1;

  bitmap_initialize (&processed, 0);

  /* Iterate over scheduled instructions and fix their INSN_TICKs and the
     INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
     across different blocks.  */
  for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
    {
      if (INSN_P (head))
	{
	  int tick;
	  rtx link;

	  tick = INSN_TICK (head);
	  gcc_assert (tick >= MIN_TICK);

	  /* Fix INSN_TICK of instruction from just scheduled block.  */
	  if (!bitmap_bit_p (&processed, INSN_LUID (head)))
	    {
	      bitmap_set_bit (&processed, INSN_LUID (head));
	      tick -= next_clock;

	      if (tick < MIN_TICK)
		tick = MIN_TICK;

	      INSN_TICK (head) = tick;
	    }

	  for (link = INSN_DEPEND (head); link; link = XEXP (link, 1))
	    {
	      rtx next;

	      next = XEXP (link, 0);
	      tick = INSN_TICK (next);

	      if (tick != INVALID_TICK
		  /* If NEXT has its INSN_TICK calculated, fix it.
		     If not - it will be properly calculated from
		     scratch later in fix_tick_ready.  */
		  && !bitmap_bit_p (&processed, INSN_LUID (next)))
		{
		  bitmap_set_bit (&processed, INSN_LUID (next));
		  tick -= next_clock;

		  if (tick < MIN_TICK)
		    tick = MIN_TICK;

		  if (tick > INTER_TICK (next))
		    INTER_TICK (next) = tick;
		  else
		    tick = INTER_TICK (next);

		  INSN_TICK (next) = tick;
		}
	    }
	}
    }
  bitmap_clear (&processed);
}
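
/* Worked example (illustrative numbers): if the just-scheduled block
   finished at clock_var == 7, then next_clock == 8 and a dependent insn
   with INSN_TICK == 10 is renormalized to 10 - 8 == 2, i.e. it becomes
   ready two cycles into the next block, whose clock starts near zero
   again; results below MIN_TICK are clamped to MIN_TICK.  */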

/* Check if NEXT is ready to be added to the ready or queue list.
   If "yes", add it to the proper list.
   Returns:
      -1 - is not ready yet,
       0 - added to the ready list,
   0 < N - queued for N cycles.  */
int
try_ready (rtx next)
{
  ds_t old_ts, *ts;
  rtx link;

  ts = &TODO_SPEC (next);
  old_ts = *ts;

  gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
	      && ((old_ts & HARD_DEP)
		  || (old_ts & SPECULATIVE)));

  if (!(current_sched_info->flags & DO_SPECULATION))
    {
      if (!LOG_LINKS (next))
	*ts &= ~HARD_DEP;
    }
  else
    {
      *ts &= ~SPECULATIVE & ~HARD_DEP;

      link = LOG_LINKS (next);
      if (link)
	{
	  /* LOG_LINKS are maintained sorted.
	     So if DEP_STATUS of the first dep is SPECULATIVE,
	     then all other deps are speculative too.  */
	  if (DEP_STATUS (link) & SPECULATIVE)
	    {
	      /* Now we've got NEXT with speculative deps only.
		 1. Look at the deps to see what we have to do.
		 2. Check if we can do 'todo'.  */
	      *ts = DEP_STATUS (link) & SPECULATIVE;
	      while ((link = XEXP (link, 1)))
		*ts = ds_merge (*ts, DEP_STATUS (link) & SPECULATIVE);

	      if (dep_weak (*ts) < spec_info->weakness_cutoff)
		/* Too few points.  */
		*ts = (*ts & ~SPECULATIVE) | HARD_DEP;
	    }
	  else
	    *ts |= HARD_DEP;
	}
    }

  if (*ts & HARD_DEP)
    gcc_assert (*ts == old_ts
		&& QUEUE_INDEX (next) == QUEUE_NOWHERE);
  else if (current_sched_info->new_ready)
    *ts = current_sched_info->new_ready (next, *ts);

  /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
     have its original pattern or a changed (speculative) one.  This is
     due to changing ebb in region scheduling.
     * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
     has a speculative pattern.

     We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
     control-speculative NEXT could have been discarded by sched-rgn.c
     (the same case as when discarded by can_schedule_ready_p ()).  */

  if ((*ts & SPECULATIVE)
      /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
	 need to change anything.  */
      && *ts != old_ts)
    {
      int res;
      rtx new_pat;

      gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));

      res = speculate_insn (next, *ts, &new_pat);

      switch (res)
	{
	case -1:
	  /* It would be nice to change DEP_STATUS of all dependences,
	     which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
	     so we won't reanalyze anything.  */
	  *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
	  break;

	case 0:
	  /* We follow the rule, that every speculative insn
	     has non-null ORIG_PAT.  */
	  if (!ORIG_PAT (next))
	    ORIG_PAT (next) = PATTERN (next);
	  break;

	case 1:
	  if (!ORIG_PAT (next))
	    /* If we are going to overwrite the original pattern of insn,
	       save it.  */
	    ORIG_PAT (next) = PATTERN (next);

	  change_pattern (next, new_pat);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* We need to restore pattern only if (*ts == 0), because otherwise it is
     either correct (*ts & SPECULATIVE),
     or we simply don't care (*ts & HARD_DEP).  */

  gcc_assert (!ORIG_PAT (next)
	      || !RECOVERY_BLOCK (next)
	      || RECOVERY_BLOCK (next) == EXIT_BLOCK_PTR);

  if (*ts == 0 && ORIG_PAT (next) && !RECOVERY_BLOCK (next))
    /* We should change the pattern of every previously speculative
       instruction - and we determine if NEXT was speculative by using
       the ORIG_PAT field.  Except one case - simple checks have ORIG_PAT
       pat too, hence we also check for the RECOVERY_BLOCK.  */
    {
      change_pattern (next, ORIG_PAT (next));
      ORIG_PAT (next) = 0;
    }

  if (*ts & HARD_DEP)
    {
      /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
	 control-speculative NEXT could have been discarded by sched-rgn.c
	 (the same case as when discarded by can_schedule_ready_p ()).  */
      /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/

      change_queue_index (next, QUEUE_NOWHERE);
      return -1;
    }

  if (sched_verbose >= 2)
    {
      int s = TODO_SPEC (next);

      fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
	       (*current_sched_info->print_insn) (next, 0));

      if (spec_info && spec_info->dump)
	{
	  if (s & BEGIN_DATA)
	    fprintf (spec_info->dump, "; data-spec;");
	  if (s & BEGIN_CONTROL)
	    fprintf (spec_info->dump, "; control-spec;");
	  if (s & BE_IN_CONTROL)
	    fprintf (spec_info->dump, "; in-control-spec;");
	}

      fprintf (sched_dump, "\n");
    }

  adjust_priority (next);

  return fix_tick_ready (next);
}

/* Calculate INSN_TICK of NEXT and add it to either ready or queue list.  */
static int
fix_tick_ready (rtx next)
{
  rtx link;
  int tick, delay;

  link = RESOLVED_DEPS (next);

  if (link)
    {
      int full_p;

      tick = INSN_TICK (next);
      /* If tick is not equal to INVALID_TICK, then update
	 INSN_TICK of NEXT with the most recent resolved dependence
	 cost.  Otherwise, recalculate from scratch.  */
      full_p = tick == INVALID_TICK;
      do
	{
	  rtx pro;
	  int tick1;

	  pro = XEXP (link, 0);
	  gcc_assert (INSN_TICK (pro) >= MIN_TICK);

	  tick1 = INSN_TICK (pro) + insn_cost (pro, link, next);
	  if (tick1 > tick)
	    tick = tick1;
	}
      while ((link = XEXP (link, 1)) && full_p);
    }
  else
    tick = -1;

  INSN_TICK (next) = tick;

  delay = tick - clock_var;
  if (delay <= 0)
    delay = QUEUE_READY;

  change_queue_index (next, delay);

  return delay;
}
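
/* Worked example (illustrative numbers): if NEXT has two resolved
   producers scheduled at ticks 3 and 5 with latencies 2 and 1 to NEXT,
   the loop above computes tick == MAX (3 + 2, 5 + 1) == 6.  With
   clock_var == 4 the insn is queued for delay == 2 cycles; once
   clock_var >= 6, delay <= 0 and the insn goes straight to the ready
   list (QUEUE_READY).  */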

/* Move NEXT to the proper queue list with (DELAY >= 1),
   or add it to the ready list (DELAY == QUEUE_READY),
   or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE).  */
static void
change_queue_index (rtx next, int delay)
{
  int i = QUEUE_INDEX (next);

  gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
	      && delay != 0);
  gcc_assert (i != QUEUE_SCHEDULED);

  if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
      || (delay < 0 && delay == i))
    /* We have nothing to do.  */
    return;

  /* Remove NEXT from wherever it is now.  */
  if (i == QUEUE_READY)
    ready_remove_insn (next);
  else if (i >= 0)
    queue_remove (next);

  /* Add it to the proper place.  */
  if (delay == QUEUE_READY)
    ready_add (readyp, next, false);
  else if (delay >= 1)
    queue_insn (next, delay);

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\ttick updated: insn %s",
	       (*current_sched_info->print_insn) (next, 0));

      if (delay == QUEUE_READY)
	fprintf (sched_dump, " into ready\n");
      else if (delay >= 1)
	fprintf (sched_dump, " into queue with cost=%d\n", delay);
      else
	fprintf (sched_dump, " removed from ready or queue lists\n");
    }
}

/* INSN is being scheduled.  Resolve the dependence between INSN and NEXT.  */
static void
resolve_dep (rtx next, rtx insn)
{
  rtx dep;

  INSN_DEP_COUNT (next)--;

  dep = remove_list_elem (insn, &LOG_LINKS (next));
  XEXP (dep, 1) = RESOLVED_DEPS (next);
  RESOLVED_DEPS (next) = dep;

  gcc_assert ((INSN_DEP_COUNT (next) != 0 || !LOG_LINKS (next))
	      && (LOG_LINKS (next) || INSN_DEP_COUNT (next) == 0));
}

/* Extend H_I_D data.  */
static void
extend_h_i_d (void)
{
  /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
     pseudos which do not cross calls.  */
  int new_max_uid = get_max_uid () + 1;

  h_i_d = xrecalloc (h_i_d, new_max_uid, old_max_uid, sizeof (*h_i_d));
  old_max_uid = new_max_uid;

  if (targetm.sched.h_i_d_extended)
    targetm.sched.h_i_d_extended ();
}

/* Extend READY, READY_TRY and CHOICE_STACK arrays.
   N_NEW_INSNS is the number of additional elements to allocate.  */
static void
extend_ready (int n_new_insns)
{
  int i;

  readyp->veclen = rgn_n_insns + n_new_insns + 1 + issue_rate;
  readyp->vec = XRESIZEVEC (rtx, readyp->vec, readyp->veclen);

  ready_try = xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
			 rgn_n_insns + 1, sizeof (char));

  rgn_n_insns += n_new_insns;

  choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
			     rgn_n_insns + 1);

  for (i = rgn_n_insns; n_new_insns--; i--)
    choice_stack[i].state = xmalloc (dfa_state_size);
}

/* Extend global scheduler structures (those that live across calls to
   schedule_block) to include information about just emitted INSN.  */
static void
extend_global (rtx insn)
{
  gcc_assert (INSN_P (insn));
  /* These structures have scheduler scope.  */
  extend_h_i_d ();
  init_h_i_d (insn);

  extend_dependency_caches (1, 0);
}

/* Extends global and local scheduler structures to include information
   about just emitted INSN.  */
static void
extend_all (rtx insn)
{
  extend_global (insn);

  /* These structures have block scope.  */
  extend_ready (1);

  (*current_sched_info->add_remove_insn) (insn, 0);
}

/* Initialize h_i_d entry of the new INSN with default values.
   Values that are not explicitly initialized here hold zero.  */
static void
init_h_i_d (rtx insn)
{
  INSN_LUID (insn) = luid++;
  INSN_COST (insn) = -1;
  TODO_SPEC (insn) = HARD_DEP;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
  INSN_TICK (insn) = INVALID_TICK;
  INTER_TICK (insn) = INVALID_TICK;
  find_insn_reg_weight1 (insn);
}

/* Generates recovery code for INSN.  */
static void
generate_recovery_code (rtx insn)
{
  if (TODO_SPEC (insn) & BEGIN_SPEC)
    begin_speculative_block (insn);

  /* Here we have insn with no dependencies to
     instructions other than CHECK_SPEC ones.  */

  if (TODO_SPEC (insn) & BE_IN_SPEC)
    add_to_speculative_block (insn);
}

/* Helper function.
   Tries to add speculative dependencies of type FS between instructions
   in LINK list and TWIN.  */
static void
process_insn_depend_be_in_spec (rtx link, rtx twin, ds_t fs)
{
  for (; link; link = XEXP (link, 1))
    {
      ds_t ds;
      rtx consumer;

      consumer = XEXP (link, 0);

      ds = DEP_STATUS (link);

      if (fs && (ds & DEP_TYPES) == DEP_TRUE)
	ds = (ds & ~BEGIN_SPEC) | fs;

      add_back_forw_dep (consumer, twin, REG_NOTE_KIND (link), ds);
    }
}
3321
3322/* Generates recovery code for BEGIN speculative INSN. */
3323static void
3324begin_speculative_block (rtx insn)
3325{
3326 if (TODO_SPEC (insn) & BEGIN_DATA)
3327 nr_begin_data++;
3328 if (TODO_SPEC (insn) & BEGIN_CONTROL)
3329 nr_begin_control++;
3330
3331 create_check_block_twin (insn, false);
3332
3333 TODO_SPEC (insn) &= ~BEGIN_SPEC;
3334}
3335
3336/* Generates recovery code for BE_IN speculative INSN. */
3337static void
3338add_to_speculative_block (rtx insn)
3339{
3340 ds_t ts;
3341 rtx link, twins = NULL;
3342
3343 ts = TODO_SPEC (insn);
3344 gcc_assert (!(ts & ~BE_IN_SPEC));
3345
3346 if (ts & BE_IN_DATA)
3347 nr_be_in_data++;
3348 if (ts & BE_IN_CONTROL)
3349 nr_be_in_control++;
3350
3351 TODO_SPEC (insn) &= ~BE_IN_SPEC;
3352 gcc_assert (!TODO_SPEC (insn));
3353
3354 DONE_SPEC (insn) |= ts;
3355
3356 /* First we convert all simple checks to branchy. */
3357 for (link = LOG_LINKS (insn); link;)
3358 {
3359 rtx check;
3360
3361 check = XEXP (link, 0);
3362
3363 if (RECOVERY_BLOCK (check))
3364 {
3365 create_check_block_twin (check, true);
3366 link = LOG_LINKS (insn);
3367 }
3368 else
3369 link = XEXP (link, 1);
3370 }
3371
3372 clear_priorities (insn);
3373
3374 do
3375 {
3376 rtx link, check, twin;
3377 basic_block rec;
3378
3379 link = LOG_LINKS (insn);
3380 gcc_assert (!(DEP_STATUS (link) & BEGIN_SPEC)
3381 && (DEP_STATUS (link) & BE_IN_SPEC)
3382 && (DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3383
3384 check = XEXP (link, 0);
3385 gcc_assert (!RECOVERY_BLOCK (check) && !ORIG_PAT (check)
3386 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
3387
3388 rec = BLOCK_FOR_INSN (check);
3389
3390 twin = emit_insn_before (copy_rtx (PATTERN (insn)), BB_END (rec));
3391 extend_global (twin);
3392
3393 RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3394
3395 if (sched_verbose && spec_info->dump)
3396 /* INSN_BB (insn) isn't determined for twin insns yet.
3397 So we can't use current_sched_info->print_insn. */
3398 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3399 INSN_UID (twin), rec->index);
3400
3401 twins = alloc_INSN_LIST (twin, twins);
3402
3403 /* Add dependences between TWIN and all apropriate
3404 instructions from REC. */
3405 do
3406 {
3407 add_back_forw_dep (twin, check, REG_DEP_TRUE, DEP_TRUE);
3408
3409 do
3410 {
3411 link = XEXP (link, 1);
3412 if (link)
3413 {
3414 check = XEXP (link, 0);
3415 if (BLOCK_FOR_INSN (check) == rec)
3416 break;
3417 }
3418 else
3419 break;
3420 }
3421 while (1);
3422 }
3423 while (link);
3424
3425 process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, ts);
3426
3427 for (link = LOG_LINKS (insn); link;)
3428 {
3429 check = XEXP (link, 0);
3430
3431 if (BLOCK_FOR_INSN (check) == rec)
3432 {
3433 delete_back_forw_dep (insn, check);
3434 link = LOG_LINKS (insn);
3435 }
3436 else
3437 link = XEXP (link, 1);
3438 }
3439 }
3440 while (LOG_LINKS (insn));
3441
3442 /* We can't add the dependence between insn and twin earlier because
3443 that would make twin appear in the INSN_DEPEND (insn). */
3444 while (twins)
3445 {
3446 rtx twin;
3447
3448 twin = XEXP (twins, 0);
3449 calc_priorities (twin);
3450 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3451
3452 twin = XEXP (twins, 1);
3453 free_INSN_LIST_node (twins);
3454 twins = twin;
3455 }
3456}
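
/* A sketch (illustrative only; rec1/rec2 are hypothetical names) of what
   the function above builds for a BE_IN speculative INSN whose backward
   dependencies lead to checks in two recovery blocks:

	rec1:  ... checks ... twin1 (copy of INSN's pattern), jump
	rec2:  ... checks ... twin2 (copy of INSN's pattern), jump

   Each twin gets TRUE dependencies on the check insns of its own
   recovery block, inherits INSN's forward dependencies through
   process_insn_depend_be_in_spec, and finally gets an OUTPUT dependence
   on INSN itself.  */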
3457
3458/* Extends the array pointed to by P and fills only the new part with zeros.  */
3459void *
3460xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
3461{
3462 gcc_assert (new_nmemb >= old_nmemb);
3463 p = XRESIZEVAR (void, p, new_nmemb * size);
3464 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
3465 return p;
3466}
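
/* A minimal usage sketch, not compiled; it mirrors the call made in
   extend_bb below.  Old entries survive the resize, new ones are zeroed.  */
#if 0
  line_note_head = xrecalloc (line_note_head, last_basic_block,
			      old_last_basic_block,
			      sizeof (*line_note_head));
  /* Entries [0, old_last_basic_block) keep their old values;
     entries [old_last_basic_block, last_basic_block) read as zero.  */
#endif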
3467
3468/* Return the probability of speculation success for the speculation
3469 status DS. */
3470static dw_t
3471dep_weak (ds_t ds)
3472{
3473 ds_t res = 1, dt;
3474 int n = 0;
3475
3476 dt = FIRST_SPEC_TYPE;
3477 do
3478 {
3479 if (ds & dt)
3480 {
3481 res *= (ds_t) get_dep_weak (ds, dt);
3482 n++;
3483 }
3484
3485 if (dt == LAST_SPEC_TYPE)
3486 break;
3487 dt <<= SPEC_TYPE_SHIFT;
3488 }
3489 while (1);
3490
3491 gcc_assert (n);
3492 while (--n)
3493 res /= MAX_DEP_WEAK;
3494
3495 if (res < MIN_DEP_WEAK)
3496 res = MIN_DEP_WEAK;
3497
3498 gcc_assert (res <= MAX_DEP_WEAK);
3499
3500 return (dw_t) res;
3501}
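
/* A worked example of the fixed-point arithmetic above, assuming the
   usual convention that MAX_DEP_WEAK stands for probability 1: if DS
   carries BEGIN_DATA with weakness 3*MAX_DEP_WEAK/4 and BE_IN_DATA with
   weakness MAX_DEP_WEAK/2, the loop computes
   res = (3*MAX_DEP_WEAK/4) * (MAX_DEP_WEAK/2) with n == 2, and the
   single division by MAX_DEP_WEAK leaves 3*MAX_DEP_WEAK/8, i.e. the
   product of the two success probabilities: 3/4 * 1/2 = 3/8.  */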
3502
3503/* Helper function.
3504 Find fallthru edge from PRED. */
3505static edge
3506find_fallthru_edge (basic_block pred)
3507{
3508 edge e;
3509 edge_iterator ei;
3510 basic_block succ;
3511
3512 succ = pred->next_bb;
3513 gcc_assert (succ->prev_bb == pred);
3514
3515 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
3516 {
3517 FOR_EACH_EDGE (e, ei, pred->succs)
3518 if (e->flags & EDGE_FALLTHRU)
3519 {
3520 gcc_assert (e->dest == succ);
3521 return e;
3522 }
3523 }
3524 else
3525 {
3526 FOR_EACH_EDGE (e, ei, succ->preds)
3527 if (e->flags & EDGE_FALLTHRU)
3528 {
3529 gcc_assert (e->src == pred);
3530 return e;
3531 }
3532 }
3533
3534 return NULL;
3535}
3536
3537/* Initialize BEFORE_RECOVERY variable. */
3538static void
3539init_before_recovery (void)
3540{
3541 basic_block last;
3542 edge e;
3543
3544 last = EXIT_BLOCK_PTR->prev_bb;
3545 e = find_fallthru_edge (last);
3546
3547 if (e)
3548 {
3549      /* We create two basic blocks:
3550	 1. A single-instruction block, inserted right after E->SRC,
3551	 which jumps to
3552	 2. An empty block placed right before EXIT_BLOCK.
3553	 Recovery blocks will be emitted between these two blocks.  */
3554
3555 basic_block single, empty;
3556 rtx x, label;
3557
3558 single = create_empty_bb (last);
3559 empty = create_empty_bb (single);
3560
3561 single->count = last->count;
3562 empty->count = last->count;
3563 single->frequency = last->frequency;
3564 empty->frequency = last->frequency;
3565 BB_COPY_PARTITION (single, last);
3566 BB_COPY_PARTITION (empty, last);
3567
3568 redirect_edge_succ (e, single);
3569 make_single_succ_edge (single, empty, 0);
3570 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
3571 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
3572
3573 label = block_label (empty);
3574 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
3575 JUMP_LABEL (x) = label;
3576 LABEL_NUSES (label)++;
3577 extend_global (x);
3578
3579 emit_barrier_after (x);
3580
3581 add_block (empty, 0);
3582 add_block (single, 0);
3583
3584 before_recovery = single;
3585
3586 if (sched_verbose >= 2 && spec_info->dump)
3587 fprintf (spec_info->dump,
3588 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
3589 last->index, single->index, empty->index);
3590 }
3591 else
3592 before_recovery = last;
3593}
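
/* A sketch of the CFG transformation performed above when the fallthru
   edge E exists:

	before:  last ->(fallthru)-> EXIT
	after:   last ->(fallthru)-> single --(jump)--> empty
		 empty ->(fallthru)-> EXIT

   Recovery blocks will later be emitted between SINGLE and EMPTY by
   create_recovery_block below.  */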
3594
3595/* Returns a new recovery block.  */
3596static basic_block
3597create_recovery_block (void)
3598{
3599 rtx label;
3600 basic_block rec;
3601
3602 added_recovery_block_p = true;
3603
3604 if (!before_recovery)
3605 init_before_recovery ();
3606
3607 label = gen_label_rtx ();
3608 gcc_assert (BARRIER_P (NEXT_INSN (BB_END (before_recovery))));
3609 label = emit_label_after (label, NEXT_INSN (BB_END (before_recovery)));
3610
3611 rec = create_basic_block (label, label, before_recovery);
3612 emit_barrier_after (BB_END (rec));
3613
3614 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
3615 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
3616
3617 if (sched_verbose && spec_info->dump)
3618 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
3619 rec->index);
3620
3621 before_recovery = rec;
3622
3623 return rec;
3624}
3625
3626/* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
3627   INSN is a simple check that should be converted to a branchy one.  */
3628static void
3629create_check_block_twin (rtx insn, bool mutate_p)
3630{
3631 basic_block rec;
3632 rtx label, check, twin, link;
3633 ds_t fs;
3634
3635 gcc_assert (ORIG_PAT (insn)
3636 && (!mutate_p
3637 || (RECOVERY_BLOCK (insn) == EXIT_BLOCK_PTR
3638 && !(TODO_SPEC (insn) & SPECULATIVE))));
3639
3640 /* Create recovery block. */
3641 if (mutate_p || targetm.sched.needs_block_p (insn))
3642 {
3643 rec = create_recovery_block ();
3644 label = BB_HEAD (rec);
3645 }
3646 else
3647 {
3648 rec = EXIT_BLOCK_PTR;
3649 label = 0;
3650 }
3651
3652 /* Emit CHECK. */
3653 check = targetm.sched.gen_check (insn, label, mutate_p);
3654
3655 if (rec != EXIT_BLOCK_PTR)
3656 {
3657      /* To have mem_reg alive at the beginning of second_bb,
3658	 we emit the check BEFORE insn, so that insn after the split
3659	 will be at the beginning of second_bb, which will
3660	 provide us with the correct life information.  */
3661 check = emit_jump_insn_before (check, insn);
3662 JUMP_LABEL (check) = label;
3663 LABEL_NUSES (label)++;
3664 }
3665 else
3666 check = emit_insn_before (check, insn);
3667
3668 /* Extend data structures. */
3669 extend_all (check);
3670 RECOVERY_BLOCK (check) = rec;
3671
3672 if (sched_verbose && spec_info->dump)
3673 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
3674 (*current_sched_info->print_insn) (check, 0));
3675
3676 gcc_assert (ORIG_PAT (insn));
3677
3678  /* Initialize TWIN (TWIN is a duplicate of the original instruction
3679     in the recovery block).  */
3680 if (rec != EXIT_BLOCK_PTR)
3681 {
3682 rtx link;
3683
3684 for (link = RESOLVED_DEPS (insn); link; link = XEXP (link, 1))
3685 if (DEP_STATUS (link) & DEP_OUTPUT)
3686 {
3687 RESOLVED_DEPS (check) =
3688 alloc_DEPS_LIST (XEXP (link, 0), RESOLVED_DEPS (check), DEP_TRUE);
3689 PUT_REG_NOTE_KIND (RESOLVED_DEPS (check), REG_DEP_TRUE);
3690 }
3691
3692 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
3693 extend_global (twin);
3694
3695 if (sched_verbose && spec_info->dump)
3696 /* INSN_BB (insn) isn't determined for twin insns yet.
3697 So we can't use current_sched_info->print_insn. */
3698 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3699 INSN_UID (twin), rec->index);
3700 }
3701 else
3702 {
3703 ORIG_PAT (check) = ORIG_PAT (insn);
3704 HAS_INTERNAL_DEP (check) = 1;
3705 twin = check;
3706 /* ??? We probably should change all OUTPUT dependencies to
3707 (TRUE | OUTPUT). */
3708 }
3709
3710 RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3711
3712 if (rec != EXIT_BLOCK_PTR)
3713 /* In case of branchy check, fix CFG. */
3714 {
3715 basic_block first_bb, second_bb;
3716 rtx jump;
3717 edge e;
3718 int edge_flags;
3719
3720 first_bb = BLOCK_FOR_INSN (check);
3721 e = split_block (first_bb, check);
3722      /* split_block emits a note if *check == BB_END.  Probably it
3723	 is better to rip that note off.  */
3724 gcc_assert (e->src == first_bb);
3725 second_bb = e->dest;
3726
3727      /* This fixes the incoming edge.  */
3728 /* ??? Which other flags should be specified? */
3729 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
3730	/* The partition types are the same if both are "unpartitioned".  */
3731 edge_flags = EDGE_CROSSING;
3732 else
3733 edge_flags = 0;
3734
3735 e = make_edge (first_bb, rec, edge_flags);
3736
3737 add_block (second_bb, first_bb);
3738
3739 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb)));
3740 label = block_label (second_bb);
3741 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
3742 JUMP_LABEL (jump) = label;
3743 LABEL_NUSES (label)++;
3744 extend_global (jump);
3745
3746 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
3747	/* The partition types are the same if both are "unpartitioned".  */
3748 {
3749 /* Rewritten from cfgrtl.c. */
3750 if (flag_reorder_blocks_and_partition
3751 && targetm.have_named_sections
3752 /*&& !any_condjump_p (jump)*/)
3753 /* any_condjump_p (jump) == false.
3754 We don't need the same note for the check because
3755 any_condjump_p (check) == true. */
3756 {
3757 REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
3758 NULL_RTX,
3759 REG_NOTES (jump));
3760 }
3761 edge_flags = EDGE_CROSSING;
3762 }
3763 else
3764 edge_flags = 0;
3765
3766 make_single_succ_edge (rec, second_bb, edge_flags);
3767
3768 add_block (rec, EXIT_BLOCK_PTR);
3769 }
3770
3771 /* Move backward dependences from INSN to CHECK and
3772 move forward dependences from INSN to TWIN. */
3773 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
3774 {
3775 ds_t ds;
3776
3777 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
3778 check --TRUE--> producer ??? or ANTI ???
3779 twin --TRUE--> producer
3780 twin --ANTI--> check
3781
3782 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
3783 check --ANTI--> producer
3784 twin --ANTI--> producer
3785 twin --ANTI--> check
3786
3787 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
3788 check ~~TRUE~~> producer
3789 twin ~~TRUE~~> producer
3790 twin --ANTI--> check */
3791
3792 ds = DEP_STATUS (link);
3793
3794 if (ds & BEGIN_SPEC)
3795 {
3796 gcc_assert (!mutate_p);
3797 ds &= ~BEGIN_SPEC;
3798 }
3799
3800 if (rec != EXIT_BLOCK_PTR)
3801 {
3802 add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3803 add_back_forw_dep (twin, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3804 }
3805 else
3806 add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3807 }
3808
3809 for (link = LOG_LINKS (insn); link;)
3810 if ((DEP_STATUS (link) & BEGIN_SPEC)
3811 || mutate_p)
3812 /* We can delete this dep only if we totally overcome it with
3813 BEGIN_SPECULATION. */
3814 {
3815 delete_back_forw_dep (insn, XEXP (link, 0));
3816 link = LOG_LINKS (insn);
3817 }
3818 else
3819 link = XEXP (link, 1);
3820
3821 fs = 0;
3822
3823 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
3824 here. */
3825
3826 gcc_assert (!DONE_SPEC (insn));
3827
3828 if (!mutate_p)
3829 {
3830 ds_t ts = TODO_SPEC (insn);
3831
3832 DONE_SPEC (insn) = ts & BEGIN_SPEC;
3833 CHECK_SPEC (check) = ts & BEGIN_SPEC;
3834
3835 if (ts & BEGIN_DATA)
3836 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
3837 if (ts & BEGIN_CONTROL)
3838 fs = set_dep_weak (fs, BE_IN_CONTROL, get_dep_weak (ts, BEGIN_CONTROL));
3839 }
3840 else
3841 CHECK_SPEC (check) = CHECK_SPEC (insn);
3842
3843 /* Future speculations: call the helper. */
3844 process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, fs);
3845
3846 if (rec != EXIT_BLOCK_PTR)
3847 {
3848      /* Which types of dependencies to use here is, in general,
3849	 a machine-dependent question...  But, for now,
3850	 it is not.  */
3851
3852 if (!mutate_p)
3853 {
3854 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE);
3855 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3856 }
3857 else
3858 {
3859 if (spec_info->dump)
3860 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
3861 (*current_sched_info->print_insn) (insn, 0));
3862
3863 for (link = INSN_DEPEND (insn); link; link = INSN_DEPEND (insn))
3864 delete_back_forw_dep (XEXP (link, 0), insn);
3865
3866 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
3867 try_ready (check);
3868
3869 sched_remove_insn (insn);
3870 }
3871
3872 add_back_forw_dep (twin, check, REG_DEP_ANTI, DEP_ANTI);
3873 }
3874 else
3875 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
3876
3877 if (!mutate_p)
3878    /* Fix priorities.  If MUTATE_P is nonzero, this is not necessary,
3879 because it'll be done later in add_to_speculative_block. */
3880 {
3881 clear_priorities (twin);
3882 calc_priorities (twin);
3883 }
3884}
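
/* A sketch of the result of the branchy case above (REC !=
   EXIT_BLOCK_PTR) on the !MUTATE_P path:

	first_bb:   ... CHECK	(new edge to REC, taken on failure)
	second_bb:  INSN ...	(fallthru from FIRST_BB; also the target
				 of the jump that closes REC)
	rec:	    TWIN, jump to SECOND_BB

   For a simple check (REC == EXIT_BLOCK_PTR) no CFG changes are made
   and CHECK plays the role of TWIN as well.  */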
3885
3886/* Removes dependencies between instructions in the recovery block REC
3887   and ordinary region instructions.  It keeps inner dependencies so it
3888   won't be necessary to recompute them.  */
3889static void
3890fix_recovery_deps (basic_block rec)
3891{
3892 rtx note, insn, link, jump, ready_list = 0;
3893 bitmap_head in_ready;
3894
3895 bitmap_initialize (&in_ready, 0);
3896
3897 /* NOTE - a basic block note. */
3898 note = NEXT_INSN (BB_HEAD (rec));
3899 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
3900 insn = BB_END (rec);
3901 gcc_assert (JUMP_P (insn));
3902 insn = PREV_INSN (insn);
3903
3904 do
3905 {
3906 for (link = INSN_DEPEND (insn); link;)
3907 {
3908 rtx consumer;
3909
3910 consumer = XEXP (link, 0);
3911
3912 if (BLOCK_FOR_INSN (consumer) != rec)
3913 {
3914 delete_back_forw_dep (consumer, insn);
3915
3916 if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
3917 {
3918 ready_list = alloc_INSN_LIST (consumer, ready_list);
3919 bitmap_set_bit (&in_ready, INSN_LUID (consumer));
3920 }
3921
3922 link = INSN_DEPEND (insn);
3923 }
3924 else
3925 {
3926 gcc_assert ((DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3927
3928 link = XEXP (link, 1);
3929 }
3930 }
3931
3932 insn = PREV_INSN (insn);
3933 }
3934 while (insn != note);
3935
3936 bitmap_clear (&in_ready);
3937
3938 /* Try to add instructions to the ready or queue list. */
3939 for (link = ready_list; link; link = XEXP (link, 1))
3940 try_ready (XEXP (link, 0));
3941 free_INSN_LIST_list (&ready_list);
3942
3943 /* Fixing jump's dependences. */
3944 insn = BB_HEAD (rec);
3945 jump = BB_END (rec);
3946
3947 gcc_assert (LABEL_P (insn));
3948 insn = NEXT_INSN (insn);
3949
3950 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
3951 add_jump_dependencies (insn, jump);
3952}
3953
3954/* The function saves line notes at the beginning of block B. */
3955static void
3956associate_line_notes_with_blocks (basic_block b)
3957{
3958 rtx line;
3959
3960 for (line = BB_HEAD (b); line; line = PREV_INSN (line))
3961 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
3962 {
3963 line_note_head[b->index] = line;
3964 break;
3965 }
3966 /* Do a forward search as well, since we won't get to see the first
3967 notes in a basic block. */
3968 for (line = BB_HEAD (b); line; line = NEXT_INSN (line))
3969 {
3970 if (INSN_P (line))
3971 break;
3972 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
3973 line_note_head[b->index] = line;
3974 }
3975}
3976
3977/* Changes the pattern of INSN to NEW_PAT.  */
3978static void
3979change_pattern (rtx insn, rtx new_pat)
3980{
3981 int t;
3982
3983 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
3984 gcc_assert (t);
3985 /* Invalidate INSN_COST, so it'll be recalculated. */
3986 INSN_COST (insn) = -1;
3987 /* Invalidate INSN_TICK, so it'll be recalculated. */
3988 INSN_TICK (insn) = INVALID_TICK;
3989 dfa_clear_single_insn_cache (insn);
3990}
3991
3992
3993/* Return -1 if INSN can't be speculated;
3994   0 if, for speculation of type REQUEST, it is OK to use the
3995   current instruction pattern;
3996   1 if the pattern must be changed to *NEW_PAT for INSN to be speculative.  */
3997static int
3998speculate_insn (rtx insn, ds_t request, rtx *new_pat)
3999{
4000 gcc_assert (current_sched_info->flags & DO_SPECULATION
4001 && (request & SPECULATIVE));
4002
4003 if (!NONJUMP_INSN_P (insn)
4004 || HAS_INTERNAL_DEP (insn)
4005 || SCHED_GROUP_P (insn)
4006 || side_effects_p (PATTERN (insn))
4007 || (request & spec_info->mask) != request)
4008 return -1;
4009
4010 gcc_assert (!RECOVERY_BLOCK (insn));
4011
4012 if (request & BE_IN_SPEC)
4013 {
4014 if (may_trap_p (PATTERN (insn)))
4015 return -1;
4016
4017 if (!(request & BEGIN_SPEC))
4018 return 0;
4019 }
4020
4021 return targetm.sched.speculate_insn (insn, request & BEGIN_SPEC, new_pat);
4022}
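
/* An illustrative caller sketch, not compiled, showing how the three
   return values above are meant to be consumed; INSN and REQUEST stand
   for the caller's data.  */
#if 0
  {
    rtx new_pat;

    switch (speculate_insn (insn, request, &new_pat))
      {
      case -1:	/* INSN can't be speculated at all.  */
	break;
      case 0:	/* The current pattern already suits REQUEST.  */
	break;
      case 1:	/* Switch INSN to the speculative pattern.  */
	change_pattern (insn, new_pat);
	break;
      }
  }
#endif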
4023
4024/* Print some information about block BB, which starts with HEAD and
4025 ends with TAIL, before scheduling it.
4026   I is zero if the scheduler is about to start with a fresh ebb.  */
4027static void
4028dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4029{
4030 if (!i)
4031 fprintf (sched_dump,
4032 ";; ======================================================\n");
4033 else
4034 fprintf (sched_dump,
4035 ";; =====================ADVANCING TO=====================\n");
4036 fprintf (sched_dump,
4037 ";; -- basic block %d from %d to %d -- %s reload\n",
4038 bb->index, INSN_UID (head), INSN_UID (tail),
4039 (reload_completed ? "after" : "before"));
4040 fprintf (sched_dump,
4041 ";; ======================================================\n");
4042 fprintf (sched_dump, "\n");
4043}
4044
4045/* Unlink basic block notes and labels and save them, so they
4046   can be easily restored.  We unlink basic block notes in the EBB to
4047   provide backward compatibility with the previous code, as target backends
4048   assume that there'll be only instructions between
4049   current_sched_info->{head and tail}.  We restore these notes as soon
4050   as we can.
4051   FIRST (LAST) is the first (last) basic block in the ebb.
4052   NB: In the usual case (FIRST == LAST) nothing is really done.  */
4053void
4054unlink_bb_notes (basic_block first, basic_block last)
4055{
4056 /* We DON'T unlink basic block notes of the first block in the ebb. */
4057 if (first == last)
4058 return;
4059
4060 bb_header = xmalloc (last_basic_block * sizeof (*bb_header));
4061
4062 /* Make a sentinel. */
4063 if (last->next_bb != EXIT_BLOCK_PTR)
4064 bb_header[last->next_bb->index] = 0;
4065
4066 first = first->next_bb;
4067 do
4068 {
4069 rtx prev, label, note, next;
4070
4071 label = BB_HEAD (last);
4072 if (LABEL_P (label))
4073 note = NEXT_INSN (label);
4074 else
4075 note = label;
4076 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4077
4078 prev = PREV_INSN (label);
4079 next = NEXT_INSN (note);
4080 gcc_assert (prev && next);
4081
4082 NEXT_INSN (prev) = next;
4083 PREV_INSN (next) = prev;
4084
4085 bb_header[last->index] = label;
4086
4087 if (last == first)
4088 break;
4089
4090 last = last->prev_bb;
4091 }
4092 while (1);
4093}
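
/* What the splice above does to the insn chain, schematically, for every
   block of the ebb except the first one:

	before:  prev <-> label <-> note <-> next
	after:   prev <-> next	(LABEL is kept in bb_header[bb->index])

   restore_bb_notes below undoes this.  */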
4094
4095/* Restore basic block notes.
4096 FIRST is the first basic block in the ebb. */
4097static void
4098restore_bb_notes (basic_block first)
4099{
4100 if (!bb_header)
4101 return;
4102
4103 /* We DON'T unlink basic block notes of the first block in the ebb. */
4104 first = first->next_bb;
4105  /* Remember: FIRST is actually the second basic block in the ebb.  */
4106
4107 while (first != EXIT_BLOCK_PTR
4108 && bb_header[first->index])
4109 {
4110 rtx prev, label, note, next;
4111
4112 label = bb_header[first->index];
4113 prev = PREV_INSN (label);
4114 next = NEXT_INSN (prev);
4115
4116 if (LABEL_P (label))
4117 note = NEXT_INSN (label);
4118 else
4119 note = label;
4120 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4121
4122 bb_header[first->index] = 0;
4123
4124 NEXT_INSN (prev) = label;
4125 NEXT_INSN (note) = next;
4126 PREV_INSN (next) = note;
4127
4128 first = first->next_bb;
4129 }
4130
4131 free (bb_header);
4132 bb_header = 0;
4133}
4134
4135/* Extend per basic block data structures of the scheduler.
4136 If BB is NULL, initialize structures for the whole CFG.
4137 Otherwise, initialize them for the just created BB. */
4138static void
4139extend_bb (basic_block bb)
4140{
4141 rtx insn;
4142
4143 if (write_symbols != NO_DEBUG)
4144 {
4145 /* Save-line-note-head:
4146 Determine the line-number at the start of each basic block.
4147 This must be computed and saved now, because after a basic block's
4148 predecessor has been scheduled, it is impossible to accurately
4149 determine the correct line number for the first insn of the block. */
4150 line_note_head = xrecalloc (line_note_head, last_basic_block,
4151 old_last_basic_block,
4152 sizeof (*line_note_head));
4153
4154 if (bb)
4155 associate_line_notes_with_blocks (bb);
4156 else
4157 FOR_EACH_BB (bb)
4158 associate_line_notes_with_blocks (bb);
4159 }
4160
4161 old_last_basic_block = last_basic_block;
4162
4163 if (current_sched_info->flags & USE_GLAT)
4164 {
4165 glat_start = xrealloc (glat_start,
4166 last_basic_block * sizeof (*glat_start));
4167 glat_end = xrealloc (glat_end, last_basic_block * sizeof (*glat_end));
4168 }
4169
4170  /* The following is done to keep current_sched_info->next_tail non-null.  */
4171
4172 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4173 if (NEXT_INSN (insn) == 0
4174 || (!NOTE_P (insn)
4175 && !LABEL_P (insn)
4176 /* Don't emit a NOTE if it would end up before a BARRIER. */
4177 && !BARRIER_P (NEXT_INSN (insn))))
4178 {
4179 emit_note_after (NOTE_INSN_DELETED, insn);
4180      /* Make INSN appear outside the BB.  */
4181 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4182 }
4183}
4184
4185/* Add a basic block BB to extended basic block EBB.
4186   If EBB is EXIT_BLOCK_PTR, then BB is a recovery block.
4187 If EBB is NULL, then BB should be a new region. */
4188void
4189add_block (basic_block bb, basic_block ebb)
4190{
4191 gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO
4192 && bb->il.rtl->global_live_at_start == 0
4193 && bb->il.rtl->global_live_at_end == 0);
4194
4195 extend_bb (bb);
4196
4197 glat_start[bb->index] = 0;
4198 glat_end[bb->index] = 0;
4199
4200 if (current_sched_info->add_block)
4201 /* This changes only data structures of the front-end. */
4202 current_sched_info->add_block (bb, ebb);
4203}
4204
4205/* Helper function.
4206 Fix CFG after both in- and inter-block movement of
4207 control_flow_insn_p JUMP. */
4208static void
4209fix_jump_move (rtx jump)
4210{
4211 basic_block bb, jump_bb, jump_bb_next;
4212
4213 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4214 jump_bb = BLOCK_FOR_INSN (jump);
4215 jump_bb_next = jump_bb->next_bb;
4216
4217 gcc_assert (current_sched_info->flags & SCHED_EBB
4218 || (RECOVERY_BLOCK (jump)
4219 && RECOVERY_BLOCK (jump) != EXIT_BLOCK_PTR));
4220
4221 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4222 /* if jump_bb_next is not empty. */
4223 BB_END (jump_bb) = BB_END (jump_bb_next);
4224
4225 if (BB_END (bb) != PREV_INSN (jump))
4226    /* Then there are instructions after jump that should be placed
4227       in jump_bb_next.  */
4228 BB_END (jump_bb_next) = BB_END (bb);
4229 else
4230 /* Otherwise jump_bb_next is empty. */
4231 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
4232
4233 /* To make assertion in move_insn happy. */
4234 BB_END (bb) = PREV_INSN (jump);
4235
4236 update_bb_for_insn (jump_bb_next);
4237}
4238
4239/* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
4240static void
4241move_block_after_check (rtx jump)
4242{
4243 basic_block bb, jump_bb, jump_bb_next;
4244 VEC(edge,gc) *t;
4245
4246 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4247 jump_bb = BLOCK_FOR_INSN (jump);
4248 jump_bb_next = jump_bb->next_bb;
4249
4250 update_bb_for_insn (jump_bb);
4251
4252 gcc_assert (RECOVERY_BLOCK (jump)
4253 || RECOVERY_BLOCK (BB_END (jump_bb_next)));
4254
4255 unlink_block (jump_bb_next);
4256 link_block (jump_bb_next, bb);
4257
4258 t = bb->succs;
4259 bb->succs = 0;
4260 move_succs (&(jump_bb->succs), bb);
4261 move_succs (&(jump_bb_next->succs), jump_bb);
4262 move_succs (&t, jump_bb_next);
4263
4264 if (current_sched_info->fix_recovery_cfg)
4265 current_sched_info->fix_recovery_cfg
4266 (bb->index, jump_bb->index, jump_bb_next->index);
4267}
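
/* The net effect of the three move_succs calls above: after
   JUMP_BB_NEXT is relinked right after BB, each block ends up with the
   successor vector that matches its new contents:

	bb		gets jump_bb's old successors,
	jump_bb		gets jump_bb_next's old successors,
	jump_bb_next	gets bb's old successors.  */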
4268
4269/* Helper function for move_block_after_check.
4270   This function attaches the edge vector pointed to by SUCCSP to
4271   block TO.  */
4272static void
4273move_succs (VEC(edge,gc) **succsp, basic_block to)
4274{
4275 edge e;
4276 edge_iterator ei;
4277
4278 gcc_assert (to->succs == 0);
4279
4280 to->succs = *succsp;
4281
4282 FOR_EACH_EDGE (e, ei, to->succs)
4283 e->src = to;
4284
4285 *succsp = 0;
4286}
4287
4288/* Initialize GLAT (global_live_at_{start, end}) structures.
4289   GLAT structures are used to substitute global_live_at_{start, end}
4290   regsets during scheduling.  This is necessary to use such functions as
4291   split_block (), as they assume consistency of register live information.  */
4292static void
4293init_glat (void)
4294{
4295 basic_block bb;
4296
4297 FOR_ALL_BB (bb)
4298 init_glat1 (bb);
4299}
4300
4301/* Helper function for init_glat. */
4302static void
4303init_glat1 (basic_block bb)
4304{
4305 gcc_assert (bb->il.rtl->global_live_at_start != 0
4306 && bb->il.rtl->global_live_at_end != 0);
4307
4308 glat_start[bb->index] = bb->il.rtl->global_live_at_start;
4309 glat_end[bb->index] = bb->il.rtl->global_live_at_end;
4310
4311 if (current_sched_info->flags & DETACH_LIFE_INFO)
4312 {
4313 bb->il.rtl->global_live_at_start = 0;
4314 bb->il.rtl->global_live_at_end = 0;
4315 }
4316}
4317
4318/* Attach reg_live_info back to basic blocks.
4319   Also save regsets that should not have been changed during scheduling,
4320   for checking purposes (see check_reg_live).  */
4321void
4322attach_life_info (void)
4323{
4324 basic_block bb;
4325
4326 FOR_ALL_BB (bb)
4327 attach_life_info1 (bb);
4328}
4329
4330/* Helper function for attach_life_info. */
4331static void
4332attach_life_info1 (basic_block bb)
4333{
4334 gcc_assert (bb->il.rtl->global_live_at_start == 0
4335 && bb->il.rtl->global_live_at_end == 0);
4336
4337 if (glat_start[bb->index])
4338 {
4339 gcc_assert (glat_end[bb->index]);
4340
4341 bb->il.rtl->global_live_at_start = glat_start[bb->index];
4342 bb->il.rtl->global_live_at_end = glat_end[bb->index];
4343
4344 /* Make them NULL, so they won't be freed in free_glat. */
4345 glat_start[bb->index] = 0;
4346 glat_end[bb->index] = 0;
4347
4348#ifdef ENABLE_CHECKING
4349 if (bb->index < NUM_FIXED_BLOCKS
4350 || current_sched_info->region_head_or_leaf_p (bb, 0))
4351 {
4352 glat_start[bb->index] = ALLOC_REG_SET (&reg_obstack);
4353 COPY_REG_SET (glat_start[bb->index],
4354 bb->il.rtl->global_live_at_start);
4355 }
4356
4357 if (bb->index < NUM_FIXED_BLOCKS
4358 || current_sched_info->region_head_or_leaf_p (bb, 1))
4359 {
4360 glat_end[bb->index] = ALLOC_REG_SET (&reg_obstack);
4361 COPY_REG_SET (glat_end[bb->index], bb->il.rtl->global_live_at_end);
4362 }
4363#endif
4364 }
4365 else
4366 {
4367 gcc_assert (!glat_end[bb->index]);
4368
4369 bb->il.rtl->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
4370 bb->il.rtl->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
4371 }
4372}
4373
4374/* Free GLAT information. */
4375static void
4376free_glat (void)
4377{
4378#ifdef ENABLE_CHECKING
4379 if (current_sched_info->flags & DETACH_LIFE_INFO)
4380 {
4381 basic_block bb;
4382
4383 FOR_ALL_BB (bb)
4384 {
4385 if (glat_start[bb->index])
4386 FREE_REG_SET (glat_start[bb->index]);
4387 if (glat_end[bb->index])
4388 FREE_REG_SET (glat_end[bb->index]);
4389 }
4390 }
4391#endif
4392
4393 free (glat_start);
4394 free (glat_end);
4395}
4396
4397/* Remove INSN from the instruction stream.
4398   INSN should not have any dependencies.  */
4399static void
4400sched_remove_insn (rtx insn)
4401{
4402 change_queue_index (insn, QUEUE_NOWHERE);
4403 current_sched_info->add_remove_insn (insn, 1);
4404 remove_insn (insn);
4405}
4406
4407/* Clear priorities of all instructions that are
4408   forward dependent on INSN.  */
4409static void
4410clear_priorities (rtx insn)
4411{
4412 rtx link;
4413
4414 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4415 {
4416 rtx pro;
4417
4418 pro = XEXP (link, 0);
4419 if (INSN_PRIORITY_KNOWN (pro))
4420 {
4421 INSN_PRIORITY_KNOWN (pro) = 0;
4422 clear_priorities (pro);
4423 }
4424 }
4425}
4426
4427/* Recompute priorities of instructions whose priorities might have been
4428   changed due to changes in INSN.  */
4429static void
4430calc_priorities (rtx insn)
4431{
4432 rtx link;
4433
4434 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4435 {
4436 rtx pro;
4437
4438 pro = XEXP (link, 0);
4439 if (!INSN_PRIORITY_KNOWN (pro))
4440 {
4441 priority (pro);
4442 calc_priorities (pro);
4443 }
4444 }
4445}
4446
4447
4448/* Add dependences between JUMP and other instructions in the recovery
4449   block.  INSN is the first insn in the recovery block.  */
4450static void
4451add_jump_dependencies (rtx insn, rtx jump)
4452{
4453 do
4454 {
4455 insn = NEXT_INSN (insn);
4456 if (insn == jump)
4457 break;
4458
4459 if (!INSN_DEPEND (insn))
4460 add_back_forw_dep (jump, insn, REG_DEP_ANTI, DEP_ANTI);
4461 }
4462 while (1);
4463 gcc_assert (LOG_LINKS (jump));
4464}
4465
4466/* Return the NOTE_INSN_BASIC_BLOCK of BB. */
4467static rtx
4468bb_note (basic_block bb)
4469{
4470 rtx note;
4471
4472 note = BB_HEAD (bb);
4473 if (LABEL_P (note))
4474 note = NEXT_INSN (note);
4475
4476 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4477 return note;
4478}
4479
4480#ifdef ENABLE_CHECKING
4481extern void debug_spec_status (ds_t);
4482
4483/* Dump information about the dependence status S. */
4484void
4485debug_spec_status (ds_t s)
4486{
4487 FILE *f = stderr;
4488
4489 if (s & BEGIN_DATA)
4490 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak (s, BEGIN_DATA));
4491 if (s & BE_IN_DATA)
4492 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak (s, BE_IN_DATA));
4493 if (s & BEGIN_CONTROL)
4494 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak (s, BEGIN_CONTROL));
4495 if (s & BE_IN_CONTROL)
4496 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak (s, BE_IN_CONTROL));
4497
4498 if (s & HARD_DEP)
4499 fprintf (f, "HARD_DEP; ");
4500
4501 if (s & DEP_TRUE)
4502 fprintf (f, "DEP_TRUE; ");
4503 if (s & DEP_ANTI)
4504 fprintf (f, "DEP_ANTI; ");
4505 if (s & DEP_OUTPUT)
4506 fprintf (f, "DEP_OUTPUT; ");
4507
4508 fprintf (f, "\n");
4509}
4510
4511/* Helper function for check_cfg.
4512   Return nonzero if the edge vector pointed to by EL has an edge with
4513   TYPE in its flags.  */
4514static int
4515has_edge_p (VEC(edge,gc) *el, int type)
4516{
4517 edge e;
4518 edge_iterator ei;
4519
4520 FOR_EACH_EDGE (e, ei, el)
4521 if (e->flags & type)
4522 return 1;
4523 return 0;
4524}
4525
4526/* Check a few properties of the CFG between HEAD and TAIL.
4527   If HEAD (TAIL) is NULL, check from the beginning (to the end) of the
4528   instruction stream.  */
4529static void
4530check_cfg (rtx head, rtx tail)
4531{
4532 rtx next_tail;
4533 basic_block bb = 0;
4534 int not_first = 0, not_last;
4535
4536 if (head == NULL)
4537 head = get_insns ();
4538 if (tail == NULL)
4539 tail = get_last_insn ();
4540 next_tail = NEXT_INSN (tail);
4541
4542 do
4543 {
4544 not_last = head != tail;
4545
4546 if (not_first)
4547 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
4548 if (not_last)
4549 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
4550
4551 if (LABEL_P (head)
4552 || (NOTE_INSN_BASIC_BLOCK_P (head)
4553 && (!not_first
4554 || (not_first && !LABEL_P (PREV_INSN (head))))))
4555 {
4556 gcc_assert (bb == 0);
4557 bb = BLOCK_FOR_INSN (head);
4558 if (bb != 0)
4559 gcc_assert (BB_HEAD (bb) == head);
4560 else
4561	    /* This is the case of a jump table.  See inside_basic_block_p ().  */
4562 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
4563 }
4564
4565 if (bb == 0)
4566 {
4567 gcc_assert (!inside_basic_block_p (head));
4568 head = NEXT_INSN (head);
4569 }
4570 else
4571 {
4572 gcc_assert (inside_basic_block_p (head)
4573 || NOTE_P (head));
4574 gcc_assert (BLOCK_FOR_INSN (head) == bb);
4575
4576 if (LABEL_P (head))
4577 {
4578 head = NEXT_INSN (head);
4579 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
4580 }
4581 else
4582 {
4583 if (control_flow_insn_p (head))
4584 {
4585 gcc_assert (BB_END (bb) == head);
4586
4587 if (any_uncondjump_p (head))
4588 gcc_assert (EDGE_COUNT (bb->succs) == 1
4589 && BARRIER_P (NEXT_INSN (head)));
4590 else if (any_condjump_p (head))
4591 gcc_assert (EDGE_COUNT (bb->succs) > 1
4592 && !BARRIER_P (NEXT_INSN (head)));
4593 }
4594 if (BB_END (bb) == head)
4595 {
4596 if (EDGE_COUNT (bb->succs) > 1)
4597 gcc_assert (control_flow_insn_p (head)
4598 || has_edge_p (bb->succs, EDGE_COMPLEX));
4599 bb = 0;
4600 }
4601
4602 head = NEXT_INSN (head);
4603 }
4604 }
4605
4606 not_first = 1;
4607 }
4608 while (head != next_tail);
4609
4610 gcc_assert (bb == 0);
4611}
4612
4613/* Perform a few consistency checks of flags in different data structures.  */
4614static void
4615check_sched_flags (void)
4616{
4617 unsigned int f = current_sched_info->flags;
4618
4619 if (flag_sched_stalled_insns)
4620 gcc_assert (!(f & DO_SPECULATION));
4621 if (f & DO_SPECULATION)
4622 gcc_assert (!flag_sched_stalled_insns
4623 && (f & DETACH_LIFE_INFO)
4624 && spec_info
4625 && spec_info->mask);
4626 if (f & DETACH_LIFE_INFO)
4627 gcc_assert (f & USE_GLAT);
4628}
4629
4630/* Check global_live_at_{start, end} regsets.
4631   If FATAL_P is TRUE, then abort execution at the first failure.
4632   Otherwise, print diagnostics to STDERR (this mode is for calling
4633   from the debugger).  */
4634void
4635check_reg_live (bool fatal_p)
4636{
4637 basic_block bb;
4638
4639 FOR_ALL_BB (bb)
4640 {
4641 int i;
4642
4643 i = bb->index;
4644
4645 if (glat_start[i])
4646 {
4647 bool b = bitmap_equal_p (bb->il.rtl->global_live_at_start,
4648 glat_start[i]);
4649
4650 if (!b)
4651 {
4652 gcc_assert (!fatal_p);
4653
4654 fprintf (stderr, ";; check_reg_live_at_start (%d) failed.\n", i);
4655 }
4656 }
4657
4658      if (glat_end[i])
4659 {
4660 bool b = bitmap_equal_p (bb->il.rtl->global_live_at_end,
4661 glat_end[i]);
4662
4663 if (!b)
4664 {
4665 gcc_assert (!fatal_p);
4666
4667 fprintf (stderr, ";; check_reg_live_at_end (%d) failed.\n", i);
4668 }
4669 }
4670 }
4671}
4672#endif /* ENABLE_CHECKING */
4673
4674#endif /* INSN_SCHEDULING */