1 /* Instruction scheduling pass.
2 Copyright (C) 1992 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com)
4 Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)
6 This file is part of GNU CC.
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 GNU CC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GNU CC; see the file COPYING. If not, write to
20 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
22 /* Instruction scheduling pass.
24 This pass implements list scheduling within basic blocks. It is
25 run after flow analysis, but before register allocation. The
26 scheduler works as follows:
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe: register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning
39 values to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we also add it to the ready list. When all insns down
50 to the lowest priority have been scheduled, the critical path of the
51 basic block has been made as short as possible. The remaining insns
52 are then scheduled in remaining slots.
54 The following list shows the order in which we want to break ties:
56 1. choose insn with lowest conflict cost, ties broken by
57 2. choose insn with the longest path to end of bb, ties broken by
58 3. choose insn that kills the most registers, ties broken by
59 4. choose insn that conflicts with the most ready insns, or finally
60 5. choose insn with lowest UID.
62 Memory references complicate matters. Only if we can be certain
63 that memory references are not part of the data dependency graph
64 (via true, anti, or output dependence), can we move operations past
65 memory references. To first approximation, reads can be done
66 independently, while writes introduce dependencies. Better
67 approximations will yield fewer dependencies.
69 Dependencies set up by memory references are treated in exactly the
70 same way as other dependencies, by using LOG_LINKS.
72 Having optimized the critical path, we may have also unduly
73 extended the lifetimes of some registers. If an operation requires
74 that constants be loaded into registers, it is certainly desirable
75 to load those constants as early as necessary, but no earlier.
76 I.e., it will not do to load up a bunch of registers at the
77 beginning of a basic block only to use them at the end, if they
78 could be loaded later, since this may result in excessive register
81 Note that since branches are never in basic blocks, but only end
82 basic blocks, this pass will not do any branch scheduling. But
83 that is ok, since we can use GNU's delayed branch scheduling
84 pass to take care of this case.
86 Also note that no further optimizations based on algebraic identities
87 are performed, so this pass would be a good one to perform instruction
88 splitting, such as breaking up a multiply instruction into shifts
89 and adds where that is profitable.
91 Given the memory aliasing analysis that this pass should perform,
92 it should be possible to remove redundant stores to memory, and to
93 load values from registers instead of hitting memory.
95 This pass must update information that subsequent passes expect to be
96 correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
97 reg_n_calls_crossed, and reg_live_length. Also, basic_block_head,
100 The information in the line number notes is carefully retained by this
101 pass. All other NOTE insns are grouped in their same relative order at
102 the beginning of basic blocks that have been scheduled. */
107 #include "basic-block.h"
109 #include "hard-reg-set.h"
111 #include "insn-config.h"
112 #include "insn-attr.h"
114 /* Arrays set up by scheduling for the same respective purposes as
115 similar-named arrays set up by flow analysis. We work with these
116 arrays during the scheduling pass so we can compare values against
119 Values of these arrays are copied at the end of this pass into the
120 arrays set up by flow analysis. */
121 static short *sched_reg_n_deaths
;
122 static int *sched_reg_n_calls_crossed
;
123 static int *sched_reg_live_length
;
125 /* Element N is the next insn that sets (hard or pseudo) register
126 N within the current basic block; or zero, if there is no
127 such insn. Needed for new registers which may be introduced
128 by splitting insns. */
129 static rtx
*reg_last_uses
;
130 static rtx
*reg_last_sets
;
132 /* Vector indexed by INSN_UID giving the original ordering of the insns. */
133 static int *insn_luid
;
134 #define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])
136 /* Vector indexed by INSN_UID giving each instruction a priority. */
137 static int *insn_priority
;
138 #define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])
140 #define DONE_PRIORITY -1
141 #define MAX_PRIORITY 0x7fffffff
142 #define TAIL_PRIORITY 0x7ffffffe
143 #define LAUNCH_PRIORITY 0x7f000001
144 #define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
145 #define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)
147 /* Vector indexed by INSN_UID giving number of insns referring to this insn. */
148 static int *insn_ref_count
;
149 #define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])
151 /* Vector indexed by INSN_UID giving line-number note in effect for each
152 insn. For line-number notes, this indicates whether the note may be
154 static rtx
*line_note
;
155 #define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])
157 /* Vector indexed by basic block number giving the starting line-number
158 for each basic block. */
159 static rtx
*line_note_head
;
161 /* List of important notes we must keep around. This is a pointer to the
162 last element in the list. */
163 static rtx note_list
;
165 /* Regsets telling whether a given register is live or dead before the last
166 scheduled insn. Must scan the instructions once before scheduling to
167 determine what registers are live or dead at the end of the block. */
168 static regset bb_dead_regs
;
169 static regset bb_live_regs
;
171 /* Regset telling whether a given register is live after the insn currently
172 being scheduled. Before processing an insn, this is equal to bb_live_regs
173 above. This is used so that we can find registers that are newly born/dead
174 after processing an insn. */
175 static regset old_live_regs
;
177 /* The chain of REG_DEAD notes. REG_DEAD notes are removed from all insns
178 during the initial scan and reused later. If there are not exactly as
179 many REG_DEAD notes in the post scheduled code as there were in the
180 prescheduled code then we trigger an abort because this indicates a bug. */
181 static rtx dead_notes
;
185 /* An instruction is ready to be scheduled when all insns following it
186 have already been scheduled. It is important to ensure that all
187 insns which use its result will not be executed until its result
188 has been computed. We maintain three lists (conceptually):
190 (1) a "Ready" list of unscheduled, uncommitted insns
191 (2) a "Scheduled" list of scheduled insns
192 (3) a "Pending" list of insns which can be scheduled, but
195 Insns move from the "Ready" list to the "Pending" list when
196 all insns following them have been scheduled.
198 Insns move from the "Pending" list to the "Scheduled" list
199 when there is sufficient space in the pipeline to prevent
200 stalls between the insn and scheduled insns which use it.
202 The "Pending" list acts as a buffer to prevent insns
205 The "Ready" list is implemented by the variable `ready'.
206 The "Pending" list are the insns in the LOG_LINKS of ready insns.
207 The "Scheduled" list is the new insn chain built by this pass. */
209 /* Implement a circular buffer from which instructions are issued. */
211 static rtx insn_queue
[Q_SIZE
];
212 static int q_ptr
= 0;
213 static int q_size
= 0;
214 #define NEXT_Q(X) (((X)+1) & (Q_SIZE-1))
215 #define NEXT_Q_AFTER(X,C) (((X)+C) & (Q_SIZE-1))
217 /* Forward declarations. */
218 static void sched_analyze_2 ();
219 static void schedule_block ();
221 /* Main entry point of this file. */
222 void schedule_insns ();
224 #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
226 /* Vector indexed by N giving the initial (unchanging) value known
227 for pseudo-register N. */
228 static rtx
*reg_known_value
;
230 /* Indicates number of valid entries in reg_known_value. */
231 static int reg_known_value_size
;
237 if (GET_CODE (x
) == REG
&& REGNO (x
) >= FIRST_PSEUDO_REGISTER
238 && REGNO (x
) <= reg_known_value_size
)
239 return reg_known_value
[REGNO (x
)];
240 else if (GET_CODE (x
) == PLUS
)
242 rtx x0
= canon_rtx (XEXP (x
, 0));
243 rtx x1
= canon_rtx (XEXP (x
, 1));
245 if (x0
!= XEXP (x
, 0) || x1
!= XEXP (x
, 1))
247 /* We can tolerate LO_SUMs being offset here; these
248 rtl are used for nothing other than comparisons. */
249 if (GET_CODE (x0
) == CONST_INT
)
250 return plus_constant_for_output (x1
, INTVAL (x0
));
251 else if (GET_CODE (x1
) == CONST_INT
)
252 return plus_constant_for_output (x0
, INTVAL (x1
));
253 return gen_rtx (PLUS
, GET_MODE (x
), x0
, x1
);
259 /* Set up all info needed to perform alias analysis on memory references. */
262 init_alias_analysis ()
264 int maxreg
= max_reg_num ();
269 reg_known_value_size
= maxreg
;
272 = (rtx
*) oballoc ((maxreg
-FIRST_PSEUDO_REGISTER
) * sizeof (rtx
))
273 - FIRST_PSEUDO_REGISTER
;
274 bzero (reg_known_value
+FIRST_PSEUDO_REGISTER
,
275 (maxreg
-FIRST_PSEUDO_REGISTER
) * sizeof (rtx
));
277 /* Fill in the entries with known constant values. */
278 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
279 if ((set
= single_set (insn
)) != 0
280 && GET_CODE (SET_DEST (set
)) == REG
281 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
282 && (((note
= find_reg_note (insn
, REG_EQUAL
, 0)) != 0
283 && reg_n_sets
[REGNO (SET_DEST (set
))] == 1)
284 || (note
= find_reg_note (insn
, REG_EQUIV
, 0)) != 0)
285 && GET_CODE (XEXP (note
, 0)) != EXPR_LIST
)
286 reg_known_value
[REGNO (SET_DEST (set
))] = XEXP (note
, 0);
288 /* Fill in the remaining entries. */
289 while (--maxreg
>= FIRST_PSEUDO_REGISTER
)
290 if (reg_known_value
[maxreg
] == 0)
291 reg_known_value
[maxreg
] = regno_reg_rtx
[maxreg
];
294 /* Return 1 if X and Y are identical-looking rtx's.
296 We use the data in reg_known_value above to see if two registers with
297 different numbers are, in fact, equivalent. */
300 rtx_equal_for_memref_p (x
, y
)
305 register enum rtx_code code
;
308 if (x
== 0 && y
== 0)
310 if (x
== 0 || y
== 0)
319 /* Rtx's of different codes cannot be equal. */
320 if (code
!= GET_CODE (y
))
323 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
324 (REG:SI x) and (REG:HI x) are NOT equivalent. */
326 if (GET_MODE (x
) != GET_MODE (y
))
329 /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively. */
332 return REGNO (x
) == REGNO (y
);
333 if (code
== LABEL_REF
)
334 return XEXP (x
, 0) == XEXP (y
, 0);
335 if (code
== SYMBOL_REF
)
336 return XSTR (x
, 0) == XSTR (y
, 0);
338 /* Compare the elements. If any pair of corresponding elements
339 fail to match, return 0 for the whole things. */
341 fmt
= GET_RTX_FORMAT (code
);
342 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
348 if (XINT (x
, i
) != XINT (y
, i
))
354 /* Two vectors must have the same length. */
355 if (XVECLEN (x
, i
) != XVECLEN (y
, i
))
358 /* And the corresponding elements must match. */
359 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
360 if (rtx_equal_for_memref_p (XVECEXP (x
, i
, j
), XVECEXP (y
, i
, j
)) == 0)
365 if (rtx_equal_for_memref_p (XEXP (x
, i
), XEXP (y
, i
)) == 0)
371 if (strcmp (XSTR (x
, i
), XSTR (y
, i
)))
376 /* These are just backpointers, so they don't matter. */
382 /* It is believed that rtx's at this level will never
383 contain anything but integers and other rtx's,
384 except for within LABEL_REFs and SYMBOL_REFs. */
392 /* Given an rtx X, find a SYMBOL_REF or LABEL_REF within
393 X and return it, or return 0 if none found. */
396 find_symbolic_term (x
)
400 register enum rtx_code code
;
404 if (code
== SYMBOL_REF
|| code
== LABEL_REF
)
406 if (GET_RTX_CLASS (code
) == 'o')
409 fmt
= GET_RTX_FORMAT (code
);
410 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
416 t
= find_symbolic_term (XEXP (x
, i
));
420 else if (fmt
[i
] == 'E')
426 /* Return nonzero if X and Y (memory addresses) could reference the
427 same location in memory. C is an offset accumulator. When
428 C is nonzero, we are testing aliases between X and Y + C.
429 XSIZE is the size in bytes of the X reference,
430 similarly YSIZE is the size in bytes for Y.
432 If XSIZE or YSIZE is zero, we do not know the amount of memory being
433 referenced (the reference was BLKmode), so make the most pessimistic
436 We recognize the following cases of non-conflicting memory:
438 (1) addresses involving the frame pointer cannot conflict
439 with addresses involving static variables.
440 (2) static variables with different addresses cannot conflict.
442 Nice to notice that varying addresses cannot conflict with fp if no
443 local variables had their addresses taken, but that's too hard now. */
446 memrefs_conflict_p (xsize
, x
, ysize
, y
, c
)
451 if (GET_CODE (x
) == HIGH
)
453 else if (GET_CODE (x
) == LO_SUM
)
457 if (GET_CODE (y
) == HIGH
)
459 else if (GET_CODE (y
) == LO_SUM
)
464 if (rtx_equal_for_memref_p (x
, y
))
465 return (xsize
== 0 || ysize
== 0 ||
466 (c
>= 0 && xsize
> c
) || (c
< 0 && ysize
+c
> 0));
468 if (y
== frame_pointer_rtx
|| y
== stack_pointer_rtx
)
472 y
= x
; ysize
= xsize
;
473 x
= t
; xsize
= tsize
;
476 if (x
== frame_pointer_rtx
|| x
== stack_pointer_rtx
)
483 if (GET_CODE (y
) == PLUS
484 && canon_rtx (XEXP (y
, 0)) == x
485 && (y1
= canon_rtx (XEXP (y
, 1)))
486 && GET_CODE (y1
) == CONST_INT
)
489 return (xsize
== 0 || ysize
== 0
490 || (c
>= 0 && xsize
> c
) || (c
< 0 && ysize
+c
> 0));
493 if (GET_CODE (y
) == PLUS
494 && (y1
= canon_rtx (XEXP (y
, 0)))
501 if (GET_CODE (x
) == PLUS
)
503 /* The fact that X is canonnicallized means that this
504 PLUS rtx is canonnicallized. */
505 rtx x0
= XEXP (x
, 0);
506 rtx x1
= XEXP (x
, 1);
508 if (GET_CODE (y
) == PLUS
)
510 /* The fact that Y is canonnicallized means that this
511 PLUS rtx is canonnicallized. */
512 rtx y0
= XEXP (y
, 0);
513 rtx y1
= XEXP (y
, 1);
515 if (rtx_equal_for_memref_p (x1
, y1
))
516 return memrefs_conflict_p (xsize
, x0
, ysize
, y0
, c
);
517 if (rtx_equal_for_memref_p (x0
, y0
))
518 return memrefs_conflict_p (xsize
, x1
, ysize
, y1
, c
);
519 if (GET_CODE (x1
) == CONST_INT
)
520 if (GET_CODE (y1
) == CONST_INT
)
521 return memrefs_conflict_p (xsize
, x0
, ysize
, y0
,
522 c
- INTVAL (x1
) + INTVAL (y1
));
524 return memrefs_conflict_p (xsize
, x0
, ysize
, y
, c
- INTVAL (x1
));
525 else if (GET_CODE (y1
) == CONST_INT
)
526 return memrefs_conflict_p (xsize
, x
, ysize
, y0
, c
+ INTVAL (y1
));
528 /* Handle case where we cannot understand iteration operators,
529 but we notice that the base addresses are distinct objects. */
530 x
= find_symbolic_term (x
);
533 y
= find_symbolic_term (y
);
536 return rtx_equal_for_memref_p (x
, y
);
538 else if (GET_CODE (x1
) == CONST_INT
)
539 return memrefs_conflict_p (xsize
, x0
, ysize
, y
, c
- INTVAL (x1
));
541 else if (GET_CODE (y
) == PLUS
)
543 /* The fact that Y is canonnicallized means that this
544 PLUS rtx is canonnicallized. */
545 rtx y0
= XEXP (y
, 0);
546 rtx y1
= XEXP (y
, 1);
548 if (GET_CODE (y1
) == CONST_INT
)
549 return memrefs_conflict_p (xsize
, x
, ysize
, y0
, c
+ INTVAL (y1
));
554 if (GET_CODE (x
) == GET_CODE (y
))
555 switch (GET_CODE (x
))
559 /* Handle cases where we expect the second operands to be the
560 same, and check only whether the first operand would conflict
563 rtx x1
= canon_rtx (XEXP (x
, 1));
564 rtx y1
= canon_rtx (XEXP (y
, 1));
565 if (! rtx_equal_for_memref_p (x1
, y1
))
567 x0
= canon_rtx (XEXP (x
, 0));
568 y0
= canon_rtx (XEXP (y
, 0));
569 if (rtx_equal_for_memref_p (x0
, y0
))
570 return (xsize
== 0 || ysize
== 0
571 || (c
>= 0 && xsize
> c
) || (c
< 0 && ysize
+c
> 0));
573 /* Can't properly adjust our sizes. */
574 if (GET_CODE (x1
) != CONST_INT
)
576 xsize
/= INTVAL (x1
);
577 ysize
/= INTVAL (x1
);
579 return memrefs_conflict_p (xsize
, x0
, ysize
, y0
, c
);
585 if (GET_CODE (x
) == CONST_INT
&& GET_CODE (y
) == CONST_INT
)
587 c
+= (INTVAL (y
) - INTVAL (x
));
588 return (xsize
== 0 || ysize
== 0
589 || (c
>= 0 && xsize
> c
) || (c
< 0 && ysize
+c
> 0));
592 if (GET_CODE (x
) == CONST
)
594 if (GET_CODE (y
) == CONST
)
595 return memrefs_conflict_p (xsize
, canon_rtx (XEXP (x
, 0)),
596 ysize
, canon_rtx (XEXP (y
, 0)), c
);
598 return memrefs_conflict_p (xsize
, canon_rtx (XEXP (x
, 0)),
601 if (GET_CODE (y
) == CONST
)
602 return memrefs_conflict_p (xsize
, x
, ysize
,
603 canon_rtx (XEXP (y
, 0)), c
);
606 return (rtx_equal_for_memref_p (x
, y
)
607 && (xsize
== 0 || ysize
== 0
608 || (c
>= 0 && xsize
> c
) || (c
< 0 && ysize
+c
> 0)));
615 /* Functions to compute memory dependencies.
617 Since we process the insns in execution order, we can build tables
618 to keep track of what registers are fixed (and not aliased), what registers
619 are varying in known ways, and what registers are varying in unknown
622 If both memory references are volatile, then there must always be a
623 dependence between the two references, since their order can not be
624 changed. A volatile and non-volatile reference can be interchanged
627 A MEM_IN_STRUCT reference at a varying address can never conflict with a
628 non-MEM_IN_STRUCT reference at a fixed address. */
630 /* Read dependence: X is read after read in MEM takes place. There can
631 only be a dependence here if both reads are volatile. */
634 read_dependence (mem
, x
)
638 return MEM_VOLATILE_P (x
) && MEM_VOLATILE_P (mem
);
641 /* True dependence: X is read after store in MEM takes place. */
644 true_dependence (mem
, x
)
648 if (RTX_UNCHANGING_P (x
))
651 return ((MEM_VOLATILE_P (x
) && MEM_VOLATILE_P (mem
))
652 || (memrefs_conflict_p (SIZE_FOR_MODE (mem
), XEXP (mem
, 0),
653 SIZE_FOR_MODE (x
), XEXP (x
, 0), 0)
654 && ! (MEM_IN_STRUCT_P (mem
) && rtx_addr_varies_p (mem
)
655 && ! MEM_IN_STRUCT_P (x
) && ! rtx_addr_varies_p (x
))
656 && ! (MEM_IN_STRUCT_P (x
) && rtx_addr_varies_p (x
)
657 && ! MEM_IN_STRUCT_P (mem
) && ! rtx_addr_varies_p (mem
))));
660 /* Anti dependence: X is written after read in MEM takes place. */
663 anti_dependence (mem
, x
)
667 if (RTX_UNCHANGING_P (mem
))
670 return ((MEM_VOLATILE_P (x
) && MEM_VOLATILE_P (mem
))
671 || (memrefs_conflict_p (SIZE_FOR_MODE (mem
), XEXP (mem
, 0),
672 SIZE_FOR_MODE (x
), XEXP (x
, 0), 0)
673 && ! (MEM_IN_STRUCT_P (mem
) && rtx_addr_varies_p (mem
)
674 && ! MEM_IN_STRUCT_P (x
) && ! rtx_addr_varies_p (x
))
675 && ! (MEM_IN_STRUCT_P (x
) && rtx_addr_varies_p (x
)
676 && ! MEM_IN_STRUCT_P (mem
) && ! rtx_addr_varies_p (mem
))));
679 /* Output dependence: X is written after store in MEM takes place. */
682 output_dependence (mem
, x
)
686 return ((MEM_VOLATILE_P (x
) && MEM_VOLATILE_P (mem
))
687 || (memrefs_conflict_p (SIZE_FOR_MODE (mem
), XEXP (mem
, 0),
688 SIZE_FOR_MODE (x
), XEXP (x
, 0), 0)
689 && ! (MEM_IN_STRUCT_P (mem
) && rtx_addr_varies_p (mem
)
690 && ! MEM_IN_STRUCT_P (x
) && ! rtx_addr_varies_p (x
))
691 && ! (MEM_IN_STRUCT_P (x
) && rtx_addr_varies_p (x
)
692 && ! MEM_IN_STRUCT_P (mem
) && ! rtx_addr_varies_p (mem
))));
695 #ifndef INSN_SCHEDULING
696 void schedule_insns () {}
702 /* Computation of memory dependencies. */
704 /* The *_insns and *_mems are paired lists. Each pending memory operation
705 will have a pointer to the MEM rtx on one list and a pointer to the
706 containing insn on the other list in the same place in the list. */
708 /* We can't use add_dependence like the old code did, because a single insn
709 may have multiple memory accesses, and hence needs to be on the list
710 once for each memory access. Add_dependence won't let you add an insn
711 to a list more than once. */
713 /* An INSN_LIST containing all insns with pending read operations. */
714 static rtx pending_read_insns
;
716 /* An EXPR_LIST containing all MEM rtx's which are pending reads. */
717 static rtx pending_read_mems
;
719 /* An INSN_LIST containing all insns with pending write operations. */
720 static rtx pending_write_insns
;
722 /* An EXPR_LIST containing all MEM rtx's which are pending writes. */
723 static rtx pending_write_mems
;
725 /* Indicates the combined length of the two pending lists. We must prevent
726 these lists from ever growing too large since the number of dependencies
727 produced is at least O(N*N), and execution time is at least O(4*N*N), as
728 a function of the length of these pending lists. */
730 static int pending_lists_length
;
732 /* An INSN_LIST containing all INSN_LISTs allocated but currently unused. */
734 static rtx unused_insn_list
;
736 /* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused. */
738 static rtx unused_expr_list
;
740 /* The last insn upon which all memory references must depend.
741 This is an insn which flushed the pending lists, creating a dependency
742 between it and all previously pending memory references. This creates
743 a barrier (or a checkpoint) which no memory reference is allowed to cross.
745 This includes all non constant CALL_INSNs. When we do interprocedural
746 alias analysis, this restriction can be relaxed.
747 This may also be an INSN that writes memory if the pending lists grow
750 static rtx last_pending_memory_flush
;
752 /* The last function call we have seen. All hard regs, and, of course,
753 the last function call, must depend on this. */
755 static rtx last_function_call
;
757 /* The LOG_LINKS field of this is a list of insns which use a pseudo register
758 that does not already cross a call. We create dependencies between each
759 of those insn and the next call insn, to ensure that they won't cross a call
760 after scheduling is done. */
762 static rtx sched_before_next_call
;
764 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
765 so that insns independent of the last scheduled insn will be preferred
766 over dependent instructions. */
768 static rtx last_scheduled_insn
;
770 /* Process an insn's memory dependencies. There are four kinds of
773 (0) read dependence: read follows read
774 (1) true dependence: read follows write
775 (2) anti dependence: write follows read
776 (3) output dependence: write follows write
778 We are careful to build only dependencies which actually exist, and
779 use transitivity to avoid building too many links. */
781 /* Return the INSN_LIST containing INSN in LIST, or NULL
782 if LIST does not contain INSN. */
785 find_insn_list (insn
, list
)
791 if (XEXP (list
, 0) == insn
)
793 list
= XEXP (list
, 1);
798 /* Compute cost of executing INSN. This is the number of virtual
799 cycles taken between instruction issue and instruction results. */
807 recog_memoized (insn
);
809 /* A USE insn, or something else we don't need to understand.
810 We can't pass these directly to result_ready_cost because it will trigger
811 a fatal error for unrecognizable insns. */
812 if (INSN_CODE (insn
) < 0)
816 cost
= result_ready_cost (insn
);
825 /* Compute the priority number for INSN. */
831 if (insn
&& GET_RTX_CLASS (GET_CODE (insn
)) == 'i')
835 int this_priority
= INSN_PRIORITY (insn
);
838 if (this_priority
> 0)
839 return this_priority
;
843 /* Nonzero if these insns must be scheduled together. */
844 if (SCHED_GROUP_P (insn
))
847 while (SCHED_GROUP_P (prev
))
849 prev
= PREV_INSN (prev
);
850 INSN_REF_COUNT (prev
) += 1;
854 for (prev
= LOG_LINKS (insn
); prev
; prev
= XEXP (prev
, 1))
856 rtx x
= XEXP (prev
, 0);
858 /* A dependence pointing to a note is always obsolete, because
859 sched_analyze_insn will have created any necessary new dependences
860 which replace it. Notes can be created when instructions are
861 deleted by insn splitting, or by register allocation. */
862 if (GET_CODE (x
) == NOTE
)
864 remove_dependence (insn
, x
);
868 /* This priority calculation was chosen because it results in the
869 least instruction movement, and does not hurt the performance
870 of the resulting code compared to the old algorithm.
871 This makes the sched algorithm more stable, which results
872 in better code, because there is less register pressure,
873 cross jumping is more likely to work, and debugging is easier.
875 When all instructions have a latency of 1, there is no need to
876 move any instructions. Subtracting one here ensures that in such
877 cases all instructions will end up with a priority of one, and
878 hence no scheduling will be done.
880 The original code did not subtract the one, and added the
881 insn_cost of the current instruction to its priority (e.g.
882 move the insn_cost call down to the end). */
884 if (REG_NOTE_KIND (prev
) == 0)
885 /* Data dependence. */
886 prev_priority
= priority (x
) + insn_cost (x
) - 1;
888 /* Anti or output dependence. Don't add the latency of this
889 insn's result, because it isn't being used. */
890 prev_priority
= priority (x
);
892 if (prev_priority
> max_priority
)
893 max_priority
= prev_priority
;
894 INSN_REF_COUNT (x
) += 1;
897 INSN_PRIORITY (insn
) = max_priority
;
898 return INSN_PRIORITY (insn
);
903 /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
904 them to the unused_*_list variables, so that they can be reused. */
907 free_pending_lists ()
909 register rtx link
, prev_link
;
911 if (pending_read_insns
)
913 prev_link
= pending_read_insns
;
914 link
= XEXP (prev_link
, 1);
919 link
= XEXP (link
, 1);
922 XEXP (prev_link
, 1) = unused_insn_list
;
923 unused_insn_list
= pending_read_insns
;
924 pending_read_insns
= 0;
927 if (pending_write_insns
)
929 prev_link
= pending_write_insns
;
930 link
= XEXP (prev_link
, 1);
935 link
= XEXP (link
, 1);
938 XEXP (prev_link
, 1) = unused_insn_list
;
939 unused_insn_list
= pending_write_insns
;
940 pending_write_insns
= 0;
943 if (pending_read_mems
)
945 prev_link
= pending_read_mems
;
946 link
= XEXP (prev_link
, 1);
951 link
= XEXP (link
, 1);
954 XEXP (prev_link
, 1) = unused_expr_list
;
955 unused_expr_list
= pending_read_mems
;
956 pending_read_mems
= 0;
959 if (pending_write_mems
)
961 prev_link
= pending_write_mems
;
962 link
= XEXP (prev_link
, 1);
967 link
= XEXP (link
, 1);
970 XEXP (prev_link
, 1) = unused_expr_list
;
971 unused_expr_list
= pending_write_mems
;
972 pending_write_mems
= 0;
976 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
977 The MEM is a memory reference contained within INSN, which we are saving
978 so that we can do memory aliasing on it. */
981 add_insn_mem_dependence (insn_list
, mem_list
, insn
, mem
)
982 rtx
*insn_list
, *mem_list
, insn
, mem
;
986 if (unused_insn_list
)
988 link
= unused_insn_list
;
989 unused_insn_list
= XEXP (link
, 1);
992 link
= rtx_alloc (INSN_LIST
);
993 XEXP (link
, 0) = insn
;
994 XEXP (link
, 1) = *insn_list
;
997 if (unused_expr_list
)
999 link
= unused_expr_list
;
1000 unused_expr_list
= XEXP (link
, 1);
1003 link
= rtx_alloc (EXPR_LIST
);
1004 XEXP (link
, 0) = mem
;
1005 XEXP (link
, 1) = *mem_list
;
1008 pending_lists_length
++;
1011 /* Make a dependency between every memory reference on the pending lists
1012 and INSN, thus flushing the pending lists. */
1015 flush_pending_lists (insn
)
1020 while (pending_read_insns
)
1022 add_dependence (insn
, XEXP (pending_read_insns
, 0), REG_DEP_ANTI
);
1024 link
= pending_read_insns
;
1025 pending_read_insns
= XEXP (pending_read_insns
, 1);
1026 XEXP (link
, 1) = unused_insn_list
;
1027 unused_insn_list
= link
;
1029 link
= pending_read_mems
;
1030 pending_read_mems
= XEXP (pending_read_mems
, 1);
1031 XEXP (link
, 1) = unused_expr_list
;
1032 unused_expr_list
= link
;
1034 while (pending_write_insns
)
1036 add_dependence (insn
, XEXP (pending_write_insns
, 0), REG_DEP_ANTI
);
1038 link
= pending_write_insns
;
1039 pending_write_insns
= XEXP (pending_write_insns
, 1);
1040 XEXP (link
, 1) = unused_insn_list
;
1041 unused_insn_list
= link
;
1043 link
= pending_write_mems
;
1044 pending_write_mems
= XEXP (pending_write_mems
, 1);
1045 XEXP (link
, 1) = unused_expr_list
;
1046 unused_expr_list
= link
;
1048 pending_lists_length
= 0;
1050 if (last_pending_memory_flush
)
1051 add_dependence (insn
, last_pending_memory_flush
, REG_DEP_ANTI
);
1053 last_pending_memory_flush
= insn
;
1056 /* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
1057 by the write to the destination of X, and reads of everything mentioned. */
1060 sched_analyze_1 (x
, insn
)
1065 register rtx dest
= SET_DEST (x
);
1070 while (GET_CODE (dest
) == STRICT_LOW_PART
|| GET_CODE (dest
) == SUBREG
1071 || GET_CODE (dest
) == ZERO_EXTRACT
|| GET_CODE (dest
) == SIGN_EXTRACT
)
1073 if (GET_CODE (dest
) == ZERO_EXTRACT
|| GET_CODE (dest
) == SIGN_EXTRACT
)
1075 /* The second and third arguments are values read by this insn. */
1076 sched_analyze_2 (XEXP (dest
, 1), insn
);
1077 sched_analyze_2 (XEXP (dest
, 2), insn
);
1079 dest
= SUBREG_REG (dest
);
1082 if (GET_CODE (dest
) == REG
)
1084 register int offset
, bit
, i
;
1086 regno
= REGNO (dest
);
1088 /* A hard reg in a wide mode may really be multiple registers.
1089 If so, mark all of them just like the first. */
1090 if (regno
< FIRST_PSEUDO_REGISTER
)
1092 i
= HARD_REGNO_NREGS (regno
, GET_MODE (dest
));
1097 for (u
= reg_last_uses
[regno
+i
]; u
; u
= XEXP (u
, 1))
1098 add_dependence (insn
, XEXP (u
, 0), REG_DEP_ANTI
);
1099 reg_last_uses
[regno
+ i
] = 0;
1100 if (reg_last_sets
[regno
+ i
])
1101 add_dependence (insn
, reg_last_sets
[regno
+ i
],
1103 reg_last_sets
[regno
+ i
] = insn
;
1104 if ((call_used_regs
[i
] || global_regs
[i
])
1105 && last_function_call
)
1106 /* Function calls clobber all call_used regs. */
1107 add_dependence (insn
, last_function_call
, REG_DEP_ANTI
);
1114 for (u
= reg_last_uses
[regno
]; u
; u
= XEXP (u
, 1))
1115 add_dependence (insn
, XEXP (u
, 0), REG_DEP_ANTI
);
1116 reg_last_uses
[regno
] = 0;
1117 if (reg_last_sets
[regno
])
1118 add_dependence (insn
, reg_last_sets
[regno
], REG_DEP_OUTPUT
);
1119 reg_last_sets
[regno
] = insn
;
1121 /* Don't let it cross a call after scheduling if it doesn't
1122 already cross one. */
1123 if (reg_n_calls_crossed
[regno
] == 0 && last_function_call
)
1124 add_dependence (insn
, last_function_call
, REG_DEP_ANTI
);
1127 else if (GET_CODE (dest
) == MEM
)
1129 /* Writing memory. */
1131 if (pending_lists_length
> 32)
1133 /* Flush all pending reads and writes to prevent the pending lists
1134 from getting any larger. Insn scheduling runs too slowly when
1135 these lists get long. The number 32 was chosen because it
1136 seems like a resonable number. When compiling GCC with itself,
1137 this flush occurs 8 times for sparc, and 10 times for m88k using
1139 flush_pending_lists (insn
);
1143 rtx pending
, pending_mem
;
1145 pending
= pending_read_insns
;
1146 pending_mem
= pending_read_mems
;
1149 /* If a dependency already exists, don't create a new one. */
1150 if (! find_insn_list (XEXP (pending
, 0), LOG_LINKS (insn
)))
1151 if (anti_dependence (XEXP (pending_mem
, 0), dest
, insn
))
1152 add_dependence (insn
, XEXP (pending
, 0), REG_DEP_ANTI
);
1154 pending
= XEXP (pending
, 1);
1155 pending_mem
= XEXP (pending_mem
, 1);
1158 pending
= pending_write_insns
;
1159 pending_mem
= pending_write_mems
;
1162 /* If a dependency already exists, don't create a new one. */
1163 if (! find_insn_list (XEXP (pending
, 0), LOG_LINKS (insn
)))
1164 if (output_dependence (XEXP (pending_mem
, 0), dest
))
1165 add_dependence (insn
, XEXP (pending
, 0), REG_DEP_OUTPUT
);
1167 pending
= XEXP (pending
, 1);
1168 pending_mem
= XEXP (pending_mem
, 1);
1171 if (last_pending_memory_flush
)
1172 add_dependence (insn
, last_pending_memory_flush
, REG_DEP_ANTI
);
1174 add_insn_mem_dependence (&pending_write_insns
, &pending_write_mems
,
1177 sched_analyze_2 (XEXP (dest
, 0), insn
);
1180 /* Analyze reads. */
1181 if (GET_CODE (x
) == SET
)
1182 sched_analyze_2 (SET_SRC (x
), insn
);
1183 else if (GET_CODE (x
) != CLOBBER
)
1184 sched_analyze_2 (dest
, insn
);
1187 /* Analyze the uses of memory and registers in rtx X in INSN. */
1190 sched_analyze_2 (x
, insn
)
1196 register enum rtx_code code
;
1202 code
= GET_CODE (x
);
1204 /* Get rid of the easy cases first. */
1206 /* Ignore constants. Note that we must handle CONST_DOUBLE here
1207 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
1208 this does not mean that this insn is using cc0. */
1209 if (code
== CONST_INT
|| code
== CONST_DOUBLE
|| code
== SYMBOL_REF
1210 || code
== CONST
|| code
== LABEL_REF
)
1214 else if (code
== CC0
)
1218 /* User of CC0 depends on immediately preceding insn.
1219 All notes are removed from the list of insns to schedule before we
1220 reach here, so the previous insn must be the setter of cc0. */
1221 if (GET_CODE (PREV_INSN (insn
)) != INSN
)
1223 SCHED_GROUP_P (insn
) = 1;
1225 /* Make a copy of all dependencies on PREV_INSN, and add to this insn.
1226 This is so that all the dependencies will apply to the group. */
1228 for (link
= LOG_LINKS (PREV_INSN (insn
)); link
; link
= XEXP (link
, 1))
1229 add_dependence (insn
, XEXP (link
, 0), GET_MODE (link
));
1235 else if (code
== REG
)
1237 int regno
= REGNO (x
);
1238 if (regno
< FIRST_PSEUDO_REGISTER
)
1242 i
= HARD_REGNO_NREGS (regno
, GET_MODE (x
));
1245 reg_last_uses
[regno
+ i
]
1246 = gen_rtx (INSN_LIST
, VOIDmode
,
1247 insn
, reg_last_uses
[regno
+ i
]);
1248 if (reg_last_sets
[regno
+ i
])
1249 add_dependence (insn
, reg_last_sets
[regno
+ i
], 0);
1250 if ((call_used_regs
[regno
+ i
] || global_regs
[regno
+ i
])
1251 && last_function_call
)
1252 /* Function calls clobber all call_used regs. */
1253 add_dependence (insn
, last_function_call
, REG_DEP_ANTI
);
1258 reg_last_uses
[regno
]
1259 = gen_rtx (INSN_LIST
, VOIDmode
, insn
, reg_last_uses
[regno
]);
1260 if (reg_last_sets
[regno
])
1261 add_dependence (insn
, reg_last_sets
[regno
], 0);
1263 /* If the register does not already cross any calls, then add this
1264 insn to the sched_before_next_call list so that it will still
1265 not cross calls after scheduling. */
1266 if (reg_n_calls_crossed
[regno
] == 0)
1267 add_dependence (sched_before_next_call
, insn
, REG_DEP_ANTI
);
1272 /* The interesting case. */
1273 else if (code
== MEM
)
1275 /* Reading memory. */
1277 /* Don't create a dependence for memory references which are known to
1278 be unchanging, such as constant pool accesses. These will never
1279 conflict with any other memory access. */
1280 if (RTX_UNCHANGING_P (x
) == 0)
1282 rtx pending
, pending_mem
;
1284 pending
= pending_read_insns
;
1285 pending_mem
= pending_read_mems
;
1288 /* If a dependency already exists, don't create a new one. */
1289 if (! find_insn_list (XEXP (pending
, 0), LOG_LINKS (insn
)))
1290 if (read_dependence (XEXP (pending_mem
, 0), x
))
1291 add_dependence (insn
, XEXP (pending
, 0), REG_DEP_ANTI
);
1293 pending
= XEXP (pending
, 1);
1294 pending_mem
= XEXP (pending_mem
, 1);
1297 pending
= pending_write_insns
;
1298 pending_mem
= pending_write_mems
;
1301 /* If a dependency already exists, don't create a new one. */
1302 if (! find_insn_list (XEXP (pending
, 0), LOG_LINKS (insn
)))
1303 if (true_dependence (XEXP (pending_mem
, 0), x
))
1304 add_dependence (insn
, XEXP (pending
, 0), 0);
1306 pending
= XEXP (pending
, 1);
1307 pending_mem
= XEXP (pending_mem
, 1);
1309 if (last_pending_memory_flush
)
1310 add_dependence (insn
, last_pending_memory_flush
, REG_DEP_ANTI
);
1312 /* Always add these dependencies to pending_reads, since
1313 this insn may be followed by a write. */
1314 add_insn_mem_dependence (&pending_read_insns
, &pending_read_mems
,
1317 /* Take advantage of tail recursion here. */
1318 sched_analyze_2 (XEXP (x
, 0), insn
);
1322 else if (code
== ASM_OPERANDS
|| code
== ASM_INPUT
1323 || code
== UNSPEC_VOLATILE
)
1327 /* Traditional and volatile asm instructions must be considered to use
1328 and clobber all hard registers and all of memory. So must
1329 UNSPEC_VOLATILE operations. */
1330 if ((code
== ASM_OPERANDS
&& MEM_VOLATILE_P (x
)) || code
== ASM_INPUT
1331 || code
== UNSPEC_VOLATILE
)
1333 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
1335 for (u
= reg_last_uses
[i
]; u
; u
= XEXP (u
, 1))
1336 if (GET_CODE (PATTERN (XEXP (u
, 0))) != USE
)
1337 add_dependence (insn
, XEXP (u
, 0), REG_DEP_ANTI
);
1338 reg_last_uses
[i
] = 0;
1339 if (reg_last_sets
[i
]
1340 && GET_CODE (PATTERN (reg_last_sets
[i
])) != USE
)
1341 add_dependence (insn
, reg_last_sets
[i
], 0);
1342 reg_last_sets
[i
] = insn
;
1345 flush_pending_lists (insn
);
1348 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
1349 We can not just fall through here since then we would be confused
1350 by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
1351 traditional asms unlike their normal usage. */
1353 if (code
== ASM_OPERANDS
)
1355 for (j
= 0; j
< ASM_OPERANDS_INPUT_LENGTH (x
); j
++)
1356 sched_analyze_2 (ASM_OPERANDS_INPUT (x
, j
), insn
);
1361 /* Other cases: walk the insn. */
1362 fmt
= GET_RTX_FORMAT (code
);
1363 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1366 sched_analyze_2 (XEXP (x
, i
), insn
);
1367 else if (fmt
[i
] == 'E')
1368 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
1369 sched_analyze_2 (XVECEXP (x
, i
, j
), insn
);
1373 /* Analyze an INSN with pattern X to find all dependencies. */
1376 sched_analyze_insn (x
, insn
)
1379 register RTX_CODE code
= GET_CODE (x
);
1382 if (code
== SET
|| code
== CLOBBER
)
1383 sched_analyze_1 (x
, insn
);
1384 else if (code
== PARALLEL
)
1387 for (i
= XVECLEN (x
, 0) - 1; i
>= 0; i
--)
1389 code
= GET_CODE (XVECEXP (x
, 0, i
));
1390 if (code
== SET
|| code
== CLOBBER
)
1391 sched_analyze_1 (XVECEXP (x
, 0, i
), insn
);
1393 sched_analyze_2 (XVECEXP (x
, 0, i
), insn
);
1397 sched_analyze_2 (x
, insn
);
1399 for (link
= REG_NOTES (insn
); link
; link
= XEXP (link
, 1))
1401 /* Any REG_INC note is a SET of the register indicated. */
1402 if (REG_NOTE_KIND (link
) == REG_INC
)
1404 rtx dest
= XEXP (link
, 0);
1405 int regno
= REGNO (dest
);
1408 /* A hard reg in a wide mode may really be multiple registers.
1409 If so, mark all of them just like the first. */
1410 if (regno
< FIRST_PSEUDO_REGISTER
)
1412 i
= HARD_REGNO_NREGS (regno
, GET_MODE (dest
));
1417 for (u
= reg_last_uses
[regno
+i
]; u
; u
= XEXP (u
, 1))
1418 add_dependence (insn
, XEXP (u
, 0), REG_DEP_ANTI
);
1419 reg_last_uses
[regno
+ i
] = 0;
1420 if (reg_last_sets
[regno
+ i
])
1421 add_dependence (insn
, reg_last_sets
[regno
+ i
],
1423 reg_last_sets
[regno
+ i
] = insn
;
1424 if ((call_used_regs
[i
] || global_regs
[i
])
1425 && last_function_call
)
1426 /* Function calls clobber all call_used regs. */
1427 add_dependence (insn
, last_function_call
, REG_DEP_ANTI
);
1434 for (u
= reg_last_uses
[regno
]; u
; u
= XEXP (u
, 1))
1435 add_dependence (insn
, XEXP (u
, 0), REG_DEP_ANTI
);
1436 reg_last_uses
[regno
] = 0;
1437 if (reg_last_sets
[regno
])
1438 add_dependence (insn
, reg_last_sets
[regno
], REG_DEP_OUTPUT
);
1439 reg_last_sets
[regno
] = insn
;
1441 /* Don't let it cross a call after scheduling if it doesn't
1442 already cross one. */
1443 if (reg_n_calls_crossed
[regno
] == 0 && last_function_call
)
1444 add_dependence (insn
, last_function_call
, 0);
1449 /* Handle function calls. */
1450 if (GET_CODE (insn
) == CALL_INSN
)
1455 /* When scheduling instructions, we make sure calls don't lose their
1456 accompanying USE insns by depending them one on another in order. */
1458 prev_dep_insn
= insn
;
1459 dep_insn
= PREV_INSN (insn
);
1460 while (GET_CODE (dep_insn
) == INSN
1461 && GET_CODE (PATTERN (dep_insn
)) == USE
)
1463 SCHED_GROUP_P (prev_dep_insn
) = 1;
1465 /* Make a copy of all dependencies on dep_insn, and add to insn.
1466 This is so that all of the dependencies will apply to the
1469 for (link
= LOG_LINKS (dep_insn
); link
; link
= XEXP (link
, 1))
1470 add_dependence (insn
, XEXP (link
, 0), GET_MODE (link
));
1472 prev_dep_insn
= dep_insn
;
1473 dep_insn
= PREV_INSN (dep_insn
);
1478 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1479 for every dependency. */
1482 sched_analyze (head
, tail
)
1486 register int n_insns
= 0;
1488 register int luid
= 0;
1490 for (insn
= head
; ; insn
= NEXT_INSN (insn
))
1492 INSN_LUID (insn
) = luid
++;
1494 if (GET_CODE (insn
) == INSN
|| GET_CODE (insn
) == JUMP_INSN
)
1496 sched_analyze_insn (PATTERN (insn
), insn
);
1499 else if (GET_CODE (insn
) == CALL_INSN
)
1505 /* Any instruction using a hard register which may get clobbered
1506 by a call needs to be marked as dependent on this call.
1507 This prevents a use of a hard return reg from being moved
1508 past a void call (i.e. it does not explicitly set the hard
1511 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
1512 if (call_used_regs
[i
] || global_regs
[i
])
1514 for (u
= reg_last_uses
[i
]; u
; u
= XEXP (u
, 1))
1515 if (GET_CODE (PATTERN (XEXP (u
, 0))) != USE
)
1516 add_dependence (insn
, XEXP (u
, 0), REG_DEP_ANTI
);
1517 reg_last_uses
[i
] = 0;
1518 if (reg_last_sets
[i
]
1519 && GET_CODE (PATTERN (reg_last_sets
[i
])) != USE
)
1520 add_dependence (insn
, reg_last_sets
[i
], REG_DEP_ANTI
);
1521 reg_last_sets
[i
] = insn
;
1522 /* Insn, being a CALL_INSN, magically depends on
1523 `last_function_call' already. */
1526 /* For each insn which shouldn't cross a call, add a dependence
1527 between that insn and this call insn. */
1528 x
= LOG_LINKS (sched_before_next_call
);
1531 add_dependence (insn
, XEXP (x
, 0), REG_DEP_ANTI
);
1534 LOG_LINKS (sched_before_next_call
) = 0;
1536 sched_analyze_insn (PATTERN (insn
), insn
);
1538 /* We don't need to flush memory for a function call which does
1539 not involve memory. */
1540 if (! CONST_CALL_P (insn
))
1542 /* In the absence of interprocedural alias analysis,
1543 we must flush all pending reads and writes, and
1544 start new dependencies starting from here. */
1545 flush_pending_lists (insn
);
1548 /* Depend this function call (actually, the user of this
1549 function call) on all hard register clobberage. */
1550 last_function_call
= insn
;
1559 /* Called when we see a set of a register. If death is true, then we are
1560 scanning backwards. Mark that register as unborn. If nobody says
1561 otherwise, that is how things will remain. If death is false, then we
1562 are scanning forwards. Mark that register as being born. */
1565 sched_note_set (b
, x
, death
)
1570 register int regno
, j
;
1571 register rtx reg
= SET_DEST (x
);
1577 while (GET_CODE (reg
) == SUBREG
|| GET_CODE (reg
) == STRICT_LOW_PART
1578 || GET_CODE (reg
) == SIGN_EXTRACT
|| GET_CODE (reg
) == ZERO_EXTRACT
)
1580 /* Must treat modification of just one hardware register of a multi-reg
1581 value or just a byte field of a register exactly the same way that
1582 mark_set_1 in flow.c does. */
1583 if (GET_CODE (reg
) == ZERO_EXTRACT
1584 || GET_CODE (reg
) == SIGN_EXTRACT
1585 || (GET_CODE (reg
) == SUBREG
1586 && REG_SIZE (SUBREG_REG (reg
)) > REG_SIZE (reg
)))
1589 reg
= SUBREG_REG (reg
);
1592 if (GET_CODE (reg
) != REG
)
1595 /* Global registers are always live, so the code below does not apply
1598 regno
= REGNO (reg
);
1599 if (regno
>= FIRST_PSEUDO_REGISTER
|| ! global_regs
[regno
])
1601 register int offset
= regno
/ REGSET_ELT_BITS
;
1602 register int bit
= 1 << (regno
% REGSET_ELT_BITS
);
1606 /* If we only set part of the register, then this set does not
1611 /* Try killing this register. */
1612 if (regno
< FIRST_PSEUDO_REGISTER
)
1614 int j
= HARD_REGNO_NREGS (regno
, GET_MODE (reg
));
1617 offset
= (regno
+ j
) / REGSET_ELT_BITS
;
1618 bit
= 1 << ((regno
+ j
) % REGSET_ELT_BITS
);
1620 bb_live_regs
[offset
] &= ~bit
;
1621 bb_dead_regs
[offset
] |= bit
;
1626 bb_live_regs
[offset
] &= ~bit
;
1627 bb_dead_regs
[offset
] |= bit
;
1632 /* Make the register live again. */
1633 if (regno
< FIRST_PSEUDO_REGISTER
)
1635 int j
= HARD_REGNO_NREGS (regno
, GET_MODE (reg
));
1638 offset
= (regno
+ j
) / REGSET_ELT_BITS
;
1639 bit
= 1 << ((regno
+ j
) % REGSET_ELT_BITS
);
1641 bb_live_regs
[offset
] |= bit
;
1642 bb_dead_regs
[offset
] &= ~bit
;
1647 bb_live_regs
[offset
] |= bit
;
1648 bb_dead_regs
[offset
] &= ~bit
;
1654 /* Macros and functions for keeping the priority queue sorted, and
1655 dealing with queueing and unqueueing of instructions. */
/* Restore the sort invariant of READY after new elements were appended:
   exactly one new element ((NEW_READY) - (OLD_READY) == 1) is merged in
   cheaply with swap_sort; more than one forces a full qsort using
   rank_for_schedule as the comparison function.  */
1657 #define SCHED_SORT(READY, NEW_READY, OLD_READY) \
1658 do { if ((NEW_READY) - (OLD_READY) == 1) \
1659 swap_sort (READY, NEW_READY); \
1660 else if ((NEW_READY) - (OLD_READY) > 1) \
1661 qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); } \
1664 /* Returns a positive value if y is preferred; returns a negative value if
1665 x is preferred. Should never return 0, since that will make the sort
1669 rank_for_schedule (x
, y
)
/* X and Y are pointers into the READY vector (qsort comparison
   convention); the dereferenced insns are compared below.  */
1674 rtx tmp_dep
, tmp2_dep
;
1675 int tmp_class
, tmp2_class
;
1678 /* Choose the instruction with the highest priority, if different. */
1679 if (value
= INSN_PRIORITY (tmp
) - INSN_PRIORITY (tmp2
))
1682 if (last_scheduled_insn
)
1684 /* Classify the instructions into three classes:
1685 1) Data dependent on last schedule insn.
1686 2) Anti/Output dependent on last scheduled insn.
1687 3) Independent of last scheduled insn, or has latency of one.
1688 Choose the insn from the highest numbered class if different. */
1689 tmp_dep
= find_insn_list (tmp
, LOG_LINKS (last_scheduled_insn
));
1690 if (tmp_dep
== 0 || insn_cost (tmp
) == 1)
/* NOTE(review): REG_NOTE_KIND == 0 appears to mark a true data
   dependence -- add_dependence is called with 0 for data deps elsewhere
   in this file, REG_DEP_ANTI/REG_DEP_OUTPUT otherwise.  Confirm.  */
1692 else if (REG_NOTE_KIND (tmp_dep
) == 0)
1697 tmp2_dep
= find_insn_list (tmp2
, LOG_LINKS (last_scheduled_insn
))
;
1698 if (tmp2_dep
== 0 || insn_cost (tmp2
) == 1)
1700 else if (REG_NOTE_KIND (tmp2_dep
) == 0)
/* Prefer whichever insn landed in the higher-numbered class.  */
1705 if (value
= tmp_class
- tmp2_class
)
1709 /* If insns are equally good, sort by INSN_LUID (original insn order),
1710 so that we make the sort stable. This minimizes instruction movement,
1711 thus minimizing sched's effect on debugging and cross-jumping. */
1712 return INSN_LUID (tmp
) - INSN_LUID (tmp2
);
1715 /* Resort the array A in which only element at index N may be out of order. */
1717 __inline
static void
1725 while (i
>= 0 && rank_for_schedule (a
+i
, &insn
) >= 0)
/* Highest priority among the insns relevant to the current launch:
   set in launch_links from READY[0] and the finishing insn, and used
   (per the comment at its set site) by launch_link when boosting the
   priority of register-birthing insns.  */
1733 static int max_priority
;
1735 /* Add INSN to the insn queue so that it fires at least N_CYCLES
1736 before the currently executing insn. */
1738 __inline
static void
1739 queue_insn (insn
, n_cycles
)
1743 int next_q
= NEXT_Q_AFTER (q_ptr
, n_cycles
);
/* Prepend INSN to the queue bucket N_CYCLES ahead of q_ptr; the
   NEXT_INSN field doubles as the bucket's link while queued.  */
1744 NEXT_INSN (insn
) = insn_queue
[next_q
];
1745 insn_queue
[next_q
] = insn
;
1749 /* Return nonzero if PAT is the pattern of an insn which makes a
1753 birthing_insn_p (pat
)
/* After reload the register-lifetime heuristics are disabled (see the
   nearby comment: shortening register lives after reload is useless
   and wrong), so report no births.  */
1758 if (reload_completed
== 1)
1761 if (GET_CODE (pat
) == SET
1762 && GET_CODE (SET_DEST (pat
)) == REG
)
1764 rtx dest
= SET_DEST (pat
);
1765 int i
= REGNO (dest
);
/* Locate the dest register's bit in the bb_live_regs regset.  */
1766 int offset
= i
/ REGSET_ELT_BITS
;
1767 int bit
= 1 << (i
% REGSET_ELT_BITS
);
1769 /* It would be more accurate to use refers_to_regno_p or
1770 reg_mentioned_p to determine when the dest is not live before this
1773 if (bb_live_regs
[offset
] & bit
)
1774 return (reg_n_sets
[i
] == 1);
/* A PARALLEL births a register if any of its elements does.  */
1778 if (GET_CODE (pat
) == PARALLEL
)
1780 for (j
= 0; j
< XVECLEN (pat
, 0); j
++)
1781 if (birthing_insn_p (XVECEXP (pat
, 0, j
)))
1787 /* If PREV is an insn which is immediately ready to execute, return 1,
1788 otherwise return 0. We may adjust its priority if that will help shorten
1789 register lifetimes. */
1795 rtx pat
= PATTERN (prev
);
1797 /* MAX of (a) number of cycles needed by prev
1798 (b) number of cycles before needed resources are free. */
1799 int n_cycles
= insn_cost (prev
);
1802 /* Trying to shorten register lives after reload has completed
1803 is useless and wrong. It gives inaccurate schedules. */
1804 if (reload_completed
== 0)
1806 for (note
= REG_NOTES (prev
); note
; note
= XEXP (note
, 1))
1807 if (REG_NOTE_KIND (note
) == REG_DEAD
)
1810 /* Defer scheduling insns which kill registers, since that
1811 shortens register lives. Prefer scheduling insns which
1812 make registers live for the same reason. */
1816 INSN_PRIORITY (prev
) >>= 3;
1819 INSN_PRIORITY (prev
) >>= 2;
1823 INSN_PRIORITY (prev
) >>= 1;
1826 if (birthing_insn_p (pat
))
1828 int max
= max_priority
;
1830 if (max
> INSN_PRIORITY (prev
))
1831 INSN_PRIORITY (prev
) = max
;
1839 queue_insn (prev
, n_cycles
);
1843 /* INSN is the "currently executing insn". Launch each insn which was
1844 waiting on INSN (in the backwards dataflow sense). READY is a
1845 vector of insns which are ready to fire. N_READY is the number of
1846 elements in READY. */
1849 launch_links (insn
, ready
, n_ready
)
1855 int new_ready
= n_ready
;
/* Nothing was waiting on INSN: READY is unchanged.  */
1857 if (LOG_LINKS (insn
) == 0)
1860 /* This is used by the function launch_link above. */
1862 max_priority
= MAX (INSN_PRIORITY (ready
[0]), INSN_PRIORITY (insn
));
1864 max_priority
= INSN_PRIORITY (insn
);
/* Walk INSN's dependence links; when a predecessor's reference count
   drops to zero it may become ready (launch_link makes the final call)
   and is appended to READY.  */
1866 for (link
= LOG_LINKS (insn
); link
!= 0; link
= XEXP (link
, 1))
1868 rtx prev
= XEXP (link
, 0);
1870 if ((INSN_REF_COUNT (prev
) -= 1) == 0 && launch_link (prev
))
1871 ready
[new_ready
++] = prev
;
1877 /* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
1881 create_reg_dead_note (reg
, insn
)
1884 rtx link
= dead_notes
;
1887 /* In theory, we should not end up with more REG_DEAD reg notes than we
1888 started with. In practice, this can occur as the result of bugs in
1889 flow, combine and/or sched. */
/* Free list exhausted: allocate a fresh EXPR_LIST and mark it REG_DEAD.  */
1894 link
= rtx_alloc (EXPR_LIST
);
1895 PUT_REG_NOTE_KIND (link
, REG_DEAD
);
/* NOTE(review): presumably the other branch -- pop the recycled note
   off the dead_notes free list; confirm against the dropped lines.  */
1899 dead_notes
= XEXP (dead_notes
, 1);
/* Fill in the register and push the note on INSN's REG_NOTES chain.  */
1901 XEXP (link
, 0) = reg
;
1902 XEXP (link
, 1) = REG_NOTES (insn
);
1903 REG_NOTES (insn
) = link
;
1906 /* Subroutine on attach_deaths_insn--handles the recursive search
1907 through INSN. If SET_P is true, then x is being modified by the insn. */
1910 attach_deaths (x
, insn
, set_p
)
1917 register enum rtx_code code
;
1923 code
= GET_CODE (x
);
1935 /* Get rid of the easy cases first. */
1940 /* If the register dies in this insn, queue that note, and mark
1941 this register as needing to die. */
1942 /* This code is very similar to mark_used_1 (if set_p is false)
1943 and mark_set_1 (if set_p is true) in flow.c. */
1945 register int regno
= REGNO (x
);
1946 register int offset
= regno
/ REGSET_ELT_BITS
;
1947 register int bit
= 1 << (regno
% REGSET_ELT_BITS
);
1948 int all_needed
= (old_live_regs
[offset
] & bit
);
1949 int some_needed
= (old_live_regs
[offset
] & bit
);
1954 if (regno
< FIRST_PSEUDO_REGISTER
)
1958 n
= HARD_REGNO_NREGS (regno
, GET_MODE (x
));
1961 some_needed
|= (old_live_regs
[(regno
+ n
) / REGSET_ELT_BITS
]
1962 & 1 << ((regno
+ n
) % REGSET_ELT_BITS
));
1963 all_needed
&= (old_live_regs
[(regno
+ n
) / REGSET_ELT_BITS
]
1964 & 1 << ((regno
+ n
) % REGSET_ELT_BITS
));
1968 /* If it wasn't live before we started, then add a REG_DEAD note.
1969 We must check the previous lifetime info not the current info,
1970 because we may have to execute this code several times, e.g.
1971 once for a clobber (which doesn't add a note) and later
1972 for a use (which does add a note).
1974 Always make the register live. We must do this even if it was
1975 live before, because this may be an insn which sets and uses
1976 the same register, in which case the register has already been
1977 killed, so we must make it live again.
1979 Global registers are always live, and should never have a REG_DEAD
1980 note added for them, so none of the code below applies to them. */
1982 if (regno
>= FIRST_PSEUDO_REGISTER
|| ! global_regs
[regno
])
1984 /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
1985 STACK_POINTER_REGNUM, since these are always considered to be
1986 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
1987 if (regno
!= FRAME_POINTER_REGNUM
1988 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
1989 && ! (regno
== ARG_POINTER_REGNUM
&& fixed_regs
[regno
])
1991 && regno
!= STACK_POINTER_REGNUM
)
1993 if (! all_needed
&& ! dead_or_set_p (insn
, x
))
1995 /* If none of the words in X is needed, make a REG_DEAD
1996 note. Otherwise, we must make partial REG_DEAD
1999 create_reg_dead_note (x
, insn
);
2004 /* Don't make a REG_DEAD note for a part of a
2005 register that is set in the insn. */
2006 for (i
= HARD_REGNO_NREGS (regno
, GET_MODE (x
)) - 1;
2008 if ((old_live_regs
[(regno
+ i
) / REGSET_ELT_BITS
]
2009 & 1 << ((regno
+i
) % REGSET_ELT_BITS
)) == 0
2010 && ! dead_or_set_regno_p (insn
, regno
+ i
))
2011 create_reg_dead_note (gen_rtx (REG
, word_mode
,
2018 if (regno
< FIRST_PSEUDO_REGISTER
)
2020 int j
= HARD_REGNO_NREGS (regno
, GET_MODE (x
));
2023 offset
= (regno
+ j
) / REGSET_ELT_BITS
;
2024 bit
= 1 << ((regno
+ j
) % REGSET_ELT_BITS
);
2026 bb_dead_regs
[offset
] &= ~bit
;
2027 bb_live_regs
[offset
] |= bit
;
2032 bb_dead_regs
[offset
] &= ~bit
;
2033 bb_live_regs
[offset
] |= bit
;
2040 /* Handle tail-recursive case. */
2041 attach_deaths (XEXP (x
, 0), insn
, 0);
2045 case STRICT_LOW_PART
:
2046 /* These two cases preserve the value of SET_P, so handle them
2048 attach_deaths (XEXP (x
, 0), insn
, set_p
);
2053 /* This case preserves the value of SET_P for the first operand, but
2054 clears it for the other two. */
2055 attach_deaths (XEXP (x
, 0), insn
, set_p
);
2056 attach_deaths (XEXP (x
, 1), insn
, 0);
2057 attach_deaths (XEXP (x
, 2), insn
, 0);
2061 /* Other cases: walk the insn. */
2062 fmt
= GET_RTX_FORMAT (code
);
2063 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2066 attach_deaths (XEXP (x
, i
), insn
, 0);
2067 else if (fmt
[i
] == 'E')
2068 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2069 attach_deaths (XVECEXP (x
, i
, j
), insn
, 0);
2074 /* After INSN has executed, add register death notes for each register
2075 that is dead after INSN. */
2078 attach_deaths_insn (insn
)
2081 rtx x
= PATTERN (insn
);
2082 register RTX_CODE code
= GET_CODE (x
);
/* NOTE(review): the SET case is presumably guarded by a dropped
   "if (code == SET)" line.  Sources are uses (set_p == 0), the
   destination is a modification (set_p == 1).  */
2086 attach_deaths (SET_SRC (x
), insn
, 0);
2088 /* A register might die here even if it is the destination, e.g.
2089 it is the target of a volatile read and is otherwise unused.
2090 Hence we must always call attach_deaths for the SET_DEST. */
2091 attach_deaths (SET_DEST (x
), insn
, 1);
/* A PARALLEL is handled element-wise, dispatching on each element's
   own code (SET, CLOBBER, or anything else as a plain use).  */
2093 else if (code
== PARALLEL
)
2096 for (i
= XVECLEN (x
, 0) - 1; i
>= 0; i
--)
2098 code
= GET_CODE (XVECEXP (x
, 0, i
));
2101 attach_deaths (SET_SRC (XVECEXP (x
, 0, i
)), insn
, 0);
2103 attach_deaths (SET_DEST (XVECEXP (x
, 0, i
)), insn
, 1);
2105 else if (code
== CLOBBER
)
2106 attach_deaths (XEXP (XVECEXP (x
, 0, i
), 0), insn
, 1);
2108 attach_deaths (XVECEXP (x
, 0, i
), insn
, 0);
2111 else if (code
== CLOBBER
)
2112 attach_deaths (XEXP (x
, 0), insn
, 1);
/* Anything else: treat the whole pattern as a use.  */
2114 attach_deaths (x
, insn
, 0);
2117 /* Delete notes beginning with INSN and maybe put them in the chain
2118 of notes ended by NOTE_LIST.
2119 Returns the insn following the notes. */
2122 unlink_notes (insn
, tail
)
2125 rtx prev
= PREV_INSN (insn
);
2127 while (insn
!= tail
&& GET_CODE (insn
) == NOTE
)
2129 rtx next
= NEXT_INSN (insn
);
2130 /* Delete the note from its current position. */
/* Splice INSN out of the doubly-linked insn chain.  */
2132 NEXT_INSN (prev
) = next
;
2134 PREV_INSN (next
) = prev
;
2136 if (write_symbols
!= NO_DEBUG
&& NOTE_LINE_NUMBER (insn
) > 0)
2137 /* Record line-number notes so they can be reused. */
2138 LINE_NOTE (insn
) = insn
;
2141 /* Insert the note at the end of the notes list. */
2142 PREV_INSN (insn
) = note_list
;
2144 NEXT_INSN (note_list
) = insn
;
2153 /* Data structure for keeping track of register information
2154 during that register's life. */
/* offset and bit locate the register within a regset:
   regno == offset * REGSET_ELT_BITS + bit (see finish_sometimes_live).  */
2158 short offset
; short bit
;
/* live_length: number of insns this register has been live across;
   calls_crossed: CALL_INSNs crossed while live.  Both are folded into
   sched_reg_live_length / sched_reg_n_calls_crossed at block end.  */
2159 short live_length
; short calls_crossed
;
2162 /* Constructor for `sometimes' data structure. */
2165 new_sometimes_live (regs_sometimes_live
, offset
, bit
, sometimes_max
)
2166 struct sometimes
*regs_sometimes_live
;
2170 register struct sometimes
*p
;
/* Recover the register number from its regset coordinates.  */
2171 register int regno
= offset
* REGSET_ELT_BITS
+ bit
;
2174 /* There should never be a register greater than max_regno here. If there
2175 is, it means that a define_split has created a new pseudo reg. This
2176 is not allowed, since there will not be flow info available for any
2177 new register, so catch the error here. */
2178 if (regno
>= max_regno
)
/* NOTE(review): "®s_sometimes_live" below is a mojibake'd
   "&regs_sometimes_live" from text extraction -- the new entry is the
   slot at index sometimes_max.  Confirm against the pristine source.  */
2181 p
= ®s_sometimes_live
[sometimes_max
];
2185 p
->calls_crossed
= 0;
2187 return sometimes_max
;
2190 /* Count lengths of all regs we are currently tracking,
2191 and find new registers no longer live. */
2194 finish_sometimes_live (regs_sometimes_live
, sometimes_max
)
2195 struct sometimes
*regs_sometimes_live
;
2200 for (i
= 0; i
< sometimes_max
; i
++)
2202 register struct sometimes
*p
= ®s_sometimes_live
[i
];
/* Recover the register number from its regset coordinates, then fold
   this entry's accumulated statistics into the per-register totals
   consumed after scheduling.  */
2205 regno
= p
->offset
* REGSET_ELT_BITS
+ p
->bit
;
2207 sched_reg_live_length
[regno
] += p
->live_length
;
2208 sched_reg_n_calls_crossed
[regno
] += p
->calls_crossed
;
2212 /* Use modified list scheduling to rearrange insns in basic block
2213 B. FILE, if nonzero, is where we dump interesting output about
2217 schedule_block (b
, file
)
2224 int i
, j
, n_ready
= 0, new_ready
, n_insns
= 0;
2225 int sched_n_insns
= 0;
2226 #define NEED_NOTHING 0
2231 /* HEAD and TAIL delimit the region being scheduled. */
2232 rtx head
= basic_block_head
[b
];
2233 rtx tail
= basic_block_end
[b
];
2234 /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
2235 being scheduled. When the insns have been ordered,
2236 these insns delimit where the new insns are to be
2237 spliced back into the insn chain. */
2241 /* Keep life information accurate. */
2242 register struct sometimes
*regs_sometimes_live
;
2246 fprintf (file
, ";;\t -- basic block number %d from %d to %d --\n",
2247 b
, INSN_UID (basic_block_head
[b
]), INSN_UID (basic_block_end
[b
]));
2250 reg_last_uses
= (rtx
*) alloca (i
* sizeof (rtx
));
2251 bzero (reg_last_uses
, i
* sizeof (rtx
));
2252 reg_last_sets
= (rtx
*) alloca (i
* sizeof (rtx
));
2253 bzero (reg_last_sets
, i
* sizeof (rtx
));
2255 /* Remove certain insns at the beginning from scheduling,
2256 by advancing HEAD. */
2258 /* At the start of a function, before reload has run, don't delay getting
2259 parameters from hard registers into pseudo registers. */
2260 if (reload_completed
== 0 && b
== 0)
2263 && GET_CODE (head
) == NOTE
2264 && NOTE_LINE_NUMBER (head
) != NOTE_INSN_FUNCTION_BEG
)
2265 head
= NEXT_INSN (head
);
2267 && GET_CODE (head
) == INSN
2268 && GET_CODE (PATTERN (head
)) == SET
)
2270 rtx src
= SET_SRC (PATTERN (head
));
2271 while (GET_CODE (src
) == SUBREG
2272 || GET_CODE (src
) == SIGN_EXTEND
2273 || GET_CODE (src
) == ZERO_EXTEND
2274 || GET_CODE (src
) == SIGN_EXTRACT
2275 || GET_CODE (src
) == ZERO_EXTRACT
)
2276 src
= XEXP (src
, 0);
2277 if (GET_CODE (src
) != REG
2278 || REGNO (src
) >= FIRST_PSEUDO_REGISTER
)
2280 /* Keep this insn from ever being scheduled. */
2281 INSN_REF_COUNT (head
) = 1;
2282 head
= NEXT_INSN (head
);
2286 /* Don't include any notes or labels at the beginning of the
2287 basic block, or notes at the ends of basic blocks. */
2288 while (head
!= tail
)
2290 if (GET_CODE (head
) == NOTE
)
2291 head
= NEXT_INSN (head
);
2292 else if (GET_CODE (tail
) == NOTE
)
2293 tail
= PREV_INSN (tail
);
2294 else if (GET_CODE (head
) == CODE_LABEL
)
2295 head
= NEXT_INSN (head
);
2298 /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
2299 to schedule this block. */
2301 && (GET_CODE (head
) == NOTE
|| GET_CODE (head
) == CODE_LABEL
))
2305 /* This short-cut doesn't work. It does not count call insns crossed by
2306 registers in reg_sometimes_live. It does not mark these registers as
2307 dead if they die in this block. It does not mark these registers live
2308 (or create new reg_sometimes_live entries if necessary) if they are born
2311 The easy solution is to just always schedule a block. This block only
2312 has one insn, so this won't slow down this pass by much. */
2318 /* Exclude certain insns at the end of the basic block by advancing TAIL. */
2319 /* This isn't correct. Instead of advancing TAIL, should assign very
2320 high priorities to these insns to guarantee that they get scheduled last.
2321 If these insns are ignored, as is currently done, the register life info
2322 may be incorrectly computed. */
2323 if (GET_CODE (tail
) == INSN
2324 && GET_CODE (PATTERN (tail
)) == USE
2325 && next_nonnote_insn (tail
) == 0)
2327 /* If this was the only insn in the block, then there are no insns to
2332 /* We don't try to reorder the USE at the end of a function. */
2333 tail
= prev_nonnote_insn (tail
);
2336 /* This short-cut does not work. See comment above. */
2341 else if (GET_CODE (tail
) == JUMP_INSN
2342 && SCHED_GROUP_P (tail
) == 0
2343 && GET_CODE (PREV_INSN (tail
)) == INSN
2344 && GET_CODE (PATTERN (PREV_INSN (tail
))) == USE
2345 && REG_FUNCTION_VALUE_P (XEXP (PATTERN (PREV_INSN (tail
)), 0)))
2347 /* Don't let the setting of the function's return value register
2348 move from this jump. For the same reason we want to get the
2349 parameters into pseudo registers as quickly as possible, we
2350 want to set the function's return value register as late as
2353 /* If this is the only insn in the block, then there is no need to
2354 schedule the block. */
2358 tail
= PREV_INSN (tail
);
2362 tail
= prev_nonnote_insn (tail
);
2365 /* This shortcut does not work. See comment above. */
2372 /* This is probably wrong. Instead of doing this, should give this insn
2373 a very high priority to guarantee that it gets scheduled last. */
2374 /* Can not separate an insn that sets the condition code from one that
2375 uses it. So we must leave an insn that sets cc0 where it is. */
2376 if (sets_cc0_p (PATTERN (tail
)))
2377 tail
= PREV_INSN (tail
);
2380 /* Now HEAD through TAIL are the insns actually to be rearranged;
2381 Let PREV_HEAD and NEXT_TAIL enclose them. */
2382 prev_head
= PREV_INSN (head
);
2383 next_tail
= NEXT_INSN (tail
);
2385 /* Initialize basic block data structures. */
2387 pending_read_insns
= 0;
2388 pending_read_mems
= 0;
2389 pending_write_insns
= 0;
2390 pending_write_mems
= 0;
2391 pending_lists_length
= 0;
2392 last_pending_memory_flush
= 0;
2393 last_function_call
= 0;
2394 last_scheduled_insn
= 0;
2396 LOG_LINKS (sched_before_next_call
) = 0;
2398 n_insns
+= sched_analyze (head
, tail
);
2401 free_pending_lists ();
2405 /* Allocate vector to hold insns to be rearranged (except those
2406 insns which are controlled by an insn with SCHED_GROUP_P set).
2407 All these insns are included between ORIG_HEAD and ORIG_TAIL,
2408 as those variables ultimately are set up. */
2409 ready
= (rtx
*) alloca ((n_insns
+1) * sizeof (rtx
));
2411 /* TAIL is now the last of the insns to be rearranged.
2412 Put those insns into the READY vector. */
2415 /* If the last insn is a branch, force it to be the last insn after
2416 scheduling. Also, don't try to reorder calls at the ends the basic
2417 block -- this will only lead to worse register allocation. */
2418 if (GET_CODE (tail
) == CALL_INSN
|| GET_CODE (tail
) == JUMP_INSN
)
2421 ready
[n_ready
++] = tail
;
2422 INSN_PRIORITY (tail
) = TAIL_PRIORITY
;
2423 INSN_REF_COUNT (tail
) = 0;
2424 insn
= PREV_INSN (tail
);
2427 /* Assign priorities to instructions. Also check whether they
2428 are in priority order already. If so then I will be nonnegative.
2429 We use this shortcut only before reloading. */
2431 i
= reload_completed
? DONE_PRIORITY
: MAX_PRIORITY
;
2434 for (; insn
!= prev_head
; insn
= PREV_INSN (insn
))
2436 if (GET_RTX_CLASS (GET_CODE (insn
)) == 'i')
2439 if (INSN_REF_COUNT (insn
) == 0)
2440 ready
[n_ready
++] = insn
;
2441 if (SCHED_GROUP_P (insn
))
2443 while (SCHED_GROUP_P (insn
))
2445 insn
= PREV_INSN (insn
);
2446 while (GET_CODE (insn
) == NOTE
)
2447 insn
= PREV_INSN (insn
);
2455 if (INSN_PRIORITY (insn
) < i
)
2456 i
= INSN_PRIORITY (insn
);
2457 else if (INSN_PRIORITY (insn
) > i
)
2464 /* This short-cut doesn't work. It does not count call insns crossed by
2465 registers in reg_sometimes_live. It does not mark these registers as
2466 dead if they die in this block. It does not mark these registers live
2467 (or create new reg_sometimes_live entries if necessary) if they are born
2470 The easy solution is to just always schedule a block. These blocks tend
2471 to be very short, so this doesn't slow down this pass by much. */
2473 /* If existing order is good, don't bother to reorder. */
2474 if (i
!= DONE_PRIORITY
)
2477 fprintf (file
, ";; already scheduled\n");
2479 if (reload_completed
== 0)
2481 for (i
= 0; i
< sometimes_max
; i
++)
2482 regs_sometimes_live
[i
].live_length
+= n_insns
;
2484 finish_sometimes_live (regs_sometimes_live
, sometimes_max
);
2486 free_pending_lists ();
2491 /* Scan all the insns to be scheduled, removing NOTE insns
2492 and register death notes.
2493 Line number NOTE insns end up in NOTE_LIST.
2494 Register death notes end up in DEAD_NOTES.
2496 Recreate the register life information for the end of this basic
2499 if (reload_completed
== 0)
2501 bcopy (basic_block_live_at_start
[b
], bb_live_regs
, regset_bytes
);
2502 bzero (bb_dead_regs
, regset_bytes
);
2506 /* This is the first block in the function. There may be insns
2507 before head that we can't schedule. We still need to examine
2508 them though for accurate register lifetime analysis. */
2510 /* We don't want to remove any REG_DEAD notes as the code below
2513 for (insn
= basic_block_head
[b
]; insn
!= head
;
2514 insn
= NEXT_INSN (insn
))
2515 if (GET_RTX_CLASS (GET_CODE (insn
)) == 'i')
2517 /* See if the register gets born here. */
2518 /* We must check for registers being born before we check for
2519 registers dying. It is possible for a register to be born
2520 and die in the same insn, e.g. reading from a volatile
2521 memory location into an otherwise unused register. Such
2522 a register must be marked as dead after this insn. */
2523 if (GET_CODE (PATTERN (insn
)) == SET
2524 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
2525 sched_note_set (b
, PATTERN (insn
), 0);
2526 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
2529 for (j
= XVECLEN (PATTERN (insn
), 0) - 1; j
>= 0; j
--)
2530 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, j
)) == SET
2531 || GET_CODE (XVECEXP (PATTERN (insn
), 0, j
)) == CLOBBER
)
2532 sched_note_set (b
, XVECEXP (PATTERN (insn
), 0, j
), 0);
2534 /* ??? This code is obsolete and should be deleted. It
2535 is harmless though, so we will leave it in for now. */
2536 for (j
= XVECLEN (PATTERN (insn
), 0) - 1; j
>= 0; j
--)
2537 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, j
)) == USE
)
2538 sched_note_set (b
, XVECEXP (PATTERN (insn
), 0, j
), 0);
2541 for (link
= REG_NOTES (insn
); link
; link
= XEXP (link
, 1))
2543 if ((REG_NOTE_KIND (link
) == REG_DEAD
2544 || REG_NOTE_KIND (link
) == REG_UNUSED
)
2545 /* Verify that the REG_NOTE has a legal value. */
2546 && GET_CODE (XEXP (link
, 0)) == REG
)
2548 register int regno
= REGNO (XEXP (link
, 0));
2549 register int offset
= regno
/ REGSET_ELT_BITS
;
2550 register int bit
= 1 << (regno
% REGSET_ELT_BITS
);
2552 if (regno
< FIRST_PSEUDO_REGISTER
)
2554 int j
= HARD_REGNO_NREGS (regno
,
2555 GET_MODE (XEXP (link
, 0)));
2558 offset
= (regno
+ j
) / REGSET_ELT_BITS
;
2559 bit
= 1 << ((regno
+ j
) % REGSET_ELT_BITS
);
2561 bb_live_regs
[offset
] &= ~bit
;
2562 bb_dead_regs
[offset
] |= bit
;
2567 bb_live_regs
[offset
] &= ~bit
;
2568 bb_dead_regs
[offset
] |= bit
;
2576 /* If debugging information is being produced, keep track of the line
2577 number notes for each insn. */
2578 if (write_symbols
!= NO_DEBUG
)
2580 /* We must use the true line number for the first insn in the block
2581 that was computed and saved at the start of this pass. We can't
2582 use the current line number, because scheduling of the previous
2583 block may have changed the current line number. */
2584 rtx line
= line_note_head
[b
];
2586 for (insn
= basic_block_head
[b
];
2588 insn
= NEXT_INSN (insn
))
2589 if (GET_CODE (insn
) == NOTE
&& NOTE_LINE_NUMBER (insn
) > 0)
2592 LINE_NOTE (insn
) = line
;
2595 for (insn
= head
; insn
!= next_tail
; insn
= NEXT_INSN (insn
))
2597 rtx prev
, next
, link
;
2599 /* Farm out notes. This is needed to keep the debugger from
2600 getting completely deranged. */
2601 if (GET_CODE (insn
) == NOTE
)
2604 insn
= unlink_notes (insn
, next_tail
);
2609 if (insn
== next_tail
)
2613 if (reload_completed
== 0
2614 && GET_RTX_CLASS (GET_CODE (insn
)) == 'i')
2616 /* See if the register gets born here. */
2617 /* We must check for registers being born before we check for
2618 registers dying. It is possible for a register to be born and
2619 die in the same insn, e.g. reading from a volatile memory
2620 location into an otherwise unused register. Such a register
2621 must be marked as dead after this insn. */
2622 if (GET_CODE (PATTERN (insn
)) == SET
2623 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
2624 sched_note_set (b
, PATTERN (insn
), 0);
2625 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
2628 for (j
= XVECLEN (PATTERN (insn
), 0) - 1; j
>= 0; j
--)
2629 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, j
)) == SET
2630 || GET_CODE (XVECEXP (PATTERN (insn
), 0, j
)) == CLOBBER
)
2631 sched_note_set (b
, XVECEXP (PATTERN (insn
), 0, j
), 0);
2633 /* ??? This code is obsolete and should be deleted. It
2634 is harmless though, so we will leave it in for now. */
2635 for (j
= XVECLEN (PATTERN (insn
), 0) - 1; j
>= 0; j
--)
2636 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, j
)) == USE
)
2637 sched_note_set (b
, XVECEXP (PATTERN (insn
), 0, j
), 0);
2640 /* Need to know what registers this insn kills. */
2641 for (prev
= 0, link
= REG_NOTES (insn
); link
; link
= next
)
2645 next
= XEXP (link
, 1);
2646 if ((REG_NOTE_KIND (link
) == REG_DEAD
2647 || REG_NOTE_KIND (link
) == REG_UNUSED
)
2648 /* Verify that the REG_NOTE has a legal value. */
2649 && GET_CODE (XEXP (link
, 0)) == REG
)
2651 register int regno
= REGNO (XEXP (link
, 0));
2652 register int offset
= regno
/ REGSET_ELT_BITS
;
2653 register int bit
= 1 << (regno
% REGSET_ELT_BITS
);
2655 /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
2657 if (REG_NOTE_KIND (link
) == REG_DEAD
)
2660 XEXP (prev
, 1) = next
;
2662 REG_NOTES (insn
) = next
;
2663 XEXP (link
, 1) = dead_notes
;
2669 if (regno
< FIRST_PSEUDO_REGISTER
)
2671 int j
= HARD_REGNO_NREGS (regno
,
2672 GET_MODE (XEXP (link
, 0)));
2675 offset
= (regno
+ j
) / REGSET_ELT_BITS
;
2676 bit
= 1 << ((regno
+ j
) % REGSET_ELT_BITS
);
2678 bb_live_regs
[offset
] &= ~bit
;
2679 bb_dead_regs
[offset
] |= bit
;
2684 bb_live_regs
[offset
] &= ~bit
;
2685 bb_dead_regs
[offset
] |= bit
;
2694 if (reload_completed
== 0)
2696 /* Keep track of register lives. */
2697 old_live_regs
= (regset
) alloca (regset_bytes
);
2699 = (struct sometimes
*) alloca (max_regno
* sizeof (struct sometimes
));
2702 /* Start with registers live at end. */
2703 for (j
= 0; j
< regset_size
; j
++)
2705 int live
= bb_live_regs
[j
];
2706 old_live_regs
[j
] = live
;
2710 for (bit
= 0; bit
< REGSET_ELT_BITS
; bit
++)
2711 if (live
& (1 << bit
))
2712 sometimes_max
= new_sometimes_live (regs_sometimes_live
, j
,
2713 bit
, sometimes_max
);
2718 SCHED_SORT (ready
, n_ready
, 1);
2722 fprintf (file
, ";; ready list initially:\n;; ");
2723 for (i
= 0; i
< n_ready
; i
++)
2724 fprintf (file
, "%d ", INSN_UID (ready
[i
]));
2725 fprintf (file
, "\n\n");
2727 for (insn
= head
; insn
!= next_tail
; insn
= NEXT_INSN (insn
))
2728 if (INSN_PRIORITY (insn
) > 0)
2729 fprintf (file
, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
2730 INSN_UID (insn
), INSN_PRIORITY (insn
),
2731 INSN_REF_COUNT (insn
));
2734 /* Now HEAD and TAIL are going to become disconnected
2735 entirely from the insn chain. */
2738 /* Q_SIZE will always be zero here. */
2740 bzero (insn_queue
, sizeof (insn_queue
));
2742 /* Now, perform list scheduling. */
2744 /* Where we start inserting insns is after TAIL. */
2747 new_needs
= (NEXT_INSN (prev_head
) == basic_block_head
[b
]
2748 ? NEED_HEAD
: NEED_NOTHING
);
2749 if (PREV_INSN (next_tail
) == basic_block_end
[b
])
2750 new_needs
|= NEED_TAIL
;
2752 new_ready
= n_ready
;
2753 while (sched_n_insns
< n_insns
)
2755 q_ptr
= NEXT_Q (q_ptr
);
2757 /* Add all pending insns that can be scheduled without stalls to the
2759 for (insn
= insn_queue
[q_ptr
]; insn
; insn
= NEXT_INSN (insn
))
2762 fprintf (file
, ";; launching %d before %d with no stalls\n",
2763 INSN_UID (insn
), INSN_UID (last
));
2764 ready
[new_ready
++] = insn
;
2767 insn_queue
[q_ptr
] = 0;
2769 /* If there are no ready insns, stall until one is ready and add all
2770 of the pending insns at that point to the ready list. */
2773 register int stalls
;
2775 for (stalls
= 1; stalls
< Q_SIZE
; stalls
++)
2776 if (insn
= insn_queue
[NEXT_Q_AFTER (q_ptr
, stalls
)])
2778 for (; insn
; insn
= NEXT_INSN (insn
))
2781 fprintf (file
, ";; issue insn %d before %d with %d stalls\n",
2782 INSN_UID (insn
), INSN_UID (last
), stalls
);
2783 ready
[new_ready
++] = insn
;
2786 insn_queue
[NEXT_Q_AFTER (q_ptr
, stalls
)] = 0;
2791 /* This looks logically correct, but on the SPEC benchmark set on
2792 the SPARC, I get better code without it. */
2793 q_ptr
= NEXT_Q_AFTER (q_ptr
, stalls
);
2797 /* There should be some instructions waiting to fire. */
2801 /* Sort the ready list and choose the best insn to schedule.
2802 N_READY holds the number of items that were scheduled the last time,
2803 minus the one instruction scheduled on the last loop iteration; it
2804 is not modified for any other reason in this loop. */
2805 SCHED_SORT (ready
, new_ready
, n_ready
);
2806 n_ready
= new_ready
;
2807 last_scheduled_insn
= insn
= ready
[0];
2809 if (DONE_PRIORITY_P (insn
))
2812 if (reload_completed
== 0)
2814 /* Process this insn, and each insn linked to this one which must
2815 be immediately output after this insn. */
2818 /* First we kill registers set by this insn, and then we
2819 make registers used by this insn live. This is the opposite
2820 order used above because we are traversing the instructions
2823 /* Strictly speaking, we should scan REG_UNUSED notes and make
2824 every register mentioned there live, however, we will just
2825 kill them again immediately below, so there doesn't seem to
2826 be any reason why we bother to do this. */
2828 /* See if this is the last notice we must take of a register. */
2829 if (GET_CODE (PATTERN (insn
)) == SET
2830 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
2831 sched_note_set (b
, PATTERN (insn
), 1);
2832 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
2835 for (j
= XVECLEN (PATTERN (insn
), 0) - 1; j
>= 0; j
--)
2836 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, j
)) == SET
2837 || GET_CODE (XVECEXP (PATTERN (insn
), 0, j
)) == CLOBBER
)
2838 sched_note_set (b
, XVECEXP (PATTERN (insn
), 0, j
), 1);
2841 /* This code keeps life analysis information up to date. */
2842 if (GET_CODE (insn
) == CALL_INSN
)
2844 register struct sometimes
*p
;
2846 /* A call kills all call used and global registers, except
2847 for those mentioned in the call pattern which will be
2848 made live again later. */
2849 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
2850 if (call_used_regs
[i
] || global_regs
[i
])
2852 register int offset
= i
/ REGSET_ELT_BITS
;
2853 register int bit
= 1 << (i
% REGSET_ELT_BITS
);
2855 bb_live_regs
[offset
] &= ~bit
;
2856 bb_dead_regs
[offset
] |= bit
;
2859 /* Regs live at the time of a call instruction must not
2860 go in a register clobbered by calls. Record this for
2861 all regs now live. Note that insns which are born or
2862 die in a call do not cross a call, so this must be done
2863 after the killings (above) and before the births
2865 p
= regs_sometimes_live
;
2866 for (i
= 0; i
< sometimes_max
; i
++, p
++)
2867 if (bb_live_regs
[p
->offset
] & (1 << p
->bit
))
2868 p
->calls_crossed
+= 1;
2871 /* Make every register used live, and add REG_DEAD notes for
2872 registers which were not live before we started. */
2873 attach_deaths_insn (insn
);
2875 /* Find registers now made live by that instruction. */
2876 for (i
= 0; i
< regset_size
; i
++)
2878 int diff
= bb_live_regs
[i
] & ~old_live_regs
[i
];
2882 old_live_regs
[i
] |= diff
;
2883 for (bit
= 0; bit
< REGSET_ELT_BITS
; bit
++)
2884 if (diff
& (1 << bit
))
2886 = new_sometimes_live (regs_sometimes_live
, i
, bit
,
2891 /* Count lengths of all regs we are worrying about now,
2892 and handle registers no longer live. */
2894 for (i
= 0; i
< sometimes_max
; i
++)
2896 register struct sometimes
*p
= ®s_sometimes_live
[i
];
2897 int regno
= p
->offset
*REGSET_ELT_BITS
+ p
->bit
;
2899 p
->live_length
+= 1;
2901 if ((bb_live_regs
[p
->offset
] & (1 << p
->bit
)) == 0)
2903 /* This is the end of one of this register's lifetime
2904 segments. Save the lifetime info collected so far,
2905 and clear its bit in the old_live_regs entry. */
2906 sched_reg_live_length
[regno
] += p
->live_length
;
2907 sched_reg_n_calls_crossed
[regno
] += p
->calls_crossed
;
2908 old_live_regs
[p
->offset
] &= ~(1 << p
->bit
);
2910 /* Delete the reg_sometimes_live entry for this reg by
2911 copying the last entry over top of it. */
2912 *p
= regs_sometimes_live
[--sometimes_max
];
2913 /* ...and decrement i so that this newly copied entry
2914 will be processed. */
2920 insn
= PREV_INSN (insn
);
2922 while (SCHED_GROUP_P (link
));
2924 /* Set INSN back to the insn we are scheduling now. */
2928 /* Schedule INSN. Remove it from the ready list. */
2933 NEXT_INSN (insn
) = last
;
2934 PREV_INSN (last
) = insn
;
2937 /* Everything that precedes INSN now either becomes "ready", if
2938 it can execute immediately before INSN, or "pending", if
2939 there must be a delay. Give INSN high enough priority that
2940 at least one (maybe more) reg-killing insns can be launched
2941 ahead of all others. Mark INSN as scheduled by changing its
2943 INSN_PRIORITY (insn
) = LAUNCH_PRIORITY
;
2944 new_ready
= launch_links (insn
, ready
, n_ready
);
2945 INSN_PRIORITY (insn
) = DONE_PRIORITY
;
2947 /* Schedule all prior insns that must not be moved. */
2948 if (SCHED_GROUP_P (insn
))
2950 /* Disable these insns from being launched. */
2952 while (SCHED_GROUP_P (link
))
2954 /* Disable these insns from being launched by anybody. */
2955 link
= PREV_INSN (link
);
2956 INSN_REF_COUNT (link
) = 0;
2959 /* None of these insns can move forward into delay slots. */
2960 while (SCHED_GROUP_P (insn
))
2962 insn
= PREV_INSN (insn
);
2963 new_ready
= launch_links (insn
, ready
, new_ready
);
2964 INSN_PRIORITY (insn
) = DONE_PRIORITY
;
2967 NEXT_INSN (insn
) = last
;
2968 PREV_INSN (last
) = insn
;
2976 if (reload_completed
== 0)
2977 finish_sometimes_live (regs_sometimes_live
, sometimes_max
);
2979 /* HEAD is now the first insn in the chain of insns that
2980 been scheduled by the loop above.
2981 TAIL is the last of those insns. */
2984 /* NOTE_LIST is the end of a chain of notes previously found
2985 among the insns. Insert them at the beginning of the insns. */
2988 rtx note_head
= note_list
;
2989 while (PREV_INSN (note_head
))
2990 note_head
= PREV_INSN (note_head
);
2992 PREV_INSN (head
) = note_list
;
2993 NEXT_INSN (note_list
) = head
;
2997 /* In theory, there should be no REG_DEAD notes leftover at the end.
2998 In practice, this can occur as the result of bugs in flow, combine.c,
2999 and/or sched.c. The values of the REG_DEAD notes remaining are
3000 meaningless, because dead_notes is just used as a free list. */
3002 if (dead_notes
!= 0)
3006 if (new_needs
& NEED_HEAD
)
3007 basic_block_head
[b
] = head
;
3008 PREV_INSN (head
) = prev_head
;
3009 NEXT_INSN (prev_head
) = head
;
3011 if (new_needs
& NEED_TAIL
)
3012 basic_block_end
[b
] = tail
;
3013 NEXT_INSN (tail
) = next_tail
;
3014 PREV_INSN (next_tail
) = tail
;
3016 /* Restore the line-number notes of each insn. */
3017 if (write_symbols
!= NO_DEBUG
)
3019 rtx line
, note
, prev
, new;
3022 head
= basic_block_head
[b
];
3023 next_tail
= NEXT_INSN (basic_block_end
[b
]);
3025 /* Determine the current line-number. We want to know the current
3026 line number of the first insn of the block here, in case it is
3027 different from the true line number that was saved earlier. If
3028 different, then we need a line number note before the first insn
3029 of this block. If it happens to be the same, then we don't want to
3030 emit another line number note here. */
3031 for (line
= head
; line
; line
= PREV_INSN (line
))
3032 if (GET_CODE (line
) == NOTE
&& NOTE_LINE_NUMBER (line
) > 0)
3035 /* Walk the insns keeping track of the current line-number and inserting
3036 the line-number notes as needed. */
3037 for (insn
= head
; insn
!= next_tail
; insn
= NEXT_INSN (insn
))
3038 if (GET_CODE (insn
) == NOTE
&& NOTE_LINE_NUMBER (insn
) > 0)
3040 else if (! (GET_CODE (insn
) == NOTE
3041 && NOTE_LINE_NUMBER (insn
) == NOTE_INSN_DELETED
)
3042 && (note
= LINE_NOTE (insn
)) != 0
3045 || NOTE_LINE_NUMBER (note
) != NOTE_LINE_NUMBER (line
)
3046 || NOTE_SOURCE_FILE (note
) != NOTE_SOURCE_FILE (line
)))
3049 prev
= PREV_INSN (insn
);
3050 if (LINE_NOTE (note
))
3052 /* Re-use the original line-number note. */
3053 LINE_NOTE (note
) = 0;
3054 PREV_INSN (note
) = prev
;
3055 NEXT_INSN (prev
) = note
;
3056 PREV_INSN (insn
) = note
;
3057 NEXT_INSN (note
) = insn
;
3062 new = emit_note_after (NOTE_LINE_NUMBER (note
), prev
);
3063 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note
);
3067 fprintf (file
, ";; added %d line-number notes\n", notes
);
3072 fprintf (file
, ";; new basic block head = %d\n;; new basic block end = %d\n\n",
3073 INSN_UID (basic_block_head
[b
]), INSN_UID (basic_block_end
[b
]));
3076 /* Yow! We're done! */
3077 free_pending_lists ();
3082 /* Subroutine of split_hard_reg_notes. Searches X for any reference to
3083 REGNO, returning the rtx of the reference found if any. Otherwise,
3087 regno_use_in (regno
, x
)
3095 if (GET_CODE (x
) == REG
&& REGNO (x
) == regno
)
3098 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
3099 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
3103 if (tem
= regno_use_in (regno
, XEXP (x
, i
)))
3106 else if (fmt
[i
] == 'E')
3107 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
3108 if (tem
= regno_use_in (regno
, XVECEXP (x
, i
, j
)))
3115 /* Subroutine of update_flow_info. Determines whether any new REG_NOTEs are
3116 needed for the hard register mentioned in the note. This can happen
3117 if the reference to the hard register in the original insn was split into
3118 several smaller hard register references in the split insns. */
3121 split_hard_reg_notes (note
, first
, last
, orig_insn
)
3122 rtx note
, first
, last
, orig_insn
;
3124 rtx reg
, temp
, link
;
3125 int n_regs
, i
, new_reg
;
3128 /* Assume that this is a REG_DEAD note. */
3129 if (REG_NOTE_KIND (note
) != REG_DEAD
)
3132 reg
= XEXP (note
, 0);
3134 n_regs
= HARD_REGNO_NREGS (REGNO (reg
), GET_MODE (reg
));
3136 /* ??? Could add check here to see whether, the hard register is referenced
3137 in the same mode as in the original insn. If so, then it has not been
3138 split, and the rest of the code below is unnecessary. */
3140 for (i
= 1; i
< n_regs
; i
++)
3142 new_reg
= REGNO (reg
) + i
;
3144 /* Check for references to new_reg in the split insns. */
3145 for (insn
= last
; ; insn
= PREV_INSN (insn
))
3147 if (GET_RTX_CLASS (GET_CODE (insn
)) == 'i'
3148 && (temp
= regno_use_in (new_reg
, PATTERN (insn
))))
3150 /* Create a new reg dead note here. */
3151 link
= rtx_alloc (EXPR_LIST
);
3152 PUT_REG_NOTE_KIND (link
, REG_DEAD
);
3153 XEXP (link
, 0) = temp
;
3154 XEXP (link
, 1) = REG_NOTES (insn
);
3155 REG_NOTES (insn
) = link
;
3158 /* It isn't mentioned anywhere, so no new reg note is needed for
3166 /* Subroutine of update_flow_info. Determines whether a SET or CLOBBER in an
3167 insn created by splitting needs a REG_DEAD or REG_UNUSED note added. */
3170 new_insn_dead_notes (pat
, insn
, last
, orig_insn
)
3171 rtx pat
, insn
, last
, orig_insn
;
3175 /* PAT is either a CLOBBER or a SET here. */
3176 dest
= XEXP (pat
, 0);
3178 while (GET_CODE (dest
) == ZERO_EXTRACT
|| GET_CODE (dest
) == SUBREG
3179 || GET_CODE (dest
) == STRICT_LOW_PART
3180 || GET_CODE (dest
) == SIGN_EXTRACT
)
3181 dest
= XEXP (dest
, 0);
3183 if (GET_CODE (dest
) == REG
)
3185 for (tem
= last
; tem
!= insn
; tem
= PREV_INSN (tem
))
3187 if (GET_RTX_CLASS (GET_CODE (tem
)) == 'i'
3188 && reg_overlap_mentioned_p (dest
, PATTERN (tem
))
3189 && (set
= single_set (tem
)))
3191 rtx tem_dest
= SET_DEST (set
);
3193 while (GET_CODE (tem_dest
) == ZERO_EXTRACT
3194 || GET_CODE (tem_dest
) == SUBREG
3195 || GET_CODE (tem_dest
) == STRICT_LOW_PART
3196 || GET_CODE (tem_dest
) == SIGN_EXTRACT
)
3197 tem_dest
= XEXP (tem_dest
, 0);
3199 if (tem_dest
!= dest
)
3201 /* Use the same scheme as combine.c, don't put both REG_DEAD
3202 and REG_UNUSED notes on the same insn. */
3203 if (! find_regno_note (tem
, REG_UNUSED
, REGNO (dest
))
3204 && ! find_regno_note (tem
, REG_DEAD
, REGNO (dest
)))
3206 rtx note
= rtx_alloc (EXPR_LIST
);
3207 PUT_REG_NOTE_KIND (note
, REG_DEAD
);
3208 XEXP (note
, 0) = dest
;
3209 XEXP (note
, 1) = REG_NOTES (tem
);
3210 REG_NOTES (tem
) = note
;
3212 /* The reg only dies in one insn, the last one that uses
3216 else if (reg_overlap_mentioned_p (dest
, SET_SRC (set
)))
3217 /* We found an instruction that both uses the register,
3218 and sets it, so no new REG_NOTE is needed for this set. */
3222 /* If this is a set, it must die somewhere, unless it is the dest of
3223 the original insn, and hence is live after the original insn. Abort
3224 if it isn't supposed to be live after the original insn.
3226 If this is a clobber, then just add a REG_UNUSED note. */
3229 int live_after_orig_insn
= 0;
3230 rtx pattern
= PATTERN (orig_insn
);
3233 if (GET_CODE (pat
) == CLOBBER
)
3235 rtx note
= rtx_alloc (EXPR_LIST
);
3236 PUT_REG_NOTE_KIND (note
, REG_UNUSED
);
3237 XEXP (note
, 0) = dest
;
3238 XEXP (note
, 1) = REG_NOTES (insn
);
3239 REG_NOTES (insn
) = note
;
3243 /* The original insn could have multiple sets, so search the
3244 insn for all sets. */
3245 if (GET_CODE (pattern
) == SET
)
3247 if (reg_overlap_mentioned_p (dest
, SET_DEST (pattern
)))
3248 live_after_orig_insn
= 1;
3250 else if (GET_CODE (pattern
) == PARALLEL
)
3252 for (i
= 0; i
< XVECLEN (pattern
, 0); i
++)
3253 if (GET_CODE (XVECEXP (pattern
, 0, i
)) == SET
3254 && reg_overlap_mentioned_p (dest
,
3255 SET_DEST (XVECEXP (pattern
,
3257 live_after_orig_insn
= 1;
3260 if (! live_after_orig_insn
)
3266 /* Subroutine of update_flow_info. Update the value of reg_n_sets for all
3267 registers modified by X. INC is -1 if the containing insn is being deleted,
3268 and is 1 if the containing insn is a newly generated insn. */
3271 update_n_sets (x
, inc
)
3275 rtx dest
= SET_DEST (x
);
3277 while (GET_CODE (dest
) == STRICT_LOW_PART
|| GET_CODE (dest
) == SUBREG
3278 || GET_CODE (dest
) == ZERO_EXTRACT
|| GET_CODE (dest
) == SIGN_EXTRACT
)
3279 dest
= SUBREG_REG (dest
);
3281 if (GET_CODE (dest
) == REG
)
3283 int regno
= REGNO (dest
);
3285 if (regno
< FIRST_PSEUDO_REGISTER
)
3288 int endregno
= regno
+ HARD_REGNO_NREGS (regno
, GET_MODE (dest
));
3290 for (i
= regno
; i
< endregno
; i
++)
3291 reg_n_sets
[i
] += inc
;
3294 reg_n_sets
[regno
] += inc
;
3298 /* Updates all flow-analysis related quantities (including REG_NOTES) for
3299 the insns from FIRST to LAST inclusive that were created by splitting
3300 ORIG_INSN. NOTES are the original REG_NOTES. */
3303 update_flow_info (notes
, first
, last
, orig_insn
)
3310 rtx orig_dest
, temp
;
3313 /* Get and save the destination set by the original insn. */
3315 orig_dest
= single_set (orig_insn
);
3317 orig_dest
= SET_DEST (orig_dest
);
3319 /* Move REG_NOTES from the original insn to where they now belong. */
3321 for (note
= notes
; note
; note
= next
)
3323 next
= XEXP (note
, 1);
3324 switch (REG_NOTE_KIND (note
))
3328 /* Move these notes from the original insn to the last new insn where
3329 the register is now set. */
3331 for (insn
= last
; ; insn
= PREV_INSN (insn
))
3333 if (GET_RTX_CLASS (GET_CODE (insn
)) == 'i'
3334 && reg_mentioned_p (XEXP (note
, 0), PATTERN (insn
)))
3336 XEXP (note
, 1) = REG_NOTES (insn
);
3337 REG_NOTES (insn
) = note
;
3339 /* Sometimes need to convert REG_UNUSED notes to REG_DEAD
3341 /* ??? This won't handle mutiple word registers correctly,
3342 but should be good enough for now. */
3343 if (REG_NOTE_KIND (note
) == REG_UNUSED
3344 && ! dead_or_set_p (insn
, XEXP (note
, 0)))
3345 PUT_REG_NOTE_KIND (note
, REG_DEAD
);
3347 /* The reg only dies in one insn, the last one that uses
3351 /* It must die somewhere, fail it we couldn't find where it died.
3353 If this is a REG_UNUSED note, then it must be a temporary
3354 register that was not needed by this instantiation of the
3355 pattern, so we can safely ignore it. */
3358 if (REG_NOTE_KIND (note
) != REG_UNUSED
)
3365 /* If this note refers to a multiple word hard register, it may
3366 have been split into several smaller hard register references.
3367 Check to see if there are any new register references that
3368 need REG_NOTES added for them. */
3369 temp
= XEXP (note
, 0);
3370 if (REG_NOTE_KIND (note
) == REG_DEAD
3371 && GET_CODE (temp
) == REG
3372 && REGNO (temp
) < FIRST_PSEUDO_REGISTER
3373 && HARD_REGNO_NREGS (REGNO (temp
), GET_MODE (temp
)))
3374 split_hard_reg_notes (note
, first
, last
, orig_insn
);
3378 /* This note applies to the dest of the original insn. Find the
3379 first new insn that now has the same dest, and move the note
3385 for (insn
= first
; ; insn
= NEXT_INSN (insn
))
3387 if (GET_RTX_CLASS (GET_CODE (insn
)) == 'i'
3388 && (temp
= single_set (insn
))
3389 && rtx_equal_p (SET_DEST (temp
), orig_dest
))
3391 XEXP (note
, 1) = REG_NOTES (insn
);
3392 REG_NOTES (insn
) = note
;
3393 /* The reg is only zero before one insn, the first that
3397 /* It must be set somewhere, fail if we couldn't find where it
3406 /* A REG_EQUIV or REG_EQUAL note on an insn with more than one
3407 set is meaningless. Just drop the note. */
3411 case REG_NO_CONFLICT
:
3412 /* These notes apply to the dest of the original insn. Find the last
3413 new insn that now has the same dest, and move the note there. */
3418 for (insn
= last
; ; insn
= PREV_INSN (insn
))
3420 if (GET_RTX_CLASS (GET_CODE (insn
)) == 'i'
3421 && (temp
= single_set (insn
))
3422 && rtx_equal_p (SET_DEST (temp
), orig_dest
))
3424 XEXP (note
, 1) = REG_NOTES (insn
);
3425 REG_NOTES (insn
) = note
;
3426 /* Only put this note on one of the new insns. */
3430 /* The original dest must still be set someplace. Abort if we
3431 couldn't find it. */
3438 /* Move a REG_LIBCALL note to the first insn created, and update
3439 the corresponding REG_RETVAL note. */
3440 XEXP (note
, 1) = REG_NOTES (first
);
3441 REG_NOTES (first
) = note
;
3443 insn
= XEXP (note
, 0);
3444 note
= find_reg_note (insn
, REG_RETVAL
, 0);
3446 XEXP (note
, 0) = first
;
3450 /* Move a REG_RETVAL note to the last insn created, and update
3451 the corresponding REG_LIBCALL note. */
3452 XEXP (note
, 1) = REG_NOTES (last
);
3453 REG_NOTES (last
) = note
;
3455 insn
= XEXP (note
, 0);
3456 note
= find_reg_note (insn
, REG_LIBCALL
, 0);
3458 XEXP (note
, 0) = last
;
3462 /* This should be moved to whichever instruction is a JUMP_INSN. */
3464 for (insn
= last
; ; insn
= PREV_INSN (insn
))
3466 if (GET_CODE (insn
) == JUMP_INSN
)
3468 XEXP (note
, 1) = REG_NOTES (insn
);
3469 REG_NOTES (insn
) = note
;
3470 /* Only put this note on one of the new insns. */
3473 /* Fail if we couldn't find a JUMP_INSN. */
3480 /* This should be moved to whichever instruction now has the
3481 increment operation. */
3485 /* Should be moved to the new insn(s) which use the label. */
3490 /* These two notes will never appear until after reorg, so we don't
3491 have to handle them here. */
3497 /* Each new insn created, except the last, has a new set. If the destination
3498 is a register, then this reg is now live across several insns, whereas
3499 previously the dest reg was born and died within the same insn. To
3500 reflect this, we now need a REG_DEAD note on the insn where this
3503 Similarly, the new insns may have clobbers that need REG_UNUSED notes. */
3505 for (insn
= first
; insn
!= last
; insn
= NEXT_INSN (insn
))
3510 pat
= PATTERN (insn
);
3511 if (GET_CODE (pat
) == SET
|| GET_CODE (pat
) == CLOBBER
)
3512 new_insn_dead_notes (pat
, insn
, last
, orig_insn
);
3513 else if (GET_CODE (pat
) == PARALLEL
)
3515 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
3516 if (GET_CODE (XVECEXP (pat
, 0, i
)) == SET
3517 || GET_CODE (XVECEXP (pat
, 0, i
)) == CLOBBER
)
3518 new_insn_dead_notes (XVECEXP (pat
, 0, i
), insn
, last
, orig_insn
);
3522 /* If any insn, except the last, uses the register set by the last insn,
3523 then we need a new REG_DEAD note on that insn. In this case, there
3524 would not have been a REG_DEAD note for this register in the original
3525 insn because it was used and set within one insn.
3527 There is no new REG_DEAD note needed if the last insn uses the register
3528 that it is setting. */
3530 set
= single_set (last
);
3533 rtx dest
= SET_DEST (set
);
3535 while (GET_CODE (dest
) == ZERO_EXTRACT
|| GET_CODE (dest
) == SUBREG
3536 || GET_CODE (dest
) == STRICT_LOW_PART
3537 || GET_CODE (dest
) == SIGN_EXTRACT
)
3538 dest
= XEXP (dest
, 0);
3540 if (GET_CODE (dest
) == REG
3541 && ! reg_overlap_mentioned_p (dest
, SET_SRC (set
)))
3543 for (insn
= PREV_INSN (last
); ; insn
= PREV_INSN (insn
))
3545 if (GET_RTX_CLASS (GET_CODE (insn
)) == 'i'
3546 && reg_mentioned_p (dest
, PATTERN (insn
))
3547 && (set
= single_set (insn
)))
3549 rtx insn_dest
= SET_DEST (set
);
3551 while (GET_CODE (insn_dest
) == ZERO_EXTRACT
3552 || GET_CODE (insn_dest
) == SUBREG
3553 || GET_CODE (insn_dest
) == STRICT_LOW_PART
3554 || GET_CODE (insn_dest
) == SIGN_EXTRACT
)
3555 insn_dest
= XEXP (insn_dest
, 0);
3557 if (insn_dest
!= dest
)
3559 note
= rtx_alloc (EXPR_LIST
);
3560 PUT_REG_NOTE_KIND (note
, REG_DEAD
);
3561 XEXP (note
, 0) = dest
;
3562 XEXP (note
, 1) = REG_NOTES (insn
);
3563 REG_NOTES (insn
) = note
;
3564 /* The reg only dies in one insn, the last one
3575 /* If the original dest is modifying a multiple register target, and the
3576 original instruction was split such that the original dest is now set
3577 by two or more SUBREG sets, then the split insns no longer kill the
3578 destination of the original insn.
3580 In this case, if there exists an instruction in the same basic block,
3581 before the split insn, which uses the original dest, and this use is
3582 killed by the original insn, then we must remove the REG_DEAD note on
3583 this insn, because it is now superfluous.
3585 This does not apply when a hard register gets split, because the code
3586 knows how to handle overlapping hard registers properly. */
3587 if (orig_dest
&& GET_CODE (orig_dest
) == REG
)
3589 int found_orig_dest
= 0;
3590 int found_split_dest
= 0;
3592 for (insn
= first
; ; insn
= NEXT_INSN (insn
))
3594 set
= single_set (insn
);
3597 if (GET_CODE (SET_DEST (set
)) == REG
3598 && REGNO (SET_DEST (set
)) == REGNO (orig_dest
))
3600 found_orig_dest
= 1;
3603 else if (GET_CODE (SET_DEST (set
)) == SUBREG
3604 && SUBREG_REG (SET_DEST (set
)) == orig_dest
)
3606 found_split_dest
= 1;
3615 if (found_split_dest
)
3617 /* Search backwards from FIRST, looking for the first insn that uses
3618 the original dest. Stop if we pass a CODE_LABEL or a JUMP_INSN.
3619 If we find an insn, and it has a REG_DEAD note, then delete the
3622 for (insn
= first
; insn
; insn
= PREV_INSN (insn
))
3624 if (GET_CODE (insn
) == CODE_LABEL
3625 || GET_CODE (insn
) == JUMP_INSN
)
3627 else if (GET_RTX_CLASS (GET_CODE (insn
)) == 'i'
3628 && reg_mentioned_p (orig_dest
, insn
))
3630 note
= find_regno_note (insn
, REG_DEAD
, REGNO (orig_dest
));
3632 remove_note (insn
, note
);
3636 else if (! found_orig_dest
)
3638 /* This should never happen. */
3643 /* Update reg_n_sets. This is necessary to prevent local alloc from
3644 converting REG_EQUAL notes to REG_EQUIV when splitting has modified
3645 a reg from set once to set multiple times. */
3648 rtx x
= PATTERN (orig_insn
);
3649 RTX_CODE code
= GET_CODE (x
);
3651 if (code
== SET
|| code
== CLOBBER
)
3652 update_n_sets (x
, -1);
3653 else if (code
== PARALLEL
)
3656 for (i
= XVECLEN (x
, 0) - 1; i
>= 0; i
--)
3658 code
= GET_CODE (XVECEXP (x
, 0, i
));
3659 if (code
== SET
|| code
== CLOBBER
)
3660 update_n_sets (XVECEXP (x
, 0, i
), -1);
3664 for (insn
= first
; ; insn
= NEXT_INSN (insn
))
3667 code
= GET_CODE (x
);
3669 if (code
== SET
|| code
== CLOBBER
)
3670 update_n_sets (x
, 1);
3671 else if (code
== PARALLEL
)
3674 for (i
= XVECLEN (x
, 0) - 1; i
>= 0; i
--)
3676 code
= GET_CODE (XVECEXP (x
, 0, i
));
3677 if (code
== SET
|| code
== CLOBBER
)
3678 update_n_sets (XVECEXP (x
, 0, i
), 1);
/* NOTE(review): this extract is mangled -- every original source line is
   split across several physical lines (the leading numbers are the
   original file's line numbers), and many lines are missing outright:
   the function header/argument declarations, most braces, `else'
   keywords, and the `fprintf (dump_file, ...' heads of the dump
   messages.  The code text below is left byte-identical; only comments
   have been added.  Recover the canonical text from the GCC 2.x
   sched.c before attempting any code change.  */
3688 /* The one entry point in this file. DUMP_FILE is the dump file for
3692 schedule_insns (dump_file
)
/* Worst-case number of insn uids after splitting: every existing insn
   may become up to MAX_INSNS_PER_SPLIT insns, so all per-uid arrays
   below are sized by this rather than by get_max_uid () alone.  */
3695 int max_uid
= MAX_INSNS_PER_SPLIT
* (get_max_uid () + 1);
/* Empty function: nothing to schedule.  */
3699 /* Taking care of this degenerate case makes the rest of
3700 this code simpler. */
3701 if (n_basic_blocks
== 0)
/* Dummy INSN used as an anchor for dependencies that must stay ordered
   with respect to the next CALL_INSN.  */
3704 /* Create an insn here so that we can hang dependencies off of it later. */
3705 sched_before_next_call
= gen_rtx (INSN
, VOIDmode
, 0, 0, 0, 0, 0, 0, 0);
3707 /* Initialize the unused_*_lists. We can't use the ones left over from
3708 the previous function, because gcc has freed that memory. We can use
3709 the ones left over from the first sched pass in the second pass however,
3710 so only clear them on the first sched pass. The first pass is before
3711 reload if flag_schedule_insns is set, otherwise it is afterwards. */
3713 if (reload_completed
== 0 || ! flag_schedule_insns
)
3715 unused_insn_list
= 0;
3716 unused_expr_list
= 0;
3719 /* We create no insns here, only reorder them, so we
3720 remember how far we can cut back the stack on exit. */
/* Per-uid working arrays for the scheduler (luid, priority, dependence
   reference count).  All are alloca'd, so they live only for this pass.  */
3722 /* Allocate data for this pass. See comments, above,
3723 for what these vectors do. */
3724 /* ??? Instruction splitting below may create new instructions, so these
3725 arrays must be bigger than just max_uid. */
3726 insn_luid
= (int *) alloca (max_uid
* sizeof (int));
3727 insn_priority
= (int *) alloca (max_uid
* sizeof (int));
3728 insn_ref_count
= (int *) alloca (max_uid
* sizeof (int));
/* Before reload we also track register-life information (deaths, call
   crossings, live lengths, per-block live/dead regsets) so the updated
   lifetimes can be written back at the end of the pass.  After reload
   these pointers are simply zeroed (see the assignments at original
   lines 3744-3746; the `else' joining the two arms is missing from
   this extract).  */
3730 if (reload_completed
== 0)
3732 sched_reg_n_deaths
= (short *) alloca (max_regno
* sizeof (short));
3733 sched_reg_n_calls_crossed
= (int *) alloca (max_regno
* sizeof (int));
3734 sched_reg_live_length
= (int *) alloca (max_regno
* sizeof (int));
3735 bb_dead_regs
= (regset
) alloca (regset_bytes
);
3736 bb_live_regs
= (regset
) alloca (regset_bytes
);
3737 bzero (sched_reg_n_calls_crossed
, max_regno
* sizeof (int));
3738 bzero (sched_reg_live_length
, max_regno
* sizeof (int));
3739 bcopy (reg_n_deaths
, sched_reg_n_deaths
, max_regno
* sizeof (short));
3740 init_alias_analysis ();
3744 sched_reg_n_deaths
= 0;
3745 sched_reg_n_calls_crossed
= 0;
3746 sched_reg_live_length
= 0;
/* NOTE(review): presumably in the post-reload arm -- when only the
   second pass runs (flag_schedule_insns clear), alias analysis was not
   initialized above, so do it here.  Confirm against the original
   braces, which are missing from this extract.  */
3749 if (! flag_schedule_insns
)
3750 init_alias_analysis ();
/* When emitting debug info, remember the line-number NOTE preceding
   each basic block now, before scheduling scrambles insn order.  */
3753 if (write_symbols
!= NO_DEBUG
)
3757 line_note
= (rtx
*) alloca (max_uid
* sizeof (rtx
));
3758 bzero (line_note
, max_uid
* sizeof (rtx
));
3759 line_note_head
= (rtx
*) alloca (n_basic_blocks
* sizeof (rtx
));
3760 bzero (line_note_head
, n_basic_blocks
* sizeof (rtx
));
3762 /* Determine the line-number at the start of each basic block.
3763 This must be computed and saved now, because after a basic block's
3764 predecessor has been scheduled, it is impossible to accurately
3765 determine the correct line number for the first insn of the block. */
3767 for (b
= 0; b
< n_basic_blocks
; b
++)
3768 for (line
= basic_block_head
[b
]; line
; line
= PREV_INSN (line
))
3769 if (GET_CODE (line
) == NOTE
&& NOTE_LINE_NUMBER (line
) > 0)
3771 line_note_head
[b
] = line
;
/* Clear the per-uid working arrays before the main scheduling loop.  */
3776 bzero (insn_luid
, max_uid
* sizeof (int));
3777 bzero (insn_priority
, max_uid
* sizeof (int));
3778 bzero (insn_ref_count
, max_uid
* sizeof (int));
3780 /* Schedule each basic block, block by block. */
/* Guarantee there is a NOTE (or CODE_LABEL) after the last block's end
   insn; emit a NOTE_INSN_DELETED if necessary.  NOTE(review): presumably
   so the scheduler can safely look at NEXT_INSN of the block end --
   confirm against the original comment, which is missing here.  */
3782 if (NEXT_INSN (basic_block_end
[n_basic_blocks
-1]) == 0
3783 || (GET_CODE (basic_block_end
[n_basic_blocks
-1]) != NOTE
3784 && GET_CODE (basic_block_end
[n_basic_blocks
-1]) != CODE_LABEL
))
3785 emit_note_after (NOTE_INSN_DELETED
, basic_block_end
[n_basic_blocks
-1]);
/* Main loop: for each basic block, first walk its insns splitting them
   (pre-reload only) for finer-grained parallelism, then schedule the
   block.  */
3787 for (b
= 0; b
< n_basic_blocks
; b
++)
3794 for (insn
= basic_block_head
[b
]; ; insn
= next
)
/* Compute the successor before INSN is possibly turned into a NOTE.  */
3799 /* Can't use `next_real_insn' because that
3800 might go across CODE_LABELS and short-out basic blocks. */
3801 next
= NEXT_INSN (insn
);
3802 if (GET_CODE (insn
) != INSN
)
3804 if (insn
== basic_block_end
[b
])
3810 /* Don't split no-op move insns. These should silently disappear
3811 later in final. Splitting such insns would break the code
3812 that handles REG_NO_CONFLICT blocks. */
3813 set
= single_set (insn
);
3814 if (set
&& rtx_equal_p (SET_SRC (set
), SET_DEST (set
)))
3816 if (insn
== basic_block_end
[b
])
3819 /* Nops get in the way while scheduling, so delete them now if
3820 register allocation has already been done. It is too risky
3821 to try to do this before register allocation, and there are
3822 unlikely to be very many nops then anyways. */
3823 if (reload_completed
)
/* "Delete" the nop by rewriting it in place as a deleted NOTE.  */
3825 PUT_CODE (insn
, NOTE
);
3826 NOTE_LINE_NUMBER (insn
) = NOTE_INSN_DELETED
;
3827 NOTE_SOURCE_FILE (insn
) = 0;
3833 /* Split insns here to get max fine-grain parallelism. */
3834 prev
= PREV_INSN (insn
);
3835 if (reload_completed
== 0)
/* FIRST..LAST will bracket the insns produced by the split; NOTES saves
   the original REG_NOTES so update_flow_info can redistribute them.  */
3837 rtx last
, first
= PREV_INSN (insn
);
3838 rtx notes
= REG_NOTES (insn
);
3840 last
= try_split (PATTERN (insn
), insn
, 1);
/* NOTE(review): the guard comparing LAST against INSN (i.e. "did the
   split actually produce new insns?") appears to be missing from this
   extract -- the block below should only run when a split happened.  */
3843 /* try_split returns the NOTE that INSN became. */
3844 first
= NEXT_INSN (first
);
3845 update_flow_info (notes
, first
, last
, insn
);
/* Turn the original insn into a deleted NOTE and repoint the block
   boundaries at the first/last insns of the split sequence.  */
3847 PUT_CODE (insn
, NOTE
);
3848 NOTE_SOURCE_FILE (insn
) = 0;
3849 NOTE_LINE_NUMBER (insn
) = NOTE_INSN_DELETED
;
3850 if (insn
== basic_block_head
[b
])
3851 basic_block_head
[b
] = first
;
3852 if (insn
== basic_block_end
[b
])
3854 basic_block_end
[b
] = last
;
3860 if (insn
== basic_block_end
[b
])
/* All insns in block B are now split; do the actual list scheduling.  */
3864 schedule_block (b
, dump_file
);
/* Debug-info cleanup pass: scheduling duplicates/moves line-number
   notes, so walk all insns backwards and delete the redundant ones.  */
3871 if (write_symbols
!= NO_DEBUG
)
3874 rtx insn
= get_insns ();
3875 int active_insn
= 0;
3878 /* Walk the insns deleting redundant line-number notes. Many of these
3879 are already present. The remainder tend to occur at basic
3880 block boundaries. */
3881 for (insn
= get_last_insn (); insn
; insn
= PREV_INSN (insn
))
3882 if (GET_CODE (insn
) == NOTE
&& NOTE_LINE_NUMBER (insn
) > 0)
3884 /* If there are no active insns following, INSN is redundant. */
3885 if (active_insn
== 0)
3888 NOTE_SOURCE_FILE (insn
) = 0;
3889 NOTE_LINE_NUMBER (insn
) = NOTE_INSN_DELETED
;
/* NOTE(review): the condition head for this test (comparing the
   previously seen note LINE against INSN) is missing from this
   extract; only its trailing conjuncts survive below.  */
3891 /* If the line number is unchanged, LINE is redundant. */
3893 && NOTE_LINE_NUMBER (line
) == NOTE_LINE_NUMBER (insn
)
3894 && NOTE_SOURCE_FILE (line
) == NOTE_SOURCE_FILE (insn
))
3897 NOTE_SOURCE_FILE (line
) = 0;
3898 NOTE_LINE_NUMBER (line
) = NOTE_INSN_DELETED
;
/* Any insn other than a deleted NOTE or a USE/CLOBBER counts as
   "active" for the redundancy test above.  */
3905 else if (! ((GET_CODE (insn
) == NOTE
3906 && NOTE_LINE_NUMBER (insn
) == NOTE_INSN_DELETED
)
3907 || (GET_CODE (insn
) == INSN
3908 && (GET_CODE (PATTERN (insn
)) == USE
3909 || GET_CODE (PATTERN (insn
)) == CLOBBER
))))
3912 if (dump_file
&& notes
)
3913 fprintf (dump_file
, ";; deleted %d line-number notes\n", notes
);
/* Pre-reload only: copy the register-life data recomputed during
   scheduling back into the global reg_* arrays, reporting changes to
   the dump file.  NOTE(review): the `fprintf (dump_file,' heads of the
   dump messages below are missing from this extract -- only their
   format-string arguments survive.  */
3916 if (reload_completed
== 0)
3919 for (regno
= 0; regno
= max_regno
; regno
++)
3920 if (sched_reg_live_length
[regno
])
3924 if (reg_live_length
[regno
] > sched_reg_live_length
[regno
])
3926 ";; register %d life shortened from %d to %d\n",
3927 regno
, reg_live_length
[regno
],
3928 sched_reg_live_length
[regno
]);
3929 /* Negative values are special; don't overwrite the current
3930 reg_live_length value if it is negative. */
3931 else if (reg_live_length
[regno
] < sched_reg_live_length
[regno
]
3932 && reg_live_length
[regno
] >= 0)
3934 ";; register %d life extended from %d to %d\n",
3935 regno
, reg_live_length
[regno
],
3936 sched_reg_live_length
[regno
]);
3938 if (reg_n_calls_crossed
[regno
]
3939 && ! sched_reg_n_calls_crossed
[regno
])
3941 ";; register %d no longer crosses calls\n", regno
);
3942 else if (! reg_n_calls_crossed
[regno
]
3943 && sched_reg_n_calls_crossed
[regno
])
3945 ";; register %d now crosses calls\n", regno
);
3947 reg_live_length
[regno
] = sched_reg_live_length
[regno
];
3948 reg_n_calls_crossed
[regno
] = sched_reg_n_calls_crossed
[regno
];
3952 #endif /* INSN_SCHEDULING */