/* Instruction scheduling pass.
   Copyright (C) 1992 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

/* Instruction scheduling pass.

   This pass implements list scheduling within basic blocks.  It is
   run after flow analysis, but before register allocation.  The
   scheduler works as follows:

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning
   values to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we also add it to the ready list.  When all insns down
   to the lowest priority have been scheduled, the critical path of the
   basic block has been made as short as possible.  The remaining insns
   are then scheduled in remaining slots.

   The following list shows the order in which we want to break ties:

	1. choose insn with lowest conflict cost, ties broken by
	2. choose insn with the longest path to end of bb, ties broken by
	3. choose insn that kills the most registers, ties broken by
	4. choose insn that conflicts with the most ready insns, or finally
	5. choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not do any branch scheduling.  But
   that is ok, since we can use GNU's delayed branch scheduling
   pass to take care of this case.

   Also note that no further optimizations based on algebraic identities
   are performed, so this pass would be a good one to perform instruction
   splitting, such as breaking up a multiply instruction into shifts
   and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   This pass must update information that subsequent passes expect to be
   correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, basic_block_head,
   basic_block_end.

   The information in the line number notes is carefully retained by this
   pass.  All other NOTE insns are grouped in their same relative order at
   the beginning of basic blocks that have been scheduled.  */
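
/* A small worked illustration of the list scheduling above (added for
   exposition; the block and its insns are hypothetical, not from the
   original comments):  in the straight-line block

	i1: r1 = a;   i2: r2 = r1 + b;   i3: r3 = c;   i4: d = r2 * r3

   only i4 has no successors, so the ready list starts out as {i4}.
   Scheduling i4 (working backwards) releases its predecessors i2 and
   i3 into the list, and scheduling i2 releases i1; ties between insns
   of equal priority are broken by the rules listed above.  */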
\f
#include <stdio.h>
#include "config.h"
#include "rtl.h"
#include "basic-block.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"

/* Arrays set up by scheduling for the same respective purposes as
   similar-named arrays set up by flow analysis.  We work with these
   arrays during the scheduling pass so we can compare values against
   unscheduled code.

   Values of these arrays are copied at the end of this pass into the
   arrays set up by flow analysis.  */
static short *sched_reg_n_deaths;
static int *sched_reg_n_calls_crossed;
static int *sched_reg_live_length;

/* Element N is the next insn that sets (hard or pseudo) register
   N within the current basic block; or zero, if there is no
   such insn.  Needed for new registers which may be introduced
   by splitting insns.  */
static rtx *reg_last_uses;
static rtx *reg_last_sets;

/* Vector indexed by INSN_UID giving the original ordering of the insns.  */
static int *insn_luid;
#define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving each instruction a priority.  */
static int *insn_priority;
#define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])

#define DONE_PRIORITY -1
#define MAX_PRIORITY 0x7fffffff
#define TAIL_PRIORITY 0x7ffffffe
#define LAUNCH_PRIORITY 0x7f000001
#define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
#define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)

/* Vector indexed by INSN_UID giving number of insns referring to this insn.  */
static int *insn_ref_count;
#define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving line-number note in effect for each
   insn.  For line-number notes, this indicates whether the note may be
   reused.  */
static rtx *line_note;
#define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

/* Regsets telling whether a given register is live or dead before the last
   scheduled insn.  Must scan the instructions once before scheduling to
   determine what registers are live or dead at the end of the block.  */
static regset bb_dead_regs;
static regset bb_live_regs;

/* Regset telling whether a given register is live after the insn currently
   being scheduled.  Before processing an insn, this is equal to bb_live_regs
   above.  This is used so that we can find registers that are newly born/dead
   after processing an insn.  */
static regset old_live_regs;

/* The chain of REG_DEAD notes.  REG_DEAD notes are removed from all insns
   during the initial scan and reused later.  If there are not exactly as
   many REG_DEAD notes in the post scheduled code as there were in the
   prescheduled code then we trigger an abort because this indicates a bug.  */
static rtx dead_notes;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns following it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  We maintain three lists (conceptually):

   (1) a "Ready" list of unscheduled, uncommitted insns
   (2) a "Scheduled" list of scheduled insns
   (3) a "Pending" list of insns which could be scheduled, but
       for stalls

   Insns move from the "Ready" list to the "Pending" list when
   all insns following them have been scheduled.

   Insns move from the "Pending" list to the "Scheduled" list
   when there is sufficient space in the pipeline to prevent
   stalls between the insn and scheduled insns which use it.

   The "Pending" list acts as a buffer to prevent insns
   from avalanching.

   The "Ready" list is implemented by the variable `ready'.
   The "Pending" list are the insns in the LOG_LINKS of ready insns.
   The "Scheduled" list is the new insn chain built by this pass.  */

/* Implement a circular buffer from which instructions are issued.  */
#define Q_SIZE 128
static rtx insn_queue[Q_SIZE];
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & (Q_SIZE-1))
#define NEXT_Q_AFTER(X,C) (((X)+C) & (Q_SIZE-1))
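
/* Note added for exposition: the masking in NEXT_Q and NEXT_Q_AFTER
   implements arithmetic modulo Q_SIZE, which is correct only because
   Q_SIZE is a power of two.  For example, NEXT_Q (127) is
   (127+1) & 127 == 0, wrapping around to the start of the buffer.  */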
\f
/* Forward declarations.  */
static void sched_analyze_2 ();
static void schedule_block ();

/* Main entry point of this file.  */
void schedule_insns ();
\f
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Vector indexed by N giving the initial (unchanging) value known
   for pseudo-register N.  */
static rtx *reg_known_value;

/* Indicates number of valid entries in reg_known_value.  */
static int reg_known_value_size;

static rtx
canon_rtx (x)
     rtx x;
{
  if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER
      && REGNO (x) <= reg_known_value_size)
    return reg_known_value[REGNO (x)];
  else if (GET_CODE (x) == PLUS)
    {
      rtx x0 = canon_rtx (XEXP (x, 0));
      rtx x1 = canon_rtx (XEXP (x, 1));

      if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
	{
	  /* We can tolerate LO_SUMs being offset here; these
	     rtl are used for nothing other than comparisons.  */
	  if (GET_CODE (x0) == CONST_INT)
	    return plus_constant_for_output (x1, INTVAL (x0));
	  else if (GET_CODE (x1) == CONST_INT)
	    return plus_constant_for_output (x0, INTVAL (x1));
	  return gen_rtx (PLUS, GET_MODE (x), x0, x1);
	}
    }
  return x;
}
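
/* For illustration (the register number is hypothetical): if pseudo 101
   is known to hold the address (symbol_ref "x"), then canon_rtx maps

	(plus:SI (reg:SI 101) (const_int 4))

   to the constant address "x" + 4 via plus_constant_for_output, so two
   references made through different pseudos become comparable.  */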

/* Set up all info needed to perform alias analysis on memory references.  */

void
init_alias_analysis ()
{
  int maxreg = max_reg_num ();
  rtx insn;
  rtx note;
  rtx set;

  reg_known_value_size = maxreg;

  reg_known_value
    = (rtx *) oballoc ((maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx))
      - FIRST_PSEUDO_REGISTER;
  bzero (reg_known_value+FIRST_PSEUDO_REGISTER,
	 (maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx));

  /* Fill in the entries with known constant values.  */
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if ((set = single_set (insn)) != 0
	&& GET_CODE (SET_DEST (set)) == REG
	&& REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
	&& (((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
	     && reg_n_sets[REGNO (SET_DEST (set))] == 1)
	    || (note = find_reg_note (insn, REG_EQUIV, 0)) != 0)
	&& GET_CODE (XEXP (note, 0)) != EXPR_LIST)
      reg_known_value[REGNO (SET_DEST (set))] = XEXP (note, 0);

  /* Fill in the remaining entries.  */
  while (--maxreg >= FIRST_PSEUDO_REGISTER)
    if (reg_known_value[maxreg] == 0)
      reg_known_value[maxreg] = regno_reg_rtx[maxreg];
}

/* Return 1 if X and Y are identical-looking rtx's.

   We use the data in reg_known_value above to see if two registers with
   different numbers are, in fact, equivalent.  */

static int
rtx_equal_for_memref_p (x, y)
     rtx x, y;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0 && y == 0)
    return 1;
  if (x == 0 || y == 0)
    return 0;
  x = canon_rtx (x);
  y = canon_rtx (y);

  if (x == y)
    return 1;

  code = GET_CODE (x);
  /* Rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively.  */

  if (code == REG)
    return REGNO (x) == REGNO (y);
  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'n':
	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'V':
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_memref_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_memref_p (XEXP (x, i), XEXP (y, i)) == 0)
	    return 0;
	  break;

	case 'S':
	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;

	case '0':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }
  return 1;
}

/* Given an rtx X, find a SYMBOL_REF or LABEL_REF within
   X and return it, or return 0 if none found.  */

static rtx
find_symbolic_term (x)
     rtx x;
{
  register int i;
  register enum rtx_code code;
  register char *fmt;

  code = GET_CODE (x);
  if (code == SYMBOL_REF || code == LABEL_REF)
    return x;
  if (GET_RTX_CLASS (code) == 'o')
    return 0;

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      rtx t;

      if (fmt[i] == 'e')
	{
	  t = find_symbolic_term (XEXP (x, i));
	  if (t != 0)
	    return t;
	}
      else if (fmt[i] == 'E')
	break;
    }
  return 0;
}

/* Return nonzero if X and Y (memory addresses) could reference the
   same location in memory.  C is an offset accumulator.  When
   C is nonzero, we are testing aliases between X and Y + C.
   XSIZE is the size in bytes of the X reference,
   similarly YSIZE is the size in bytes for Y.

   If XSIZE or YSIZE is zero, we do not know the amount of memory being
   referenced (the reference was BLKmode), so make the most pessimistic
   assumptions.

   We recognize the following cases of non-conflicting memory:

   (1) addresses involving the frame pointer cannot conflict
       with addresses involving static variables.
   (2) static variables with different addresses cannot conflict.

   Nice to notice that varying addresses cannot conflict with fp if no
   local variables had their addresses taken, but that's too hard now.  */
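
/* A worked instance of the offset arithmetic below (added for
   exposition): for X = (plus FP 4) with XSIZE 4 and Y = (plus FP 8)
   with YSIZE 4, the PLUS/PLUS case reduces the problem to equal bases
   with C = 4; since XSIZE > C fails, the references cover bytes
   [4,8) and [8,12) of the frame and are reported as non-conflicting.  */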

static int
memrefs_conflict_p (xsize, x, ysize, y, c)
     rtx x, y;
     int xsize, ysize;
     int c;
{
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);
  else if (GET_CODE (x) == LO_SUM)
    x = XEXP (x, 1);
  else
    x = canon_rtx (x);
  if (GET_CODE (y) == HIGH)
    y = XEXP (y, 0);
  else if (GET_CODE (y) == LO_SUM)
    y = XEXP (y, 1);
  else
    y = canon_rtx (y);

  if (rtx_equal_for_memref_p (x, y))
    return (xsize == 0 || ysize == 0 ||
	    (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));

  if (y == frame_pointer_rtx || y == stack_pointer_rtx)
    {
      rtx t = y;
      int tsize = ysize;
      y = x; ysize = xsize;
      x = t; xsize = tsize;
    }

  if (x == frame_pointer_rtx || x == stack_pointer_rtx)
    {
      rtx y1;

      if (CONSTANT_P (y))
	return 0;

      if (GET_CODE (y) == PLUS
	  && canon_rtx (XEXP (y, 0)) == x
	  && (y1 = canon_rtx (XEXP (y, 1)))
	  && GET_CODE (y1) == CONST_INT)
	{
	  c += INTVAL (y1);
	  return (xsize == 0 || ysize == 0
		  || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
	}

      if (GET_CODE (y) == PLUS
	  && (y1 = canon_rtx (XEXP (y, 0)))
	  && CONSTANT_P (y1))
	return 0;

      return 1;
    }

  if (GET_CODE (x) == PLUS)
    {
      /* The fact that X is canonicalized means that this
	 PLUS rtx is canonicalized.  */
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (GET_CODE (y) == PLUS)
	{
	  /* The fact that Y is canonicalized means that this
	     PLUS rtx is canonicalized.  */
	  rtx y0 = XEXP (y, 0);
	  rtx y1 = XEXP (y, 1);

	  if (rtx_equal_for_memref_p (x1, y1))
	    return memrefs_conflict_p (xsize, x0, ysize, y0, c);
	  if (rtx_equal_for_memref_p (x0, y0))
	    return memrefs_conflict_p (xsize, x1, ysize, y1, c);
	  if (GET_CODE (x1) == CONST_INT)
	    if (GET_CODE (y1) == CONST_INT)
	      return memrefs_conflict_p (xsize, x0, ysize, y0,
					 c - INTVAL (x1) + INTVAL (y1));
	    else
	      return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
	  else if (GET_CODE (y1) == CONST_INT)
	    return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));

	  /* Handle case where we cannot understand iteration operators,
	     but we notice that the base addresses are distinct objects.  */
	  x = find_symbolic_term (x);
	  if (x == 0)
	    return 1;
	  y = find_symbolic_term (y);
	  if (y == 0)
	    return 1;
	  return rtx_equal_for_memref_p (x, y);
	}
      else if (GET_CODE (x1) == CONST_INT)
	return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
    }
  else if (GET_CODE (y) == PLUS)
    {
      /* The fact that Y is canonicalized means that this
	 PLUS rtx is canonicalized.  */
      rtx y0 = XEXP (y, 0);
      rtx y1 = XEXP (y, 1);

      if (GET_CODE (y1) == CONST_INT)
	return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
      else
	return 1;
    }

  if (GET_CODE (x) == GET_CODE (y))
    switch (GET_CODE (x))
      {
      case MULT:
	{
	  /* Handle cases where we expect the second operands to be the
	     same, and check only whether the first operand would conflict
	     or not.  */
	  rtx x0, y0;
	  rtx x1 = canon_rtx (XEXP (x, 1));
	  rtx y1 = canon_rtx (XEXP (y, 1));
	  if (! rtx_equal_for_memref_p (x1, y1))
	    return 1;
	  x0 = canon_rtx (XEXP (x, 0));
	  y0 = canon_rtx (XEXP (y, 0));
	  if (rtx_equal_for_memref_p (x0, y0))
	    return (xsize == 0 || ysize == 0
		    || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));

	  /* Can't properly adjust our sizes.  */
	  if (GET_CODE (x1) != CONST_INT)
	    return 1;
	  xsize /= INTVAL (x1);
	  ysize /= INTVAL (x1);
	  c /= INTVAL (x1);
	  return memrefs_conflict_p (xsize, x0, ysize, y0, c);
	}
      }

  if (CONSTANT_P (x))
    {
      if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
	{
	  c += (INTVAL (y) - INTVAL (x));
	  return (xsize == 0 || ysize == 0
		  || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
	}

      if (GET_CODE (x) == CONST)
	{
	  if (GET_CODE (y) == CONST)
	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
				       ysize, canon_rtx (XEXP (y, 0)), c);
	  else
	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
				       ysize, y, c);
	}
      if (GET_CODE (y) == CONST)
	return memrefs_conflict_p (xsize, x, ysize,
				   canon_rtx (XEXP (y, 0)), c);

      if (CONSTANT_P (y))
	return (rtx_equal_for_memref_p (x, y)
		&& (xsize == 0 || ysize == 0
		    || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)));

      return 1;
    }
  return 1;
}

/* Functions to compute memory dependencies.

   Since we process the insns in execution order, we can build tables
   to keep track of what registers are fixed (and not aliased), what registers
   are varying in known ways, and what registers are varying in unknown
   ways.

   If both memory references are volatile, then there must always be a
   dependence between the two references, since their order can not be
   changed.  A volatile and non-volatile reference can be interchanged
   though.

   A MEM_IN_STRUCT reference at a varying address can never conflict with a
   non-MEM_IN_STRUCT reference at a fixed address.  */

/* Read dependence: X is read after read in MEM takes place.  There can
   only be a dependence here if both reads are volatile.  */

int
read_dependence (mem, x)
     rtx mem;
     rtx x;
{
  return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
}

/* True dependence: X is read after store in MEM takes place.  */

int
true_dependence (mem, x)
     rtx mem;
     rtx x;
{
  if (RTX_UNCHANGING_P (x))
    return 0;

  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
	  || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
				  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
	      && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
		    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
	      && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
		    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}

/* Anti dependence: X is written after read in MEM takes place.  */

int
anti_dependence (mem, x)
     rtx mem;
     rtx x;
{
  if (RTX_UNCHANGING_P (mem))
    return 0;

  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
	  || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
				  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
	      && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
		    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
	      && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
		    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}

/* Output dependence: X is written after store in MEM takes place.  */

int
output_dependence (mem, x)
     rtx mem;
     rtx x;
{
  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
	  || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
				  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
	      && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
		    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
	      && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
		    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}
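
/* Example of the three interesting kinds (added for exposition): in

	insn A:  mem[p] = r1
	insn B:  r2 = mem[p]
	insn C:  mem[p] = r3

   B truly depends on A (read after write), C anti-depends on B (write
   after read), and C output-depends on A (write after write); none of
   the three pairs may be reordered.  */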
\f
#ifndef INSN_SCHEDULING
void schedule_insns () {}
#else
#ifndef __GNUC__
#define __inline
#endif

/* Computation of memory dependencies.  */

/* The *_insns and *_mems are paired lists.  Each pending memory operation
   will have a pointer to the MEM rtx on one list and a pointer to the
   containing insn on the other list in the same place in the list.  */

/* We can't use add_dependence like the old code did, because a single insn
   may have multiple memory accesses, and hence needs to be on the list
   once for each memory access.  Add_dependence won't let you add an insn
   to a list more than once.  */

/* An INSN_LIST containing all insns with pending read operations.  */
static rtx pending_read_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
static rtx pending_read_mems;

/* An INSN_LIST containing all insns with pending write operations.  */
static rtx pending_write_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
static rtx pending_write_mems;

/* Indicates the combined length of the two pending lists.  We must prevent
   these lists from ever growing too large since the number of dependencies
   produced is at least O(N*N), and execution time is at least O(4*N*N), as
   a function of the length of these pending lists.  */

static int pending_lists_length;

/* An INSN_LIST containing all INSN_LISTs allocated but currently unused.  */

static rtx unused_insn_list;

/* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused.  */

static rtx unused_expr_list;

/* The last insn upon which all memory references must depend.
   This is an insn which flushed the pending lists, creating a dependency
   between it and all previously pending memory references.  This creates
   a barrier (or a checkpoint) which no memory reference is allowed to cross.

   This includes all non-constant CALL_INSNs.  When we do interprocedural
   alias analysis, this restriction can be relaxed.
   This may also be an INSN that writes memory if the pending lists grow
   too large.  */

static rtx last_pending_memory_flush;

/* The last function call we have seen.  All hard regs, and, of course,
   the last function call, must depend on this.  */

static rtx last_function_call;

/* The LOG_LINKS field of this is a list of insns which use a pseudo register
   that does not already cross a call.  We create dependencies between each
   of those insns and the next call insn, to ensure that they won't cross a
   call after scheduling is done.  */

static rtx sched_before_next_call;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;

/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
\f
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

__inline static rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
	return list;
      list = XEXP (list, 1);
    }
  return 0;
}

/* Compute cost of executing INSN.  This is the number of virtual
   cycles taken between instruction issue and instruction results.  */

__inline static int
insn_cost (insn)
     rtx insn;
{
  register int cost;

  recog_memoized (insn);

  /* A USE insn, or something else we don't need to understand.
     We can't pass these directly to result_ready_cost because it will trigger
     a fatal error for unrecognizable insns.  */
  if (INSN_CODE (insn) < 0)
    return 1;
  else
    {
      cost = result_ready_cost (insn);

      if (cost < 1)
	cost = 1;

      return cost;
    }
}

/* Compute the priority number for INSN.  */

static int
priority (insn)
     rtx insn;
{
  if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
    {
      int prev_priority;
      int max_priority;
      int this_priority = INSN_PRIORITY (insn);
      rtx prev;

      if (this_priority > 0)
	return this_priority;

      max_priority = 1;

      /* Nonzero if these insns must be scheduled together.  */
      if (SCHED_GROUP_P (insn))
	{
	  prev = insn;
	  while (SCHED_GROUP_P (prev))
	    {
	      prev = PREV_INSN (prev);
	      INSN_REF_COUNT (prev) += 1;
	    }
	}

      for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
	{
	  rtx x = XEXP (prev, 0);

	  /* A dependence pointing to a note is always obsolete, because
	     sched_analyze_insn will have created any necessary new dependences
	     which replace it.  Notes can be created when instructions are
	     deleted by insn splitting, or by register allocation.  */
	  if (GET_CODE (x) == NOTE)
	    {
	      remove_dependence (insn, x);
	      continue;
	    }

	  /* This priority calculation was chosen because it results in the
	     least instruction movement, and does not hurt the performance
	     of the resulting code compared to the old algorithm.
	     This makes the sched algorithm more stable, which results
	     in better code, because there is less register pressure,
	     cross jumping is more likely to work, and debugging is easier.

	     When all instructions have a latency of 1, there is no need to
	     move any instructions.  Subtracting one here ensures that in such
	     cases all instructions will end up with a priority of one, and
	     hence no scheduling will be done.

	     The original code did not subtract the one, and added the
	     insn_cost of the current instruction to its priority (e.g.
	     move the insn_cost call down to the end).  */

	  if (REG_NOTE_KIND (prev) == 0)
	    /* Data dependence.  */
	    prev_priority = priority (x) + insn_cost (x) - 1;
	  else
	    /* Anti or output dependence.  Don't add the latency of this
	       insn's result, because it isn't being used.  */
	    prev_priority = priority (x);

	  if (prev_priority > max_priority)
	    max_priority = prev_priority;
	  INSN_REF_COUNT (x) += 1;
	}

      INSN_PRIORITY (insn) = max_priority;
      return INSN_PRIORITY (insn);
    }
  return 0;
}
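
/* Worked example of the recursion above (the costs are hypothetical):
   for the chain  i1: r1 = mem (cost 3),  i2: r2 = r1 + 1 (cost 1),
   i3: mem' = r2 (cost 1), we get priority (i1) = 1, priority (i2) =
   priority (i1) + 3 - 1 = 3, and priority (i3) = 3 + 1 - 1 = 3.  When
   every cost is 1, all three priorities collapse to 1 and, as the
   comment above explains, no reordering is done.  */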
\f
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
   them to the unused_*_list variables, so that they can be reused.  */

static void
free_pending_lists ()
{
  register rtx link, prev_link;

  if (pending_read_insns)
    {
      prev_link = pending_read_insns;
      link = XEXP (prev_link, 1);

      while (link)
	{
	  prev_link = link;
	  link = XEXP (link, 1);
	}

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_read_insns;
      pending_read_insns = 0;
    }

  if (pending_write_insns)
    {
      prev_link = pending_write_insns;
      link = XEXP (prev_link, 1);

      while (link)
	{
	  prev_link = link;
	  link = XEXP (link, 1);
	}

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_write_insns;
      pending_write_insns = 0;
    }

  if (pending_read_mems)
    {
      prev_link = pending_read_mems;
      link = XEXP (prev_link, 1);

      while (link)
	{
	  prev_link = link;
	  link = XEXP (link, 1);
	}

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_read_mems;
      pending_read_mems = 0;
    }

  if (pending_write_mems)
    {
      prev_link = pending_write_mems;
      link = XEXP (prev_link, 1);

      while (link)
	{
	  prev_link = link;
	  link = XEXP (link, 1);
	}

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_write_mems;
      pending_write_mems = 0;
    }
}

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (insn_list, mem_list, insn, mem)
     rtx *insn_list, *mem_list, insn, mem;
{
  register rtx link;

  if (unused_insn_list)
    {
      link = unused_insn_list;
      unused_insn_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (INSN_LIST);
  XEXP (link, 0) = insn;
  XEXP (link, 1) = *insn_list;
  *insn_list = link;

  if (unused_expr_list)
    {
      link = unused_expr_list;
      unused_expr_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (EXPR_LIST);
  XEXP (link, 0) = mem;
  XEXP (link, 1) = *mem_list;
  *mem_list = link;

  pending_lists_length++;
}
\f
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  */

static void
flush_pending_lists (insn)
     rtx insn;
{
  rtx link;

  while (pending_read_insns)
    {
      add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);

      link = pending_read_insns;
      pending_read_insns = XEXP (pending_read_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_read_mems;
      pending_read_mems = XEXP (pending_read_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  while (pending_write_insns)
    {
      add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);

      link = pending_write_insns;
      pending_write_insns = XEXP (pending_write_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_write_mems;
      pending_write_mems = XEXP (pending_write_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  pending_lists_length = 0;

  if (last_pending_memory_flush)
    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

  last_pending_memory_flush = insn;
}
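
/* Note added for exposition: because each flush also depends on the
   previous last_pending_memory_flush, the flush insns form a chain;
   transitivity through that chain orders every memory reference with
   respect to all earlier flushes without needing explicit links.  */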

/* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
   by the write to the destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (x, insn)
     rtx x;
     rtx insn;
{
  register int regno;
  register rtx dest = SET_DEST (x);

  if (dest == 0)
    return;

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (XEXP (dest, 1), insn);
	  sched_analyze_2 (XEXP (dest, 2), insn);
	}
      dest = SUBREG_REG (dest);
    }

  if (GET_CODE (dest) == REG)
    {
      register int offset, bit, i;

      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
	 If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
	  while (--i >= 0)
	    {
	      rtx u;

	      for (u = reg_last_uses[regno+i]; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	      reg_last_uses[regno + i] = 0;
	      if (reg_last_sets[regno + i])
		add_dependence (insn, reg_last_sets[regno + i],
				REG_DEP_OUTPUT);
	      reg_last_sets[regno + i] = insn;
	      if ((call_used_regs[regno + i] || global_regs[regno + i])
		  && last_function_call)
		/* Function calls clobber all call_used regs.  */
		add_dependence (insn, last_function_call, REG_DEP_ANTI);
	    }
	}
      else
	{
	  rtx u;

	  for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	  reg_last_uses[regno] = 0;
	  if (reg_last_sets[regno])
	    add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
	  reg_last_sets[regno] = insn;

	  /* Don't let it cross a call after scheduling if it doesn't
	     already cross one.  */
	  if (reg_n_calls_crossed[regno] == 0 && last_function_call)
	    add_dependence (insn, last_function_call, REG_DEP_ANTI);
	}
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */

      if (pending_lists_length > 32)
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  The number 32 was chosen because it
	     seems like a reasonable number.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the number 32.  */
	  flush_pending_lists (insn);
	}
      else
	{
	  rtx pending, pending_mem;

	  pending = pending_read_insns;
	  pending_mem = pending_read_mems;
	  while (pending)
	    {
	      /* If a dependency already exists, don't create a new one.  */
	      if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
		if (anti_dependence (XEXP (pending_mem, 0), dest))
		  add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = pending_write_insns;
	  pending_mem = pending_write_mems;
	  while (pending)
	    {
	      /* If a dependency already exists, don't create a new one.  */
	      if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
		if (output_dependence (XEXP (pending_mem, 0), dest))
		  add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  if (last_pending_memory_flush)
	    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

	  add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
				   insn, dest);
	}
      sched_analyze_2 (XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (SET_SRC (x), insn);
}

/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (x, insn)
     rtx x;
     rtx insn;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
	 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
	 this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      {
	rtx link, prev;

	/* There may be a note before this insn now, but all notes will
	   be removed before we actually try to schedule the insns, so
	   it won't cause a problem later.  We must avoid it here though.  */

	/* User of CC0 depends on immediately preceding insn.  */
	SCHED_GROUP_P (insn) = 1;

	/* Make a copy of all dependencies on the immediately previous insn,
	   and add to this insn.  This is so that all the dependencies will
	   apply to the group.  */

	prev = PREV_INSN (insn);
	while (GET_CODE (prev) == NOTE)
	  prev = PREV_INSN (prev);

	for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
	  add_dependence (insn, XEXP (link, 0), GET_MODE (link));

	return;
      }
#endif

    case REG:
      {
	int regno = REGNO (x);
	if (regno < FIRST_PSEUDO_REGISTER)
	  {
	    int i;

	    i = HARD_REGNO_NREGS (regno, GET_MODE (x));
	    while (--i >= 0)
	      {
		reg_last_uses[regno + i]
		  = gen_rtx (INSN_LIST, VOIDmode,
			     insn, reg_last_uses[regno + i]);
		if (reg_last_sets[regno + i])
		  add_dependence (insn, reg_last_sets[regno + i], 0);
		if ((call_used_regs[regno + i] || global_regs[regno + i])
		    && last_function_call)
		  /* Function calls clobber all call_used regs.  */
		  add_dependence (insn, last_function_call, REG_DEP_ANTI);
	      }
	  }
	else
	  {
	    reg_last_uses[regno]
	      = gen_rtx (INSN_LIST, VOIDmode, insn, reg_last_uses[regno]);
	    if (reg_last_sets[regno])
	      add_dependence (insn, reg_last_sets[regno], 0);

	    /* If the register does not already cross any calls, then add this
	       insn to the sched_before_next_call list so that it will still
	       not cross calls after scheduling.  */
	    if (reg_n_calls_crossed[regno] == 0)
	      add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
	  }
	return;
      }

    case MEM:
      {
	/* Reading memory.  */

	/* Don't create a dependence for memory references which are known to
	   be unchanging, such as constant pool accesses.  These will never
	   conflict with any other memory access.  */
	if (RTX_UNCHANGING_P (x) == 0)
	  {
	    rtx pending, pending_mem;

	    pending = pending_read_insns;
	    pending_mem = pending_read_mems;
	    while (pending)
	      {
		/* If a dependency already exists, don't create a new one.  */
		if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
		  if (read_dependence (XEXP (pending_mem, 0), x))
		    add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

		pending = XEXP (pending, 1);
		pending_mem = XEXP (pending_mem, 1);
	      }

	    pending = pending_write_insns;
	    pending_mem = pending_write_mems;
	    while (pending)
	      {
		/* If a dependency already exists, don't create a new one.  */
		if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
		  if (true_dependence (XEXP (pending_mem, 0), x))
		    add_dependence (insn, XEXP (pending, 0), 0);

		pending = XEXP (pending, 1);
		pending_mem = XEXP (pending_mem, 1);
	      }
	    if (last_pending_memory_flush)
	      add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

	    /* Always add these dependencies to pending_reads, since
	       this insn may be followed by a write.  */
	    add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
				     insn, x);
	  }
	/* Take advantage of tail recursion here.  */
	sched_analyze_2 (XEXP (x, 0), insn);
	return;
      }

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
      {
	rtx u;

	/* Traditional and volatile asm instructions must be considered to use
	   and clobber all hard registers and all of memory.  So must
	   UNSPEC_VOLATILE operations.  */
	if ((code == ASM_OPERANDS && MEM_VOLATILE_P (x)) || code == ASM_INPUT
	    || code == UNSPEC_VOLATILE)
	  {
	    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	      {
		for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
		  if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
		    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
		reg_last_uses[i] = 0;
		if (reg_last_sets[i]
		    && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
		  add_dependence (insn, reg_last_sets[i], 0);
		reg_last_sets[i] = insn;
	      }

	    flush_pending_lists (insn);
	  }

	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
	   We can not just fall through here since then we would be confused
	   by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
	   traditional asms unlike their normal usage.  */

	if (code == ASM_OPERANDS)
	  {
	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
	      sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
	    return;
	  }
	break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These read and modify the result; just consider them writes.  */
      sched_analyze_1 (x, insn);
      return;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	sched_analyze_2 (XEXP (x, i), insn);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  sched_analyze_2 (XVECEXP (x, i, j), insn);
    }
}

/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (x, insn)
     rtx x, insn;
{
  register RTX_CODE code = GET_CODE (x);
  rtx link;

  if (code == SET || code == CLOBBER)
    sched_analyze_1 (x, insn);
  else if (code == PARALLEL)
    {
      register int i;
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	{
	  code = GET_CODE (XVECEXP (x, 0, i));
	  if (code == SET || code == CLOBBER)
	    sched_analyze_1 (XVECEXP (x, 0, i), insn);
	  else
	    sched_analyze_2 (XVECEXP (x, 0, i), insn);
	}
    }
  else
    sched_analyze_2 (x, insn);

  /* Handle function calls.  */
  if (GET_CODE (insn) == CALL_INSN)
    {
      rtx dep_insn;
      rtx prev_dep_insn;

      /* When scheduling instructions, we make sure calls don't lose their
	 accompanying USE insns by depending them one on another in order.  */

      prev_dep_insn = insn;
      dep_insn = PREV_INSN (insn);
      while (GET_CODE (dep_insn) == INSN
	     && GET_CODE (PATTERN (dep_insn)) == USE)
	{
	  SCHED_GROUP_P (prev_dep_insn) = 1;

	  /* Make a copy of all dependencies on dep_insn, and add to insn.
	     This is so that all of the dependencies will apply to the
	     group.  */

	  for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
	    add_dependence (insn, XEXP (link, 0), GET_MODE (link));

	  prev_dep_insn = dep_insn;
	  dep_insn = PREV_INSN (dep_insn);
	}
    }
}

/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

static int
sched_analyze (head, tail)
     rtx head, tail;
{
  register rtx insn;
  register int n_insns = 0;
  register rtx u;
  register int luid = 0;

  for (insn = head; ; insn = NEXT_INSN (insn))
    {
      INSN_LUID (insn) = luid++;

      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
	{
	  sched_analyze_insn (PATTERN (insn), insn);
	  n_insns += 1;
	}
      else if (GET_CODE (insn) == CALL_INSN)
	{
	  rtx dest = 0;
	  rtx x;
	  register int i;

	  /* Any instruction using a hard register which may get clobbered
	     by a call needs to be marked as dependent on this call.
	     This prevents a use of a hard return reg from being moved
	     past a void call (i.e. it does not explicitly set the hard
	     return reg).  */

	  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	    if (call_used_regs[i] || global_regs[i])
	      {
		for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
		  if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
		    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
		reg_last_uses[i] = 0;
		if (reg_last_sets[i]
		    && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
		  add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
		reg_last_sets[i] = insn;
		/* Insn, being a CALL_INSN, magically depends on
		   `last_function_call' already.  */
	      }

	  /* For each insn which shouldn't cross a call, add a dependence
	     between that insn and this call insn.  */
	  x = LOG_LINKS (sched_before_next_call);
	  while (x)
	    {
	      add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
	      x = XEXP (x, 1);
	    }
	  LOG_LINKS (sched_before_next_call) = 0;

	  sched_analyze_insn (PATTERN (insn), insn);

	  /* We don't need to flush memory for a function call which does
	     not involve memory.  */
	  if (! CONST_CALL_P (insn))
	    {
	      /* In the absence of interprocedural alias analysis,
		 we must flush all pending reads and writes, and
		 start new dependencies starting from here.  */
	      flush_pending_lists (insn);
	    }

	  /* Depend this function call (actually, the user of this
	     function call) on all hard register clobberage.  */
	  last_function_call = insn;
	  n_insns += 1;
	}

      if (insn == tail)
	return n_insns;
    }
}
\f
/* Called when we see a set of a register.  If death is true, then we are
   scanning backwards.  Mark that register as unborn.  If nobody says
   otherwise, that is how things will remain.  If death is false, then we
   are scanning forwards.  Mark that register as being born.  */

static void
sched_note_set (b, x, death)
     int b;
     rtx x;
     int death;
{
  register int regno, j;
  register rtx reg = SET_DEST (x);
  int subreg_p = 0;

  if (reg == 0)
    return;

  while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
	 || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
    {
      /* Must treat modification of just one hardware register of a multi-reg
	 value or just a byte field of a register exactly the same way that
	 mark_set_1 in flow.c does.  */
      if (GET_CODE (reg) == ZERO_EXTRACT
	  || GET_CODE (reg) == SIGN_EXTRACT
	  || (GET_CODE (reg) == SUBREG
	      && REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg)))
	subreg_p = 1;

      reg = SUBREG_REG (reg);
    }

  if (GET_CODE (reg) != REG)
    return;

  /* Global registers are always live, so the code below does not apply
     to them.  */

  regno = REGNO (reg);
  if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
    {
      register int offset = regno / REGSET_ELT_BITS;
      register int bit = 1 << (regno % REGSET_ELT_BITS);

      if (death)
	{
	  /* If we only set part of the register, then this set does not
	     kill it.  */
	  if (subreg_p)
	    return;

	  /* Try killing this register.  */
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
	      while (--j >= 0)
		{
		  offset = (regno + j) / REGSET_ELT_BITS;
		  bit = 1 << ((regno + j) % REGSET_ELT_BITS);

		  bb_live_regs[offset] &= ~bit;
		  bb_dead_regs[offset] |= bit;
		}
	    }
	  else
	    {
	      bb_live_regs[offset] &= ~bit;
	      bb_dead_regs[offset] |= bit;
	    }
	}
      else
	{
	  /* Make the register live again.  */
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
	      while (--j >= 0)
		{
		  offset = (regno + j) / REGSET_ELT_BITS;
		  bit = 1 << ((regno + j) % REGSET_ELT_BITS);

		  bb_live_regs[offset] |= bit;
		  bb_dead_regs[offset] &= ~bit;
		}
	    }
	  else
	    {
	      bb_live_regs[offset] |= bit;
	      bb_dead_regs[offset] &= ~bit;
	    }
	}
    }
}
\f
/* Macros and functions for keeping the priority queue sorted, and
   dealing with queueing and unqueueing of instructions.  */

#define SCHED_SORT(READY, NEW_READY, OLD_READY) \
  do { if ((NEW_READY) - (OLD_READY) == 1) \
	 swap_sort (READY, NEW_READY); \
       else if ((NEW_READY) - (OLD_READY) > 1) \
	 qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); } \
  while (0)
1629
1630 /* Returns a positive value if y is preferred; returns a negative value if
1631 x is preferred. Should never return 0, since that will make the sort
1632 unstable. */
1633
1634 static int
1635 rank_for_schedule (x, y)
1636 rtx *x, *y;
1637 {
1638 rtx tmp = *y;
1639 rtx tmp2 = *x;
1640 rtx tmp_dep, tmp2_dep;
1641 int tmp_class, tmp2_class;
1642 int value;
1643
1644 /* Choose the instruction with the highest priority, if different. */
1645 if (value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2))
1646 return value;
1647
1648 if (last_scheduled_insn)
1649 {
1650 /* Classify the instructions into three classes:
1651 1) Data dependent on last schedule insn.
1652 2) Anti/Output dependent on last scheduled insn.
1653 3) Independent of last scheduled insn, or has latency of one.
1654 Choose the insn from the highest numbered class if different. */
1655 tmp_dep = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
1656 if (tmp_dep == 0 || insn_cost (tmp) == 1)
1657 tmp_class = 3;
1658 else if (REG_NOTE_KIND (tmp_dep) == 0)
1659 tmp_class = 1;
1660 else
1661 tmp_class = 2;
1662
1663 tmp2_dep = find_insn_list (tmp2, LOG_LINKS (last_scheduled_insn));
1664 if (tmp2_dep == 0 || insn_cost (tmp2) == 1)
1665 tmp2_class = 3;
1666 else if (REG_NOTE_KIND (tmp2_dep) == 0)
1667 tmp2_class = 1;
1668 else
1669 tmp2_class = 2;
1670
1671 if (value = tmp_class - tmp2_class)
1672 return value;
1673 }
1674
1675 /* If insns are equally good, sort by INSN_LUID (original insn order),
1676 so that we make the sort stable. This minimizes instruction movement,
1677 thus minimizing sched's effect on debugging and cross-jumping. */
1678 return INSN_LUID (tmp) - INSN_LUID (tmp2);
1679 }
1680
1681 /* Resort the array A in which only element at index N may be out of order. */
1682
1683 __inline static void
1684 swap_sort (a, n)
1685 rtx *a;
1686 int n;
1687 {
1688 rtx insn = a[n-1];
1689 int i = n-2;
1690
1691 while (i >= 0 && rank_for_schedule (a+i, &insn) >= 0)
1692 {
1693 a[i+1] = a[i];
1694 i -= 1;
1695 }
1696 a[i+1] = insn;
1697 }
1698
1699 static int max_priority;
1700
1701 /* Add INSN to the insn queue so that it fires at least N_CYCLES
1702 before the currently executing insn. */
1703
1704 __inline static void
1705 queue_insn (insn, n_cycles)
1706 rtx insn;
1707 int n_cycles;
1708 {
1709 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1710 NEXT_INSN (insn) = insn_queue[next_q];
1711 insn_queue[next_q] = insn;
1712 q_size += 1;
1713 }
1714
1715 /* Return nonzero if PAT is the pattern of an insn which makes a
1716 register live. */
1717
1718 __inline static int
1719 birthing_insn_p (pat)
1720 rtx pat;
1721 {
1722 int j;
1723
1724 if (reload_completed == 1)
1725 return 0;
1726
1727 if (GET_CODE (pat) == SET
1728 && GET_CODE (SET_DEST (pat)) == REG)
1729 {
1730 rtx dest = SET_DEST (pat);
1731 int i = REGNO (dest);
1732 int offset = i / REGSET_ELT_BITS;
1733 int bit = 1 << (i % REGSET_ELT_BITS);
1734
1735 /* It would be more accurate to use refers_to_regno_p or
1736 reg_mentioned_p to determine when the dest is not live before this
1737 insn. */
1738
1739 if (bb_live_regs[offset] & bit)
1740 return (reg_n_sets[i] == 1);
1741
1742 return 0;
1743 }
1744 if (GET_CODE (pat) == PARALLEL)
1745 {
1746 for (j = 0; j < XVECLEN (pat, 0); j++)
1747 if (birthing_insn_p (XVECEXP (pat, 0, j)))
1748 return 1;
1749 }
1750 return 0;
1751 }
1752
1753 /* If PREV is an insn which is immediately ready to execute, return 1,
1754 otherwise return 0. We may adjust its priority if that will help shorten
1755 register lifetimes. */
1756
1757 static int
1758 launch_link (prev)
1759 rtx prev;
1760 {
1761 rtx pat = PATTERN (prev);
1762 rtx note;
1763 /* MAX of (a) number of cycles needed by prev
1764 (b) number of cycles before needed resources are free. */
1765 int n_cycles = insn_cost (prev);
1766 int n_deaths = 0;
1767
1768 /* Trying to shorten register lives after reload has completed
1769 is useless and wrong. It gives inaccurate schedules. */
1770 if (reload_completed == 0)
1771 {
1772 for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
1773 if (REG_NOTE_KIND (note) == REG_DEAD)
1774 n_deaths += 1;
1775
1776 /* Defer scheduling insns which kill registers, since that
1777 shortens register lives. Prefer scheduling insns which
1778 make registers live for the same reason. */
1779 switch (n_deaths)
1780 {
1781 default:
1782 INSN_PRIORITY (prev) >>= 3;
1783 break;
1784 case 3:
1785 INSN_PRIORITY (prev) >>= 2;
1786 break;
1787 case 2:
1788 case 1:
1789 INSN_PRIORITY (prev) >>= 1;
1790 break;
1791 case 0:
1792 if (birthing_insn_p (pat))
1793 {
1794 int max = max_priority;
1795
1796 if (max > INSN_PRIORITY (prev))
1797 INSN_PRIORITY (prev) = max;
1798 }
1799 break;
1800 }
1801 }
1802
1803 if (n_cycles <= 1)
1804 return 1;
1805 queue_insn (prev, n_cycles);
1806 return 0;
1807 }
1808
1809 /* INSN is the "currently executing insn". Launch each insn which was
1810 waiting on INSN (in the backwards dataflow sense). READY is a
1811 vector of insns which are ready to fire. N_READY is the number of
1812 elements in READY. */
1813
1814 static int
1815 launch_links (insn, ready, n_ready)
1816 rtx insn;
1817 rtx *ready;
1818 int n_ready;
1819 {
1820 rtx link;
1821 int new_ready = n_ready;
1822
1823 if (LOG_LINKS (insn) == 0)
1824 return n_ready;
1825
1826 /* This is used by the function launch_link above. */
1827 if (n_ready > 0)
1828 max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
1829 else
1830 max_priority = INSN_PRIORITY (insn);
1831
1832 for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
1833 {
1834 rtx prev = XEXP (link, 0);
1835
1836 if ((INSN_REF_COUNT (prev) -= 1) == 0 && launch_link (prev))
1837 ready[new_ready++] = prev;
1838 }
1839
1840 return new_ready;
1841 }
1842
1843 /* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
1844 dead_notes list. */
1845
1846 static void
1847 create_reg_dead_note (reg, insn)
1848 rtx reg, insn;
1849 {
1850 rtx link = dead_notes;
1851
1852 if (link == 0)
1853 /* In theory, we should not end up with more REG_DEAD reg notes than we
1854 started with. In practice, this can occur as the result of bugs in
1855 flow, combine and/or sched. */
1856 {
1857 #if 1
1858 abort ();
1859 #else
1860 link = rtx_alloc (EXPR_LIST);
1861 PUT_REG_NOTE_KIND (link, REG_DEAD);
1862 #endif
1863 }
1864 else
1865 dead_notes = XEXP (dead_notes, 1);
1866
1867 XEXP (link, 0) = reg;
1868 XEXP (link, 1) = REG_NOTES (insn);
1869 REG_NOTES (insn) = link;
1870 }
1871
1872 /* Subroutine of attach_deaths_insn--handles the recursive search
1873 through INSN. If SET_P is true, then X is being modified by the insn. */
1874
1875 static void
1876 attach_deaths (x, insn, set_p)
1877 rtx x;
1878 rtx insn;
1879 int set_p;
1880 {
1881 register int i;
1882 register int j;
1883 register enum rtx_code code;
1884 register char *fmt;
1885
1886 if (x == 0)
1887 return;
1888
1889 code = GET_CODE (x);
1890
1891 switch (code)
1892 {
1893 case CONST_INT:
1894 case CONST_DOUBLE:
1895 case LABEL_REF:
1896 case SYMBOL_REF:
1897 case CONST:
1898 case CODE_LABEL:
1899 case PC:
1900 case CC0:
1901 /* Get rid of the easy cases first. */
1902 return;
1903
1904 case REG:
1905 {
1906 /* If the register dies in this insn, queue that note, and mark
1907 this register as needing to die. */
1908 /* This code is very similar to mark_used_1 (if set_p is false)
1909 and mark_set_1 (if set_p is true) in flow.c. */
1910
1911 register int regno = REGNO (x);
1912 register int offset = regno / REGSET_ELT_BITS;
1913 register int bit = 1 << (regno % REGSET_ELT_BITS);
1914 int all_needed = (old_live_regs[offset] & bit);
1915 int some_needed = (old_live_regs[offset] & bit);
1916
1917 if (set_p)
1918 return;
1919
1920 if (regno < FIRST_PSEUDO_REGISTER)
1921 {
1922 int n;
1923
1924 n = HARD_REGNO_NREGS (regno, GET_MODE (x));
1925 while (--n > 0)
1926 {
1927 some_needed |= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
1928 & 1 << ((regno + n) % REGSET_ELT_BITS));
1929 all_needed &= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
1930 & 1 << ((regno + n) % REGSET_ELT_BITS));
1931 }
1932 }
1933
1934 /* If it wasn't live before we started, then add a REG_DEAD note.
1935 We must check the previous lifetime info not the current info,
1936 because we may have to execute this code several times, e.g.
1937 once for a clobber (which doesn't add a note) and later
1938 for a use (which does add a note).
1939
1940 Always make the register live. We must do this even if it was
1941 live before, because this may be an insn which sets and uses
1942 the same register, in which case the register has already been
1943 killed, so we must make it live again.
1944
1945 Global registers are always live, and should never have a REG_DEAD
1946 note added for them, so none of the code below applies to them. */
1947
1948 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
1949 {
1950 /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
1951 STACK_POINTER_REGNUM, since these are always considered to be
1952 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
1953 if (regno != FRAME_POINTER_REGNUM
1954 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
1955 && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
1956 #endif
1957 && regno != STACK_POINTER_REGNUM)
1958 {
1959 if (! all_needed && ! dead_or_set_p (insn, x))
1960 {
1961 /* If none of the words in X is needed, make a REG_DEAD
1962 note. Otherwise, we must make partial REG_DEAD
1963 notes. */
1964 if (! some_needed)
1965 create_reg_dead_note (x, insn);
1966 else
1967 {
1968 int i;
1969
1970 /* Don't make a REG_DEAD note for a part of a
1971 register that is set in the insn. */
1972 for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
1973 i >= 0; i--)
1974 if ((old_live_regs[(regno + i) / REGSET_ELT_BITS]
1975 & 1 << ((regno + i) % REGSET_ELT_BITS)) == 0
1976 && ! dead_or_set_regno_p (insn, regno + i))
1977 create_reg_dead_note (gen_rtx (REG, word_mode,
1978 regno + i),
1979 insn);
1980 }
1981 }
1982 }
1983
1984 if (regno < FIRST_PSEUDO_REGISTER)
1985 {
1986 int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
1987 while (--j >= 0)
1988 {
1989 offset = (regno + j) / REGSET_ELT_BITS;
1990 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
1991
1992 bb_dead_regs[offset] &= ~bit;
1993 bb_live_regs[offset] |= bit;
1994 }
1995 }
1996 else
1997 {
1998 bb_dead_regs[offset] &= ~bit;
1999 bb_live_regs[offset] |= bit;
2000 }
2001 }
2002 return;
2003 }
2004
2005 case MEM:
2006 /* Handle tail-recursive case. */
2007 attach_deaths (XEXP (x, 0), insn, 0);
2008 return;
2009
2010 case SUBREG:
2011 case STRICT_LOW_PART:
2012 /* These two cases preserve the value of SET_P, so handle them
2013 separately. */
2014 attach_deaths (XEXP (x, 0), insn, set_p);
2015 return;
2016
2017 case ZERO_EXTRACT:
2018 case SIGN_EXTRACT:
2019 /* This case preserves the value of SET_P for the first operand, but
2020 clears it for the other two. */
2021 attach_deaths (XEXP (x, 0), insn, set_p);
2022 attach_deaths (XEXP (x, 1), insn, 0);
2023 attach_deaths (XEXP (x, 2), insn, 0);
2024 return;
2025
2026 default:
2027 /* Other cases: walk the insn. */
2028 fmt = GET_RTX_FORMAT (code);
2029 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2030 {
2031 if (fmt[i] == 'e')
2032 attach_deaths (XEXP (x, i), insn, 0);
2033 else if (fmt[i] == 'E')
2034 for (j = 0; j < XVECLEN (x, i); j++)
2035 attach_deaths (XVECEXP (x, i, j), insn, 0);
2036 }
2037 }
2038 }
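
/* As a worked example of the partial-death case above: a use of a DImode
value in hard regs 4-5 where only reg 5 was live before the insn gets a
partial REG_DEAD note for reg 4 alone (some_needed but not all_needed);
both regs are then marked live again in bb_live_regs. */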
2039
2040 /* After INSN has executed, add register death notes for each register
2041 that is dead after INSN. */
2042
2043 static void
2044 attach_deaths_insn (insn)
2045 rtx insn;
2046 {
2047 rtx x = PATTERN (insn);
2048 register RTX_CODE code = GET_CODE (x);
2049
2050 if (code == SET)
2051 {
2052 attach_deaths (SET_SRC (x), insn, 0);
2053
2054 /* A register might die here even if it is the destination, e.g.
2055 it is the target of a volatile read and is otherwise unused.
2056 Hence we must always call attach_deaths for the SET_DEST. */
2057 attach_deaths (SET_DEST (x), insn, 1);
2058 }
2059 else if (code == PARALLEL)
2060 {
2061 register int i;
2062 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2063 {
2064 code = GET_CODE (XVECEXP (x, 0, i));
2065 if (code == SET)
2066 {
2067 attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
2068
2069 attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
2070 }
2071 else if (code == CLOBBER)
2072 attach_deaths (XEXP (XVECEXP (x, 0, i), 0), insn, 1);
2073 else
2074 attach_deaths (XVECEXP (x, 0, i), insn, 0);
2075 }
2076 }
2077 else if (code == CLOBBER)
2078 attach_deaths (XEXP (x, 0), insn, 1);
2079 else
2080 attach_deaths (x, insn, 0);
2081 }
2082
2083 /* Delete notes beginning with INSN and maybe put them in the chain
2084 of notes ended by NOTE_LIST.
2085 Returns the insn following the notes. */
2086
2087 static rtx
2088 unlink_notes (insn, tail)
2089 rtx insn, tail;
2090 {
2091 rtx prev = PREV_INSN (insn);
2092
2093 while (insn != tail && GET_CODE (insn) == NOTE)
2094 {
2095 rtx next = NEXT_INSN (insn);
2096 /* Delete the note from its current position. */
2097 if (prev)
2098 NEXT_INSN (prev) = next;
2099 if (next)
2100 PREV_INSN (next) = prev;
2101
2102 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
2103 /* Record line-number notes so they can be reused. */
2104 LINE_NOTE (insn) = insn;
2105 else
2106 {
2107 /* Insert the note at the end of the notes list. */
2108 PREV_INSN (insn) = note_list;
2109 if (note_list)
2110 NEXT_INSN (note_list) = insn;
2111 note_list = insn;
2112 }
2113
2114 insn = next;
2115 }
2116 return insn;
2117 }
2118
2119 /* Data structure for keeping track of register information
2120 during that register's life. */
2121
2122 struct sometimes
2123 {
2124 short offset; short bit; /* Regset element index and bit number. */
2125 short live_length; short calls_crossed; /* Lifetime stats being accumulated. */
2126 };
2127
2128 /* Constructor for `sometimes' data structure. */
2129
2130 static int
2131 new_sometimes_live (regs_sometimes_live, offset, bit, sometimes_max)
2132 struct sometimes *regs_sometimes_live;
2133 int offset, bit;
2134 int sometimes_max;
2135 {
2136 register struct sometimes *p;
2137 register int regno = offset * REGSET_ELT_BITS + bit;
2139
2140 /* There should never be a register greater than max_regno here. If there
2141 is, it means that a define_split has created a new pseudo reg. This
2142 is not allowed, since there will not be flow info available for any
2143 new register, so catch the error here. */
2144 if (regno >= max_regno)
2145 abort ();
2146
2147 p = &regs_sometimes_live[sometimes_max];
2148 p->offset = offset;
2149 p->bit = bit;
2150 p->live_length = 0;
2151 p->calls_crossed = 0;
2152 sometimes_max++;
2153 return sometimes_max;
2154 }
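
/* For example, assuming REGSET_ELT_BITS is 32, when register 69 first
becomes live it would be recorded with
new_sometimes_live (regs_sometimes_live, 2, 5, sometimes_max),
and its regno is recovered as 2 * REGSET_ELT_BITS + 5 == 69. */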
2155
2156 /* Record the accumulated lifetime info (live length and number of
2157 calls crossed) for every register we are still tracking. */
2158
2159 static void
2160 finish_sometimes_live (regs_sometimes_live, sometimes_max)
2161 struct sometimes *regs_sometimes_live;
2162 int sometimes_max;
2163 {
2164 int i;
2165
2166 for (i = 0; i < sometimes_max; i++)
2167 {
2168 register struct sometimes *p = &regs_sometimes_live[i];
2169 int regno;
2170
2171 regno = p->offset * REGSET_ELT_BITS + p->bit;
2172
2173 sched_reg_live_length[regno] += p->live_length;
2174 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2175 }
2176 }
2177
2178 /* Use modified list scheduling to rearrange insns in basic block
2179 B. FILE, if nonzero, is where we dump interesting output about
2180 this pass. */
2181
2182 static void
2183 schedule_block (b, file)
2184 int b;
2185 FILE *file;
2186 {
2187 rtx insn, last;
2188 rtx last_note = 0;
2189 rtx *ready, link;
2190 int i, j, n_ready = 0, new_ready, n_insns = 0;
2191 int sched_n_insns = 0;
2192 #define NEED_NOTHING 0
2193 #define NEED_HEAD 1
2194 #define NEED_TAIL 2
2195 int new_needs;
2196
2197 /* HEAD and TAIL delimit the region being scheduled. */
2198 rtx head = basic_block_head[b];
2199 rtx tail = basic_block_end[b];
2200 /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
2201 being scheduled. When the insns have been ordered,
2202 these insns delimit where the new insns are to be
2203 spliced back into the insn chain. */
2204 rtx next_tail;
2205 rtx prev_head;
2206
2207 /* Keep life information accurate. */
2208 register struct sometimes *regs_sometimes_live;
2209 int sometimes_max;
2210
2211 if (file)
2212 fprintf (file, ";;\t -- basic block number %d from %d to %d --\n",
2213 b, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
2214
2215 i = max_reg_num ();
2216 reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
2217 bzero (reg_last_uses, i * sizeof (rtx));
2218 reg_last_sets = (rtx *) alloca (i * sizeof (rtx));
2219 bzero (reg_last_sets, i * sizeof (rtx));
2220
2221 /* Remove certain insns at the beginning from scheduling,
2222 by advancing HEAD. */
2223
2224 /* At the start of a function, before reload has run, don't delay getting
2225 parameters from hard registers into pseudo registers. */
2226 if (reload_completed == 0 && b == 0)
2227 {
2228 while (head != tail
2229 && GET_CODE (head) == NOTE
2230 && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
2231 head = NEXT_INSN (head);
2232 while (head != tail
2233 && GET_CODE (head) == INSN
2234 && GET_CODE (PATTERN (head)) == SET)
2235 {
2236 rtx src = SET_SRC (PATTERN (head));
2237 while (GET_CODE (src) == SUBREG
2238 || GET_CODE (src) == SIGN_EXTEND
2239 || GET_CODE (src) == ZERO_EXTEND
2240 || GET_CODE (src) == SIGN_EXTRACT
2241 || GET_CODE (src) == ZERO_EXTRACT)
2242 src = XEXP (src, 0);
2243 if (GET_CODE (src) != REG
2244 || REGNO (src) >= FIRST_PSEUDO_REGISTER)
2245 break;
2246 /* Keep this insn from ever being scheduled. */
2247 INSN_REF_COUNT (head) = 1;
2248 head = NEXT_INSN (head);
2249 }
2250 }
2251
2252 /* Don't include any notes or labels at the beginning of the
2253 basic block, or notes at the ends of basic blocks. */
2254 while (head != tail)
2255 {
2256 if (GET_CODE (head) == NOTE)
2257 head = NEXT_INSN (head);
2258 else if (GET_CODE (tail) == NOTE)
2259 tail = PREV_INSN (tail);
2260 else if (GET_CODE (head) == CODE_LABEL)
2261 head = NEXT_INSN (head);
2262 else break;
2263 }
2264 /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
2265 to schedule this block. */
2266 if (head == tail
2267 && (GET_CODE (head) == NOTE || GET_CODE (head) == CODE_LABEL))
2268 return;
2269
2270 #if 0
2271 /* This short-cut doesn't work. It does not count call insns crossed by
2272 registers in reg_sometimes_live. It does not mark these registers as
2273 dead if they die in this block. It does not mark these registers live
2274 (or create new reg_sometimes_live entries if necessary) if they are born
2275 in this block.
2276
2277 The easy solution is to just always schedule a block. This block only
2278 has one insn, so this won't slow down this pass by much. */
2279
2280 if (head == tail)
2281 return;
2282 #endif
2283
2284 /* Exclude certain insns at the end of the basic block by advancing TAIL. */
2285 /* This isn't correct. Instead of advancing TAIL, we should assign very
2286 high priorities to these insns to guarantee that they get scheduled last.
2287 If these insns are ignored, as is currently done, the register life info
2288 may be incorrectly computed. */
2289 if (GET_CODE (tail) == INSN
2290 && GET_CODE (PATTERN (tail)) == USE
2291 && next_nonnote_insn (tail) == 0)
2292 {
2293 /* Don't try to reorder any USE insns at the end of a function.
2294 They must be last to ensure proper register allocation.
2295 Exclude them all from scheduling. */
2296 do
2297 {
2298 /* If we are down to one USE insn, then there are no insns to
2299 schedule. */
2300 if (head == tail)
2301 return;
2302
2303 tail = prev_nonnote_insn (tail);
2304 }
2305 while (GET_CODE (tail) == INSN
2306 && GET_CODE (PATTERN (tail)) == USE);
2307
2308 #if 0
2309 /* This short-cut does not work. See comment above. */
2310 if (head == tail)
2311 return;
2312 #endif
2313 }
2314 else if (GET_CODE (tail) == JUMP_INSN
2315 && SCHED_GROUP_P (tail) == 0
2316 && GET_CODE (PREV_INSN (tail)) == INSN
2317 && GET_CODE (PATTERN (PREV_INSN (tail))) == USE
2318 && REG_FUNCTION_VALUE_P (XEXP (PATTERN (PREV_INSN (tail)), 0)))
2319 {
2320 /* Don't let the setting of the function's return value register
2321 move from this jump. For the same reason we want to get the
2322 parameters into pseudo registers as quickly as possible, we
2323 want to set the function's return value register as late as
2324 possible. */
2325
2326 /* If this is the only insn in the block, then there is no need to
2327 schedule the block. */
2328 if (head == tail)
2329 return;
2330
2331 tail = PREV_INSN (tail);
2332 if (head == tail)
2333 return;
2334
2335 tail = prev_nonnote_insn (tail);
2336
2337 #if 0
2338 /* This shortcut does not work. See comment above. */
2339 if (head == tail)
2340 return;
2341 #endif
2342 }
2343
2344 #ifdef HAVE_cc0
2345 /* This is probably wrong. Instead of doing this, we should give this insn
2346 a very high priority to guarantee that it gets scheduled last. */
2347 /* We cannot separate an insn that sets the condition code from one that
2348 uses it, so we must leave an insn that sets cc0 where it is. */
2349 if (sets_cc0_p (PATTERN (tail)))
2350 tail = PREV_INSN (tail);
2351 #endif
2352
2353 /* Now HEAD through TAIL are the insns actually to be rearranged;
2354 Let PREV_HEAD and NEXT_TAIL enclose them. */
2355 prev_head = PREV_INSN (head);
2356 next_tail = NEXT_INSN (tail);
2357
2358 /* Initialize basic block data structures. */
2359 dead_notes = 0;
2360 pending_read_insns = 0;
2361 pending_read_mems = 0;
2362 pending_write_insns = 0;
2363 pending_write_mems = 0;
2364 pending_lists_length = 0;
2365 last_pending_memory_flush = 0;
2366 last_function_call = 0;
2367 last_scheduled_insn = 0;
2368
2369 LOG_LINKS (sched_before_next_call) = 0;
2370
2371 n_insns += sched_analyze (head, tail);
2372 if (n_insns == 0)
2373 {
2374 free_pending_lists ();
2375 return;
2376 }
2377
2378 /* Allocate vector to hold insns to be rearranged (except those
2379 insns which are controlled by an insn with SCHED_GROUP_P set).
2380 All these insns lie between HEAD and TAIL as those variables
2381 were finally set up above. */
2382 ready = (rtx *) alloca ((n_insns+1) * sizeof (rtx));
2383
2384 /* TAIL is now the last of the insns to be rearranged.
2385 Put those insns into the READY vector. */
2386 insn = tail;
2387
2388 /* If the last insn is a branch, force it to be the last insn after
2389 scheduling. Also, don't try to reorder calls at the ends of basic
2390 blocks -- this will only lead to worse register allocation. */
2391 if (GET_CODE (tail) == CALL_INSN || GET_CODE (tail) == JUMP_INSN)
2392 {
2393 priority (tail);
2394 ready[n_ready++] = tail;
2395 INSN_PRIORITY (tail) = TAIL_PRIORITY;
2396 INSN_REF_COUNT (tail) = 0;
2397 insn = PREV_INSN (tail);
2398 }
2399
2400 /* Assign priorities to instructions. Also check whether they are
2401 already in priority order; if so, I will be nonnegative. This shortcut
2402 applies only before reload and is currently disabled (see #if 0 below). */
2403 #if 0
2404 i = reload_completed ? DONE_PRIORITY : MAX_PRIORITY;
2405 #endif
2406
2407 for (; insn != prev_head; insn = PREV_INSN (insn))
2408 {
2409 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2410 {
2411 priority (insn);
2412 if (INSN_REF_COUNT (insn) == 0)
2413 ready[n_ready++] = insn;
2414 if (SCHED_GROUP_P (insn))
2415 {
2416 while (SCHED_GROUP_P (insn))
2417 {
2418 insn = PREV_INSN (insn);
2419 while (GET_CODE (insn) == NOTE)
2420 insn = PREV_INSN (insn);
2421 priority (insn);
2422 }
2423 continue;
2424 }
2425 #if 0
2426 if (i < 0)
2427 continue;
2428 if (INSN_PRIORITY (insn) < i)
2429 i = INSN_PRIORITY (insn);
2430 else if (INSN_PRIORITY (insn) > i)
2431 i = DONE_PRIORITY;
2432 #endif
2433 }
2434 }
2435
2436 #if 0
2437 /* This short-cut doesn't work. It does not count call insns crossed by
2438 registers in reg_sometimes_live. It does not mark these registers as
2439 dead if they die in this block. It does not mark these registers live
2440 (or create new reg_sometimes_live entries if necessary) if they are born
2441 in this block.
2442
2443 The easy solution is to just always schedule a block. These blocks tend
2444 to be very short, so this doesn't slow down this pass by much. */
2445
2446 /* If existing order is good, don't bother to reorder. */
2447 if (i != DONE_PRIORITY)
2448 {
2449 if (file)
2450 fprintf (file, ";; already scheduled\n");
2451
2452 if (reload_completed == 0)
2453 {
2454 for (i = 0; i < sometimes_max; i++)
2455 regs_sometimes_live[i].live_length += n_insns;
2456
2457 finish_sometimes_live (regs_sometimes_live, sometimes_max);
2458 }
2459 free_pending_lists ();
2460 return;
2461 }
2462 #endif
2463
2464 /* Scan all the insns to be scheduled, removing NOTE insns
2465 and register death notes.
2466 Line number NOTE insns end up in NOTE_LIST.
2467 Register death notes end up in DEAD_NOTES.
2468
2469 Recreate the register life information for the end of this basic
2470 block. */
2471
2472 if (reload_completed == 0)
2473 {
2474 bcopy (basic_block_live_at_start[b], bb_live_regs, regset_bytes);
2475 bzero (bb_dead_regs, regset_bytes);
2476
2477 if (b == 0)
2478 {
2479 /* This is the first block in the function. There may be insns
2480 before head that we can't schedule. We still need to examine
2481 them though for accurate register lifetime analysis. */
2482
2483 /* We don't want to remove any REG_DEAD notes as the code below
2484 does. */
2485
2486 for (insn = basic_block_head[b]; insn != head;
2487 insn = NEXT_INSN (insn))
2488 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2489 {
2490 /* See if the register gets born here. */
2491 /* We must check for registers being born before we check for
2492 registers dying. It is possible for a register to be born
2493 and die in the same insn, e.g. reading from a volatile
2494 memory location into an otherwise unused register. Such
2495 a register must be marked as dead after this insn. */
2496 if (GET_CODE (PATTERN (insn)) == SET
2497 || GET_CODE (PATTERN (insn)) == CLOBBER)
2498 sched_note_set (b, PATTERN (insn), 0);
2499 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2500 {
2501 int j;
2502 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2503 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2504 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2505 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2506
2507 /* ??? This code is obsolete and should be deleted. It
2508 is harmless though, so we will leave it in for now. */
2509 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2510 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
2511 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2512 }
2513
2514 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2515 {
2516 if ((REG_NOTE_KIND (link) == REG_DEAD
2517 || REG_NOTE_KIND (link) == REG_UNUSED)
2518 /* Verify that the REG_NOTE has a legal value. */
2519 && GET_CODE (XEXP (link, 0)) == REG)
2520 {
2521 register int regno = REGNO (XEXP (link, 0));
2522 register int offset = regno / REGSET_ELT_BITS;
2523 register int bit = 1 << (regno % REGSET_ELT_BITS);
2524
2525 if (regno < FIRST_PSEUDO_REGISTER)
2526 {
2527 int j = HARD_REGNO_NREGS (regno,
2528 GET_MODE (XEXP (link, 0)));
2529 while (--j >= 0)
2530 {
2531 offset = (regno + j) / REGSET_ELT_BITS;
2532 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
2533
2534 bb_live_regs[offset] &= ~bit;
2535 bb_dead_regs[offset] |= bit;
2536 }
2537 }
2538 else
2539 {
2540 bb_live_regs[offset] &= ~bit;
2541 bb_dead_regs[offset] |= bit;
2542 }
2543 }
2544 }
2545 }
2546 }
2547 }
2548
2549 /* If debugging information is being produced, keep track of the line
2550 number notes for each insn. */
2551 if (write_symbols != NO_DEBUG)
2552 {
2553 /* We must use the true line number for the first insn in the block
2554 that was computed and saved at the start of this pass. We can't
2555 use the current line number, because scheduling of the previous
2556 block may have changed the current line number. */
2557 rtx line = line_note_head[b];
2558
2559 for (insn = basic_block_head[b];
2560 insn != next_tail;
2561 insn = NEXT_INSN (insn))
2562 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
2563 line = insn;
2564 else
2565 LINE_NOTE (insn) = line;
2566 }
2567
2568 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2569 {
2570 rtx prev, next, link;
2571
2572 /* Farm out notes. This is needed to keep the debugger from
2573 getting completely deranged. */
2574 if (GET_CODE (insn) == NOTE)
2575 {
2576 prev = insn;
2577 insn = unlink_notes (insn, next_tail);
2578 if (prev == tail)
2579 abort ();
2580 if (prev == head)
2581 abort ();
2582 if (insn == next_tail)
2583 abort ();
2584 }
2585
2586 if (reload_completed == 0
2587 && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2588 {
2589 /* See if the register gets born here. */
2590 /* We must check for registers being born before we check for
2591 registers dying. It is possible for a register to be born and
2592 die in the same insn, e.g. reading from a volatile memory
2593 location into an otherwise unused register. Such a register
2594 must be marked as dead after this insn. */
2595 if (GET_CODE (PATTERN (insn)) == SET
2596 || GET_CODE (PATTERN (insn)) == CLOBBER)
2597 sched_note_set (b, PATTERN (insn), 0);
2598 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2599 {
2600 int j;
2601 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2602 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2603 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2604 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2605
2606 /* ??? This code is obsolete and should be deleted. It
2607 is harmless though, so we will leave it in for now. */
2608 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2609 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
2610 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2611 }
2612
2613 /* Need to know what registers this insn kills. */
2614 for (prev = 0, link = REG_NOTES (insn); link; link = next)
2615 {
2618 next = XEXP (link, 1);
2619 if ((REG_NOTE_KIND (link) == REG_DEAD
2620 || REG_NOTE_KIND (link) == REG_UNUSED)
2621 /* Verify that the REG_NOTE has a legal value. */
2622 && GET_CODE (XEXP (link, 0)) == REG)
2623 {
2624 register int regno = REGNO (XEXP (link, 0));
2625 register int offset = regno / REGSET_ELT_BITS;
2626 register int bit = 1 << (regno % REGSET_ELT_BITS);
2627
2628 /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
2629 alone. */
2630 if (REG_NOTE_KIND (link) == REG_DEAD)
2631 {
2632 if (prev)
2633 XEXP (prev, 1) = next;
2634 else
2635 REG_NOTES (insn) = next;
2636 XEXP (link, 1) = dead_notes;
2637 dead_notes = link;
2638 }
2639 else
2640 prev = link;
2641
2642 if (regno < FIRST_PSEUDO_REGISTER)
2643 {
2644 int j = HARD_REGNO_NREGS (regno,
2645 GET_MODE (XEXP (link, 0)));
2646 while (--j >= 0)
2647 {
2648 offset = (regno + j) / REGSET_ELT_BITS;
2649 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
2650
2651 bb_live_regs[offset] &= ~bit;
2652 bb_dead_regs[offset] |= bit;
2653 }
2654 }
2655 else
2656 {
2657 bb_live_regs[offset] &= ~bit;
2658 bb_dead_regs[offset] |= bit;
2659 }
2660 }
2661 else
2662 prev = link;
2663 }
2664 }
2665 }
2666
2667 if (reload_completed == 0)
2668 {
2669 /* Keep track of register lives. */
2670 old_live_regs = (regset) alloca (regset_bytes);
2671 regs_sometimes_live
2672 = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
2673 sometimes_max = 0;
2674
2675 /* Start with registers live at end. */
2676 for (j = 0; j < regset_size; j++)
2677 {
2678 int live = bb_live_regs[j];
2679 old_live_regs[j] = live;
2680 if (live)
2681 {
2682 register int bit;
2683 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
2684 if (live & (1 << bit))
2685 sometimes_max = new_sometimes_live (regs_sometimes_live, j,
2686 bit, sometimes_max);
2687 }
2688 }
2689 }
2690
2691 SCHED_SORT (ready, n_ready, 1);
2692
2693 if (file)
2694 {
2695 fprintf (file, ";; ready list initially:\n;; ");
2696 for (i = 0; i < n_ready; i++)
2697 fprintf (file, "%d ", INSN_UID (ready[i]));
2698 fprintf (file, "\n\n");
2699
2700 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2701 if (INSN_PRIORITY (insn) > 0)
2702 fprintf (file, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
2703 INSN_UID (insn), INSN_PRIORITY (insn),
2704 INSN_REF_COUNT (insn));
2705 }
2706
2707 /* Now HEAD and TAIL are going to become disconnected
2708 entirely from the insn chain. */
2709 tail = ready[0];
2710
2711 /* Q_SIZE will always be zero here. */
2712 q_ptr = 0;
2713 bzero (insn_queue, sizeof (insn_queue));
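
/* The insn queue is circular: Q_PTR advances one slot per simulated
machine cycle, and queue_insn places an insn needing N cycles N slots
ahead of Q_PTR, so it surfaces exactly when its latency has elapsed. */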
2714
2715 /* Now, perform list scheduling. */
2716
2717 /* Where we start inserting insns is after TAIL. */
2718 last = next_tail;
2719
2720 new_needs = (NEXT_INSN (prev_head) == basic_block_head[b]
2721 ? NEED_HEAD : NEED_NOTHING);
2722 if (PREV_INSN (next_tail) == basic_block_end[b])
2723 new_needs |= NEED_TAIL;
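
/* That is, if no insns were peeled off at the start (or end) of the
block above, then HEAD (or TAIL) was the block boundary, and
basic_block_head (or basic_block_end) must be updated once the
scheduled insns are spliced back in below. */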
2724
2725 new_ready = n_ready;
2726 while (sched_n_insns < n_insns)
2727 {
2728 q_ptr = NEXT_Q (q_ptr);
2729
2730 /* Add all pending insns that can be scheduled without stalls to the
2731 ready list. */
2732 for (insn = insn_queue[q_ptr]; insn; insn = NEXT_INSN (insn))
2733 {
2734 if (file)
2735 fprintf (file, ";; launching %d before %d with no stalls\n",
2736 INSN_UID (insn), INSN_UID (last));
2737 ready[new_ready++] = insn;
2738 q_size -= 1;
2739 }
2740 insn_queue[q_ptr] = 0;
2741
2742 /* If there are no ready insns, stall until one is ready and add all
2743 of the pending insns at that point to the ready list. */
2744 if (new_ready == 0)
2745 {
2746 register int stalls;
2747
2748 for (stalls = 1; stalls < Q_SIZE; stalls++)
2749 if (insn = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)])
2750 {
2751 for (; insn; insn = NEXT_INSN (insn))
2752 {
2753 if (file)
2754 fprintf (file, ";; issue insn %d before %d with %d stalls\n",
2755 INSN_UID (insn), INSN_UID (last), stalls);
2756 ready[new_ready++] = insn;
2757 q_size -= 1;
2758 }
2759 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
2760 break;
2761 }
2762
2763 #if 0
2764 /* This looks logically correct, but on the SPEC benchmark set on
2765 the SPARC, I get better code without it. */
2766 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
2767 #endif
2768 }
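
/* For example, if the only pending insn is a two-cycle load queued on the
previous iteration, the scan above finds it one queue slot ahead and
moves it straight to READY rather than leaving the scheduler idle. */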
2769
2770 /* There should be some instructions waiting to fire. */
2771 if (new_ready == 0)
2772 abort ();
2773
2774 /* Sort the ready list and choose the best insn to schedule.
2775 N_READY holds the number of items that were scheduled the last time,
2776 minus the one instruction scheduled on the last loop iteration; it
2777 is not modified for any other reason in this loop. */
2778 SCHED_SORT (ready, new_ready, n_ready);
2779 n_ready = new_ready;
2780 last_scheduled_insn = insn = ready[0];
2781
2782 if (DONE_PRIORITY_P (insn))
2783 abort ();
2784
2785 if (reload_completed == 0)
2786 {
2787 /* Process this insn, and each insn linked to this one which must
2788 be immediately output after this insn. */
2789 do
2790 {
2791 /* First we kill registers set by this insn, and then we
2792 make registers used by this insn live. This is the opposite of
2793 the order used above because we are traversing the instructions
2794 backwards. */
2795
2796 /* Strictly speaking, we should scan REG_UNUSED notes and make
2797 every register mentioned there live; however, we would just
2798 kill them again immediately below, so there doesn't seem to
2799 be any reason to bother doing so. */
2800
2801 /* See if this is the last notice we must take of a register. */
2802 if (GET_CODE (PATTERN (insn)) == SET
2803 || GET_CODE (PATTERN (insn)) == CLOBBER)
2804 sched_note_set (b, PATTERN (insn), 1);
2805 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2806 {
2807 int j;
2808 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2809 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2810 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2811 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 1);
2812 }
2813
2814 /* This code keeps life analysis information up to date. */
2815 if (GET_CODE (insn) == CALL_INSN)
2816 {
2817 register struct sometimes *p;
2818
2819 /* A call kills all call used and global registers, except
2820 for those mentioned in the call pattern which will be
2821 made live again later. */
2822 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2823 if (call_used_regs[i] || global_regs[i])
2824 {
2825 register int offset = i / REGSET_ELT_BITS;
2826 register int bit = 1 << (i % REGSET_ELT_BITS);
2827
2828 bb_live_regs[offset] &= ~bit;
2829 bb_dead_regs[offset] |= bit;
2830 }
2831
2832 /* Regs live at the time of a call instruction must not
2833 go in a register clobbered by calls. Record this for
2834 all regs now live. Note that regs which are born or
2835 die in a call do not cross a call, so this must be done
2836 after the killings (above) and before the births
2837 (below). */
2838 p = regs_sometimes_live;
2839 for (i = 0; i < sometimes_max; i++, p++)
2840 if (bb_live_regs[p->offset] & (1 << p->bit))
2841 p->calls_crossed += 1;
2842 }
2843
2844 /* Make every register used live, and add REG_DEAD notes for
2845 registers which were not live before we started. */
2846 attach_deaths_insn (insn);
2847
2848 /* Find registers now made live by that instruction. */
2849 for (i = 0; i < regset_size; i++)
2850 {
2851 int diff = bb_live_regs[i] & ~old_live_regs[i];
2852 if (diff)
2853 {
2854 register int bit;
2855 old_live_regs[i] |= diff;
2856 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
2857 if (diff & (1 << bit))
2858 sometimes_max
2859 = new_sometimes_live (regs_sometimes_live, i, bit,
2860 sometimes_max);
2861 }
2862 }
2863
2864 /* Count lengths of all regs we are worrying about now,
2865 and handle registers no longer live. */
2866
2867 for (i = 0; i < sometimes_max; i++)
2868 {
2869 register struct sometimes *p = &regs_sometimes_live[i];
2870 int regno = p->offset * REGSET_ELT_BITS + p->bit;
2871
2872 p->live_length += 1;
2873
2874 if ((bb_live_regs[p->offset] & (1 << p->bit)) == 0)
2875 {
2876 /* This is the end of one of this register's lifetime
2877 segments. Save the lifetime info collected so far,
2878 and clear its bit in the old_live_regs entry. */
2879 sched_reg_live_length[regno] += p->live_length;
2880 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2881 old_live_regs[p->offset] &= ~(1 << p->bit);
2882
2883 /* Delete the reg_sometimes_live entry for this reg by
2884 copying the last entry over top of it. */
2885 *p = regs_sometimes_live[--sometimes_max];
2886 /* ...and decrement i so that this newly copied entry
2887 will be processed. */
2888 i--;
2889 }
2890 }
2891
2892 link = insn;
2893 insn = PREV_INSN (insn);
2894 }
2895 while (SCHED_GROUP_P (link));
2896
2897 /* Set INSN back to the insn we are scheduling now. */
2898 insn = ready[0];
2899 }
2900
2901 /* Schedule INSN. Remove it from the ready list. */
2902 ready += 1;
2903 n_ready -= 1;
2904
2905 sched_n_insns += 1;
2906 NEXT_INSN (insn) = last;
2907 PREV_INSN (last) = insn;
2908 last = insn;
2909
2910 /* Everything that precedes INSN now either becomes "ready", if
2911 it can execute immediately before INSN, or "pending", if
2912 there must be a delay. Give INSN high enough priority that
2913 at least one (maybe more) reg-killing insns can be launched
2914 ahead of all others. Mark INSN as scheduled by changing its
2915 priority to -1. */
2916 INSN_PRIORITY (insn) = LAUNCH_PRIORITY;
2917 new_ready = launch_links (insn, ready, n_ready);
2918 INSN_PRIORITY (insn) = DONE_PRIORITY;
2919
2920 /* Schedule all prior insns that must not be moved. */
2921 if (SCHED_GROUP_P (insn))
2922 {
2923 /* Disable these insns from being launched. */
2924 link = insn;
2925 while (SCHED_GROUP_P (link))
2926 {
2927 /* Disable these insns from being launched by anybody. */
2928 link = PREV_INSN (link);
2929 INSN_REF_COUNT (link) = 0;
2930 }
2931
2932 /* None of these insns can move forward into delay slots. */
2933 while (SCHED_GROUP_P (insn))
2934 {
2935 insn = PREV_INSN (insn);
2936 new_ready = launch_links (insn, ready, new_ready);
2937 INSN_PRIORITY (insn) = DONE_PRIORITY;
2938
2939 sched_n_insns += 1;
2940 NEXT_INSN (insn) = last;
2941 PREV_INSN (last) = insn;
2942 last = insn;
2943 }
2944 }
2945 }
2946 if (q_size != 0)
2947 abort ();
2948
2949 if (reload_completed == 0)
2950 finish_sometimes_live (regs_sometimes_live, sometimes_max);
2951
2952 /* HEAD is now the first insn in the chain of insns that have
2953 been scheduled by the loop above.
2954 TAIL is the last of those insns. */
2955 head = insn;
2956
2957 /* NOTE_LIST is the end of a chain of notes previously found
2958 among the insns. Insert them at the beginning of the insns. */
2959 if (note_list != 0)
2960 {
2961 rtx note_head = note_list;
2962 while (PREV_INSN (note_head))
2963 note_head = PREV_INSN (note_head);
2964
2965 PREV_INSN (head) = note_list;
2966 NEXT_INSN (note_list) = head;
2967 head = note_head;
2968 }
2969
2970 /* In theory, there should be no REG_DEAD notes leftover at the end.
2971 In practice, this can occur as the result of bugs in flow, combine.c,
2972 and/or sched.c. The values of the REG_DEAD notes remaining are
2973 meaningless, because dead_notes is just used as a free list. */
2974 #if 1
2975 if (dead_notes != 0)
2976 abort ();
2977 #endif
2978
2979 if (new_needs & NEED_HEAD)
2980 basic_block_head[b] = head;
2981 PREV_INSN (head) = prev_head;
2982 NEXT_INSN (prev_head) = head;
2983
2984 if (new_needs & NEED_TAIL)
2985 basic_block_end[b] = tail;
2986 NEXT_INSN (tail) = next_tail;
2987 PREV_INSN (next_tail) = tail;
2988
2989 /* Restore the line-number notes of each insn. */
2990 if (write_symbols != NO_DEBUG)
2991 {
2992 rtx line, note, prev, new;
2993 int notes = 0;
2994
2995 head = basic_block_head[b];
2996 next_tail = NEXT_INSN (basic_block_end[b]);
2997
2998 /* Determine the current line-number. We want to know the current
2999 line number of the first insn of the block here, in case it is
3000 different from the true line number that was saved earlier. If
3001 different, then we need a line number note before the first insn
3002 of this block. If it happens to be the same, then we don't want to
3003 emit another line number note here. */
3004 for (line = head; line; line = PREV_INSN (line))
3005 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3006 break;
3007
3008 /* Walk the insns keeping track of the current line-number and inserting
3009 the line-number notes as needed. */
3010 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3011 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3012 line = insn;
3013 else if (! (GET_CODE (insn) == NOTE
3014 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
3015 && (note = LINE_NOTE (insn)) != 0
3016 && note != line
3017 && (line == 0
3018 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
3019 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
3020 {
3021 line = note;
3022 prev = PREV_INSN (insn);
3023 if (LINE_NOTE (note))
3024 {
3025 /* Re-use the original line-number note. */
3026 LINE_NOTE (note) = 0;
3027 PREV_INSN (note) = prev;
3028 NEXT_INSN (prev) = note;
3029 PREV_INSN (insn) = note;
3030 NEXT_INSN (note) = insn;
3031 }
3032 else
3033 {
3034 notes++;
3035 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
3036 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
3037 }
3038 }
3039 if (file && notes)
3040 fprintf (file, ";; added %d line-number notes\n", notes);
3041 }
3042
3043 if (file)
3044 {
3045 fprintf (file, ";; new basic block head = %d\n;; new basic block end = %d\n\n",
3046 INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
3047 }
3048
3049 /* Yow! We're done! */
3050 free_pending_lists ();
3051
3052 return;
3053 }
3054 \f
3055 /* Subroutine of split_hard_reg_notes. Searches X for any reference to
3056 REGNO, returning the rtx of the reference found if any. Otherwise,
3057 returns 0. */
3058
3059 rtx
3060 regno_use_in (regno, x)
3061 int regno;
3062 rtx x;
3063 {
3064 register char *fmt;
3065 int i, j;
3066 rtx tem;
3067
3068 if (GET_CODE (x) == REG && REGNO (x) == regno)
3069 return x;
3070
3071 fmt = GET_RTX_FORMAT (GET_CODE (x));
3072 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3073 {
3074 if (fmt[i] == 'e')
3075 {
3076 if (tem = regno_use_in (regno, XEXP (x, i)))
3077 return tem;
3078 }
3079 else if (fmt[i] == 'E')
3080 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3081 if (tem = regno_use_in (regno , XVECEXP (x, i, j)))
3082 return tem;
3083 }
3084
3085 return 0;
3086 }
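
/* For example, regno_use_in (2, pat) on a pattern like
(set (reg:SI 3) (plus:SI (reg:SI 2) (const_int 1)))
returns the (reg:SI 2) rtx, which the caller can wrap in a new
REG_DEAD note. */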
3087
3088 /* Subroutine of update_flow_info. Determines whether any new REG_NOTEs are
3089 needed for the hard register mentioned in the note. This can happen
3090 if the reference to the hard register in the original insn was split into
3091 several smaller hard register references in the split insns. */
3092
3093 static void
3094 split_hard_reg_notes (note, first, last, orig_insn)
3095 rtx note, first, last, orig_insn;
3096 {
3097 rtx reg, temp, link;
3098 int n_regs, i, new_reg;
3099 rtx insn;
3100
3101 /* Assume that this is a REG_DEAD note. */
3102 if (REG_NOTE_KIND (note) != REG_DEAD)
3103 abort ();
3104
3105 reg = XEXP (note, 0);
3106
3107 n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
3108
3109 /* ??? Could add a check here to see whether the hard register is referenced
3110 in the same mode as in the original insn. If so, then it has not been
3111 split, and the rest of the code below is unnecessary. */
3112
3113 for (i = 1; i < n_regs; i++)
3114 {
3115 new_reg = REGNO (reg) + i;
3116
3117 /* Check for references to new_reg in the split insns. */
3118 for (insn = last; ; insn = PREV_INSN (insn))
3119 {
3120 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3121 && (temp = regno_use_in (new_reg, PATTERN (insn))))
3122 {
3123 /* Create a new reg dead note here. */
3124 link = rtx_alloc (EXPR_LIST);
3125 PUT_REG_NOTE_KIND (link, REG_DEAD);
3126 XEXP (link, 0) = temp;
3127 XEXP (link, 1) = REG_NOTES (insn);
3128 REG_NOTES (insn) = link;
3129 break;
3130 }
3131 /* It isn't mentioned anywhere, so no new reg note is needed for
3132 this register. */
3133 if (insn == first)
3134 break;
3135 }
3136 }
3137 }
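
/* For example, when an insn that killed a DImode value in hard regs 4-5
is split into two SImode sets, the split insns reference regs 4 and 5
separately; the loop above adds a fresh REG_DEAD note for the SImode
reference to reg 5 on the last split insn that mentions it. */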
3138
3139 /* Subroutine of update_flow_info. Determines whether a SET or CLOBBER in an
3140 insn created by splitting needs a REG_DEAD or REG_UNUSED note added. */
3141
3142 static void
3143 new_insn_dead_notes (pat, insn, last, orig_insn)
3144 rtx pat, insn, last, orig_insn;
3145 {
3146 rtx dest, tem, set;
3147
3148 /* PAT is either a CLOBBER or a SET here. */
3149 dest = XEXP (pat, 0);
3150
3151 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3152 || GET_CODE (dest) == STRICT_LOW_PART
3153 || GET_CODE (dest) == SIGN_EXTRACT)
3154 dest = XEXP (dest, 0);
3155
3156 if (GET_CODE (dest) == REG)
3157 {
3158 for (tem = last; tem != insn; tem = PREV_INSN (tem))
3159 {
3160 if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
3161 && reg_overlap_mentioned_p (dest, PATTERN (tem))
3162 && (set = single_set (tem)))
3163 {
3164 rtx tem_dest = SET_DEST (set);
3165
3166 while (GET_CODE (tem_dest) == ZERO_EXTRACT
3167 || GET_CODE (tem_dest) == SUBREG
3168 || GET_CODE (tem_dest) == STRICT_LOW_PART
3169 || GET_CODE (tem_dest) == SIGN_EXTRACT)
3170 tem_dest = XEXP (tem_dest, 0);
3171
3172 if (tem_dest != dest)
3173 {
3174 /* Use the same scheme as combine.c: don't put both REG_DEAD
3175 and REG_UNUSED notes on the same insn. */
3176 if (! find_regno_note (tem, REG_UNUSED, REGNO (dest))
3177 && ! find_regno_note (tem, REG_DEAD, REGNO (dest)))
3178 {
3179 rtx note = rtx_alloc (EXPR_LIST);
3180 PUT_REG_NOTE_KIND (note, REG_DEAD);
3181 XEXP (note, 0) = dest;
3182 XEXP (note, 1) = REG_NOTES (tem);
3183 REG_NOTES (tem) = note;
3184 }
3185 /* The reg only dies in one insn, the last one that uses
3186 it. */
3187 break;
3188 }
3189 else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
3190 /* We found an instruction that both uses the register,
3191 and sets it, so no new REG_NOTE is needed for this set. */
3192 break;
3193 }
3194 }
3195 /* If this is a set, it must die somewhere, unless it is the dest of
3196 the original insn, and hence is live after the original insn. Abort
3197 if it isn't supposed to be live after the original insn.
3198
3199 If this is a clobber, then just add a REG_UNUSED note. */
3200 if (tem == insn)
3201 {
3202 int live_after_orig_insn = 0;
3203 rtx pattern = PATTERN (orig_insn);
3204 int i;
3205
3206 if (GET_CODE (pat) == CLOBBER)
3207 {
3208 rtx note = rtx_alloc (EXPR_LIST);
3209 PUT_REG_NOTE_KIND (note, REG_UNUSED);
3210 XEXP (note, 0) = dest;
3211 XEXP (note, 1) = REG_NOTES (insn);
3212 REG_NOTES (insn) = note;
3213 return;
3214 }
3215
3216 /* The original insn could have multiple sets, so search the
3217 insn for all sets. */
3218 if (GET_CODE (pattern) == SET)
3219 {
3220 if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
3221 live_after_orig_insn = 1;
3222 }
3223 else if (GET_CODE (pattern) == PARALLEL)
3224 {
3225 for (i = 0; i < XVECLEN (pattern, 0); i++)
3226 if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
3227 && reg_overlap_mentioned_p (dest,
3228 SET_DEST (XVECEXP (pattern,
3229 0, i))))
3230 live_after_orig_insn = 1;
3231 }
3232
3233 if (! live_after_orig_insn)
3234 abort ();
3235 }
3236 }
3237 }
3238
3239 /* Subroutine of update_flow_info. Update the value of reg_n_sets for all
3240 registers modified by X. INC is -1 if the containing insn is being deleted,
3241 and is 1 if the containing insn is a newly generated insn. */
3242
3243 static void
3244 update_n_sets (x, inc)
3245 rtx x;
3246 int inc;
3247 {
3248 rtx dest = SET_DEST (x);
3249
3250 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
3251 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3252 dest = SUBREG_REG (dest);
3253
3254 if (GET_CODE (dest) == REG)
3255 {
3256 int regno = REGNO (dest);
3257
3258 if (regno < FIRST_PSEUDO_REGISTER)
3259 {
3260 register int i;
3261 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
3262
3263 for (i = regno; i < endregno; i++)
3264 reg_n_sets[i] += inc;
3265 }
3266 else
3267 reg_n_sets[regno] += inc;
3268 }
3269 }
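
/* For example, if splitting turns one insn setting (reg 100) into two
insns that each set it, the caller runs update_n_sets with INC == -1
for the original SET and INC == 1 for each new one, leaving
reg_n_sets[100] == 2 so a REG_EQUAL note will not be promoted to
REG_EQUIV by local-alloc. */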
3270
3271 /* Updates all flow-analysis related quantities (including REG_NOTES) for
3272 the insns from FIRST to LAST inclusive that were created by splitting
3273 ORIG_INSN. NOTES are the original REG_NOTES. */
3274
3275 static void
3276 update_flow_info (notes, first, last, orig_insn)
3277 rtx notes;
3278 rtx first, last;
3279 rtx orig_insn;
3280 {
3281 rtx insn, note;
3282 rtx next;
3283 rtx orig_dest, temp;
3284 rtx set;
3285
3286 /* Get and save the destination set by the original insn. */
3287
3288 orig_dest = single_set (orig_insn);
3289 if (orig_dest)
3290 orig_dest = SET_DEST (orig_dest);
3291
3292 /* Move REG_NOTES from the original insn to where they now belong. */
3293
3294 for (note = notes; note; note = next)
3295 {
3296 next = XEXP (note, 1);
3297 switch (REG_NOTE_KIND (note))
3298 {
3299 case REG_DEAD:
3300 case REG_UNUSED:
3301 /* Move these notes from the original insn to the last new insn where
3302 the register is now set. */
3303
3304 for (insn = last; ; insn = PREV_INSN (insn))
3305 {
3306 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3307 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3308 {
3309 XEXP (note, 1) = REG_NOTES (insn);
3310 REG_NOTES (insn) = note;
3311
3312 /* Sometimes need to convert REG_UNUSED notes to REG_DEAD
3313 notes. */
3314 /* ??? This won't handle multiple word registers correctly,
3315 but should be good enough for now. */
3316 if (REG_NOTE_KIND (note) == REG_UNUSED
3317 && ! dead_or_set_p (insn, XEXP (note, 0)))
3318 PUT_REG_NOTE_KIND (note, REG_DEAD);
3319
3320 /* The reg only dies in one insn, the last one that uses
3321 it. */
3322 break;
3323 }
3324 /* It must die somewhere; fail if we couldn't find where it died.
3325
3326 If this is a REG_UNUSED note, then it must be a temporary
3327 register that was not needed by this instantiation of the
3328 pattern, so we can safely ignore it. */
3329 if (insn == first)
3330 {
3331 if (REG_NOTE_KIND (note) != REG_UNUSED)
3332 abort ();
3333
3334 break;
3335 }
3336 }
3337
3338 /* If this note refers to a multiple word hard register, it may
3339 have been split into several smaller hard register references.
3340 Check to see if there are any new register references that
3341 need REG_NOTES added for them. */
3342 temp = XEXP (note, 0);
3343 if (REG_NOTE_KIND (note) == REG_DEAD
3344 && GET_CODE (temp) == REG
3345 && REGNO (temp) < FIRST_PSEUDO_REGISTER
3346 && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) > 1)
3347 split_hard_reg_notes (note, first, last, orig_insn);
3348 break;
3349
3350 case REG_WAS_0:
3351 /* This note applies to the dest of the original insn. Find the
3352 first new insn that now has the same dest, and move the note
3353 there. */
3354
3355 if (! orig_dest)
3356 abort ();
3357
3358 for (insn = first; ; insn = NEXT_INSN (insn))
3359 {
3360 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3361 && (temp = single_set (insn))
3362 && rtx_equal_p (SET_DEST (temp), orig_dest))
3363 {
3364 XEXP (note, 1) = REG_NOTES (insn);
3365 REG_NOTES (insn) = note;
3366 /* The reg is only zero before one insn, the first that
3367 uses it. */
3368 break;
3369 }
3370 /* It must be set somewhere, fail if we couldn't find where it
3371 was set. */
3372 if (insn == last)
3373 abort ();
3374 }
3375 break;
3376
3377 case REG_EQUAL:
3378 case REG_EQUIV:
3379 /* A REG_EQUIV or REG_EQUAL note on an insn with more than one set is
3380 meaningless; just drop it. Otherwise fall through to REG_NO_CONFLICT. */
3381 if (! orig_dest)
3382 break;
3383
3384 case REG_NO_CONFLICT:
3385 /* These notes apply to the dest of the original insn. Find the last
3386 new insn that now has the same dest, and move the note there. */
3387
3388 if (! orig_dest)
3389 abort ();
3390
3391 for (insn = last; ; insn = PREV_INSN (insn))
3392 {
3393 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3394 && (temp = single_set (insn))
3395 && rtx_equal_p (SET_DEST (temp), orig_dest))
3396 {
3397 XEXP (note, 1) = REG_NOTES (insn);
3398 REG_NOTES (insn) = note;
3399 /* Only put this note on one of the new insns. */
3400 break;
3401 }
3402
3403 /* The original dest must still be set someplace. Abort if we
3404 couldn't find it. */
3405 if (insn == first)
3406 abort ();
3407 }
3408 break;
3409
3410 case REG_LIBCALL:
3411 /* Move a REG_LIBCALL note to the first insn created, and update
3412 the corresponding REG_RETVAL note. */
3413 XEXP (note, 1) = REG_NOTES (first);
3414 REG_NOTES (first) = note;
3415
3416 insn = XEXP (note, 0);
3417 note = find_reg_note (insn, REG_RETVAL, 0);
3418 if (note)
3419 XEXP (note, 0) = first;
3420 break;
3421
3422 case REG_RETVAL:
3423 /* Move a REG_RETVAL note to the last insn created, and update
3424 the corresponding REG_LIBCALL note. */
3425 XEXP (note, 1) = REG_NOTES (last);
3426 REG_NOTES (last) = note;
3427
3428 insn = XEXP (note, 0);
3429 note = find_reg_note (insn, REG_LIBCALL, 0);
3430 if (note)
3431 XEXP (note, 0) = last;
3432 break;
3433
3434 case REG_NONNEG:
3435 /* This should be moved to whichever instruction is a JUMP_INSN. */
3436
3437 for (insn = last; ; insn = PREV_INSN (insn))
3438 {
3439 if (GET_CODE (insn) == JUMP_INSN)
3440 {
3441 XEXP (note, 1) = REG_NOTES (insn);
3442 REG_NOTES (insn) = note;
3443 /* Only put this note on one of the new insns. */
3444 break;
3445 }
3446 /* Fail if we couldn't find a JUMP_INSN. */
3447 if (insn == first)
3448 abort ();
3449 }
3450 break;
3451
3452 case REG_INC:
3453 /* This should be moved to whichever instruction now has the
3454 increment operation. */
3455 abort ();
3456
3457 case REG_LABEL:
3458 /* Should be moved to the new insn(s) which use the label. */
3459 for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
3460 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3461 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3462 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL,
3463 XEXP (note, 0), REG_NOTES (insn));
3464 break;
3465
3466 case REG_CC_SETTER:
3467 case REG_CC_USER:
3468 /* These two notes will never appear until after reorg, so we don't
3469 have to handle them here. */
3470 default:
3471 abort ();
3472 }
3473 }
3474
3475 /* Each new insn created, except the last, has a new set. If the destination
3476 is a register, then this reg is now live across several insns, whereas
3477 previously the dest reg was born and died within the same insn. To
3478 reflect this, we now need a REG_DEAD note on the insn where this
3479 dest reg dies.
3480
3481 Similarly, the new insns may have clobbers that need REG_UNUSED notes. */
3482
3483 for (insn = first; insn != last; insn = NEXT_INSN (insn))
3484 {
3485 rtx pat;
3486 int i;
3487
3488 pat = PATTERN (insn);
3489 if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
3490 new_insn_dead_notes (pat, insn, last, orig_insn);
3491 else if (GET_CODE (pat) == PARALLEL)
3492 {
3493 for (i = 0; i < XVECLEN (pat, 0); i++)
3494 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3495 || GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
3496 new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
3497 }
3498 }
3499
3500 /* If any insn, except the last, uses the register set by the last insn,
3501 then we need a new REG_DEAD note on that insn. In this case, there
3502 would not have been a REG_DEAD note for this register in the original
3503 insn because it was used and set within one insn.
3504
3505 There is no new REG_DEAD note needed if the last insn uses the register
3506 that it is setting. */
3507
3508 set = single_set (last);
3509 if (set)
3510 {
3511 rtx dest = SET_DEST (set);
3512
3513 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3514 || GET_CODE (dest) == STRICT_LOW_PART
3515 || GET_CODE (dest) == SIGN_EXTRACT)
3516 dest = XEXP (dest, 0);
3517
3518 if (GET_CODE (dest) == REG
3519 && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
3520 {
3521 for (insn = PREV_INSN (last); ; insn = PREV_INSN (insn))
3522 {
3523 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3524 && reg_mentioned_p (dest, PATTERN (insn))
3525 && (set = single_set (insn)))
3526 {
3527 rtx insn_dest = SET_DEST (set);
3528
3529 while (GET_CODE (insn_dest) == ZERO_EXTRACT
3530 || GET_CODE (insn_dest) == SUBREG
3531 || GET_CODE (insn_dest) == STRICT_LOW_PART
3532 || GET_CODE (insn_dest) == SIGN_EXTRACT)
3533 insn_dest = XEXP (insn_dest, 0);
3534
3535 if (insn_dest != dest)
3536 {
3537 note = rtx_alloc (EXPR_LIST);
3538 PUT_REG_NOTE_KIND (note, REG_DEAD);
3539 XEXP (note, 0) = dest;
3540 XEXP (note, 1) = REG_NOTES (insn);
3541 REG_NOTES (insn) = note;
3542 /* The reg only dies in one insn, the last one
3543 that uses it. */
3544 break;
3545 }
3546 }
3547 if (insn == first)
3548 break;
3549 }
3550 }
3551 }
3552
3553 /* If the original dest is modifying a multiple register target, and the
3554 original instruction was split such that the original dest is now set
3555 by two or more SUBREG sets, then the split insns no longer kill the
3556 destination of the original insn.
3557
3558 In this case, if there exists an instruction in the same basic block,
3559 before the split insn, which uses the original dest, and this use is
3560 killed by the original insn, then we must remove the REG_DEAD note on
3561 this insn, because it is now superfluous.
3562
3563 This does not apply when a hard register gets split, because the code
3564 knows how to handle overlapping hard registers properly. */
3565 if (orig_dest && GET_CODE (orig_dest) == REG)
3566 {
3567 int found_orig_dest = 0;
3568 int found_split_dest = 0;
3569
3570 for (insn = first; ; insn = NEXT_INSN (insn))
3571 {
3572 set = single_set (insn);
3573 if (set)
3574 {
3575 if (GET_CODE (SET_DEST (set)) == REG
3576 && REGNO (SET_DEST (set)) == REGNO (orig_dest))
3577 {
3578 found_orig_dest = 1;
3579 break;
3580 }
3581 else if (GET_CODE (SET_DEST (set)) == SUBREG
3582 && SUBREG_REG (SET_DEST (set)) == orig_dest)
3583 {
3584 found_split_dest = 1;
3585 break;
3586 }
3587 }
3588
3589 if (insn == last)
3590 break;
3591 }
3592
3593 if (found_split_dest)
3594 {
3595 /* Search backwards from FIRST, looking for the first insn that uses
3596 the original dest. Stop if we pass a CODE_LABEL or a JUMP_INSN.
3597 If we find an insn, and it has a REG_DEAD note, then delete the
3598 note. */
3599
3600 for (insn = first; insn; insn = PREV_INSN (insn))
3601 {
3602 if (GET_CODE (insn) == CODE_LABEL
3603 || GET_CODE (insn) == JUMP_INSN)
3604 break;
3605 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3606 && reg_mentioned_p (orig_dest, insn))
3607 {
3608 note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
3609 if (note)
3610 remove_note (insn, note);
3611 }
3612 }
3613 }
3614 else if (! found_orig_dest)
3615 {
3616 /* This should never happen. */
3617 abort ();
3618 }
3619 }
3620
3621 /* Update reg_n_sets. This is necessary to prevent local alloc from
3622 converting REG_EQUAL notes to REG_EQUIV when splitting has changed
3623 a reg from being set once to being set multiple times. */
3624
3625 {
3626 rtx x = PATTERN (orig_insn);
3627 RTX_CODE code = GET_CODE (x);
3628
3629 if (code == SET || code == CLOBBER)
3630 update_n_sets (x, -1);
3631 else if (code == PARALLEL)
3632 {
3633 int i;
3634 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3635 {
3636 code = GET_CODE (XVECEXP (x, 0, i));
3637 if (code == SET || code == CLOBBER)
3638 update_n_sets (XVECEXP (x, 0, i), -1);
3639 }
3640 }
3641
3642 for (insn = first; ; insn = NEXT_INSN (insn))
3643 {
3644 x = PATTERN (insn);
3645 code = GET_CODE (x);
3646
3647 if (code == SET || code == CLOBBER)
3648 update_n_sets (x, 1);
3649 else if (code == PARALLEL)
3650 {
3651 int i;
3652 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3653 {
3654 code = GET_CODE (XVECEXP (x, 0, i));
3655 if (code == SET || code == CLOBBER)
3656 update_n_sets (XVECEXP (x, 0, i), 1);
3657 }
3658 }
3659
3660 if (insn == last)
3661 break;
3662 }
3663 }
3664 }
3665
3666 /* The one entry point in this file. DUMP_FILE is the dump file for
3667 this pass. */
3668
3669 void
3670 schedule_insns (dump_file)
3671 FILE *dump_file;
3672 {
3673 int max_uid = MAX_INSNS_PER_SPLIT * (get_max_uid () + 1);
3674 int i, b;
3675 rtx insn;
3676
3677 /* Taking care of this degenerate case makes the rest of
3678 this code simpler. */
3679 if (n_basic_blocks == 0)
3680 return;
3681
3682 /* Create an insn here so that we can hang dependencies off of it later. */
3683 sched_before_next_call = gen_rtx (INSN, VOIDmode, 0, 0, 0, 0, 0, 0, 0);
3684
3685 /* Initialize the unused_*_lists. We can't use the ones left over from
3686 the previous function, because gcc has freed that memory. We can use
3687 the ones left over from the first sched pass in the second pass however,
3688 so only clear them on the first sched pass. The first pass is before
3689 reload if flag_schedule_insns is set, otherwise it is afterwards. */
3690
3691 if (reload_completed == 0 || ! flag_schedule_insns)
3692 {
3693 unused_insn_list = 0;
3694 unused_expr_list = 0;
3695 }
3696
3697 /* We create no insns here, only reorder them, so we
3698 remember how far we can cut back the stack on exit. */
3699
3700 /* Allocate data for this pass. See comments, above,
3701 for what these vectors do. */
3702 /* ??? Instruction splitting below may create new instructions, so these
3703 arrays must be bigger than just max_uid. */
3704 insn_luid = (int *) alloca (max_uid * sizeof (int));
3705 insn_priority = (int *) alloca (max_uid * sizeof (int));
3706 insn_ref_count = (int *) alloca (max_uid * sizeof (int));
3707
3708 if (reload_completed == 0)
3709 {
3710 sched_reg_n_deaths = (short *) alloca (max_regno * sizeof (short));
3711 sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
3712 sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
3713 bb_dead_regs = (regset) alloca (regset_bytes);
3714 bb_live_regs = (regset) alloca (regset_bytes);
3715 bzero (sched_reg_n_calls_crossed, max_regno * sizeof (int));
3716 bzero (sched_reg_live_length, max_regno * sizeof (int));
3717 bcopy (reg_n_deaths, sched_reg_n_deaths, max_regno * sizeof (short));
3718 init_alias_analysis ();
3719 }
3720 else
3721 {
3722 sched_reg_n_deaths = 0;
3723 sched_reg_n_calls_crossed = 0;
3724 sched_reg_live_length = 0;
3725 bb_dead_regs = 0;
3726 bb_live_regs = 0;
3727 if (! flag_schedule_insns)
3728 init_alias_analysis ();
3729 }
3730
3731 if (write_symbols != NO_DEBUG)
3732 {
3733 rtx line;
3734
3735 line_note = (rtx *) alloca (max_uid * sizeof (rtx));
3736 bzero (line_note, max_uid * sizeof (rtx));
3737 line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
3738 bzero (line_note_head, n_basic_blocks * sizeof (rtx));
3739
3740 /* Determine the line-number at the start of each basic block.
3741 This must be computed and saved now, because after a basic block's
3742 predecessor has been scheduled, it is impossible to accurately
3743 determine the correct line number for the first insn of the block. */
3744
3745 for (b = 0; b < n_basic_blocks; b++)
3746 for (line = basic_block_head[b]; line; line = PREV_INSN (line))
3747 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3748 {
3749 line_note_head[b] = line;
3750 break;
3751 }
3752 }
3753
3754 bzero (insn_luid, max_uid * sizeof (int));
3755 bzero (insn_priority, max_uid * sizeof (int));
3756 bzero (insn_ref_count, max_uid * sizeof (int));
3757
3758 /* Schedule each basic block, block by block. */
3759
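/* Emit a throw-away note after the final block's end when nothing
suitable follows it, presumably so every block has a stable insn after
its end to anchor the scheduled sequence. */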
3760 if (NEXT_INSN (basic_block_end[n_basic_blocks-1]) == 0
3761 || (GET_CODE (basic_block_end[n_basic_blocks-1]) != NOTE
3762 && GET_CODE (basic_block_end[n_basic_blocks-1]) != CODE_LABEL))
3763 emit_note_after (NOTE_INSN_DELETED, basic_block_end[n_basic_blocks-1]);
3764
3765 for (b = 0; b < n_basic_blocks; b++)
3766 {
3767 rtx insn, next;
3768 rtx insns;
3769
3770 note_list = 0;
3771
3772 for (insn = basic_block_head[b]; ; insn = next)
3773 {
3774 rtx prev;
3775 rtx set;
3776
3777 /* Can't use `next_real_insn' because that might step across a
3778 CODE_LABEL and out of the current basic block. */
3779 next = NEXT_INSN (insn);
3780 if (GET_CODE (insn) != INSN)
3781 {
3782 if (insn == basic_block_end[b])
3783 break;
3784
3785 continue;
3786 }
3787
3788 /* Don't split no-op move insns. These should silently disappear
3789 later in final. Splitting such insns would break the code
3790 that handles REG_NO_CONFLICT blocks. */
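/* A no-op move is a single_set whose source and destination are
rtx_equal_p, e.g. (set (reg:SI 42) (reg:SI 42)). */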
3791 set = single_set (insn);
3792 if (set && rtx_equal_p (SET_SRC (set), SET_DEST (set)))
3793 {
3794 if (insn == basic_block_end[b])
3795 break;
3796
3797 /* Nops get in the way while scheduling, so delete them now if
3798 register allocation has already been done. It is too risky
3799 to try to do this before register allocation, and there are
3800 unlikely to be very many nops then anyway. */
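/* Converting the insn into a NOTE_INSN_DELETED note, as below, removes
it without unlinking it from the insn chain. */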
3801 if (reload_completed)
3802 {
3803 PUT_CODE (insn, NOTE);
3804 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
3805 NOTE_SOURCE_FILE (insn) = 0;
3806 }
3807
3808 continue;
3809 }
3810
3811 /* Split insns here to get maximum fine-grained parallelism. */
3812 prev = PREV_INSN (insn);
3813 if (reload_completed == 0)
3814 {
3815 rtx last, first = PREV_INSN (insn);
3816 rtx notes = REG_NOTES (insn);
3817
3818 last = try_split (PATTERN (insn), insn, 1);
3819 if (last != insn)
3820 {
3821 /* try_split returns the NOTE that INSN became. */
3822 first = NEXT_INSN (first);
3823 update_flow_info (notes, first, last, insn);
3824
3825 PUT_CODE (insn, NOTE);
3826 NOTE_SOURCE_FILE (insn) = 0;
3827 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
3828 if (insn == basic_block_head[b])
3829 basic_block_head[b] = first;
3830 if (insn == basic_block_end[b])
3831 {
3832 basic_block_end[b] = last;
3833 break;
3834 }
3835 }
3836 }
3837
3838 if (insn == basic_block_end[b])
3839 break;
3840 }
3841
3842 schedule_block (b, dump_file);
3843
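/* With the C emulation of alloca, calling alloca (0) reclaims storage
obtained by earlier alloca calls, keeping per-block garbage from
accumulating as the loop advances. */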
3844 #ifdef USE_C_ALLOCA
3845 alloca (0);
3846 #endif
3847 }
3848
3849 if (write_symbols != NO_DEBUG)
3850 {
3851 rtx line = 0;
3852 rtx insn = get_insns ();
3853 int active_insn = 0;
3854 int notes = 0;
3855
3856 /* Walk the insns, deleting redundant line-number notes. Many of
3857 these were already present before scheduling; the rest tend to
3858 occur at basic block boundaries. */
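/* The walk is backwards so that, on reaching each note, we already know
whether any active insn follows it. */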
3859 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
3860 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3861 {
3862 /* If there are no active insns following, INSN is redundant. */
3863 if (active_insn == 0)
3864 {
3865 notes++;
3866 NOTE_SOURCE_FILE (insn) = 0;
3867 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
3868 }
3869 /* If the line number is unchanged, LINE is redundant. */
3870 else if (line
3871 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
3872 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
3873 {
3874 notes++;
3875 NOTE_SOURCE_FILE (line) = 0;
3876 NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
3877 line = insn;
3878 }
3879 else
3880 line = insn;
3881 active_insn = 0;
3882 }
3883 else if (! ((GET_CODE (insn) == NOTE
3884 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
3885 || (GET_CODE (insn) == INSN
3886 && (GET_CODE (PATTERN (insn)) == USE
3887 || GET_CODE (PATTERN (insn)) == CLOBBER))))
3888 active_insn++;
3889
3890 if (dump_file && notes)
3891 fprintf (dump_file, ";; deleted %d line-number notes\n", notes);
3892 }
3893
3894 if (reload_completed == 0)
3895 {
3896 int regno;
3897 for (regno = 0; regno < max_regno; regno++)
3898 if (sched_reg_live_length[regno])
3899 {
3900 if (dump_file)
3901 {
3902 if (reg_live_length[regno] > sched_reg_live_length[regno])
3903 fprintf (dump_file,
3904 ";; register %d life shortened from %d to %d\n",
3905 regno, reg_live_length[regno],
3906 sched_reg_live_length[regno]);
3907 /* Negative values are special; don't overwrite the current
3908 reg_live_length value if it is negative. */
3909 else if (reg_live_length[regno] < sched_reg_live_length[regno]
3910 && reg_live_length[regno] >= 0)
3911 fprintf (dump_file,
3912 ";; register %d life extended from %d to %d\n",
3913 regno, reg_live_length[regno],
3914 sched_reg_live_length[regno]);
3915
3916 if (reg_n_calls_crossed[regno]
3917 && ! sched_reg_n_calls_crossed[regno])
3918 fprintf (dump_file,
3919 ";; register %d no longer crosses calls\n", regno);
3920 else if (! reg_n_calls_crossed[regno]
3921 && sched_reg_n_calls_crossed[regno])
3922 fprintf (dump_file,
3923 ";; register %d now crosses calls\n", regno);
3924 }
3925 /* Negative values are special; don't overwrite the current
3926 reg_live_length value if it is negative. */
3927 if (reg_live_length[regno] >= 0)
3928 reg_live_length[regno] = sched_reg_live_length[regno];
3929 reg_n_calls_crossed[regno] = sched_reg_n_calls_crossed[regno];
3930 }
3931 }
3932 }
3933 #endif /* INSN_SCHEDULING */