1 /* Instruction scheduling pass.
2 Copyright (C) 1992 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com)
4 Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)
5
6 This file is part of GNU CC.
7
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 GNU CC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GNU CC; see the file COPYING. If not, write to
20 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
21
22 /* Instruction scheduling pass.
23
24 This pass implements list scheduling within basic blocks. It is
25 run after flow analysis, but before register allocation. The
26 scheduler works as follows:
27
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32	   remaining dependencies we need to observe: register dependencies,
33	   memory dependencies, dependencies to keep function calls in order,
34	   and the dependence between a conditional branch and the setting of
35	   condition codes.  All of these are dealt with here.
36
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning
39 values to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
41
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we also add it to the ready list. When all insns down
50 to the lowest priority have been scheduled, the critical path of the
51 basic block has been made as short as possible. The remaining insns
52 are then scheduled in remaining slots.
53
54 The following list shows the order in which we want to break ties:
55
56 1. choose insn with lowest conflict cost, ties broken by
57 2. choose insn with the longest path to end of bb, ties broken by
58 3. choose insn that kills the most registers, ties broken by
59 4. choose insn that conflicts with the most ready insns, or finally
60 5. choose insn with lowest UID.
61
62 Memory references complicate matters. Only if we can be certain
63 that memory references are not part of the data dependency graph
64 (via true, anti, or output dependence), can we move operations past
65 memory references. To first approximation, reads can be done
66 independently, while writes introduce dependencies. Better
67 approximations will yield fewer dependencies.
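   For example, two reads of the same location can always be
   interchanged, but a read may not be moved past a write of the
   same location, nor one write past another.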
68
69 Dependencies set up by memory references are treated in exactly the
70 same way as other dependencies, by using LOG_LINKS.
71
72 Having optimized the critical path, we may have also unduly
73 extended the lifetimes of some registers. If an operation requires
74 that constants be loaded into registers, it is certainly desirable
75 to load those constants as early as necessary, but no earlier.
76 I.e., it will not do to load up a bunch of registers at the
77 beginning of a basic block only to use them at the end, if they
78 could be loaded later, since this may result in excessive register
79 utilization.
80
81 Note that since branches are never in basic blocks, but only end
82 basic blocks, this pass will not do any branch scheduling. But
83 that is ok, since we can use GNU's delayed branch scheduling
84 pass to take care of this case.
85
86 Also note that no further optimizations based on algebraic identities
87 are performed, so this pass would be a good one to perform instruction
88 splitting, such as breaking up a multiply instruction into shifts
89 and adds where that is profitable.
90
91 Given the memory aliasing analysis that this pass should perform,
92 it should be possible to remove redundant stores to memory, and to
93 load values from registers instead of hitting memory.
94
95 This pass must update information that subsequent passes expect to be
96 correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
97 reg_n_calls_crossed, and reg_live_length. Also, basic_block_head,
98 basic_block_end.
99
100 The information in the line number notes is carefully retained by this
101 pass. All other NOTE insns are grouped in their same relative order at
102 the beginning of basic blocks that have been scheduled. */
103 \f
104 #include <stdio.h>
105 #include "config.h"
106 #include "rtl.h"
107 #include "basic-block.h"
108 #include "regs.h"
109 #include "hard-reg-set.h"
110 #include "flags.h"
111 #include "insn-config.h"
112 #include "insn-attr.h"
113
114 /* Arrays set up by scheduling for the same respective purposes as
115 similar-named arrays set up by flow analysis. We work with these
116 arrays during the scheduling pass so we can compare values against
117 unscheduled code.
118
119 Values of these arrays are copied at the end of this pass into the
120 arrays set up by flow analysis. */
121 static short *sched_reg_n_deaths;
122 static int *sched_reg_n_calls_crossed;
123 static int *sched_reg_live_length;
124
125	/* For reg_last_uses, element N is a list of the insns that most
126	   recently used (hard or pseudo) register N in the current basic block;
127	   for reg_last_sets, the last insn that set register N, or zero if none.
128	   Needed for new registers which may be introduced by splitting insns.  */
129 static rtx *reg_last_uses;
130 static rtx *reg_last_sets;
131
132 /* Vector indexed by INSN_UID giving the original ordering of the insns. */
133 static int *insn_luid;
134 #define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])
135
136 /* Vector indexed by INSN_UID giving each instruction a priority. */
137 static int *insn_priority;
138 #define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])
139
140 #define DONE_PRIORITY -1
141 #define MAX_PRIORITY 0x7fffffff
142 #define TAIL_PRIORITY 0x7ffffffe
143 #define LAUNCH_PRIORITY 0x7f000001
144 #define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
145 #define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)
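/* Note that TAIL_PRIORITY and LAUNCH_PRIORITY both have bits set within
   the 0x7f000000 mask, so LOW_PRIORITY_P is false for them, while the
   much smaller values computed by the recurrence in priority () below
   normally leave those bits clear.  */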
146
147	/* Vector indexed by INSN_UID giving the number of insns referring to this insn.  */
148 static int *insn_ref_count;
149 #define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])
150
151 /* Vector indexed by INSN_UID giving line-number note in effect for each
152 insn. For line-number notes, this indicates whether the note may be
153 reused. */
154 static rtx *line_note;
155 #define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])
156
157 /* Vector indexed by basic block number giving the starting line-number
158 for each basic block. */
159 static rtx *line_note_head;
160
161 /* List of important notes we must keep around. This is a pointer to the
162 last element in the list. */
163 static rtx note_list;
164
165 /* Regsets telling whether a given register is live or dead before the last
166 scheduled insn. Must scan the instructions once before scheduling to
167 determine what registers are live or dead at the end of the block. */
168 static regset bb_dead_regs;
169 static regset bb_live_regs;
170
171 /* Regset telling whether a given register is live after the insn currently
172 being scheduled. Before processing an insn, this is equal to bb_live_regs
173	   above.  This is used so that we can find registers that are newly born/dead
174 after processing an insn. */
175 static regset old_live_regs;
176
177 /* The chain of REG_DEAD notes. REG_DEAD notes are removed from all insns
178 during the initial scan and reused later. If there are not exactly as
179 many REG_DEAD notes in the post scheduled code as there were in the
180 prescheduled code then we trigger an abort because this indicates a bug. */
181 static rtx dead_notes;
182
183 /* Queues, etc. */
184
185 /* An instruction is ready to be scheduled when all insns following it
186 have already been scheduled. It is important to ensure that all
187 insns which use its result will not be executed until its result
188 has been computed. We maintain three lists (conceptually):
189
190 (1) a "Ready" list of unscheduled, uncommitted insns
191 (2) a "Scheduled" list of scheduled insns
192 (3) a "Pending" list of insns which can be scheduled, but
193 for stalls.
194
195 Insns move from the "Ready" list to the "Pending" list when
196 all insns following them have been scheduled.
197
198 Insns move from the "Pending" list to the "Scheduled" list
199 when there is sufficient space in the pipeline to prevent
200 stalls between the insn and scheduled insns which use it.
201
202 The "Pending" list acts as a buffer to prevent insns
203 from avalanching.
204
205 The "Ready" list is implemented by the variable `ready'.
206 The "Pending" list are the insns in the LOG_LINKS of ready insns.
207 The "Scheduled" list is the new insn chain built by this pass. */
208
209 /* Implement a circular buffer from which instructions are issued. */
210 #define Q_SIZE 128
211 static rtx insn_queue[Q_SIZE];
212 static int q_ptr = 0;
213 static int q_size = 0;
214 #define NEXT_Q(X) (((X)+1) & (Q_SIZE-1))
215	#define NEXT_Q_AFTER(X,C) (((X)+(C)) & (Q_SIZE-1))
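/* A minimal sketch (illustrative only, never compiled): because Q_SIZE
   is a power of two, masking with Q_SIZE-1 is equivalent to reducing an
   index modulo Q_SIZE, which is what makes the buffer circular.  */
#if 0
static void
example_queue_indexing ()
{
  /* Advancing from the last slot wraps around to slot 0.  */
  if (NEXT_Q (Q_SIZE - 1) != 0)
    abort ();

  /* Advancing slot 5 by Q_SIZE+2 cycles lands on slot 7: the
     displacement is taken modulo Q_SIZE.  */
  if (NEXT_Q_AFTER (5, Q_SIZE + 2) != 7)
    abort ();
}
#endif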
216 \f
217 /* Forward declarations. */
218 static void sched_analyze_2 ();
219 static void schedule_block ();
220
221 /* Main entry point of this file. */
222 void schedule_insns ();
223 \f
224 #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
225
226 /* Vector indexed by N giving the initial (unchanging) value known
227 for pseudo-register N. */
228 static rtx *reg_known_value;
229
230 /* Indicates number of valid entries in reg_known_value. */
231 static int reg_known_value_size;
232
233 static rtx
234 canon_rtx (x)
235 rtx x;
236 {
237 if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER
238	      && REGNO (x) < reg_known_value_size)
239 return reg_known_value[REGNO (x)];
240 else if (GET_CODE (x) == PLUS)
241 {
242 rtx x0 = canon_rtx (XEXP (x, 0));
243 rtx x1 = canon_rtx (XEXP (x, 1));
244
245 if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
246 {
247 /* We can tolerate LO_SUMs being offset here; these
248 rtl are used for nothing other than comparisons. */
249 if (GET_CODE (x0) == CONST_INT)
250 return plus_constant_for_output (x1, INTVAL (x0));
251 else if (GET_CODE (x1) == CONST_INT)
252 return plus_constant_for_output (x0, INTVAL (x1));
253 return gen_rtx (PLUS, GET_MODE (x), x0, x1);
254 }
255 }
256 return x;
257 }
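/* For example, if init_alias_analysis below has recorded that pseudo 65
   (a hypothetical register number) always holds (symbol_ref "x"), then
   canon_rtx rewrites (plus (reg 65) (const_int 4)) as the symbol_ref
   plus 4, so two such addresses spelled through different pseudos will
   compare equal in rtx_equal_for_memref_p.  */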
258
259 /* Set up all info needed to perform alias analysis on memory references. */
260
261 void
262 init_alias_analysis ()
263 {
264 int maxreg = max_reg_num ();
265 rtx insn;
266 rtx note;
267 rtx set;
268
269 reg_known_value_size = maxreg;
270
271 reg_known_value
272 = (rtx *) oballoc ((maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx))
273 - FIRST_PSEUDO_REGISTER;
274 bzero (reg_known_value+FIRST_PSEUDO_REGISTER,
275 (maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx));
276
277 /* Fill in the entries with known constant values. */
278 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
279 if ((set = single_set (insn)) != 0
280 && GET_CODE (SET_DEST (set)) == REG
281 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
282 && (((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
283 && reg_n_sets[REGNO (SET_DEST (set))] == 1)
284 || (note = find_reg_note (insn, REG_EQUIV, 0)) != 0)
285 && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
286 reg_known_value[REGNO (SET_DEST (set))] = XEXP (note, 0);
287
288 /* Fill in the remaining entries. */
289 while (--maxreg >= FIRST_PSEUDO_REGISTER)
290 if (reg_known_value[maxreg] == 0)
291 reg_known_value[maxreg] = regno_reg_rtx[maxreg];
292 }
293
294 /* Return 1 if X and Y are identical-looking rtx's.
295
296 We use the data in reg_known_value above to see if two registers with
297 different numbers are, in fact, equivalent. */
298
299 static int
300 rtx_equal_for_memref_p (x, y)
301 rtx x, y;
302 {
303 register int i;
304 register int j;
305 register enum rtx_code code;
306 register char *fmt;
307
308 if (x == 0 && y == 0)
309 return 1;
310 if (x == 0 || y == 0)
311 return 0;
312 x = canon_rtx (x);
313 y = canon_rtx (y);
314
315 if (x == y)
316 return 1;
317
318 code = GET_CODE (x);
319 /* Rtx's of different codes cannot be equal. */
320 if (code != GET_CODE (y))
321 return 0;
322
323 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
324 (REG:SI x) and (REG:HI x) are NOT equivalent. */
325
326 if (GET_MODE (x) != GET_MODE (y))
327 return 0;
328
329 /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively. */
330
331 if (code == REG)
332 return REGNO (x) == REGNO (y);
333 if (code == LABEL_REF)
334 return XEXP (x, 0) == XEXP (y, 0);
335 if (code == SYMBOL_REF)
336 return XSTR (x, 0) == XSTR (y, 0);
337
338 /* Compare the elements. If any pair of corresponding elements
339	     fail to match, return 0 for the whole thing.  */
340
341 fmt = GET_RTX_FORMAT (code);
342 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
343 {
344 switch (fmt[i])
345 {
346 case 'n':
347 case 'i':
348 if (XINT (x, i) != XINT (y, i))
349 return 0;
350 break;
351
352 case 'V':
353 case 'E':
354 /* Two vectors must have the same length. */
355 if (XVECLEN (x, i) != XVECLEN (y, i))
356 return 0;
357
358 /* And the corresponding elements must match. */
359 for (j = 0; j < XVECLEN (x, i); j++)
360 if (rtx_equal_for_memref_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0)
361 return 0;
362 break;
363
364 case 'e':
365 if (rtx_equal_for_memref_p (XEXP (x, i), XEXP (y, i)) == 0)
366 return 0;
367 break;
368
369 case 'S':
370 case 's':
371 if (strcmp (XSTR (x, i), XSTR (y, i)))
372 return 0;
373 break;
374
375 case 'u':
376 /* These are just backpointers, so they don't matter. */
377 break;
378
379 case '0':
380 break;
381
382 /* It is believed that rtx's at this level will never
383 contain anything but integers and other rtx's,
384 except for within LABEL_REFs and SYMBOL_REFs. */
385 default:
386 abort ();
387 }
388 }
389 return 1;
390 }
391
392 /* Given an rtx X, find a SYMBOL_REF or LABEL_REF within
393 X and return it, or return 0 if none found. */
394
395 static rtx
396 find_symbolic_term (x)
397 rtx x;
398 {
399 register int i;
400 register enum rtx_code code;
401 register char *fmt;
402
403 code = GET_CODE (x);
404 if (code == SYMBOL_REF || code == LABEL_REF)
405 return x;
406 if (GET_RTX_CLASS (code) == 'o')
407 return 0;
408
409 fmt = GET_RTX_FORMAT (code);
410 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
411 {
412 rtx t;
413
414 if (fmt[i] == 'e')
415 {
416 t = find_symbolic_term (XEXP (x, i));
417 if (t != 0)
418 return t;
419 }
420 else if (fmt[i] == 'E')
421 break;
422 }
423 return 0;
424 }
425
426 /* Return nonzero if X and Y (memory addresses) could reference the
427 same location in memory. C is an offset accumulator. When
428 C is nonzero, we are testing aliases between X and Y + C.
429 XSIZE is the size in bytes of the X reference,
430 similarly YSIZE is the size in bytes for Y.
431
432 If XSIZE or YSIZE is zero, we do not know the amount of memory being
433 referenced (the reference was BLKmode), so make the most pessimistic
434 assumptions.
435
436 We recognize the following cases of non-conflicting memory:
437
438 (1) addresses involving the frame pointer cannot conflict
439 with addresses involving static variables.
440 (2) static variables with different addresses cannot conflict.
441
442	   It would be nice to notice that varying addresses cannot conflict with fp if
443	   no local variables have had their addresses taken, but that's too hard now.  */
444
445 static int
446 memrefs_conflict_p (xsize, x, ysize, y, c)
447 rtx x, y;
448 int xsize, ysize;
449 int c;
450 {
451 if (GET_CODE (x) == HIGH)
452 x = XEXP (x, 0);
453 else if (GET_CODE (x) == LO_SUM)
454 x = XEXP (x, 1);
455 else
456 x = canon_rtx (x);
457 if (GET_CODE (y) == HIGH)
458 y = XEXP (y, 0);
459 else if (GET_CODE (y) == LO_SUM)
460 y = XEXP (y, 1);
461 else
462 y = canon_rtx (y);
463
464 if (rtx_equal_for_memref_p (x, y))
465 return (xsize == 0 || ysize == 0 ||
466 (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
467
468 if (y == frame_pointer_rtx || y == stack_pointer_rtx)
469 {
470 rtx t = y;
471 int tsize = ysize;
472 y = x; ysize = xsize;
473 x = t; xsize = tsize;
474 }
475
476 if (x == frame_pointer_rtx || x == stack_pointer_rtx)
477 {
478 rtx y1;
479
480 if (CONSTANT_P (y))
481 return 0;
482
483 if (GET_CODE (y) == PLUS
484 && canon_rtx (XEXP (y, 0)) == x
485 && (y1 = canon_rtx (XEXP (y, 1)))
486 && GET_CODE (y1) == CONST_INT)
487 {
488 c += INTVAL (y1);
489 return (xsize == 0 || ysize == 0
490 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
491 }
492
493 if (GET_CODE (y) == PLUS
494 && (y1 = canon_rtx (XEXP (y, 0)))
495 && CONSTANT_P (y1))
496 return 0;
497
498 return 1;
499 }
500
501 if (GET_CODE (x) == PLUS)
502 {
503	      /* The fact that X is canonicalized means that this
504	         PLUS rtx is canonicalized.  */
505 rtx x0 = XEXP (x, 0);
506 rtx x1 = XEXP (x, 1);
507
508 if (GET_CODE (y) == PLUS)
509 {
510	          /* The fact that Y is canonicalized means that this
511	             PLUS rtx is canonicalized.  */
512 rtx y0 = XEXP (y, 0);
513 rtx y1 = XEXP (y, 1);
514
515 if (rtx_equal_for_memref_p (x1, y1))
516 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
517 if (rtx_equal_for_memref_p (x0, y0))
518 return memrefs_conflict_p (xsize, x1, ysize, y1, c);
519 if (GET_CODE (x1) == CONST_INT)
520 if (GET_CODE (y1) == CONST_INT)
521 return memrefs_conflict_p (xsize, x0, ysize, y0,
522 c - INTVAL (x1) + INTVAL (y1));
523 else
524 return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
525 else if (GET_CODE (y1) == CONST_INT)
526 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
527
528 /* Handle case where we cannot understand iteration operators,
529 but we notice that the base addresses are distinct objects. */
530 x = find_symbolic_term (x);
531 if (x == 0)
532 return 1;
533 y = find_symbolic_term (y);
534 if (y == 0)
535 return 1;
536 return rtx_equal_for_memref_p (x, y);
537 }
538 else if (GET_CODE (x1) == CONST_INT)
539 return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
540 }
541 else if (GET_CODE (y) == PLUS)
542 {
543	      /* The fact that Y is canonicalized means that this
544	         PLUS rtx is canonicalized.  */
545 rtx y0 = XEXP (y, 0);
546 rtx y1 = XEXP (y, 1);
547
548 if (GET_CODE (y1) == CONST_INT)
549 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
550 else
551 return 1;
552 }
553
554 if (GET_CODE (x) == GET_CODE (y))
555 switch (GET_CODE (x))
556 {
557 case MULT:
558 {
559 /* Handle cases where we expect the second operands to be the
560 same, and check only whether the first operand would conflict
561 or not. */
562 rtx x0, y0;
563 rtx x1 = canon_rtx (XEXP (x, 1));
564 rtx y1 = canon_rtx (XEXP (y, 1));
565 if (! rtx_equal_for_memref_p (x1, y1))
566 return 1;
567 x0 = canon_rtx (XEXP (x, 0));
568 y0 = canon_rtx (XEXP (y, 0));
569 if (rtx_equal_for_memref_p (x0, y0))
570 return (xsize == 0 || ysize == 0
571 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
572
573 /* Can't properly adjust our sizes. */
574 if (GET_CODE (x1) != CONST_INT)
575 return 1;
576 xsize /= INTVAL (x1);
577 ysize /= INTVAL (x1);
578 c /= INTVAL (x1);
579 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
580 }
581 }
582
583 if (CONSTANT_P (x))
584 {
585 if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
586 {
587 c += (INTVAL (y) - INTVAL (x));
588 return (xsize == 0 || ysize == 0
589 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
590 }
591
592 if (GET_CODE (x) == CONST)
593 {
594 if (GET_CODE (y) == CONST)
595 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
596 ysize, canon_rtx (XEXP (y, 0)), c);
597 else
598 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
599 ysize, y, c);
600 }
601 if (GET_CODE (y) == CONST)
602 return memrefs_conflict_p (xsize, x, ysize,
603 canon_rtx (XEXP (y, 0)), c);
604
605 if (CONSTANT_P (y))
606 return (rtx_equal_for_memref_p (x, y)
607 && (xsize == 0 || ysize == 0
608 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)));
609
610 return 1;
611 }
612 return 1;
613 }
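/* A minimal sketch (illustrative only, never compiled) of the overlap
   test repeated throughout memrefs_conflict_p: once X and Y are known
   to share a base, X covers bytes [0, xsize) and Y covers [c, c+ysize),
   with a size of zero meaning "unknown extent, assume a conflict".  */
#if 0
static int
example_ranges_overlap (xsize, ysize, c)
     int xsize, ysize, c;
{
  return (xsize == 0 || ysize == 0
	  || (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0));
}

/* example_ranges_overlap (4, 4, 4) == 0: adjacent words don't conflict.
   example_ranges_overlap (4, 4, 2) == 1: [0,4) and [2,6) overlap.  */
#endif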
614
615 /* Functions to compute memory dependencies.
616
617 Since we process the insns in execution order, we can build tables
618 to keep track of what registers are fixed (and not aliased), what registers
619 are varying in known ways, and what registers are varying in unknown
620 ways.
621
622 If both memory references are volatile, then there must always be a
623 dependence between the two references, since their order can not be
624 changed. A volatile and non-volatile reference can be interchanged
625 though.
626
627 A MEM_IN_STRUCT reference at a varying address can never conflict with a
628 non-MEM_IN_STRUCT reference at a fixed address. */
629
630 /* Read dependence: X is read after read in MEM takes place. There can
631 only be a dependence here if both reads are volatile. */
632
633 int
634 read_dependence (mem, x)
635 rtx mem;
636 rtx x;
637 {
638 return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
639 }
640
641 /* True dependence: X is read after store in MEM takes place. */
642
643 int
644 true_dependence (mem, x)
645 rtx mem;
646 rtx x;
647 {
648 if (RTX_UNCHANGING_P (x))
649 return 0;
650
651 return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
652 || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
653 SIZE_FOR_MODE (x), XEXP (x, 0), 0)
654 && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
655 && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
656 && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
657 && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
658 }
659
660 /* Anti dependence: X is written after read in MEM takes place. */
661
662 int
663 anti_dependence (mem, x)
664 rtx mem;
665 rtx x;
666 {
667 if (RTX_UNCHANGING_P (mem))
668 return 0;
669
670 return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
671 || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
672 SIZE_FOR_MODE (x), XEXP (x, 0), 0)
673 && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
674 && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
675 && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
676 && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
677 }
678
679 /* Output dependence: X is written after store in MEM takes place. */
680
681 int
682 output_dependence (mem, x)
683 rtx mem;
684 rtx x;
685 {
686 return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
687 || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
688 SIZE_FOR_MODE (x), XEXP (x, 0), 0)
689 && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
690 && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
691 && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
692 && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
693 }
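/* In all three cases above, the MEM_IN_STRUCT_P clauses implement the
   rule stated earlier: a structure reference at a varying address is
   never taken to conflict with a non-structure reference at a fixed
   address, and vice versa; every other combination falls through to
   memrefs_conflict_p.  */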
694 \f
695 #ifndef INSN_SCHEDULING
696 void schedule_insns () {}
697 #else
698 #ifndef __GNUC__
699 #define __inline
700 #endif
701
702 /* Computation of memory dependencies. */
703
704 /* The *_insns and *_mems are paired lists. Each pending memory operation
705 will have a pointer to the MEM rtx on one list and a pointer to the
706 containing insn on the other list in the same place in the list. */
707
708 /* We can't use add_dependence like the old code did, because a single insn
709 may have multiple memory accesses, and hence needs to be on the list
710 once for each memory access. Add_dependence won't let you add an insn
711 to a list more than once. */
712
713 /* An INSN_LIST containing all insns with pending read operations. */
714 static rtx pending_read_insns;
715
716 /* An EXPR_LIST containing all MEM rtx's which are pending reads. */
717 static rtx pending_read_mems;
718
719 /* An INSN_LIST containing all insns with pending write operations. */
720 static rtx pending_write_insns;
721
722 /* An EXPR_LIST containing all MEM rtx's which are pending writes. */
723 static rtx pending_write_mems;
724
725	/* Indicates the combined length of the two pending lists.  We must prevent
726	   these lists from ever growing too large, since the number of dependencies
727	   produced is at least N*N, and execution time at least proportional to
728	   4*N*N, where N is the length of these pending lists.  */
729
730 static int pending_lists_length;
731
732 /* An INSN_LIST containing all INSN_LISTs allocated but currently unused. */
733
734 static rtx unused_insn_list;
735
736 /* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused. */
737
738 static rtx unused_expr_list;
739
740 /* The last insn upon which all memory references must depend.
741 This is an insn which flushed the pending lists, creating a dependency
742 between it and all previously pending memory references. This creates
743 a barrier (or a checkpoint) which no memory reference is allowed to cross.
744
745	   This includes all non-constant CALL_INSNs.  When we do interprocedural
746 alias analysis, this restriction can be relaxed.
747 This may also be an INSN that writes memory if the pending lists grow
748 too large. */
749
750 static rtx last_pending_memory_flush;
751
752 /* The last function call we have seen. All hard regs, and, of course,
753 the last function call, must depend on this. */
754
755 static rtx last_function_call;
756
757 /* The LOG_LINKS field of this is a list of insns which use a pseudo register
758 that does not already cross a call. We create dependencies between each
759	   of those insns and the next call insn, to ensure that they won't cross a call
760 after scheduling is done. */
761
762 static rtx sched_before_next_call;
763
764 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
765 so that insns independent of the last scheduled insn will be preferred
766 over dependent instructions. */
767
768 static rtx last_scheduled_insn;
769
770 /* Process an insn's memory dependencies. There are four kinds of
771 dependencies:
772
773 (0) read dependence: read follows read
774 (1) true dependence: read follows write
775 (2) anti dependence: write follows read
776 (3) output dependence: write follows write
777
778 We are careful to build only dependencies which actually exist, and
779 use transitivity to avoid building too many links. */
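/* For example, in the sequence

       insn 1:  r1 = mem[a]
       insn 2:  mem[a] = r2
       insn 3:  mem[a] = r3
       insn 4:  r4 = mem[a]

   insn 2 is anti dependent on insn 1 (write follows read), insn 3 is
   output dependent on insn 2 (write follows write), and insn 4 is true
   dependent on insn 3 (read follows write), so none of the four may be
   interchanged with its neighbor.  */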
780 \f
781 /* Return the INSN_LIST containing INSN in LIST, or NULL
782 if LIST does not contain INSN. */
783
784 __inline static rtx
785 find_insn_list (insn, list)
786 rtx insn;
787 rtx list;
788 {
789 while (list)
790 {
791 if (XEXP (list, 0) == insn)
792 return list;
793 list = XEXP (list, 1);
794 }
795 return 0;
796 }
797
798 /* Compute cost of executing INSN. This is the number of virtual
799 cycles taken between instruction issue and instruction results. */
800
801 __inline static int
802 insn_cost (insn)
803 rtx insn;
804 {
805 register int cost;
806
807 recog_memoized (insn);
808
809 /* A USE insn, or something else we don't need to understand.
810 We can't pass these directly to result_ready_cost because it will trigger
811 a fatal error for unrecognizable insns. */
812 if (INSN_CODE (insn) < 0)
813 return 1;
814 else
815 {
816 cost = result_ready_cost (insn);
817
818 if (cost < 1)
819 cost = 1;
820
821 return cost;
822 }
823 }
824
825 /* Compute the priority number for INSN. */
826
827 static int
828 priority (insn)
829 rtx insn;
830 {
831 if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
832 {
833 int prev_priority;
834 int max_priority;
835 int this_priority = INSN_PRIORITY (insn);
836 rtx prev;
837
838 if (this_priority > 0)
839 return this_priority;
840
841 max_priority = 1;
842
843 /* Nonzero if these insns must be scheduled together. */
844 if (SCHED_GROUP_P (insn))
845 {
846 prev = insn;
847 while (SCHED_GROUP_P (prev))
848 {
849 prev = PREV_INSN (prev);
850 INSN_REF_COUNT (prev) += 1;
851 }
852 }
853
854 for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
855 {
856 rtx x = XEXP (prev, 0);
857
858 /* A dependence pointing to a note is always obsolete, because
859 sched_analyze_insn will have created any necessary new dependences
860 which replace it. Notes can be created when instructions are
861 deleted by insn splitting, or by register allocation. */
862 if (GET_CODE (x) == NOTE)
863 {
864 remove_dependence (insn, x);
865 continue;
866 }
867
868 /* This priority calculation was chosen because it results in the
869 least instruction movement, and does not hurt the performance
870 of the resulting code compared to the old algorithm.
871 This makes the sched algorithm more stable, which results
872 in better code, because there is less register pressure,
873 cross jumping is more likely to work, and debugging is easier.
874
875 When all instructions have a latency of 1, there is no need to
876 move any instructions. Subtracting one here ensures that in such
877 cases all instructions will end up with a priority of one, and
878 hence no scheduling will be done.
879
880 The original code did not subtract the one, and added the
881 insn_cost of the current instruction to its priority (e.g.
882 move the insn_cost call down to the end). */
883
884 if (REG_NOTE_KIND (prev) == 0)
885 /* Data dependence. */
886 prev_priority = priority (x) + insn_cost (x) - 1;
887 else
888 /* Anti or output dependence. Don't add the latency of this
889 insn's result, because it isn't being used. */
890 prev_priority = priority (x);
891
892 if (prev_priority > max_priority)
893 max_priority = prev_priority;
894 INSN_REF_COUNT (x) += 1;
895 }
896
897 INSN_PRIORITY (insn) = max_priority;
898 return INSN_PRIORITY (insn);
899 }
900 return 0;
901 }
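/* A minimal standalone sketch (illustrative only, never compiled) of
   the recurrence implemented by priority () above, on a hypothetical
   four-insn block where insn I depends on the insns listed in
   example_deps[I]: an insn's priority is the latency-weighted length
   of the longest dependence chain reaching it.  */
#if 0
static int example_cost[4] = {3, 1, 2, 1};	/* insn_cost of each insn */
static int example_deps[4][4] = {		/* -1 ends each list */
  {-1},			/* insn 0 depends on nothing */
  {0, -1},		/* insn 1 depends on insn 0 */
  {0, -1},		/* insn 2 depends on insn 0 */
  {1, 2, -1}		/* insn 3 depends on insns 1 and 2 */
};

static int
example_priority (i)
     int i;
{
  int max = 1, j, p, d;

  for (j = 0; (d = example_deps[i][j]) >= 0; j++)
    {
      /* Data dependence: add the dependency's latency minus one, as in
	 the REG_NOTE_KIND == 0 case above.  (The real code memoizes
	 the result in INSN_PRIORITY; this sketch recomputes it.)  */
      p = example_priority (d) + example_cost[d] - 1;
      if (p > max)
	max = p;
    }
  return max;
}

/* example_priority (3) == 4: the critical chain 0 -> 2 -> 3 yields
   1 + (3-1) + (2-1) = 4 virtual cycles.  */
#endif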
902 \f
903 /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
904 them to the unused_*_list variables, so that they can be reused. */
905
906 static void
907 free_pending_lists ()
908 {
909 register rtx link, prev_link;
910
911 if (pending_read_insns)
912 {
913 prev_link = pending_read_insns;
914 link = XEXP (prev_link, 1);
915
916 while (link)
917 {
918 prev_link = link;
919 link = XEXP (link, 1);
920 }
921
922 XEXP (prev_link, 1) = unused_insn_list;
923 unused_insn_list = pending_read_insns;
924 pending_read_insns = 0;
925 }
926
927 if (pending_write_insns)
928 {
929 prev_link = pending_write_insns;
930 link = XEXP (prev_link, 1);
931
932 while (link)
933 {
934 prev_link = link;
935 link = XEXP (link, 1);
936 }
937
938 XEXP (prev_link, 1) = unused_insn_list;
939 unused_insn_list = pending_write_insns;
940 pending_write_insns = 0;
941 }
942
943 if (pending_read_mems)
944 {
945 prev_link = pending_read_mems;
946 link = XEXP (prev_link, 1);
947
948 while (link)
949 {
950 prev_link = link;
951 link = XEXP (link, 1);
952 }
953
954 XEXP (prev_link, 1) = unused_expr_list;
955 unused_expr_list = pending_read_mems;
956 pending_read_mems = 0;
957 }
958
959 if (pending_write_mems)
960 {
961 prev_link = pending_write_mems;
962 link = XEXP (prev_link, 1);
963
964 while (link)
965 {
966 prev_link = link;
967 link = XEXP (link, 1);
968 }
969
970 XEXP (prev_link, 1) = unused_expr_list;
971 unused_expr_list = pending_write_mems;
972 pending_write_mems = 0;
973 }
974 }
975
976 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
977 The MEM is a memory reference contained within INSN, which we are saving
978 so that we can do memory aliasing on it. */
979
980 static void
981 add_insn_mem_dependence (insn_list, mem_list, insn, mem)
982 rtx *insn_list, *mem_list, insn, mem;
983 {
984 register rtx link;
985
986 if (unused_insn_list)
987 {
988 link = unused_insn_list;
989 unused_insn_list = XEXP (link, 1);
990 }
991 else
992 link = rtx_alloc (INSN_LIST);
993 XEXP (link, 0) = insn;
994 XEXP (link, 1) = *insn_list;
995 *insn_list = link;
996
997 if (unused_expr_list)
998 {
999 link = unused_expr_list;
1000 unused_expr_list = XEXP (link, 1);
1001 }
1002 else
1003 link = rtx_alloc (EXPR_LIST);
1004 XEXP (link, 0) = mem;
1005 XEXP (link, 1) = *mem_list;
1006 *mem_list = link;
1007
1008 pending_lists_length++;
1009 }
1010 \f
1011 /* Make a dependency between every memory reference on the pending lists
1012 and INSN, thus flushing the pending lists. */
1013
1014 static void
1015 flush_pending_lists (insn)
1016 rtx insn;
1017 {
1018 rtx link;
1019
1020 while (pending_read_insns)
1021 {
1022 add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);
1023
1024 link = pending_read_insns;
1025 pending_read_insns = XEXP (pending_read_insns, 1);
1026 XEXP (link, 1) = unused_insn_list;
1027 unused_insn_list = link;
1028
1029 link = pending_read_mems;
1030 pending_read_mems = XEXP (pending_read_mems, 1);
1031 XEXP (link, 1) = unused_expr_list;
1032 unused_expr_list = link;
1033 }
1034 while (pending_write_insns)
1035 {
1036 add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);
1037
1038 link = pending_write_insns;
1039 pending_write_insns = XEXP (pending_write_insns, 1);
1040 XEXP (link, 1) = unused_insn_list;
1041 unused_insn_list = link;
1042
1043 link = pending_write_mems;
1044 pending_write_mems = XEXP (pending_write_mems, 1);
1045 XEXP (link, 1) = unused_expr_list;
1046 unused_expr_list = link;
1047 }
1048 pending_lists_length = 0;
1049
1050 if (last_pending_memory_flush)
1051 add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
1052
1053 last_pending_memory_flush = insn;
1054 }
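/* For example, once a non-constant call insn C flushes the lists, every
   subsequent memory reference gains a dependence on C through
   last_pending_memory_flush, and the next flushing insn D in turn
   depends on C, so the barriers form a chain that no memory reference
   can be moved across.  */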
1055
1056 /* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
1057 by the write to the destination of X, and reads of everything mentioned. */
1058
1059 static void
1060 sched_analyze_1 (x, insn)
1061 rtx x;
1062 rtx insn;
1063 {
1064 register int regno;
1065 register rtx dest = SET_DEST (x);
1066
1067 if (dest == 0)
1068 return;
1069
1070 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
1071 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
1072 {
1073 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
1074 {
1075 /* The second and third arguments are values read by this insn. */
1076 sched_analyze_2 (XEXP (dest, 1), insn);
1077 sched_analyze_2 (XEXP (dest, 2), insn);
1078 }
1079 dest = SUBREG_REG (dest);
1080 }
1081
1082 if (GET_CODE (dest) == REG)
1083 {
1084 register int offset, bit, i;
1085
1086 regno = REGNO (dest);
1087
1088 /* A hard reg in a wide mode may really be multiple registers.
1089 If so, mark all of them just like the first. */
1090 if (regno < FIRST_PSEUDO_REGISTER)
1091 {
1092 i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
1093 while (--i >= 0)
1094 {
1095 rtx u;
1096
1097 for (u = reg_last_uses[regno+i]; u; u = XEXP (u, 1))
1098 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1099 reg_last_uses[regno + i] = 0;
1100 if (reg_last_sets[regno + i])
1101 add_dependence (insn, reg_last_sets[regno + i],
1102 REG_DEP_OUTPUT);
1103 reg_last_sets[regno + i] = insn;
1104	              if ((call_used_regs[regno + i] || global_regs[regno + i])
1105 && last_function_call)
1106 /* Function calls clobber all call_used regs. */
1107 add_dependence (insn, last_function_call, REG_DEP_ANTI);
1108 }
1109 }
1110 else
1111 {
1112 rtx u;
1113
1114 for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
1115 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1116 reg_last_uses[regno] = 0;
1117 if (reg_last_sets[regno])
1118 add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
1119 reg_last_sets[regno] = insn;
1120
1121 /* Don't let it cross a call after scheduling if it doesn't
1122 already cross one. */
1123 if (reg_n_calls_crossed[regno] == 0 && last_function_call)
1124 add_dependence (insn, last_function_call, REG_DEP_ANTI);
1125 }
1126 }
1127 else if (GET_CODE (dest) == MEM)
1128 {
1129 /* Writing memory. */
1130
1131 if (pending_lists_length > 32)
1132 {
1133 /* Flush all pending reads and writes to prevent the pending lists
1134 from getting any larger. Insn scheduling runs too slowly when
1135 these lists get long. The number 32 was chosen because it
1136	             seems like a reasonable number.  When compiling GCC with itself,
1137 this flush occurs 8 times for sparc, and 10 times for m88k using
1138 the number 32. */
1139 flush_pending_lists (insn);
1140 }
1141 else
1142 {
1143 rtx pending, pending_mem;
1144
1145 pending = pending_read_insns;
1146 pending_mem = pending_read_mems;
1147 while (pending)
1148 {
1149 /* If a dependency already exists, don't create a new one. */
1150 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1151	            if (anti_dependence (XEXP (pending_mem, 0), dest))
1152 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
1153
1154 pending = XEXP (pending, 1);
1155 pending_mem = XEXP (pending_mem, 1);
1156 }
1157
1158 pending = pending_write_insns;
1159 pending_mem = pending_write_mems;
1160 while (pending)
1161 {
1162 /* If a dependency already exists, don't create a new one. */
1163 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1164 if (output_dependence (XEXP (pending_mem, 0), dest))
1165 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1166
1167 pending = XEXP (pending, 1);
1168 pending_mem = XEXP (pending_mem, 1);
1169 }
1170
1171 if (last_pending_memory_flush)
1172 add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
1173
1174 add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
1175 insn, dest);
1176 }
1177 sched_analyze_2 (XEXP (dest, 0), insn);
1178 }
1179
1180 /* Analyze reads. */
1181 if (GET_CODE (x) == SET)
1182 sched_analyze_2 (SET_SRC (x), insn);
1183 else if (GET_CODE (x) != CLOBBER)
1184 sched_analyze_2 (dest, insn);
1185 }
1186
1187 /* Analyze the uses of memory and registers in rtx X in INSN. */
1188
1189 static void
1190 sched_analyze_2 (x, insn)
1191 rtx x;
1192 rtx insn;
1193 {
1194 register int i;
1195 register int j;
1196 register enum rtx_code code;
1197 register char *fmt;
1198
1199 if (x == 0)
1200 return;
1201
1202 code = GET_CODE (x);
1203
1204 /* Get rid of the easy cases first. */
1205
1206 /* Ignore constants. Note that we must handle CONST_DOUBLE here
1207 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
1208 this does not mean that this insn is using cc0. */
1209 if (code == CONST_INT || code == CONST_DOUBLE || code == SYMBOL_REF
1210 || code == CONST || code == LABEL_REF)
1211 return;
1212
1213 #ifdef HAVE_cc0
1214 else if (code == CC0)
1215 {
1216 rtx link;
1217
1218 /* User of CC0 depends on immediately preceding insn.
1219 All notes are removed from the list of insns to schedule before we
1220 reach here, so the previous insn must be the setter of cc0. */
1221 if (GET_CODE (PREV_INSN (insn)) != INSN)
1222 abort ();
1223 SCHED_GROUP_P (insn) = 1;
1224
1225 /* Make a copy of all dependencies on PREV_INSN, and add to this insn.
1226 This is so that all the dependencies will apply to the group. */
1227
1228 for (link = LOG_LINKS (PREV_INSN (insn)); link; link = XEXP (link, 1))
1229 add_dependence (insn, XEXP (link, 0), GET_MODE (link));
1230
1231 return;
1232 }
1233 #endif
1234
1235 else if (code == REG)
1236 {
1237 int regno = REGNO (x);
1238 if (regno < FIRST_PSEUDO_REGISTER)
1239 {
1240 int i;
1241
1242 i = HARD_REGNO_NREGS (regno, GET_MODE (x));
1243 while (--i >= 0)
1244 {
1245 reg_last_uses[regno + i]
1246 = gen_rtx (INSN_LIST, VOIDmode,
1247 insn, reg_last_uses[regno + i]);
1248 if (reg_last_sets[regno + i])
1249 add_dependence (insn, reg_last_sets[regno + i], 0);
1250 if ((call_used_regs[regno + i] || global_regs[regno + i])
1251 && last_function_call)
1252 /* Function calls clobber all call_used regs. */
1253 add_dependence (insn, last_function_call, REG_DEP_ANTI);
1254 }
1255 }
1256 else
1257 {
1258 reg_last_uses[regno]
1259 = gen_rtx (INSN_LIST, VOIDmode, insn, reg_last_uses[regno]);
1260 if (reg_last_sets[regno])
1261 add_dependence (insn, reg_last_sets[regno], 0);
1262
1263 /* If the register does not already cross any calls, then add this
1264 insn to the sched_before_next_call list so that it will still
1265 not cross calls after scheduling. */
1266 if (reg_n_calls_crossed[regno] == 0)
1267 add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
1268 }
1269 return;
1270 }
1271
1272 /* The interesting case. */
1273 else if (code == MEM)
1274 {
1275 /* Reading memory. */
1276
1277 /* Don't create a dependence for memory references which are known to
1278 be unchanging, such as constant pool accesses. These will never
1279 conflict with any other memory access. */
1280 if (RTX_UNCHANGING_P (x) == 0)
1281 {
1282 rtx pending, pending_mem;
1283
1284 pending = pending_read_insns;
1285 pending_mem = pending_read_mems;
1286 while (pending)
1287 {
1288 /* If a dependency already exists, don't create a new one. */
1289 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1290 if (read_dependence (XEXP (pending_mem, 0), x))
1291 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
1292
1293 pending = XEXP (pending, 1);
1294 pending_mem = XEXP (pending_mem, 1);
1295 }
1296
1297 pending = pending_write_insns;
1298 pending_mem = pending_write_mems;
1299 while (pending)
1300 {
1301 /* If a dependency already exists, don't create a new one. */
1302 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1303 if (true_dependence (XEXP (pending_mem, 0), x))
1304 add_dependence (insn, XEXP (pending, 0), 0);
1305
1306 pending = XEXP (pending, 1);
1307 pending_mem = XEXP (pending_mem, 1);
1308 }
1309 if (last_pending_memory_flush)
1310 add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
1311
1312 /* Always add these dependencies to pending_reads, since
1313 this insn may be followed by a write. */
1314 add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
1315 insn, x);
1316 }
1317 /* Take advantage of tail recursion here. */
1318 sched_analyze_2 (XEXP (x, 0), insn);
1319 return;
1320 }
1321
1322 else if (code == ASM_OPERANDS || code == ASM_INPUT
1323 || code == UNSPEC_VOLATILE)
1324 {
1325 rtx u;
1326
1327 /* Traditional and volatile asm instructions must be considered to use
1328 and clobber all hard registers and all of memory. So must
1329 UNSPEC_VOLATILE operations. */
1330 if ((code == ASM_OPERANDS && MEM_VOLATILE_P (x)) || code == ASM_INPUT
1331 || code == UNSPEC_VOLATILE)
1332 {
1333 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1334 {
1335 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1336 if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
1337 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1338 reg_last_uses[i] = 0;
1339 if (reg_last_sets[i]
1340 && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
1341 add_dependence (insn, reg_last_sets[i], 0);
1342 reg_last_sets[i] = insn;
1343 }
1344
1345 flush_pending_lists (insn);
1346 }
1347
1348 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
1349	         We cannot just fall through here, since then we would be confused
1350	         by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
1351	         a traditional asm, unlike its normal usage.  */
1352
1353 if (code == ASM_OPERANDS)
1354 {
1355 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
1356 sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
1357 return;
1358 }
1359 }
1360
1361 /* Other cases: walk the insn. */
1362 fmt = GET_RTX_FORMAT (code);
1363 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1364 {
1365 if (fmt[i] == 'e')
1366 sched_analyze_2 (XEXP (x, i), insn);
1367 else if (fmt[i] == 'E')
1368 for (j = 0; j < XVECLEN (x, i); j++)
1369 sched_analyze_2 (XVECEXP (x, i, j), insn);
1370 }
1371 }
1372
1373 /* Analyze an INSN with pattern X to find all dependencies. */
1374
1375 static void
1376 sched_analyze_insn (x, insn)
1377 rtx x, insn;
1378 {
1379 register RTX_CODE code = GET_CODE (x);
1380 rtx link;
1381
1382 if (code == SET || code == CLOBBER)
1383 sched_analyze_1 (x, insn);
1384 else if (code == PARALLEL)
1385 {
1386 register int i;
1387 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1388 {
1389 code = GET_CODE (XVECEXP (x, 0, i));
1390 if (code == SET || code == CLOBBER)
1391 sched_analyze_1 (XVECEXP (x, 0, i), insn);
1392 else
1393 sched_analyze_2 (XVECEXP (x, 0, i), insn);
1394 }
1395 }
1396 else
1397 sched_analyze_2 (x, insn);
1398
1399 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1400 {
1401 /* Any REG_INC note is a SET of the register indicated. */
1402 if (REG_NOTE_KIND (link) == REG_INC)
1403 {
1404 rtx dest = XEXP (link, 0);
1405 int regno = REGNO (dest);
1406 int i;
1407
1408 /* A hard reg in a wide mode may really be multiple registers.
1409 If so, mark all of them just like the first. */
1410 if (regno < FIRST_PSEUDO_REGISTER)
1411 {
1412 i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
1413 while (--i >= 0)
1414 {
1415 rtx u;
1416
1417 for (u = reg_last_uses[regno+i]; u; u = XEXP (u, 1))
1418 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1419 reg_last_uses[regno + i] = 0;
1420 if (reg_last_sets[regno + i])
1421 add_dependence (insn, reg_last_sets[regno + i],
1422 REG_DEP_OUTPUT);
1423 reg_last_sets[regno + i] = insn;
1424	                  if ((call_used_regs[regno + i] || global_regs[regno + i])
1425 && last_function_call)
1426 /* Function calls clobber all call_used regs. */
1427 add_dependence (insn, last_function_call, REG_DEP_ANTI);
1428 }
1429 }
1430 else
1431 {
1432 rtx u;
1433
1434 for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
1435 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1436 reg_last_uses[regno] = 0;
1437 if (reg_last_sets[regno])
1438 add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
1439 reg_last_sets[regno] = insn;
1440
1441 /* Don't let it cross a call after scheduling if it doesn't
1442 already cross one. */
1443 if (reg_n_calls_crossed[regno] == 0 && last_function_call)
1444	                add_dependence (insn, last_function_call, REG_DEP_ANTI);
1445 }
1446 }
1447 }
1448
1449 /* Handle function calls. */
1450 if (GET_CODE (insn) == CALL_INSN)
1451 {
1452 rtx dep_insn;
1453 rtx prev_dep_insn;
1454
1455 /* When scheduling instructions, we make sure calls don't lose their
1456	         accompanying USE insns by making each depend on the next, in order.  */
1457
1458 prev_dep_insn = insn;
1459 dep_insn = PREV_INSN (insn);
1460 while (GET_CODE (dep_insn) == INSN
1461 && GET_CODE (PATTERN (dep_insn)) == USE)
1462 {
1463 SCHED_GROUP_P (prev_dep_insn) = 1;
1464
1465 /* Make a copy of all dependencies on dep_insn, and add to insn.
1466 This is so that all of the dependencies will apply to the
1467 group. */
1468
1469 for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
1470 add_dependence (insn, XEXP (link, 0), GET_MODE (link));
1471
1472 prev_dep_insn = dep_insn;
1473 dep_insn = PREV_INSN (dep_insn);
1474 }
1475 }
1476 }
1477
1478 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1479 for every dependency. */
1480
1481 static int
1482 sched_analyze (head, tail)
1483 rtx head, tail;
1484 {
1485 register rtx insn;
1486 register int n_insns = 0;
1487 register rtx u;
1488 register int luid = 0;
1489
1490 for (insn = head; ; insn = NEXT_INSN (insn))
1491 {
1492 INSN_LUID (insn) = luid++;
1493
1494 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
1495 {
1496 sched_analyze_insn (PATTERN (insn), insn);
1497 n_insns += 1;
1498 }
1499 else if (GET_CODE (insn) == CALL_INSN)
1500 {
1501 rtx dest = 0;
1502 rtx x;
1503 register int i;
1504
1505 /* Any instruction using a hard register which may get clobbered
1506 by a call needs to be marked as dependent on this call.
1507 This prevents a use of a hard return reg from being moved
1508	             past a void call (i.e. one that does not explicitly set the hard
1509	             return reg).  */
1510
1511 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1512 if (call_used_regs[i] || global_regs[i])
1513 {
1514 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1515 if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
1516 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1517 reg_last_uses[i] = 0;
1518 if (reg_last_sets[i]
1519 && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
1520 add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
1521 reg_last_sets[i] = insn;
1522 /* Insn, being a CALL_INSN, magically depends on
1523 `last_function_call' already. */
1524 }
1525
1526 /* For each insn which shouldn't cross a call, add a dependence
1527 between that insn and this call insn. */
1528 x = LOG_LINKS (sched_before_next_call);
1529 while (x)
1530 {
1531 add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
1532 x = XEXP (x, 1);
1533 }
1534 LOG_LINKS (sched_before_next_call) = 0;
1535
1536 sched_analyze_insn (PATTERN (insn), insn);
1537
1538 /* We don't need to flush memory for a function call which does
1539 not involve memory. */
1540 if (! CONST_CALL_P (insn))
1541 {
1542 /* In the absence of interprocedural alias analysis,
1543 we must flush all pending reads and writes, and
1544 start new dependencies starting from here. */
1545 flush_pending_lists (insn);
1546 }
1547
1548 /* Depend this function call (actually, the user of this
1549 function call) on all hard register clobberage. */
1550 last_function_call = insn;
1551 n_insns += 1;
1552 }
1553
1554 if (insn == tail)
1555 return n_insns;
1556 }
1557 }
1558 \f
1559 /* Called when we see a set of a register. If death is true, then we are
1560 scanning backwards. Mark that register as unborn. If nobody says
1561 otherwise, that is how things will remain. If death is false, then we
1562 are scanning forwards. Mark that register as being born. */
1563
1564 static void
1565 sched_note_set (b, x, death)
1566 int b;
1567 rtx x;
1568 int death;
1569 {
1570 register int regno, j;
1571 register rtx reg = SET_DEST (x);
1572 int subreg_p = 0;
1573
1574 if (reg == 0)
1575 return;
1576
1577 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
1578 || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
1579 {
1580 /* Must treat modification of just one hardware register of a multi-reg
1581 value or just a byte field of a register exactly the same way that
1582 mark_set_1 in flow.c does. */
1583 if (GET_CODE (reg) == ZERO_EXTRACT
1584 || GET_CODE (reg) == SIGN_EXTRACT
1585 || (GET_CODE (reg) == SUBREG
1586 && REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg)))
1587 subreg_p = 1;
1588
1589 reg = SUBREG_REG (reg);
1590 }
1591
1592 if (GET_CODE (reg) != REG)
1593 return;
1594
1595 /* Global registers are always live, so the code below does not apply
1596 to them. */
1597
1598 regno = REGNO (reg);
1599 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
1600 {
1601 register int offset = regno / REGSET_ELT_BITS;
1602 register int bit = 1 << (regno % REGSET_ELT_BITS);
1603
1604 if (death)
1605 {
1606 /* If we only set part of the register, then this set does not
1607 kill it. */
1608 if (subreg_p)
1609 return;
1610
1611 /* Try killing this register. */
1612 if (regno < FIRST_PSEUDO_REGISTER)
1613 {
1614 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1615 while (--j >= 0)
1616 {
1617 offset = (regno + j) / REGSET_ELT_BITS;
1618 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
1619
1620 bb_live_regs[offset] &= ~bit;
1621 bb_dead_regs[offset] |= bit;
1622 }
1623 }
1624 else
1625 {
1626 bb_live_regs[offset] &= ~bit;
1627 bb_dead_regs[offset] |= bit;
1628 }
1629 }
1630 else
1631 {
1632 /* Make the register live again. */
1633 if (regno < FIRST_PSEUDO_REGISTER)
1634 {
1635 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1636 while (--j >= 0)
1637 {
1638 offset = (regno + j) / REGSET_ELT_BITS;
1639 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
1640
1641 bb_live_regs[offset] |= bit;
1642 bb_dead_regs[offset] &= ~bit;
1643 }
1644 }
1645 else
1646 {
1647 bb_live_regs[offset] |= bit;
1648 bb_dead_regs[offset] &= ~bit;
1649 }
1650 }
1651 }
1652 }
1653 \f
1654 /* Macros and functions for keeping the priority queue sorted, and
1655 dealing with queueing and unqueueing of instructions. */
1656
1657 #define SCHED_SORT(READY, NEW_READY, OLD_READY) \
1658 do { if ((NEW_READY) - (OLD_READY) == 1) \
1659 swap_sort (READY, NEW_READY); \
1660 else if ((NEW_READY) - (OLD_READY) > 1) \
1661 qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); } \
1662 while (0)
1663
1664 /* Returns a positive value if y is preferred; returns a negative value if
1665 x is preferred. Should never return 0, since that will make the sort
1666 unstable. */
1667
1668 static int
1669 rank_for_schedule (x, y)
1670 rtx *x, *y;
1671 {
1672 rtx tmp = *y;
1673 rtx tmp2 = *x;
1674 rtx tmp_dep, tmp2_dep;
1675 int tmp_class, tmp2_class;
1676 int value;
1677
1678 /* Choose the instruction with the highest priority, if different. */
1679	  if ((value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2)))
1680 return value;
1681
1682 if (last_scheduled_insn)
1683 {
1684 /* Classify the instructions into three classes:
1685	         1) Data dependent on last scheduled insn.
1686 2) Anti/Output dependent on last scheduled insn.
1687 3) Independent of last scheduled insn, or has latency of one.
1688 Choose the insn from the highest numbered class if different. */
1689 tmp_dep = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
1690 if (tmp_dep == 0 || insn_cost (tmp) == 1)
1691 tmp_class = 3;
1692 else if (REG_NOTE_KIND (tmp_dep) == 0)
1693 tmp_class = 1;
1694 else
1695 tmp_class = 2;
1696
1697 tmp2_dep = find_insn_list (tmp2, LOG_LINKS (last_scheduled_insn));
1698 if (tmp2_dep == 0 || insn_cost (tmp2) == 1)
1699 tmp2_class = 3;
1700 else if (REG_NOTE_KIND (tmp2_dep) == 0)
1701 tmp2_class = 1;
1702 else
1703 tmp2_class = 2;
1704
1705	      if ((value = tmp_class - tmp2_class))
1706 return value;
1707 }
1708
1709 /* If insns are equally good, sort by INSN_LUID (original insn order),
1710 so that we make the sort stable. This minimizes instruction movement,
1711 thus minimizing sched's effect on debugging and cross-jumping. */
1712 return INSN_LUID (tmp) - INSN_LUID (tmp2);
1713 }
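/* Since rank_for_schedule returns a positive value when *y is
   preferred, qsorting with it leaves the ready vector ordered from
   most preferred to least preferred; in particular, ready[0] is the
   most desirable candidate (compare its use in launch_links below).  */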
1714
1715	/* Resort the array A in which only the last element (index N-1) may be out of order.  */
1716
1717 __inline static void
1718 swap_sort (a, n)
1719 rtx *a;
1720 int n;
1721 {
1722 rtx insn = a[n-1];
1723 int i = n-2;
1724
1725 while (i >= 0 && rank_for_schedule (a+i, &insn) >= 0)
1726 {
1727 a[i+1] = a[i];
1728 i -= 1;
1729 }
1730 a[i+1] = insn;
1731 }
1732
1733 static int max_priority;
1734
1735 /* Add INSN to the insn queue so that it fires at least N_CYCLES
1736 before the currently executing insn. */
1737
1738 __inline static void
1739 queue_insn (insn, n_cycles)
1740 rtx insn;
1741 int n_cycles;
1742 {
1743 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1744 NEXT_INSN (insn) = insn_queue[next_q];
1745 insn_queue[next_q] = insn;
1746 q_size += 1;
1747 }
1748
1749 /* Return nonzero if PAT is the pattern of an insn which makes a
1750 register live. */
1751
1752 __inline static int
1753 birthing_insn_p (pat)
1754 rtx pat;
1755 {
1756 int j;
1757
1758 if (reload_completed == 1)
1759 return 0;
1760
1761 if (GET_CODE (pat) == SET
1762 && GET_CODE (SET_DEST (pat)) == REG)
1763 {
1764 rtx dest = SET_DEST (pat);
1765 int i = REGNO (dest);
1766 int offset = i / REGSET_ELT_BITS;
1767 int bit = 1 << (i % REGSET_ELT_BITS);
1768
1769 /* It would be more accurate to use refers_to_regno_p or
1770 reg_mentioned_p to determine when the dest is not live before this
1771 insn. */
1772
1773 if (bb_live_regs[offset] & bit)
1774 return (reg_n_sets[i] == 1);
1775
1776 return 0;
1777 }
1778 if (GET_CODE (pat) == PARALLEL)
1779 {
1780 for (j = 0; j < XVECLEN (pat, 0); j++)
1781 if (birthing_insn_p (XVECEXP (pat, 0, j)))
1782 return 1;
1783 }
1784 return 0;
1785 }
1786
1787 /* If PREV is an insn which is immediately ready to execute, return 1,
1788 otherwise return 0. We may adjust its priority if that will help shorten
1789 register lifetimes. */
1790
1791 static int
1792 launch_link (prev)
1793 rtx prev;
1794 {
1795 rtx pat = PATTERN (prev);
1796 rtx note;
1797 /* MAX of (a) number of cycles needed by prev
1798 (b) number of cycles before needed resources are free. */
1799 int n_cycles = insn_cost (prev);
1800 int n_deaths = 0;
1801
1802 /* Trying to shorten register lives after reload has completed
1803 is useless and wrong. It gives inaccurate schedules. */
1804 if (reload_completed == 0)
1805 {
1806 for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
1807 if (REG_NOTE_KIND (note) == REG_DEAD)
1808 n_deaths += 1;
1809
1810 /* Defer scheduling insns which kill registers, since that
1811 shortens register lives. Prefer scheduling insns which
1812 make registers live for the same reason. */
1813 switch (n_deaths)
1814 {
1815 default:
1816 INSN_PRIORITY (prev) >>= 3;
1817 break;
1818 case 3:
1819 INSN_PRIORITY (prev) >>= 2;
1820 break;
1821 case 2:
1822 case 1:
1823 INSN_PRIORITY (prev) >>= 1;
1824 break;
1825 case 0:
1826 if (birthing_insn_p (pat))
1827 {
1828 int max = max_priority;
1829
1830 if (max > INSN_PRIORITY (prev))
1831 INSN_PRIORITY (prev) = max;
1832 }
1833 break;
1834 }
1835 }
1836
1837 if (n_cycles <= 1)
1838 return 1;
1839 queue_insn (prev, n_cycles);
1840 return 0;
1841 }
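/* A worked example of the adjustment above (illustrative only): an insn
   with INSN_PRIORITY 8 keeps 4 after one or two REG_DEAD notes (>> 1),
   2 after exactly three (>> 2), and 1 after four or more (>> 3); an insn
   with no deaths that births a register is instead raised to max_priority.
   Demoted insns still fire, just later, which shortens the lifetimes of
   the registers they kill.  */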
1842
1843 /* INSN is the "currently executing insn". Launch each insn which was
1844 waiting on INSN (in the backwards dataflow sense). READY is a
1845 vector of insns which are ready to fire. N_READY is the number of
1846 elements in READY. */
1847
1848 static int
1849 launch_links (insn, ready, n_ready)
1850 rtx insn;
1851 rtx *ready;
1852 int n_ready;
1853 {
1854 rtx link;
1855 int new_ready = n_ready;
1856
1857 if (LOG_LINKS (insn) == 0)
1858 return n_ready;
1859
1860 /* This is used by the function launch_link above. */
1861 if (n_ready > 0)
1862 max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
1863 else
1864 max_priority = INSN_PRIORITY (insn);
1865
1866 for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
1867 {
1868 rtx prev = XEXP (link, 0);
1869
1870 if ((INSN_REF_COUNT (prev) -= 1) == 0 && launch_link (prev))
1871 ready[new_ready++] = prev;
1872 }
1873
1874 return new_ready;
1875 }
1876
1877 /* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
1878 dead_notes list. */
1879
1880 static void
1881 create_reg_dead_note (reg, insn)
1882 rtx reg, insn;
1883 {
1884 rtx link = dead_notes;
1885
1886 if (link == 0)
1887 /* In theory, we should not end up with more REG_DEAD reg notes than we
1888 started with. In practice, this can occur as the result of bugs in
1889 flow, combine and/or sched. */
1890 {
1891 #if 1
1892 abort ();
1893 #else
1894 link = rtx_alloc (EXPR_LIST);
1895 PUT_REG_NOTE_KIND (link, REG_DEAD);
1896 #endif
1897 }
1898 else
1899 dead_notes = XEXP (dead_notes, 1);
1900
1901 XEXP (link, 0) = reg;
1902 XEXP (link, 1) = REG_NOTES (insn);
1903 REG_NOTES (insn) = link;
1904 }
1905
1906 /* Subroutine of attach_deaths_insn--handles the recursive search
1907 through X, the body of INSN. If SET_P is true, then X is being modified by the insn. */
1908
1909 static void
1910 attach_deaths (x, insn, set_p)
1911 rtx x;
1912 rtx insn;
1913 int set_p;
1914 {
1915 register int i;
1916 register int j;
1917 register enum rtx_code code;
1918 register char *fmt;
1919
1920 if (x == 0)
1921 return;
1922
1923 code = GET_CODE (x);
1924
1925 switch (code)
1926 {
1927 case CONST_INT:
1928 case CONST_DOUBLE:
1929 case LABEL_REF:
1930 case SYMBOL_REF:
1931 case CONST:
1932 case CODE_LABEL:
1933 case PC:
1934 case CC0:
1935 /* Get rid of the easy cases first. */
1936 return;
1937
1938 case REG:
1939 {
1940 /* If the register dies in this insn, queue that note, and mark
1941 this register as needing to die. */
1942 /* This code is very similar to mark_used_1 (if set_p is false)
1943 and mark_set_1 (if set_p is true) in flow.c. */
1944
1945 register int regno = REGNO (x);
1946 register int offset = regno / REGSET_ELT_BITS;
1947 register int bit = 1 << (regno % REGSET_ELT_BITS);
1948 int all_needed = (old_live_regs[offset] & bit);
1949 int some_needed = (old_live_regs[offset] & bit);
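/* ALL_NEEDED and SOME_NEEDED start out identical; the loop below ANDs
   and ORs in the bits for the remaining words of a multi-word hard
   register, after which the two may differ.  */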
1950
1951 if (set_p)
1952 return;
1953
1954 if (regno < FIRST_PSEUDO_REGISTER)
1955 {
1956 int n;
1957
1958 n = HARD_REGNO_NREGS (regno, GET_MODE (x));
1959 while (--n > 0)
1960 {
1961 some_needed |= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
1962 & 1 << ((regno + n) % REGSET_ELT_BITS));
1963 all_needed &= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
1964 & 1 << ((regno + n) % REGSET_ELT_BITS));
1965 }
1966 }
1967
1968 /* If it wasn't live before we started, then add a REG_DEAD note.
1969 We must check the previous lifetime info not the current info,
1970 because we may have to execute this code several times, e.g.
1971 once for a clobber (which doesn't add a note) and later
1972 for a use (which does add a note).
1973
1974 Always make the register live. We must do this even if it was
1975 live before, because this may be an insn which sets and uses
1976 the same register, in which case the register has already been
1977 killed, so we must make it live again.
1978
1979 Global registers are always live, and should never have a REG_DEAD
1980 note added for them, so none of the code below applies to them. */
1981
1982 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
1983 {
1984 /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
1985 STACK_POINTER_REGNUM, since these are always considered to be
1986 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
1987 if (regno != FRAME_POINTER_REGNUM
1988 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
1989 && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
1990 #endif
1991 && regno != STACK_POINTER_REGNUM)
1992 {
1993 if (! all_needed && ! dead_or_set_p (insn, x))
1994 {
1995 /* If none of the words in X is needed, make a REG_DEAD
1996 note. Otherwise, we must make partial REG_DEAD
1997 notes. */
1998 if (! some_needed)
1999 create_reg_dead_note (x, insn);
2000 else
2001 {
2002 int i;
2003
2004 /* Don't make a REG_DEAD note for a part of a
2005 register that is set in the insn. */
2006 for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
2007 i >= 0; i--)
2008 if ((old_live_regs[(regno + i) / REGSET_ELT_BITS]
2009 & 1 << ((regno + i) % REGSET_ELT_BITS)) == 0
2010 && ! dead_or_set_regno_p (insn, regno + i))
2011 create_reg_dead_note (gen_rtx (REG, word_mode,
2012 regno + i),
2013 insn);
2014 }
2015 }
2016 }
2017
2018 if (regno < FIRST_PSEUDO_REGISTER)
2019 {
2020 int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
2021 while (--j >= 0)
2022 {
2023 offset = (regno + j) / REGSET_ELT_BITS;
2024 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
2025
2026 bb_dead_regs[offset] &= ~bit;
2027 bb_live_regs[offset] |= bit;
2028 }
2029 }
2030 else
2031 {
2032 bb_dead_regs[offset] &= ~bit;
2033 bb_live_regs[offset] |= bit;
2034 }
2035 }
2036 return;
2037 }
2038
2039 case MEM:
2040 /* Handle tail-recursive case. */
2041 attach_deaths (XEXP (x, 0), insn, 0);
2042 return;
2043
2044 case SUBREG:
2045 case STRICT_LOW_PART:
2046 /* These two cases preserve the value of SET_P, so handle them
2047 separately. */
2048 attach_deaths (XEXP (x, 0), insn, set_p);
2049 return;
2050
2051 case ZERO_EXTRACT:
2052 case SIGN_EXTRACT:
2053 /* These cases preserve the value of SET_P for the first operand, but
2054 clear it for the other two. */
2055 attach_deaths (XEXP (x, 0), insn, set_p);
2056 attach_deaths (XEXP (x, 1), insn, 0);
2057 attach_deaths (XEXP (x, 2), insn, 0);
2058 return;
2059
2060 default:
2061 /* Other cases: walk the insn. */
2062 fmt = GET_RTX_FORMAT (code);
2063 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2064 {
2065 if (fmt[i] == 'e')
2066 attach_deaths (XEXP (x, i), insn, 0);
2067 else if (fmt[i] == 'E')
2068 for (j = 0; j < XVECLEN (x, i); j++)
2069 attach_deaths (XVECEXP (x, i, j), insn, 0);
2070 }
2071 }
2072 }
2073
2074 /* After INSN has executed, add register death notes for each register
2075 that is dead after INSN. */
2076
2077 static void
2078 attach_deaths_insn (insn)
2079 rtx insn;
2080 {
2081 rtx x = PATTERN (insn);
2082 register RTX_CODE code = GET_CODE (x);
2083
2084 if (code == SET)
2085 {
2086 attach_deaths (SET_SRC (x), insn, 0);
2087
2088 /* A register might die here even if it is the destination, e.g.
2089 it is the target of a volatile read and is otherwise unused.
2090 Hence we must always call attach_deaths for the SET_DEST. */
2091 attach_deaths (SET_DEST (x), insn, 1);
2092 }
2093 else if (code == PARALLEL)
2094 {
2095 register int i;
2096 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2097 {
2098 code = GET_CODE (XVECEXP (x, 0, i));
2099 if (code == SET)
2100 {
2101 attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
2102
2103 attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
2104 }
2105 else if (code == CLOBBER)
2106 attach_deaths (XEXP (XVECEXP (x, 0, i), 0), insn, 1);
2107 else
2108 attach_deaths (XVECEXP (x, 0, i), insn, 0);
2109 }
2110 }
2111 else if (code == CLOBBER)
2112 attach_deaths (XEXP (x, 0), insn, 1);
2113 else
2114 attach_deaths (x, insn, 0);
2115 }
2116
2117 /* Delete notes beginning with INSN, stopping at TAIL, and maybe put
2118 them in the chain of notes ended by NOTE_LIST.
2119 Returns the insn following the notes. */
2120
2121 static rtx
2122 unlink_notes (insn, tail)
2123 rtx insn, tail;
2124 {
2125 rtx prev = PREV_INSN (insn);
2126
2127 while (insn != tail && GET_CODE (insn) == NOTE)
2128 {
2129 rtx next = NEXT_INSN (insn);
2130 /* Delete the note from its current position. */
2131 if (prev)
2132 NEXT_INSN (prev) = next;
2133 if (next)
2134 PREV_INSN (next) = prev;
2135
2136 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
2137 /* Record line-number notes so they can be reused. */
2138 LINE_NOTE (insn) = insn;
2139 else
2140 {
2141 /* Insert the note at the end of the notes list. */
2142 PREV_INSN (insn) = note_list;
2143 if (note_list)
2144 NEXT_INSN (note_list) = insn;
2145 note_list = insn;
2146 }
2147
2148 insn = next;
2149 }
2150 return insn;
2151 }
2152
2153 /* Data structure for keeping track of register information
2154 during a register's life. */
2155
2156 struct sometimes
2157 {
2158 short offset; short bit;
2159 short live_length; short calls_crossed;
2160 };
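/* Illustrative arithmetic (an added note): a regno is unpacked as
   offset = regno / REGSET_ELT_BITS, bit = regno % REGSET_ELT_BITS,
   and repacked as regno = offset * REGSET_ELT_BITS + bit.  Assuming
   REGSET_ELT_BITS is 32, regno 70 is stored as offset 2, bit 6.  */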
2161
2162 /* Constructor for `sometimes' data structure. */
2163
2164 static int
2165 new_sometimes_live (regs_sometimes_live, offset, bit, sometimes_max)
2166 struct sometimes *regs_sometimes_live;
2167 int offset, bit;
2168 int sometimes_max;
2169 {
2170 register struct sometimes *p;
2171 register int regno = offset * REGSET_ELT_BITS + bit;
2173
2174 /* There should never be a register greater than max_regno here. If there
2175 is, it means that a define_split has created a new pseudo reg. This
2176 is not allowed, since there will not be flow info available for any
2177 new register, so catch the error here. */
2178 if (regno >= max_regno)
2179 abort ();
2180
2181 p = &regs_sometimes_live[sometimes_max];
2182 p->offset = offset;
2183 p->bit = bit;
2184 p->live_length = 0;
2185 p->calls_crossed = 0;
2186 sometimes_max++;
2187 return sometimes_max;
2188 }
2189
2190 /* At the end of a block, record the accumulated lifetime info (length
2191 and calls crossed) for every register we are still tracking. */
2192
2193 static void
2194 finish_sometimes_live (regs_sometimes_live, sometimes_max)
2195 struct sometimes *regs_sometimes_live;
2196 int sometimes_max;
2197 {
2198 int i;
2199
2200 for (i = 0; i < sometimes_max; i++)
2201 {
2202 register struct sometimes *p = &regs_sometimes_live[i];
2203 int regno;
2204
2205 regno = p->offset * REGSET_ELT_BITS + p->bit;
2206
2207 sched_reg_live_length[regno] += p->live_length;
2208 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2209 }
2210 }
2211
2212 /* Use modified list scheduling to rearrange insns in basic block
2213 B. FILE, if nonzero, is where we dump interesting output about
2214 this pass. */
2215
2216 static void
2217 schedule_block (b, file)
2218 int b;
2219 FILE *file;
2220 {
2221 rtx insn, last;
2222 rtx last_note = 0;
2223 rtx *ready, link;
2224 int i, j, n_ready = 0, new_ready, n_insns = 0;
2225 int sched_n_insns = 0;
2226 #define NEED_NOTHING 0
2227 #define NEED_HEAD 1
2228 #define NEED_TAIL 2
2229 int new_needs;
2230
2231 /* HEAD and TAIL delimit the region being scheduled. */
2232 rtx head = basic_block_head[b];
2233 rtx tail = basic_block_end[b];
2234 /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
2235 being scheduled. When the insns have been ordered,
2236 these insns delimit where the new insns are to be
2237 spliced back into the insn chain. */
2238 rtx next_tail;
2239 rtx prev_head;
2240
2241 /* Keep life information accurate. */
2242 register struct sometimes *regs_sometimes_live;
2243 int sometimes_max;
2244
2245 if (file)
2246 fprintf (file, ";;\t -- basic block number %d from %d to %d --\n",
2247 b, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
2248
2249 i = max_reg_num ();
2250 reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
2251 bzero (reg_last_uses, i * sizeof (rtx));
2252 reg_last_sets = (rtx *) alloca (i * sizeof (rtx));
2253 bzero (reg_last_sets, i * sizeof (rtx));
2254
2255 /* Remove certain insns at the beginning from scheduling,
2256 by advancing HEAD. */
2257
2258 /* At the start of a function, before reload has run, don't delay getting
2259 parameters from hard registers into pseudo registers. */
2260 if (reload_completed == 0 && b == 0)
2261 {
2262 while (head != tail
2263 && GET_CODE (head) == NOTE
2264 && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
2265 head = NEXT_INSN (head);
2266 while (head != tail
2267 && GET_CODE (head) == INSN
2268 && GET_CODE (PATTERN (head)) == SET)
2269 {
2270 rtx src = SET_SRC (PATTERN (head));
2271 while (GET_CODE (src) == SUBREG
2272 || GET_CODE (src) == SIGN_EXTEND
2273 || GET_CODE (src) == ZERO_EXTEND
2274 || GET_CODE (src) == SIGN_EXTRACT
2275 || GET_CODE (src) == ZERO_EXTRACT)
2276 src = XEXP (src, 0);
2277 if (GET_CODE (src) != REG
2278 || REGNO (src) >= FIRST_PSEUDO_REGISTER)
2279 break;
2280 /* Keep this insn from ever being scheduled. */
2281 INSN_REF_COUNT (head) = 1;
2282 head = NEXT_INSN (head);
2283 }
2284 }
2285
2286 /* Don't include any notes or labels at the beginning of the
2287 basic block, or notes at the ends of basic blocks. */
2288 while (head != tail)
2289 {
2290 if (GET_CODE (head) == NOTE)
2291 head = NEXT_INSN (head);
2292 else if (GET_CODE (tail) == NOTE)
2293 tail = PREV_INSN (tail);
2294 else if (GET_CODE (head) == CODE_LABEL)
2295 head = NEXT_INSN (head);
2296 else break;
2297 }
2298 /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
2299 to schedule this block. */
2300 if (head == tail
2301 && (GET_CODE (head) == NOTE || GET_CODE (head) == CODE_LABEL))
2302 return;
2303
2304 #if 0
2305 /* This short-cut doesn't work. It does not count call insns crossed by
2306 registers in reg_sometimes_live. It does not mark these registers as
2307 dead if they die in this block. It does not mark these registers live
2308 (or create new reg_sometimes_live entries if necessary) if they are born
2309 in this block.
2310
2311 The easy solution is to just always schedule a block. This block only
2312 has one insn, so this won't slow down this pass by much. */
2313
2314 if (head == tail)
2315 return;
2316 #endif
2317
2318 /* Exclude certain insns at the end of the basic block by advancing TAIL. */
2319 /* This isn't correct. Instead of advancing TAIL, we should assign very
2320 high priorities to these insns to guarantee that they get scheduled last.
2321 If these insns are ignored, as is currently done, the register life info
2322 may be incorrectly computed. */
2323 if (GET_CODE (tail) == INSN
2324 && GET_CODE (PATTERN (tail)) == USE
2325 && next_nonnote_insn (tail) == 0)
2326 {
2327 /* If this was the only insn in the block, then there are no insns to
2328 schedule. */
2329 if (head == tail)
2330 return;
2331
2332 /* We don't try to reorder the USE at the end of a function. */
2333 tail = prev_nonnote_insn (tail);
2334
2335 #if 0
2336 /* This short-cut does not work. See comment above. */
2337 if (head == tail)
2338 return;
2339 #endif
2340 }
2341 else if (GET_CODE (tail) == JUMP_INSN
2342 && SCHED_GROUP_P (tail) == 0
2343 && GET_CODE (PREV_INSN (tail)) == INSN
2344 && GET_CODE (PATTERN (PREV_INSN (tail))) == USE
2345 && REG_FUNCTION_VALUE_P (XEXP (PATTERN (PREV_INSN (tail)), 0)))
2346 {
2347 /* Don't let the setting of the function's return value register
2348 move from this jump. For the same reason we want to get the
2349 parameters into pseudo registers as quickly as possible, we
2350 want to set the function's return value register as late as
2351 possible. */
2352
2353 /* If this is the only insn in the block, then there is no need to
2354 schedule the block. */
2355 if (head == tail)
2356 return;
2357
2358 tail = PREV_INSN (tail);
2359 if (head == tail)
2360 return;
2361
2362 tail = prev_nonnote_insn (tail);
2363
2364 #if 0
2365 /* This shortcut does not work. See comment above. */
2366 if (head == tail)
2367 return;
2368 #endif
2369 }
2370
2371 #ifdef HAVE_cc0
2372 /* This is probably wrong. Instead of doing this, we should give this insn
2373 a very high priority to guarantee that it gets scheduled last. */
2374 /* We cannot separate an insn that sets the condition code from one that
2375 uses it. So we must leave an insn that sets cc0 where it is. */
2376 if (sets_cc0_p (PATTERN (tail)))
2377 tail = PREV_INSN (tail);
2378 #endif
2379
2380 /* Now HEAD through TAIL are the insns actually to be rearranged;
2381 Let PREV_HEAD and NEXT_TAIL enclose them. */
2382 prev_head = PREV_INSN (head);
2383 next_tail = NEXT_INSN (tail);
2384
2385 /* Initialize basic block data structures. */
2386 dead_notes = 0;
2387 pending_read_insns = 0;
2388 pending_read_mems = 0;
2389 pending_write_insns = 0;
2390 pending_write_mems = 0;
2391 pending_lists_length = 0;
2392 last_pending_memory_flush = 0;
2393 last_function_call = 0;
2394 last_scheduled_insn = 0;
2395
2396 LOG_LINKS (sched_before_next_call) = 0;
2397
2398 n_insns += sched_analyze (head, tail);
2399 if (n_insns == 0)
2400 {
2401 free_pending_lists ();
2402 return;
2403 }
2404
2405 /* Allocate vector to hold insns to be rearranged (except those
2406 insns which are controlled by an insn with SCHED_GROUP_P set).
2407 All these insns are included between ORIG_HEAD and ORIG_TAIL,
2408 as those variables are ultimately set up. */
2409 ready = (rtx *) alloca ((n_insns+1) * sizeof (rtx));
2410
2411 /* TAIL is now the last of the insns to be rearranged.
2412 Put those insns into the READY vector. */
2413 insn = tail;
2414
2415 /* If the last insn is a branch, force it to be the last insn after
2416 scheduling. Also, don't try to reorder calls at the end of the basic
2417 block -- this will only lead to worse register allocation. */
2418 if (GET_CODE (tail) == CALL_INSN || GET_CODE (tail) == JUMP_INSN)
2419 {
2420 priority (tail);
2421 ready[n_ready++] = tail;
2422 INSN_PRIORITY (tail) = TAIL_PRIORITY;
2423 INSN_REF_COUNT (tail) = 0;
2424 insn = PREV_INSN (tail);
2425 }
2426
2427 /* Assign priorities to instructions. Also check whether they
2428 are in priority order already. If so then I will be nonnegative.
2429 We use this shortcut only before reloading. */
2430 #if 0
2431 i = reload_completed ? DONE_PRIORITY : MAX_PRIORITY;
2432 #endif
2433
2434 for (; insn != prev_head; insn = PREV_INSN (insn))
2435 {
2436 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2437 {
2438 priority (insn);
2439 if (INSN_REF_COUNT (insn) == 0)
2440 ready[n_ready++] = insn;
2441 if (SCHED_GROUP_P (insn))
2442 {
2443 while (SCHED_GROUP_P (insn))
2444 {
2445 insn = PREV_INSN (insn);
2446 while (GET_CODE (insn) == NOTE)
2447 insn = PREV_INSN (insn);
2448 priority (insn);
2449 }
2450 continue;
2451 }
2452 #if 0
2453 if (i < 0)
2454 continue;
2455 if (INSN_PRIORITY (insn) < i)
2456 i = INSN_PRIORITY (insn);
2457 else if (INSN_PRIORITY (insn) > i)
2458 i = DONE_PRIORITY;
2459 #endif
2460 }
2461 }
2462
2463 #if 0
2464 /* This short-cut doesn't work. It does not count call insns crossed by
2465 registers in reg_sometimes_live. It does not mark these registers as
2466 dead if they die in this block. It does not mark these registers live
2467 (or create new reg_sometimes_live entries if necessary) if they are born
2468 in this block.
2469
2470 The easy solution is to just always schedule a block. These blocks tend
2471 to be very short, so this doesn't slow down this pass by much. */
2472
2473 /* If existing order is good, don't bother to reorder. */
2474 if (i != DONE_PRIORITY)
2475 {
2476 if (file)
2477 fprintf (file, ";; already scheduled\n");
2478
2479 if (reload_completed == 0)
2480 {
2481 for (i = 0; i < sometimes_max; i++)
2482 regs_sometimes_live[i].live_length += n_insns;
2483
2484 finish_sometimes_live (regs_sometimes_live, sometimes_max);
2485 }
2486 free_pending_lists ();
2487 return;
2488 }
2489 #endif
2490
2491 /* Scan all the insns to be scheduled, removing NOTE insns
2492 and register death notes.
2493 Line number NOTE insns end up in NOTE_LIST.
2494 Register death notes end up in DEAD_NOTES.
2495
2496 Recreate the register life information for the end of this basic
2497 block. */
2498
2499 if (reload_completed == 0)
2500 {
2501 bcopy (basic_block_live_at_start[b], bb_live_regs, regset_bytes);
2502 bzero (bb_dead_regs, regset_bytes);
2503
2504 if (b == 0)
2505 {
2506 /* This is the first block in the function. There may be insns
2507 before head that we can't schedule. We still need to examine
2508 them though for accurate register lifetime analysis. */
2509
2510 /* We don't want to remove any REG_DEAD notes as the code below
2511 does. */
2512
2513 for (insn = basic_block_head[b]; insn != head;
2514 insn = NEXT_INSN (insn))
2515 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2516 {
2517 /* See if the register gets born here. */
2518 /* We must check for registers being born before we check for
2519 registers dying. It is possible for a register to be born
2520 and die in the same insn, e.g. reading from a volatile
2521 memory location into an otherwise unused register. Such
2522 a register must be marked as dead after this insn. */
2523 if (GET_CODE (PATTERN (insn)) == SET
2524 || GET_CODE (PATTERN (insn)) == CLOBBER)
2525 sched_note_set (b, PATTERN (insn), 0);
2526 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2527 {
2528 int j;
2529 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2530 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2531 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2532 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2533
2534 /* ??? This code is obsolete and should be deleted. It
2535 is harmless though, so we will leave it in for now. */
2536 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2537 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
2538 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2539 }
2540
2541 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2542 {
2543 if ((REG_NOTE_KIND (link) == REG_DEAD
2544 || REG_NOTE_KIND (link) == REG_UNUSED)
2545 /* Verify that the REG_NOTE has a legal value. */
2546 && GET_CODE (XEXP (link, 0)) == REG)
2547 {
2548 register int regno = REGNO (XEXP (link, 0));
2549 register int offset = regno / REGSET_ELT_BITS;
2550 register int bit = 1 << (regno % REGSET_ELT_BITS);
2551
2552 if (regno < FIRST_PSEUDO_REGISTER)
2553 {
2554 int j = HARD_REGNO_NREGS (regno,
2555 GET_MODE (XEXP (link, 0)));
2556 while (--j >= 0)
2557 {
2558 offset = (regno + j) / REGSET_ELT_BITS;
2559 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
2560
2561 bb_live_regs[offset] &= ~bit;
2562 bb_dead_regs[offset] |= bit;
2563 }
2564 }
2565 else
2566 {
2567 bb_live_regs[offset] &= ~bit;
2568 bb_dead_regs[offset] |= bit;
2569 }
2570 }
2571 }
2572 }
2573 }
2574 }
2575
2576 /* If debugging information is being produced, keep track of the line
2577 number notes for each insn. */
2578 if (write_symbols != NO_DEBUG)
2579 {
2580 /* We must use the true line number for the first insn in the block
2581 that was computed and saved at the start of this pass. We can't
2582 use the current line number, because scheduling of the previous
2583 block may have changed the current line number. */
2584 rtx line = line_note_head[b];
2585
2586 for (insn = basic_block_head[b];
2587 insn != next_tail;
2588 insn = NEXT_INSN (insn))
2589 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
2590 line = insn;
2591 else
2592 LINE_NOTE (insn) = line;
2593 }
2594
2595 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2596 {
2597 rtx prev, next, link;
2598
2599 /* Farm out notes. This is needed to keep the debugger from
2600 getting completely deranged. */
2601 if (GET_CODE (insn) == NOTE)
2602 {
2603 prev = insn;
2604 insn = unlink_notes (insn, next_tail);
2605 if (prev == tail)
2606 abort ();
2607 if (prev == head)
2608 abort ();
2609 if (insn == next_tail)
2610 abort ();
2611 }
2612
2613 if (reload_completed == 0
2614 && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2615 {
2616 /* See if the register gets born here. */
2617 /* We must check for registers being born before we check for
2618 registers dying. It is possible for a register to be born and
2619 die in the same insn, e.g. reading from a volatile memory
2620 location into an otherwise unused register. Such a register
2621 must be marked as dead after this insn. */
2622 if (GET_CODE (PATTERN (insn)) == SET
2623 || GET_CODE (PATTERN (insn)) == CLOBBER)
2624 sched_note_set (b, PATTERN (insn), 0);
2625 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2626 {
2627 int j;
2628 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2629 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2630 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2631 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2632
2633 /* ??? This code is obsolete and should be deleted. It
2634 is harmless though, so we will leave it in for now. */
2635 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2636 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
2637 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2638 }
2639
2640 /* Need to know what registers this insn kills. */
2641 for (prev = 0, link = REG_NOTES (insn); link; link = next)
2642 {
2645 next = XEXP (link, 1);
2646 if ((REG_NOTE_KIND (link) == REG_DEAD
2647 || REG_NOTE_KIND (link) == REG_UNUSED)
2648 /* Verify that the REG_NOTE has a legal value. */
2649 && GET_CODE (XEXP (link, 0)) == REG)
2650 {
2651 register int regno = REGNO (XEXP (link, 0));
2652 register int offset = regno / REGSET_ELT_BITS;
2653 register int bit = 1 << (regno % REGSET_ELT_BITS);
2654
2655 /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
2656 alone. */
2657 if (REG_NOTE_KIND (link) == REG_DEAD)
2658 {
2659 if (prev)
2660 XEXP (prev, 1) = next;
2661 else
2662 REG_NOTES (insn) = next;
2663 XEXP (link, 1) = dead_notes;
2664 dead_notes = link;
2665 }
2666 else
2667 prev = link;
2668
2669 if (regno < FIRST_PSEUDO_REGISTER)
2670 {
2671 int j = HARD_REGNO_NREGS (regno,
2672 GET_MODE (XEXP (link, 0)));
2673 while (--j >= 0)
2674 {
2675 offset = (regno + j) / REGSET_ELT_BITS;
2676 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
2677
2678 bb_live_regs[offset] &= ~bit;
2679 bb_dead_regs[offset] |= bit;
2680 }
2681 }
2682 else
2683 {
2684 bb_live_regs[offset] &= ~bit;
2685 bb_dead_regs[offset] |= bit;
2686 }
2687 }
2688 else
2689 prev = link;
2690 }
2691 }
2692 }
2693
2694 if (reload_completed == 0)
2695 {
2696 /* Keep track of register lives. */
2697 old_live_regs = (regset) alloca (regset_bytes);
2698 regs_sometimes_live
2699 = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
2700 sometimes_max = 0;
2701
2702 /* Start with registers live at end. */
2703 for (j = 0; j < regset_size; j++)
2704 {
2705 int live = bb_live_regs[j];
2706 old_live_regs[j] = live;
2707 if (live)
2708 {
2709 register int bit;
2710 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
2711 if (live & (1 << bit))
2712 sometimes_max = new_sometimes_live (regs_sometimes_live, j,
2713 bit, sometimes_max);
2714 }
2715 }
2716 }
2717
2718 SCHED_SORT (ready, n_ready, 1);
2719
2720 if (file)
2721 {
2722 fprintf (file, ";; ready list initially:\n;; ");
2723 for (i = 0; i < n_ready; i++)
2724 fprintf (file, "%d ", INSN_UID (ready[i]));
2725 fprintf (file, "\n\n");
2726
2727 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2728 if (INSN_PRIORITY (insn) > 0)
2729 fprintf (file, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
2730 INSN_UID (insn), INSN_PRIORITY (insn),
2731 INSN_REF_COUNT (insn));
2732 }
2733
2734 /* Now HEAD and TAIL are going to become disconnected
2735 entirely from the insn chain. */
2736 tail = ready[0];
2737
2738 /* The queued-insn count q_size will always be zero here. */
2739 q_ptr = 0;
2740 bzero (insn_queue, sizeof (insn_queue));
2741
2742 /* Now, perform list scheduling. */
2743
2744 /* Where we start inserting insns is after TAIL. */
2745 last = next_tail;
2746
2747 new_needs = (NEXT_INSN (prev_head) == basic_block_head[b]
2748 ? NEED_HEAD : NEED_NOTHING);
2749 if (PREV_INSN (next_tail) == basic_block_end[b])
2750 new_needs |= NEED_TAIL;
2751
2752 new_ready = n_ready;
2753 while (sched_n_insns < n_insns)
2754 {
2755 q_ptr = NEXT_Q (q_ptr);
2756
2757 /* Add all pending insns that can be scheduled without stalls to the
2758 ready list. */
2759 for (insn = insn_queue[q_ptr]; insn; insn = NEXT_INSN (insn))
2760 {
2761 if (file)
2762 fprintf (file, ";; launching %d before %d with no stalls\n",
2763 INSN_UID (insn), INSN_UID (last));
2764 ready[new_ready++] = insn;
2765 q_size -= 1;
2766 }
2767 insn_queue[q_ptr] = 0;
2768
2769 /* If there are no ready insns, stall until one is ready and add all
2770 of the pending insns at that point to the ready list. */
2771 if (new_ready == 0)
2772 {
2773 register int stalls;
2774
2775 for (stalls = 1; stalls < Q_SIZE; stalls++)
2776 if ((insn = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2777 {
2778 for (; insn; insn = NEXT_INSN (insn))
2779 {
2780 if (file)
2781 fprintf (file, ";; issue insn %d before %d with %d stalls\n",
2782 INSN_UID (insn), INSN_UID (last), stalls);
2783 ready[new_ready++] = insn;
2784 q_size -= 1;
2785 }
2786 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
2787 break;
2788 }
2789
2790 #if 0
2791 /* This looks logically correct, but on the SPEC benchmark set on
2792 the SPARC, I get better code without it. */
2793 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
2794 #endif
2795 }
2796
2797 /* There should be some instructions waiting to fire. */
2798 if (new_ready == 0)
2799 abort ();
2800
2801 /* Sort the ready list and choose the best insn to schedule.
2802 N_READY holds the number of items that were scheduled the last time,
2803 minus the one instruction scheduled on the last loop iteration; it
2804 is not modified for any other reason in this loop. */
2805 SCHED_SORT (ready, new_ready, n_ready);
2806 n_ready = new_ready;
2807 last_scheduled_insn = insn = ready[0];
2808
2809 if (DONE_PRIORITY_P (insn))
2810 abort ();
2811
2812 if (reload_completed == 0)
2813 {
2814 /* Process this insn, and each insn linked to this one which must
2815 be immediately output after this insn. */
2816 do
2817 {
2818 /* First we kill registers set by this insn, and then we
2819 make registers used by this insn live. This is the opposite
2820 order used above because we are traversing the instructions
2821 backwards. */
2822
2823 /* Strictly speaking, we should scan REG_UNUSED notes and make
2824 every register mentioned there live, however, we will just
2825 kill them again immediately below, so there doesn't seem to
2826 be any reason why we bother to do this. */
2827
2828 /* See if this is the last notice we must take of a register. */
2829 if (GET_CODE (PATTERN (insn)) == SET
2830 || GET_CODE (PATTERN (insn)) == CLOBBER)
2831 sched_note_set (b, PATTERN (insn), 1);
2832 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2833 {
2834 int j;
2835 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2836 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2837 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2838 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 1);
2839 }
2840
2841 /* This code keeps life analysis information up to date. */
2842 if (GET_CODE (insn) == CALL_INSN)
2843 {
2844 register struct sometimes *p;
2845
2846 /* A call kills all call used and global registers, except
2847 for those mentioned in the call pattern which will be
2848 made live again later. */
2849 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2850 if (call_used_regs[i] || global_regs[i])
2851 {
2852 register int offset = i / REGSET_ELT_BITS;
2853 register int bit = 1 << (i % REGSET_ELT_BITS);
2854
2855 bb_live_regs[offset] &= ~bit;
2856 bb_dead_regs[offset] |= bit;
2857 }
2858
2859 /* Regs live at the time of a call instruction must not
2860 go in a register clobbered by calls. Record this for
2861 all regs now live. Note that insns which are born or
2862 die in a call do not cross a call, so this must be done
2863 after the killings (above) and before the births
2864 (below). */
2865 p = regs_sometimes_live;
2866 for (i = 0; i < sometimes_max; i++, p++)
2867 if (bb_live_regs[p->offset] & (1 << p->bit))
2868 p->calls_crossed += 1;
2869 }
2870
2871 /* Make every register used live, and add REG_DEAD notes for
2872 registers which were not live before we started. */
2873 attach_deaths_insn (insn);
2874
2875 /* Find registers now made live by that instruction. */
2876 for (i = 0; i < regset_size; i++)
2877 {
2878 int diff = bb_live_regs[i] & ~old_live_regs[i];
2879 if (diff)
2880 {
2881 register int bit;
2882 old_live_regs[i] |= diff;
2883 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
2884 if (diff & (1 << bit))
2885 sometimes_max
2886 = new_sometimes_live (regs_sometimes_live, i, bit,
2887 sometimes_max);
2888 }
2889 }
2890
2891 /* Count lengths of all regs we are worrying about now,
2892 and handle registers no longer live. */
2893
2894 for (i = 0; i < sometimes_max; i++)
2895 {
2896 register struct sometimes *p = &regs_sometimes_live[i];
2897 int regno = p->offset * REGSET_ELT_BITS + p->bit;
2898
2899 p->live_length += 1;
2900
2901 if ((bb_live_regs[p->offset] & (1 << p->bit)) == 0)
2902 {
2903 /* This is the end of one of this register's lifetime
2904 segments. Save the lifetime info collected so far,
2905 and clear its bit in the old_live_regs entry. */
2906 sched_reg_live_length[regno] += p->live_length;
2907 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2908 old_live_regs[p->offset] &= ~(1 << p->bit);
2909
2910 /* Delete the reg_sometimes_live entry for this reg by
2911 copying the last entry over top of it. */
2912 *p = regs_sometimes_live[--sometimes_max];
2913 /* ...and decrement i so that this newly copied entry
2914 will be processed. */
2915 i--;
2916 }
2917 }
2918
2919 link = insn;
2920 insn = PREV_INSN (insn);
2921 }
2922 while (SCHED_GROUP_P (link));
2923
2924 /* Set INSN back to the insn we are scheduling now. */
2925 insn = ready[0];
2926 }
2927
2928 /* Schedule INSN. Remove it from the ready list. */
2929 ready += 1;
2930 n_ready -= 1;
2931
2932 sched_n_insns += 1;
2933 NEXT_INSN (insn) = last;
2934 PREV_INSN (last) = insn;
2935 last = insn;
2936
2937 /* Everything that precedes INSN now either becomes "ready", if
2938 it can execute immediately before INSN, or "pending", if
2939 there must be a delay. Give INSN high enough priority that
2940 at least one (maybe more) reg-killing insns can be launched
2941 ahead of all others. Mark INSN as scheduled by changing its
2942 priority to -1. */
2943 INSN_PRIORITY (insn) = LAUNCH_PRIORITY;
2944 new_ready = launch_links (insn, ready, n_ready);
2945 INSN_PRIORITY (insn) = DONE_PRIORITY;
2946
2947 /* Schedule all prior insns that must not be moved. */
2948 if (SCHED_GROUP_P (insn))
2949 {
2950 /* Disable these insns from being launched. */
2951 link = insn;
2952 while (SCHED_GROUP_P (link))
2953 {
2954 /* Disable these insns from being launched by anybody. */
2955 link = PREV_INSN (link);
2956 INSN_REF_COUNT (link) = 0;
2957 }
2958
2959 /* None of these insns can move forward into delay slots. */
2960 while (SCHED_GROUP_P (insn))
2961 {
2962 insn = PREV_INSN (insn);
2963 new_ready = launch_links (insn, ready, new_ready);
2964 INSN_PRIORITY (insn) = DONE_PRIORITY;
2965
2966 sched_n_insns += 1;
2967 NEXT_INSN (insn) = last;
2968 PREV_INSN (last) = insn;
2969 last = insn;
2970 }
2971 }
2972 }
2973 if (q_size != 0)
2974 abort ();
2975
2976 if (reload_completed == 0)
2977 finish_sometimes_live (regs_sometimes_live, sometimes_max);
2978
2979 /* HEAD is now the first insn in the chain of insns that
2980 have been scheduled by the loop above.
2981 TAIL is the last of those insns. */
2982 head = insn;
2983
2984 /* NOTE_LIST is the end of a chain of notes previously found
2985 among the insns. Insert them at the beginning of the insns. */
2986 if (note_list != 0)
2987 {
2988 rtx note_head = note_list;
2989 while (PREV_INSN (note_head))
2990 note_head = PREV_INSN (note_head);
2991
2992 PREV_INSN (head) = note_list;
2993 NEXT_INSN (note_list) = head;
2994 head = note_head;
2995 }
2996
2997 /* In theory, there should be no REG_DEAD notes leftover at the end.
2998 In practice, this can occur as the result of bugs in flow, combine.c,
2999 and/or sched.c. The values of the REG_DEAD notes remaining are
3000 meaningless, because dead_notes is just used as a free list. */
3001 #if 1
3002 if (dead_notes != 0)
3003 abort ();
3004 #endif
3005
3006 if (new_needs & NEED_HEAD)
3007 basic_block_head[b] = head;
3008 PREV_INSN (head) = prev_head;
3009 NEXT_INSN (prev_head) = head;
3010
3011 if (new_needs & NEED_TAIL)
3012 basic_block_end[b] = tail;
3013 NEXT_INSN (tail) = next_tail;
3014 PREV_INSN (next_tail) = tail;
3015
3016 /* Restore the line-number notes of each insn. */
3017 if (write_symbols != NO_DEBUG)
3018 {
3019 rtx line, note, prev, new;
3020 int notes = 0;
3021
3022 head = basic_block_head[b];
3023 next_tail = NEXT_INSN (basic_block_end[b]);
3024
3025 /* Determine the current line-number. We want to know the current
3026 line number of the first insn of the block here, in case it is
3027 different from the true line number that was saved earlier. If
3028 different, then we need a line number note before the first insn
3029 of this block. If it happens to be the same, then we don't want to
3030 emit another line number note here. */
3031 for (line = head; line; line = PREV_INSN (line))
3032 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3033 break;
3034
3035 /* Walk the insns keeping track of the current line-number and inserting
3036 the line-number notes as needed. */
3037 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3038 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3039 line = insn;
3040 else if (! (GET_CODE (insn) == NOTE
3041 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
3042 && (note = LINE_NOTE (insn)) != 0
3043 && note != line
3044 && (line == 0
3045 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
3046 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
3047 {
3048 line = note;
3049 prev = PREV_INSN (insn);
3050 if (LINE_NOTE (note))
3051 {
3052 /* Re-use the original line-number note. */
3053 LINE_NOTE (note) = 0;
3054 PREV_INSN (note) = prev;
3055 NEXT_INSN (prev) = note;
3056 PREV_INSN (insn) = note;
3057 NEXT_INSN (note) = insn;
3058 }
3059 else
3060 {
3061 notes++;
3062 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
3063 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
3064 }
3065 }
3066 if (file && notes)
3067 fprintf (file, ";; added %d line-number notes\n", notes);
3068 }
3069
3070 if (file)
3071 {
3072 fprintf (file, ";; new basic block head = %d\n;; new basic block end = %d\n\n",
3073 INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
3074 }
3075
3076 /* Yow! We're done! */
3077 free_pending_lists ();
3078
3079 return;
3080 }
3081 \f
3082 /* Subroutine of split_hard_reg_notes. Searches X for any reference to
3083 REGNO, returning the rtx of the reference found if any. Otherwise,
3084 returns 0. */
3085
3086 rtx
3087 regno_use_in (regno, x)
3088 int regno;
3089 rtx x;
3090 {
3091 register char *fmt;
3092 int i, j;
3093 rtx tem;
3094
3095 if (GET_CODE (x) == REG && REGNO (x) == regno)
3096 return x;
3097
3098 fmt = GET_RTX_FORMAT (GET_CODE (x));
3099 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3100 {
3101 if (fmt[i] == 'e')
3102 {
3103 if ((tem = regno_use_in (regno, XEXP (x, i))))
3104 return tem;
3105 }
3106 else if (fmt[i] == 'E')
3107 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3108 if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
3109 return tem;
3110 }
3111
3112 return 0;
3113 }
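/* Illustrative usage (hypothetical operands): for the pattern

	(set (reg:SI 3) (plus:SI (reg:SI 2) (const_int 4)))

   regno_use_in (2, pat) returns the (reg:SI 2) rtx, and
   regno_use_in (5, pat) returns 0.  Destinations match as well as
   sources, so callers must filter if that distinction matters.  */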
3114
3115 /* Subroutine of update_flow_info. Determines whether any new REG_NOTEs are
3116 needed for the hard register mentioned in the note. This can happen
3117 if the reference to the hard register in the original insn was split into
3118 several smaller hard register references in the split insns. */
3119
3120 static void
3121 split_hard_reg_notes (note, first, last, orig_insn)
3122 rtx note, first, last, orig_insn;
3123 {
3124 rtx reg, temp, link;
3125 int n_regs, i, new_reg;
3126 rtx insn;
3127
3128 /* Assume that this is a REG_DEAD note. */
3129 if (REG_NOTE_KIND (note) != REG_DEAD)
3130 abort ();
3131
3132 reg = XEXP (note, 0);
3133
3134 n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
3135
3136 /* ??? Could add a check here to see whether the hard register is referenced
3137 in the same mode as in the original insn. If so, then it has not been
3138 split, and the rest of the code below is unnecessary. */
3139
3140 for (i = 1; i < n_regs; i++)
3141 {
3142 new_reg = REGNO (reg) + i;
3143
3144 /* Check for references to new_reg in the split insns. */
3145 for (insn = last; ; insn = PREV_INSN (insn))
3146 {
3147 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3148 && (temp = regno_use_in (new_reg, PATTERN (insn))))
3149 {
3150 /* Create a new reg dead note here. */
3151 link = rtx_alloc (EXPR_LIST);
3152 PUT_REG_NOTE_KIND (link, REG_DEAD);
3153 XEXP (link, 0) = temp;
3154 XEXP (link, 1) = REG_NOTES (insn);
3155 REG_NOTES (insn) = link;
3156 break;
3157 }
3158 /* It isn't mentioned anywhere, so no new reg note is needed for
3159 this register. */
3160 if (insn == first)
3161 break;
3162 }
3163 }
3164 }
3165
3166 /* Subroutine of update_flow_info. Determines whether a SET or CLOBBER in an
3167 insn created by splitting needs a REG_DEAD or REG_UNUSED note added. */
3168
3169 static void
3170 new_insn_dead_notes (pat, insn, last, orig_insn)
3171 rtx pat, insn, last, orig_insn;
3172 {
3173 rtx dest, tem, set;
3174
3175 /* PAT is either a CLOBBER or a SET here. */
3176 dest = XEXP (pat, 0);
3177
3178 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3179 || GET_CODE (dest) == STRICT_LOW_PART
3180 || GET_CODE (dest) == SIGN_EXTRACT)
3181 dest = XEXP (dest, 0);
3182
3183 if (GET_CODE (dest) == REG)
3184 {
3185 for (tem = last; tem != insn; tem = PREV_INSN (tem))
3186 {
3187 if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
3188 && reg_overlap_mentioned_p (dest, PATTERN (tem))
3189 && (set = single_set (tem)))
3190 {
3191 rtx tem_dest = SET_DEST (set);
3192
3193 while (GET_CODE (tem_dest) == ZERO_EXTRACT
3194 || GET_CODE (tem_dest) == SUBREG
3195 || GET_CODE (tem_dest) == STRICT_LOW_PART
3196 || GET_CODE (tem_dest) == SIGN_EXTRACT)
3197 tem_dest = XEXP (tem_dest, 0);
3198
3199 if (tem_dest != dest)
3200 {
3201 /* Use the same scheme as combine.c: don't put both REG_DEAD
3202 and REG_UNUSED notes on the same insn. */
3203 if (! find_regno_note (tem, REG_UNUSED, REGNO (dest))
3204 && ! find_regno_note (tem, REG_DEAD, REGNO (dest)))
3205 {
3206 rtx note = rtx_alloc (EXPR_LIST);
3207 PUT_REG_NOTE_KIND (note, REG_DEAD);
3208 XEXP (note, 0) = dest;
3209 XEXP (note, 1) = REG_NOTES (tem);
3210 REG_NOTES (tem) = note;
3211 }
3212 /* The reg only dies in one insn, the last one that uses
3213 it. */
3214 break;
3215 }
3216 else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
3217 /* We found an instruction that both uses the register,
3218 and sets it, so no new REG_NOTE is needed for this set. */
3219 break;
3220 }
3221 }
3222 /* If this is a set, it must die somewhere, unless it is the dest of
3223 the original insn, and hence is live after the original insn. Abort
3224 if it isn't supposed to be live after the original insn.
3225
3226 If this is a clobber, then just add a REG_UNUSED note. */
3227 if (tem == insn)
3228 {
3229 int live_after_orig_insn = 0;
3230 rtx pattern = PATTERN (orig_insn);
3231 int i;
3232
3233 if (GET_CODE (pat) == CLOBBER)
3234 {
3235 rtx note = rtx_alloc (EXPR_LIST);
3236 PUT_REG_NOTE_KIND (note, REG_UNUSED);
3237 XEXP (note, 0) = dest;
3238 XEXP (note, 1) = REG_NOTES (insn);
3239 REG_NOTES (insn) = note;
3240 return;
3241 }
3242
3243 /* The original insn could have multiple sets, so search the
3244 insn for all sets. */
3245 if (GET_CODE (pattern) == SET)
3246 {
3247 if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
3248 live_after_orig_insn = 1;
3249 }
3250 else if (GET_CODE (pattern) == PARALLEL)
3251 {
3252 for (i = 0; i < XVECLEN (pattern, 0); i++)
3253 if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
3254 && reg_overlap_mentioned_p (dest,
3255 SET_DEST (XVECEXP (pattern,
3256 0, i))))
3257 live_after_orig_insn = 1;
3258 }
3259
3260 if (! live_after_orig_insn)
3261 abort ();
3262 }
3263 }
3264 }
3265
3266 /* Subroutine of update_flow_info. Update the value of reg_n_sets for all
3267 registers modified by X. INC is -1 if the containing insn is being deleted,
3268 and is 1 if the containing insn is a newly generated insn. */
3269
3270 static void
3271 update_n_sets (x, inc)
3272 rtx x;
3273 int inc;
3274 {
3275 rtx dest = SET_DEST (x);
3276
3277 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
3278 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3279 dest = SUBREG_REG (dest);
3280
3281 if (GET_CODE (dest) == REG)
3282 {
3283 int regno = REGNO (dest);
3284
3285 if (regno < FIRST_PSEUDO_REGISTER)
3286 {
3287 register int i;
3288 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
3289
3290 for (i = regno; i < endregno; i++)
3291 reg_n_sets[i] += inc;
3292 }
3293 else
3294 reg_n_sets[regno] += inc;
3295 }
3296 }
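/* A hedged example of this bookkeeping: if splitting turns a single
   (set (reg:DI 80) ...) into two single-word sets of the same pseudo,
   update_flow_info first calls update_n_sets on the original SET with
   inc == -1 and then on each new SET with inc == 1, leaving
   reg_n_sets[80] == 2.  For a hard register destination, every word it
   occupies is adjusted.  */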
3297
3298 /* Updates all flow-analysis related quantities (including REG_NOTES) for
3299 the insns from FIRST to LAST inclusive that were created by splitting
3300 ORIG_INSN. NOTES are the original REG_NOTES. */
3301
3302 static void
3303 update_flow_info (notes, first, last, orig_insn)
3304 rtx notes;
3305 rtx first, last;
3306 rtx orig_insn;
3307 {
3308 rtx insn, note;
3309 rtx next;
3310 rtx orig_dest, temp;
3311 rtx set;
3312
3313 /* Get and save the destination set by the original insn. */
3314
3315 orig_dest = single_set (orig_insn);
3316 if (orig_dest)
3317 orig_dest = SET_DEST (orig_dest);
3318
3319 /* Move REG_NOTES from the original insn to where they now belong. */
3320
3321 for (note = notes; note; note = next)
3322 {
3323 next = XEXP (note, 1);
3324 switch (REG_NOTE_KIND (note))
3325 {
3326 case REG_DEAD:
3327 case REG_UNUSED:
3328 /* Move these notes from the original insn to the last new insn where
3329 the register is now set. */
3330
3331 for (insn = last; ; insn = PREV_INSN (insn))
3332 {
3333 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3334 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3335 {
3336 XEXP (note, 1) = REG_NOTES (insn);
3337 REG_NOTES (insn) = note;
3338
3339 /* Sometimes need to convert REG_UNUSED notes to REG_DEAD
3340 notes. */
3341 /* ??? This won't handle multiple word registers correctly,
3342 but should be good enough for now. */
3343 if (REG_NOTE_KIND (note) == REG_UNUSED
3344 && ! dead_or_set_p (insn, XEXP (note, 0)))
3345 PUT_REG_NOTE_KIND (note, REG_DEAD);
3346
3347 /* The reg only dies in one insn, the last one that uses
3348 it. */
3349 break;
3350 }
3351 /* It must die somewhere, fail if we couldn't find where it died.
3352
3353 If this is a REG_UNUSED note, then it must be a temporary
3354 register that was not needed by this instantiation of the
3355 pattern, so we can safely ignore it. */
3356 if (insn == first)
3357 {
3358 if (REG_NOTE_KIND (note) != REG_UNUSED)
3359 abort ();
3360
3361 break;
3362 }
3363 }
3364
3365 /* If this note refers to a multiple word hard register, it may
3366 have been split into several smaller hard register references.
3367 Check to see if there are any new register references that
3368 need REG_NOTES added for them. */
3369 temp = XEXP (note, 0);
3370 if (REG_NOTE_KIND (note) == REG_DEAD
3371 && GET_CODE (temp) == REG
3372 && REGNO (temp) < FIRST_PSEUDO_REGISTER
3373 && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) > 1)
3374 split_hard_reg_notes (note, first, last, orig_insn);
3375 break;
3376
3377 case REG_WAS_0:
3378 /* This note applies to the dest of the original insn. Find the
3379 first new insn that now has the same dest, and move the note
3380 there. */
3381
3382 if (! orig_dest)
3383 abort ();
3384
3385 for (insn = first; ; insn = NEXT_INSN (insn))
3386 {
3387 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3388 && (temp = single_set (insn))
3389 && rtx_equal_p (SET_DEST (temp), orig_dest))
3390 {
3391 XEXP (note, 1) = REG_NOTES (insn);
3392 REG_NOTES (insn) = note;
3393 /* The reg is only zero before one insn, the first that
3394 sets it. */
3395 break;
3396 }
3397 /* It must be set somewhere, fail if we couldn't find where it
3398 was set. */
3399 if (insn == last)
3400 abort ();
3401 }
3402 break;
3403
3404 case REG_EQUAL:
3405 case REG_EQUIV:
3406 /* A REG_EQUIV or REG_EQUAL note on an insn with more than one
3407 set is meaningless. Just drop the note. */
3408 if (! orig_dest)
3409 break;
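/* Otherwise fall through and handle the note like REG_NO_CONFLICT.  */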
3410
3411 case REG_NO_CONFLICT:
3412 /* These notes apply to the dest of the original insn. Find the last
3413 new insn that now has the same dest, and move the note there. */
3414
3415 if (! orig_dest)
3416 abort ();
3417
3418 for (insn = last; ; insn = PREV_INSN (insn))
3419 {
3420 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3421 && (temp = single_set (insn))
3422 && rtx_equal_p (SET_DEST (temp), orig_dest))
3423 {
3424 XEXP (note, 1) = REG_NOTES (insn);
3425 REG_NOTES (insn) = note;
3426 /* Only put this note on one of the new insns. */
3427 break;
3428 }
3429
3430 /* The original dest must still be set someplace. Abort if we
3431 couldn't find it. */
3432 if (insn == first)
3433 abort ();
3434 }
3435 break;
3436
3437 case REG_LIBCALL:
3438 /* Move a REG_LIBCALL note to the first insn created, and update
3439 the corresponding REG_RETVAL note. */
3440 XEXP (note, 1) = REG_NOTES (first);
3441 REG_NOTES (first) = note;
3442
3443 insn = XEXP (note, 0);
3444 note = find_reg_note (insn, REG_RETVAL, 0);
3445 if (note)
3446 XEXP (note, 0) = first;
3447 break;
3448
3449 case REG_RETVAL:
3450 /* Move a REG_RETVAL note to the last insn created, and update
3451 the corresponding REG_LIBCALL note. */
3452 XEXP (note, 1) = REG_NOTES (last);
3453 REG_NOTES (last) = note;
3454
3455 insn = XEXP (note, 0);
3456 note = find_reg_note (insn, REG_LIBCALL, 0);
3457 if (note)
3458 XEXP (note, 0) = last;
3459 break;
3460
3461 case REG_NONNEG:
3462 /* This should be moved to whichever instruction is a JUMP_INSN. */
3463
3464 for (insn = last; ; insn = PREV_INSN (insn))
3465 {
3466 if (GET_CODE (insn) == JUMP_INSN)
3467 {
3468 XEXP (note, 1) = REG_NOTES (insn);
3469 REG_NOTES (insn) = note;
3470 /* Only put this note on one of the new insns. */
3471 break;
3472 }
3473 /* Fail if we couldn't find a JUMP_INSN. */
3474 if (insn == first)
3475 abort ();
3476 }
3477 break;
3478
3479 case REG_INC:
3480 /* This should be moved to whichever instruction now has the
3481 increment operation. */
3482 abort ();
3483
3484 case REG_LABEL:
3485 /* Should be moved to the new insn(s) which use the label. */
3486 abort ();
3487
3488 case REG_CC_SETTER:
3489 case REG_CC_USER:
3490 /* These two notes will never appear until after reorg, so we don't
3491 have to handle them here. */
3492 default:
3493 abort ();
3494 }
3495 }
3496
3497 /* Each new insn created, except the last, has a new set. If the destination
3498 is a register, then this reg is now live across several insns, whereas
3499 previously the dest reg was born and died within the same insn. To
3500 reflect this, we now need a REG_DEAD note on the insn where this
3501 dest reg dies.
3502
3503 Similarly, the new insns may have clobbers that need REG_UNUSED notes. */
3504
3505 for (insn = first; insn != last; insn = NEXT_INSN (insn))
3506 {
3507 rtx pat;
3508 int i;
3509
3510 pat = PATTERN (insn);
3511 if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
3512 new_insn_dead_notes (pat, insn, last, orig_insn);
3513 else if (GET_CODE (pat) == PARALLEL)
3514 {
3515 for (i = 0; i < XVECLEN (pat, 0); i++)
3516 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3517 || GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
3518 new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
3519 }
3520 }
3521
3522 /* If any insn, except the last, uses the register set by the last insn,
3523 then we need a new REG_DEAD note on that insn. In this case, there
3524 would not have been a REG_DEAD note for this register in the original
3525 insn because it was used and set within one insn.
3526
3527 There is no new REG_DEAD note needed if the last insn uses the register
3528 that it is setting. */
3529
3530 set = single_set (last);
3531 if (set)
3532 {
3533 rtx dest = SET_DEST (set);
3534
3535 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3536 || GET_CODE (dest) == STRICT_LOW_PART
3537 || GET_CODE (dest) == SIGN_EXTRACT)
3538 dest = XEXP (dest, 0);
3539
3540 if (GET_CODE (dest) == REG
3541 && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
3542 {
3543 for (insn = PREV_INSN (last); ; insn = PREV_INSN (insn))
3544 {
3545 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3546 && reg_mentioned_p (dest, PATTERN (insn))
3547 && (set = single_set (insn)))
3548 {
3549 rtx insn_dest = SET_DEST (set);
3550
3551 while (GET_CODE (insn_dest) == ZERO_EXTRACT
3552 || GET_CODE (insn_dest) == SUBREG
3553 || GET_CODE (insn_dest) == STRICT_LOW_PART
3554 || GET_CODE (insn_dest) == SIGN_EXTRACT)
3555 insn_dest = XEXP (insn_dest, 0);
3556
3557 if (insn_dest != dest)
3558 {
3559 note = rtx_alloc (EXPR_LIST);
3560 PUT_REG_NOTE_KIND (note, REG_DEAD);
3561 XEXP (note, 0) = dest;
3562 XEXP (note, 1) = REG_NOTES (insn);
3563 REG_NOTES (insn) = note;
3564 /* The reg only dies in one insn, the last one
3565 that uses it. */
3566 break;
3567 }
3568 }
3569 if (insn == first)
3570 break;
3571 }
3572 }
3573 }
3574
3575 /* If the original dest is modifying a multiple register target, and the
3576 original instruction was split such that the original dest is now set
3577 by two or more SUBREG sets, then the split insns no longer kill the
3578 destination of the original insn.
3579
3580 In this case, if there exists an instruction in the same basic block,
3581 before the split insn, which uses the original dest, and this use is
3582 killed by the original insn, then we must remove the REG_DEAD note on
3583 this insn, because it is now superfluous.
3584
3585 This does not apply when a hard register gets split, because the code
3586 knows how to handle overlapping hard registers properly. */
3587 if (orig_dest && GET_CODE (orig_dest) == REG)
3588 {
3589 int found_orig_dest = 0;
3590 int found_split_dest = 0;
3591
3592 for (insn = first; ; insn = NEXT_INSN (insn))
3593 {
3594 set = single_set (insn);
3595 if (set)
3596 {
3597 if (GET_CODE (SET_DEST (set)) == REG
3598 && REGNO (SET_DEST (set)) == REGNO (orig_dest))
3599 {
3600 found_orig_dest = 1;
3601 break;
3602 }
3603 else if (GET_CODE (SET_DEST (set)) == SUBREG
3604 && SUBREG_REG (SET_DEST (set)) == orig_dest)
3605 {
3606 found_split_dest = 1;
3607 break;
3608 }
3609 }
3610
3611 if (insn == last)
3612 break;
3613 }
3614
3615 if (found_split_dest)
3616 {
3617 /* Search backwards from FIRST, looking for the first insn that uses
3618 the original dest. Stop if we pass a CODE_LABEL or a JUMP_INSN.
3619 If we find an insn, and it has a REG_DEAD note, then delete the
3620 note. */
3621
3622 for (insn = first; insn; insn = PREV_INSN (insn))
3623 {
3624 if (GET_CODE (insn) == CODE_LABEL
3625 || GET_CODE (insn) == JUMP_INSN)
3626 break;
3627 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3628 && reg_mentioned_p (orig_dest, insn))
3629 {
3630 note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
3631 if (note)
3632 remove_note (insn, note);
3633 }
3634 }
3635 }
3636 else if (! found_orig_dest)
3637 {
3638 /* This should never happen. */
3639 abort ();
3640 }
3641 }
3642
3643 /* Update reg_n_sets. This is necessary to prevent local alloc from
3644 converting REG_EQUAL notes to REG_EQUIV when splitting has changed
3645 a reg from being set once to being set multiple times. */
3646
3647 {
3648 rtx x = PATTERN (orig_insn);
3649 RTX_CODE code = GET_CODE (x);
3650
3651 if (code == SET || code == CLOBBER)
3652 update_n_sets (x, -1);
3653 else if (code == PARALLEL)
3654 {
3655 int i;
3656 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3657 {
3658 code = GET_CODE (XVECEXP (x, 0, i));
3659 if (code == SET || code == CLOBBER)
3660 update_n_sets (XVECEXP (x, 0, i), -1);
3661 }
3662 }
3663
3664 for (insn = first; ; insn = NEXT_INSN (insn))
3665 {
3666 x = PATTERN (insn);
3667 code = GET_CODE (x);
3668
3669 if (code == SET || code == CLOBBER)
3670 update_n_sets (x, 1);
3671 else if (code == PARALLEL)
3672 {
3673 int i;
3674 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3675 {
3676 code = GET_CODE (XVECEXP (x, 0, i));
3677 if (code == SET || code == CLOBBER)
3678 update_n_sets (XVECEXP (x, 0, i), 1);
3679 }
3680 }
3681
3682 if (insn == last)
3683 break;
3684 }
3685 }
3686 }
3687
3688 /* The one entry point in this file. DUMP_FILE is the dump file for
3689 this pass. */
3690
3691 void
3692 schedule_insns (dump_file)
3693 FILE *dump_file;
3694 {
3695 int max_uid = MAX_INSNS_PER_SPLIT * (get_max_uid () + 1);
3696 int i, b;
3697 rtx insn;
3698
3699 /* Taking care of this degenerate case makes the rest of
3700 this code simpler. */
3701 if (n_basic_blocks == 0)
3702 return;
3703
3704 /* Create an insn here so that we can hang dependencies off of it later. */
3705 sched_before_next_call = gen_rtx (INSN, VOIDmode, 0, 0, 0, 0, 0, 0, 0);
3706
3707 /* Initialize the unused_*_lists. We can't use the ones left over from
3708 the previous function, because gcc has freed that memory. We can,
3709 however, reuse the ones left over from the first sched pass in the
3710 second pass, so only clear them on the first pass. The first pass runs
3711 before reload if flag_schedule_insns is set, otherwise after it. */
3712
3713 if (reload_completed == 0 || ! flag_schedule_insns)
3714 {
3715 unused_insn_list = 0;
3716 unused_expr_list = 0;
3717 }
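/* For example: with both -fschedule-insns and -fschedule-insns2, the
lists are cleared here only by the pre-reload pass and reused by the
post-reload pass; with -fschedule-insns2 alone, the only pass runs
after reload and clears them itself. */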
3718
3719 /* We create no insns here, only reorder them; all the data this pass
3720 needs is therefore taken with alloca and reclaimed when we return. */
3721
3722 /* Allocate data for this pass. See comments, above,
3723 for what these vectors do. */
3724 /* ??? Instruction splitting below may create new instructions, so these
3725 arrays must be bigger than just max_uid. */
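/* max_uid above was computed as MAX_INSNS_PER_SPLIT * (get_max_uid () + 1)
to leave room for insns created by splitting. For example, if the
largest uid were 100 and the split factor 5 (an illustrative value;
see the macro's definition earlier in this file), each per-insn array
below would get 505 entries. */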
3726 insn_luid = (int *) alloca (max_uid * sizeof (int));
3727 insn_priority = (int *) alloca (max_uid * sizeof (int));
3728 insn_ref_count = (int *) alloca (max_uid * sizeof (int));
3729
3730 if (reload_completed == 0)
3731 {
3732 sched_reg_n_deaths = (short *) alloca (max_regno * sizeof (short));
3733 sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
3734 sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
3735 bb_dead_regs = (regset) alloca (regset_bytes);
3736 bb_live_regs = (regset) alloca (regset_bytes);
3737 bzero (sched_reg_n_calls_crossed, max_regno * sizeof (int));
3738 bzero (sched_reg_live_length, max_regno * sizeof (int));
3739 bcopy (reg_n_deaths, sched_reg_n_deaths, max_regno * sizeof (short));
3740 init_alias_analysis ();
3741 }
3742 else
3743 {
3744 sched_reg_n_deaths = 0;
3745 sched_reg_n_calls_crossed = 0;
3746 sched_reg_live_length = 0;
3747 bb_dead_regs = 0;
3748 bb_live_regs = 0;
3749 if (! flag_schedule_insns)
3750 init_alias_analysis ();
3751 }
3752
3753 if (write_symbols != NO_DEBUG)
3754 {
3755 rtx line;
3756
3757 line_note = (rtx *) alloca (max_uid * sizeof (rtx));
3758 bzero (line_note, max_uid * sizeof (rtx));
3759 line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
3760 bzero (line_note_head, n_basic_blocks * sizeof (rtx));
3761
3762 /* Determine the line-number at the start of each basic block.
3763 This must be computed and saved now, because after a basic block's
3764 predecessor has been scheduled, it is impossible to accurately
3765 determine the correct line number for the first insn of the block. */
3766
3767 for (b = 0; b < n_basic_blocks; b++)
3768 for (line = basic_block_head[b]; line; line = PREV_INSN (line))
3769 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3770 {
3771 line_note_head[b] = line;
3772 break;
3773 }
3774 }
3775
3776 bzero (insn_luid, max_uid * sizeof (int));
3777 bzero (insn_priority, max_uid * sizeof (int));
3778 bzero (insn_ref_count, max_uid * sizeof (int));
3779
3780 /* Schedule each basic block, block by block. */
3781
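/* If nothing follows the last basic block's end, or that end is neither
a NOTE nor a CODE_LABEL, append a deleted note after it, apparently so
that the code below can always rely on some insn following each block
end. */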
3782 if (NEXT_INSN (basic_block_end[n_basic_blocks-1]) == 0
3783 || (GET_CODE (basic_block_end[n_basic_blocks-1]) != NOTE
3784 && GET_CODE (basic_block_end[n_basic_blocks-1]) != CODE_LABEL))
3785 emit_note_after (NOTE_INSN_DELETED, basic_block_end[n_basic_blocks-1]);
3786
3787 for (b = 0; b < n_basic_blocks; b++)
3788 {
3789 rtx insn, next;
3790 rtx insns;
3791
3792 note_list = 0;
3793
3794 for (insn = basic_block_head[b]; ; insn = next)
3795 {
3796 rtx prev;
3797 rtx set;
3798
3799 /* Can't use `next_real_insn' here, because it
3800 might step across a CODE_LABEL and out of this basic block. */
3801 next = NEXT_INSN (insn);
3802 if (GET_CODE (insn) != INSN)
3803 {
3804 if (insn == basic_block_end[b])
3805 break;
3806
3807 continue;
3808 }
3809
3810 /* Don't split no-op move insns. These should silently disappear
3811 later in final. Splitting such insns would break the code
3812 that handles REG_NO_CONFLICT blocks. */
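/* Illustrative example: an insn whose whole effect is
(set (reg:SI 100) (reg:SI 100))
is a no-op move; single_set finds the SET and rtx_equal_p confirms
that source and destination match. */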
3813 set = single_set (insn);
3814 if (set && rtx_equal_p (SET_SRC (set), SET_DEST (set)))
3815 {
3816 if (insn == basic_block_end[b])
3817 break;
3818
3819 /* Nops get in the way while scheduling, so delete them now if
3820 register allocation has already been done. It is too risky
3821 to try to do this before register allocation, and there are
3822 unlikely to be very many nops then anyway. */
3823 if (reload_completed)
3824 {
3825 PUT_CODE (insn, NOTE);
3826 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
3827 NOTE_SOURCE_FILE (insn) = 0;
3828 }
3829
3830 continue;
3831 }
3832
3833 /* Split insns here to get max fine-grain parallelism. */
3834 prev = PREV_INSN (insn);
3835 if (reload_completed == 0)
3836 {
3837 rtx last, first = PREV_INSN (insn);
3838 rtx notes = REG_NOTES (insn);
3839
3840 last = try_split (PATTERN (insn), insn, 1);
3841 if (last != insn)
3842 {
3843 /* try_split returns the NOTE that INSN became. */
3844 first = NEXT_INSN (first);
3845 update_flow_info (notes, first, last, insn);
3846
3847 PUT_CODE (insn, NOTE);
3848 NOTE_SOURCE_FILE (insn) = 0;
3849 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
3850 if (insn == basic_block_head[b])
3851 basic_block_head[b] = first;
3852 if (insn == basic_block_end[b])
3853 {
3854 basic_block_end[b] = last;
3855 break;
3856 }
3857 }
3858 }
3859
3860 if (insn == basic_block_end[b])
3861 break;
3862 }
3863
3864 schedule_block (b, dump_file);
3865
3866 #ifdef USE_C_ALLOCA
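/* With the C emulation of alloca, alloca (0) frees the emulation's
storage for frames that have exited, keeping memory use bounded
across blocks. */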
3867 alloca (0);
3868 #endif
3869 }
3870
3871 if (write_symbols != NO_DEBUG)
3872 {
3873 rtx line = 0;
3874 rtx insn = get_insns ();
3875 int active_insn = 0;
3876 int notes = 0;
3877
3878 /* Walk the insns, deleting redundant line-number notes. Many of
3879 these were already redundant before scheduling; the rest tend to
3880 appear at basic block boundaries. */
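/* Illustrative examples: a line note followed by no active insn before
the next line note is dead; and when two notes give the same file and
line with active insns between them, the later note merely repeats the
earlier one and is deleted. */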
3881 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
3882 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3883 {
3884 /* If there are no active insns following, INSN is redundant. */
3885 if (active_insn == 0)
3886 {
3887 notes++;
3888 NOTE_SOURCE_FILE (insn) = 0;
3889 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
3890 }
3891 /* If the line number is unchanged, LINE is redundant. */
3892 else if (line
3893 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
3894 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
3895 {
3896 notes++;
3897 NOTE_SOURCE_FILE (line) = 0;
3898 NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
3899 line = insn;
3900 }
3901 else
3902 line = insn;
3903 active_insn = 0;
3904 }
3905 else if (! ((GET_CODE (insn) == NOTE
3906 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
3907 || (GET_CODE (insn) == INSN
3908 && (GET_CODE (PATTERN (insn)) == USE
3909 || GET_CODE (PATTERN (insn)) == CLOBBER))))
3910 active_insn++;
3911
3912 if (dump_file && notes)
3913 fprintf (dump_file, ";; deleted %d line-number notes\n", notes);
3914 }
3915
3916 if (reload_completed == 0)
3917 {
3918 int regno;
3919 for (regno = 0; regno < max_regno; regno++)
3920 if (sched_reg_live_length[regno])
3921 {
3922 if (dump_file)
3923 {
3924 if (reg_live_length[regno] > sched_reg_live_length[regno])
3925 fprintf (dump_file,
3926 ";; register %d life shortened from %d to %d\n",
3927 regno, reg_live_length[regno],
3928 sched_reg_live_length[regno]);
3929 /* Negative values are special; don't report a lengthened life
3930 when the current reg_live_length value is negative. */
3931 else if (reg_live_length[regno] < sched_reg_live_length[regno]
3932 && reg_live_length[regno] >= 0)
3933 fprintf (dump_file,
3934 ";; register %d life extended from %d to %d\n",
3935 regno, reg_live_length[regno],
3936 sched_reg_live_length[regno]);
3937
3938 if (reg_n_calls_crossed[regno]
3939 && ! sched_reg_n_calls_crossed[regno])
3940 fprintf (dump_file,
3941 ";; register %d no longer crosses calls\n", regno);
3942 else if (! reg_n_calls_crossed[regno]
3943 && sched_reg_n_calls_crossed[regno])
3944 fprintf (dump_file,
3945 ";; register %d now crosses calls\n", regno);
3946 }
3947 reg_live_length[regno] = sched_reg_live_length[regno];
3948 reg_n_calls_crossed[regno] = sched_reg_n_calls_crossed[regno];
3949 }
3950 }
3951 }
3952 #endif /* INSN_SCHEDULING */