/* Instruction scheduling pass.
   Copyright (C) 1992 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

/* Instruction scheduling pass.

   This pass implements list scheduling within basic blocks.  It is
   run after flow analysis, but before register allocation.  The
   scheduler works as follows:

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning
   values to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we also add it to the ready list.  When all insns down
   to the lowest priority have been scheduled, the critical path of the
   basic block has been made as short as possible.  The remaining insns
   are then scheduled in remaining slots.

   The following list shows the order in which we want to break ties:

	1.  choose insn with lowest conflict cost, ties broken by
	2.  choose insn with the longest path to end of bb, ties broken by
	3.  choose insn that kills the most registers, ties broken by
	4.  choose insn that conflicts with the most ready insns, or finally
	5.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not do any branch scheduling.  But
   that is ok, since we can use GNU's delayed branch scheduling
   pass to take care of this case.

   Also note that no further optimizations based on algebraic identities
   are performed, so this pass would be a good one to perform instruction
   splitting, such as breaking up a multiply instruction into shifts
   and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   This pass must update information that subsequent passes expect to be
   correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, basic_block_head,
   basic_block_end.

   The information in the line number notes is carefully retained by this
   pass.  All other NOTE insns are grouped in their same relative order at
   the beginning of basic blocks that have been scheduled.  */
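
/* A small worked instance of the priority computation described above,
   on a hypothetical three-insn chain (not taken from real output):
   i2 uses the result of i1, i3 uses the result of i2, and every
   latency (insn_cost) is 2 cycles:

	i1: r1 = mem	i2: r2 = r1 + r1	i3: mem = r2

   An insn with no dependencies gets priority 1, so priority (i1) = 1.
   Each data dependence link contributes priority (x) + insn_cost (x) - 1,
   giving priority (i2) = 1 + 2 - 1 = 2 and priority (i3) = 2 + 2 - 1 = 3.
   An insn's priority is thus the length in cycles of the longest
   dependence chain reaching it; since the schedule is built from the
   last insn backwards, higher-priority insns are committed first, which
   is what keeps the critical path short.  */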
\f
#include <stdio.h>
#include "config.h"
#include "rtl.h"
#include "basic-block.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"

/* Arrays set up by scheduling for the same respective purposes as
   similar-named arrays set up by flow analysis.  We work with these
   arrays during the scheduling pass so we can compare values against
   unscheduled code.

   Values of these arrays are copied at the end of this pass into the
   arrays set up by flow analysis.  */
static short *sched_reg_n_deaths;
static int *sched_reg_n_calls_crossed;
static int *sched_reg_live_length;

/* Element N is the next insn that sets (hard or pseudo) register
   N within the current basic block; or zero, if there is no
   such insn.  Needed for new registers which may be introduced
   by splitting insns.  */
static rtx *reg_last_uses;
static rtx *reg_last_sets;

/* Vector indexed by INSN_UID giving the original ordering of the insns.  */
static int *insn_luid;
#define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving each instruction a priority.  */
static int *insn_priority;
#define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])

#define DONE_PRIORITY -1
#define MAX_PRIORITY 0x7fffffff
#define TAIL_PRIORITY 0x7ffffffe
#define LAUNCH_PRIORITY 0x7f000001
#define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
#define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)

/* Vector indexed by INSN_UID giving number of insns referring to this insn.  */
static int *insn_ref_count;
#define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving line-number note in effect for each
   insn.  For line-number notes, this indicates whether the note may be
   reused.  */
static rtx *line_note;
#define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

/* Regsets telling whether a given register is live or dead before the last
   scheduled insn.  Must scan the instructions once before scheduling to
   determine what registers are live or dead at the end of the block.  */
static regset bb_dead_regs;
static regset bb_live_regs;

/* Regset telling whether a given register is live after the insn currently
   being scheduled.  Before processing an insn, this is equal to bb_live_regs
   above.  This is used so that we can find registers that are newly born/dead
   after processing an insn.  */
static regset old_live_regs;

/* The chain of REG_DEAD notes.  REG_DEAD notes are removed from all insns
   during the initial scan and reused later.  If there are not exactly as
   many REG_DEAD notes in the post-scheduled code as there were in the
   prescheduled code then we trigger an abort because this indicates a bug.  */
static rtx dead_notes;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns following it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  We maintain three lists (conceptually):

   (1) a "Ready" list of unscheduled, uncommitted insns
   (2) a "Scheduled" list of scheduled insns
   (3) a "Pending" list of insns which could be scheduled, were it
       not for pipeline stalls

   Insns move from the "Ready" list to the "Pending" list when
   all insns following them have been scheduled.

   Insns move from the "Pending" list to the "Scheduled" list
   when there is sufficient space in the pipeline to prevent
   stalls between the insn and scheduled insns which use it.

   The "Pending" list acts as a buffer to prevent insns
   from avalanching.

   The "Ready" list is implemented by the variable `ready'.
   The "Pending" list consists of the insns in the LOG_LINKS of ready insns.
   The "Scheduled" list is the new insn chain built by this pass.  */

/* Implement a circular buffer from which instructions are issued.  */
#define Q_SIZE 128
static rtx insn_queue[Q_SIZE];
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & (Q_SIZE-1))
#define NEXT_Q_AFTER(X,C) (((X)+(C)) & (Q_SIZE-1))
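
/* Illustrative sketch (not part of this pass): because Q_SIZE is a
   power of two, the NEXT_Q macros above wrap indices with a bit mask
   instead of a modulo.  A stand-alone demonstration, using ints in
   place of the rtx entries the real queue holds:  */
#if 0
#include <stdio.h>

#define DEMO_Q_SIZE 8			/* must be a power of two */
#define DEMO_NEXT_Q(X) (((X) + 1) & (DEMO_Q_SIZE - 1))

int
main ()
{
  int q = DEMO_Q_SIZE - 2;
  int i;

  /* Stepping past the end wraps to 0: prints 6 7 0 1.  */
  for (i = 0; i < 4; i++, q = DEMO_NEXT_Q (q))
    printf ("%d ", q);
  printf ("\n");
  return 0;
}
#endif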
\f
/* Forward declarations.  */
static void sched_analyze_2 ();
static void schedule_block ();

/* Main entry point of this file.  */
void schedule_insns ();
\f
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Vector indexed by N giving the initial (unchanging) value known
   for pseudo-register N.  */
static rtx *reg_known_value;

/* Indicates number of valid entries in reg_known_value.  */
static int reg_known_value_size;

static rtx
canon_rtx (x)
     rtx x;
{
  if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER
      && REGNO (x) < reg_known_value_size)
    return reg_known_value[REGNO (x)];
  else if (GET_CODE (x) == PLUS)
    {
      rtx x0 = canon_rtx (XEXP (x, 0));
      rtx x1 = canon_rtx (XEXP (x, 1));

      if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
	{
	  /* We can tolerate LO_SUMs being offset here; these
	     rtl are used for nothing other than comparisons.  */
	  if (GET_CODE (x0) == CONST_INT)
	    return plus_constant_for_output (x1, INTVAL (x0));
	  else if (GET_CODE (x1) == CONST_INT)
	    return plus_constant_for_output (x0, INTVAL (x1));
	  return gen_rtx (PLUS, GET_MODE (x), x0, x1);
	}
    }
  return x;
}
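
/* For example (a hypothetical case, not real compiler output): if
   pseudo 70 is known to hold `(symbol_ref "x")', then canonicalizing
   the address `(plus (reg 70) (const_int 4))' substitutes the known
   value and folds the constant, so two differently-numbered registers
   holding the same unchanging value compare equal in
   rtx_equal_for_memref_p below.  */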

/* Set up all info needed to perform alias analysis on memory references.  */

void
init_alias_analysis ()
{
  int maxreg = max_reg_num ();
  rtx insn;
  rtx note;
  rtx set;

  reg_known_value_size = maxreg;

  reg_known_value
    = (rtx *) oballoc ((maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx))
      - FIRST_PSEUDO_REGISTER;
  bzero (reg_known_value+FIRST_PSEUDO_REGISTER,
	 (maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx));

  /* Fill in the entries with known constant values.  */
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if ((set = single_set (insn)) != 0
	&& GET_CODE (SET_DEST (set)) == REG
	&& REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
	&& (((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
	     && reg_n_sets[REGNO (SET_DEST (set))] == 1)
	    || (note = find_reg_note (insn, REG_EQUIV, 0)) != 0)
	&& GET_CODE (XEXP (note, 0)) != EXPR_LIST)
      reg_known_value[REGNO (SET_DEST (set))] = XEXP (note, 0);

  /* Fill in the remaining entries.  */
  while (--maxreg >= FIRST_PSEUDO_REGISTER)
    if (reg_known_value[maxreg] == 0)
      reg_known_value[maxreg] = regno_reg_rtx[maxreg];
}

/* Return 1 if X and Y are identical-looking rtx's.

   We use the data in reg_known_value above to see if two registers with
   different numbers are, in fact, equivalent.  */

static int
rtx_equal_for_memref_p (x, y)
     rtx x, y;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0 && y == 0)
    return 1;
  if (x == 0 || y == 0)
    return 0;
  x = canon_rtx (x);
  y = canon_rtx (y);

  if (x == y)
    return 1;

  code = GET_CODE (x);
  /* Rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively.  */

  if (code == REG)
    return REGNO (x) == REGNO (y);
  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fails to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'n':
	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'V':
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_memref_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_memref_p (XEXP (x, i), XEXP (y, i)) == 0)
	    return 0;
	  break;

	case 'S':
	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;

	case '0':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }
  return 1;
}

/* Given an rtx X, find a SYMBOL_REF or LABEL_REF within
   X and return it, or return 0 if none found.  */

static rtx
find_symbolic_term (x)
     rtx x;
{
  register int i;
  register enum rtx_code code;
  register char *fmt;

  code = GET_CODE (x);
  if (code == SYMBOL_REF || code == LABEL_REF)
    return x;
  if (GET_RTX_CLASS (code) == 'o')
    return 0;

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      rtx t;

      if (fmt[i] == 'e')
	{
	  t = find_symbolic_term (XEXP (x, i));
	  if (t != 0)
	    return t;
	}
      else if (fmt[i] == 'E')
	break;
    }
  return 0;
}

/* Return nonzero if X and Y (memory addresses) could reference the
   same location in memory.  C is an offset accumulator.  When
   C is nonzero, we are testing aliases between X and Y + C.
   XSIZE is the size in bytes of the X reference,
   similarly YSIZE is the size in bytes for Y.

   If XSIZE or YSIZE is zero, we do not know the amount of memory being
   referenced (the reference was BLKmode), so make the most pessimistic
   assumptions.

   We recognize the following cases of non-conflicting memory:

	(1) addresses involving the frame pointer cannot conflict
	    with addresses involving static variables.
	(2) static variables with different addresses cannot conflict.

   It would be nice to notice that varying addresses cannot conflict with
   fp if no local variables had their addresses taken, but that's too hard
   now.  */

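/* A worked instance of the overlap test used throughout this function
   (hypothetical numbers, not from real code): with X of size XSIZE = 4
   at offset 0, and Y + C at byte offset C = 4 with YSIZE = 4, neither
   `(c >= 0 && xsize > c)' (4 > 4 is false) nor `(c < 0 && ysize+c > 0)'
   holds, so adjacent 4-byte references do not conflict.  With C = 2
   instead, `xsize > c' gives 4 > 2, and the references are reported as
   conflicting.  */
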
static int
memrefs_conflict_p (xsize, x, ysize, y, c)
     rtx x, y;
     int xsize, ysize;
     int c;
{
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);
  else if (GET_CODE (x) == LO_SUM)
    x = XEXP (x, 1);
  else
    x = canon_rtx (x);
  if (GET_CODE (y) == HIGH)
    y = XEXP (y, 0);
  else if (GET_CODE (y) == LO_SUM)
    y = XEXP (y, 1);
  else
    y = canon_rtx (y);

  if (rtx_equal_for_memref_p (x, y))
    return (xsize == 0 || ysize == 0 ||
	    (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));

  if (y == frame_pointer_rtx || y == stack_pointer_rtx)
    {
      rtx t = y;
      int tsize = ysize;
      y = x; ysize = xsize;
      x = t; xsize = tsize;
    }

  if (x == frame_pointer_rtx || x == stack_pointer_rtx)
    {
      rtx y1;

      if (CONSTANT_P (y))
	return 0;

      if (GET_CODE (y) == PLUS
	  && canon_rtx (XEXP (y, 0)) == x
	  && (y1 = canon_rtx (XEXP (y, 1)))
	  && GET_CODE (y1) == CONST_INT)
	{
	  c += INTVAL (y1);
	  return (xsize == 0 || ysize == 0
		  || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
	}

      if (GET_CODE (y) == PLUS
	  && (y1 = canon_rtx (XEXP (y, 0)))
	  && CONSTANT_P (y1))
	return 0;

      return 1;
    }

  if (GET_CODE (x) == PLUS)
    {
      /* The fact that X is canonicalized means that this
	 PLUS rtx is canonicalized.  */
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (GET_CODE (y) == PLUS)
	{
	  /* The fact that Y is canonicalized means that this
	     PLUS rtx is canonicalized.  */
	  rtx y0 = XEXP (y, 0);
	  rtx y1 = XEXP (y, 1);

	  if (rtx_equal_for_memref_p (x1, y1))
	    return memrefs_conflict_p (xsize, x0, ysize, y0, c);
	  if (rtx_equal_for_memref_p (x0, y0))
	    return memrefs_conflict_p (xsize, x1, ysize, y1, c);
	  if (GET_CODE (x1) == CONST_INT)
	    {
	      if (GET_CODE (y1) == CONST_INT)
		return memrefs_conflict_p (xsize, x0, ysize, y0,
					   c - INTVAL (x1) + INTVAL (y1));
	      else
		return memrefs_conflict_p (xsize, x0, ysize, y,
					   c - INTVAL (x1));
	    }
	  else if (GET_CODE (y1) == CONST_INT)
	    return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));

	  /* Handle case where we cannot understand iteration operators,
	     but we notice that the base addresses are distinct objects.  */
	  x = find_symbolic_term (x);
	  if (x == 0)
	    return 1;
	  y = find_symbolic_term (y);
	  if (y == 0)
	    return 1;
	  return rtx_equal_for_memref_p (x, y);
	}
      else if (GET_CODE (x1) == CONST_INT)
	return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
    }
  else if (GET_CODE (y) == PLUS)
    {
      /* The fact that Y is canonicalized means that this
	 PLUS rtx is canonicalized.  */
      rtx y0 = XEXP (y, 0);
      rtx y1 = XEXP (y, 1);

      if (GET_CODE (y1) == CONST_INT)
	return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
      else
	return 1;
    }

  if (GET_CODE (x) == GET_CODE (y))
    switch (GET_CODE (x))
      {
      case MULT:
	{
	  /* Handle cases where we expect the second operands to be the
	     same, and check only whether the first operand would conflict
	     or not.  */
	  rtx x0, y0;
	  rtx x1 = canon_rtx (XEXP (x, 1));
	  rtx y1 = canon_rtx (XEXP (y, 1));
	  if (! rtx_equal_for_memref_p (x1, y1))
	    return 1;
	  x0 = canon_rtx (XEXP (x, 0));
	  y0 = canon_rtx (XEXP (y, 0));
	  if (rtx_equal_for_memref_p (x0, y0))
	    return (xsize == 0 || ysize == 0
		    || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));

	  /* Can't properly adjust our sizes.  */
	  if (GET_CODE (x1) != CONST_INT)
	    return 1;
	  xsize /= INTVAL (x1);
	  ysize /= INTVAL (x1);
	  c /= INTVAL (x1);
	  return memrefs_conflict_p (xsize, x0, ysize, y0, c);
	}
      }

  if (CONSTANT_P (x))
    {
      if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
	{
	  c += (INTVAL (y) - INTVAL (x));
	  return (xsize == 0 || ysize == 0
		  || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
	}

      if (GET_CODE (x) == CONST)
	{
	  if (GET_CODE (y) == CONST)
	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
				       ysize, canon_rtx (XEXP (y, 0)), c);
	  else
	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
				       ysize, y, c);
	}
      if (GET_CODE (y) == CONST)
	return memrefs_conflict_p (xsize, x, ysize,
				   canon_rtx (XEXP (y, 0)), c);

      if (CONSTANT_P (y))
	return (rtx_equal_for_memref_p (x, y)
		&& (xsize == 0 || ysize == 0
		    || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)));

      return 1;
    }
  return 1;
}

/* Functions to compute memory dependencies.

   Since we process the insns in execution order, we can build tables
   to keep track of what registers are fixed (and not aliased), what registers
   are varying in known ways, and what registers are varying in unknown
   ways.

   If both memory references are volatile, then there must always be a
   dependence between the two references, since their order can not be
   changed.  A volatile and non-volatile reference can be interchanged
   though.

   A MEM_IN_STRUCT reference at a varying address can never conflict with a
   non-MEM_IN_STRUCT reference at a fixed address.  */

/* Read dependence: X is read after read in MEM takes place.  There can
   only be a dependence here if both reads are volatile.  */

int
read_dependence (mem, x)
     rtx mem;
     rtx x;
{
  return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
}

/* True dependence: X is read after store in MEM takes place.  */

int
true_dependence (mem, x)
     rtx mem;
     rtx x;
{
  if (RTX_UNCHANGING_P (x))
    return 0;

  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
	  || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
				  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
	      && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
		    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
	      && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
		    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}

/* Anti dependence: X is written after read in MEM takes place.  */

int
anti_dependence (mem, x)
     rtx mem;
     rtx x;
{
  if (RTX_UNCHANGING_P (mem))
    return 0;

  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
	  || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
				  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
	      && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
		    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
	      && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
		    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}

/* Output dependence: X is written after store in MEM takes place.  */

int
output_dependence (mem, x)
     rtx mem;
     rtx x;
{
  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
	  || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
				  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
	      && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
		    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
	      && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
		    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}
\f
#ifndef INSN_SCHEDULING
void schedule_insns () {}
#else
#ifndef __GNUC__
#define __inline
#endif

/* Computation of memory dependencies.  */

/* The *_insns and *_mems are paired lists.  Each pending memory operation
   will have a pointer to the MEM rtx on one list and a pointer to the
   containing insn on the other list in the same place in the list.  */

/* We can't use add_dependence like the old code did, because a single insn
   may have multiple memory accesses, and hence needs to be on the list
   once for each memory access.  Add_dependence won't let you add an insn
   to a list more than once.  */

/* An INSN_LIST containing all insns with pending read operations.  */
static rtx pending_read_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
static rtx pending_read_mems;

/* An INSN_LIST containing all insns with pending write operations.  */
static rtx pending_write_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
static rtx pending_write_mems;

/* Indicates the combined length of the two pending lists.  We must prevent
   these lists from ever growing too large since the number of dependencies
   produced is at least O(N*N), and execution time is at least O(4*N*N), as
   a function of the length of these pending lists.  */

static int pending_lists_length;

/* An INSN_LIST containing all INSN_LISTs allocated but currently unused.  */

static rtx unused_insn_list;

/* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused.  */

static rtx unused_expr_list;

/* The last insn upon which all memory references must depend.
   This is an insn which flushed the pending lists, creating a dependency
   between it and all previously pending memory references.  This creates
   a barrier (or a checkpoint) which no memory reference is allowed to cross.

   This includes all non-constant CALL_INSNs.  When we do interprocedural
   alias analysis, this restriction can be relaxed.
   This may also be an INSN that writes memory if the pending lists grow
   too large.  */

static rtx last_pending_memory_flush;

/* The last function call we have seen.  All hard regs, and, of course,
   the last function call, must depend on this.  */

static rtx last_function_call;

/* The LOG_LINKS field of this is a list of insns which use a pseudo register
   that does not already cross a call.  We create dependencies between each
   of those insns and the next call insn, to ensure that they won't cross a
   call after scheduling is done.  */

static rtx sched_before_next_call;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;

/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
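
/* For instance, in a hypothetical sequence like

	i1: r1 = mem[a]		(read)
	i2: mem[a] = r2		(write)
	i3: mem[a] = r3		(write)

   i2 is anti-dependent on i1 (a write may not move above a read of the
   same location), i3 is output-dependent on i2 (the final contents of
   mem[a] must be r3), and any later read of mem[a] would be truly
   dependent on i3.  None of these insns may be reordered with respect
   to one another.  */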
\f
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

__inline static rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
	return list;
      list = XEXP (list, 1);
    }
  return 0;
}

/* Compute cost of executing INSN.  This is the number of virtual
   cycles taken between instruction issue and instruction results.  */

__inline static int
insn_cost (insn)
     rtx insn;
{
  register int cost;

  recog_memoized (insn);

  /* A USE insn, or something else we don't need to understand.
     We can't pass these directly to result_ready_cost because it will
     trigger a fatal error for unrecognizable insns.  */
  if (INSN_CODE (insn) < 0)
    return 1;
  else
    {
      cost = result_ready_cost (insn);

      if (cost < 1)
	cost = 1;

      return cost;
    }
}

/* Compute the priority number for INSN.  */

static int
priority (insn)
     rtx insn;
{
  if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
    {
      int prev_priority;
      int max_priority;
      int this_priority = INSN_PRIORITY (insn);
      rtx prev;

      if (this_priority > 0)
	return this_priority;

      max_priority = 1;

      /* Nonzero if these insns must be scheduled together.  */
      if (SCHED_GROUP_P (insn))
	{
	  prev = insn;
	  while (SCHED_GROUP_P (prev))
	    {
	      prev = PREV_INSN (prev);
	      INSN_REF_COUNT (prev) += 1;
	    }
	}

      for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
	{
	  rtx x = XEXP (prev, 0);

	  /* A dependence pointing to a note is always obsolete, because
	     sched_analyze_insn will have created any necessary new
	     dependences which replace it.  Notes can be created when
	     instructions are deleted by insn splitting, or by register
	     allocation.  */
	  if (GET_CODE (x) == NOTE)
	    {
	      remove_dependence (insn, x);
	      continue;
	    }

	  /* This priority calculation was chosen because it results in the
	     least instruction movement, and does not hurt the performance
	     of the resulting code compared to the old algorithm.
	     This makes the sched algorithm more stable, which results
	     in better code, because there is less register pressure,
	     cross jumping is more likely to work, and debugging is easier.

	     When all instructions have a latency of 1, there is no need to
	     move any instructions.  Subtracting one here ensures that in such
	     cases all instructions will end up with a priority of one, and
	     hence no scheduling will be done.

	     The original code did not subtract the one, and added the
	     insn_cost of the current instruction to its priority (e.g.
	     move the insn_cost call down to the end).  */

	  if (REG_NOTE_KIND (prev) == 0)
	    /* Data dependence.  */
	    prev_priority = priority (x) + insn_cost (x) - 1;
	  else
	    /* Anti or output dependence.  Don't add the latency of this
	       insn's result, because it isn't being used.  */
	    prev_priority = priority (x);

	  if (prev_priority > max_priority)
	    max_priority = prev_priority;
	  INSN_REF_COUNT (x) += 1;
	}

      INSN_PRIORITY (insn) = max_priority;
      return INSN_PRIORITY (insn);
    }
  return 0;
}
\f
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
   them to the unused_*_list variables, so that they can be reused.  */

static void
free_pending_lists ()
{
  register rtx link, prev_link;

  if (pending_read_insns)
    {
      prev_link = pending_read_insns;
      link = XEXP (prev_link, 1);

      while (link)
	{
	  prev_link = link;
	  link = XEXP (link, 1);
	}

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_read_insns;
      pending_read_insns = 0;
    }

  if (pending_write_insns)
    {
      prev_link = pending_write_insns;
      link = XEXP (prev_link, 1);

      while (link)
	{
	  prev_link = link;
	  link = XEXP (link, 1);
	}

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_write_insns;
      pending_write_insns = 0;
    }

  if (pending_read_mems)
    {
      prev_link = pending_read_mems;
      link = XEXP (prev_link, 1);

      while (link)
	{
	  prev_link = link;
	  link = XEXP (link, 1);
	}

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_read_mems;
      pending_read_mems = 0;
    }

  if (pending_write_mems)
    {
      prev_link = pending_write_mems;
      link = XEXP (prev_link, 1);

      while (link)
	{
	  prev_link = link;
	  link = XEXP (link, 1);
	}

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_write_mems;
      pending_write_mems = 0;
    }
}

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (insn_list, mem_list, insn, mem)
     rtx *insn_list, *mem_list, insn, mem;
{
  register rtx link;

  if (unused_insn_list)
    {
      link = unused_insn_list;
      unused_insn_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (INSN_LIST);
  XEXP (link, 0) = insn;
  XEXP (link, 1) = *insn_list;
  *insn_list = link;

  if (unused_expr_list)
    {
      link = unused_expr_list;
      unused_expr_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (EXPR_LIST);
  XEXP (link, 0) = mem;
  XEXP (link, 1) = *mem_list;
  *mem_list = link;

  pending_lists_length++;
}
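
/* Illustrative sketch (not part of this pass): the free-list recycling
   used by add_insn_mem_dependence above and free_pending_lists before
   it.  Cells popped from a free list are reused instead of being
   reallocated.  Plain C, with a hypothetical `cell' type standing in
   for INSN_LIST/EXPR_LIST rtx's:  */
#if 0
#include <stdlib.h>

struct cell { int payload; struct cell *next; };
static struct cell *free_cells;

static struct cell *
alloc_cell ()
{
  struct cell *c;

  if (free_cells)
    {
      c = free_cells;		/* reuse a cell from the free list */
      free_cells = c->next;
    }
  else
    c = (struct cell *) malloc (sizeof (struct cell));
  return c;
}

static void
release_cell (c)
     struct cell *c;
{
  c->next = free_cells;		/* push back for later reuse */
  free_cells = c;
}
#endif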
\f
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  */

static void
flush_pending_lists (insn)
     rtx insn;
{
  rtx link;

  while (pending_read_insns)
    {
      add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);

      link = pending_read_insns;
      pending_read_insns = XEXP (pending_read_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_read_mems;
      pending_read_mems = XEXP (pending_read_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  while (pending_write_insns)
    {
      add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);

      link = pending_write_insns;
      pending_write_insns = XEXP (pending_write_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_write_mems;
      pending_write_mems = XEXP (pending_write_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  pending_lists_length = 0;

  if (last_pending_memory_flush)
    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

  last_pending_memory_flush = insn;
}

/* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
   by the write to the destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (x, insn)
     rtx x;
     rtx insn;
{
  register int regno;
  register rtx dest = SET_DEST (x);

  if (dest == 0)
    return;

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (XEXP (dest, 1), insn);
	  sched_analyze_2 (XEXP (dest, 2), insn);
	}
      dest = SUBREG_REG (dest);
    }

  if (GET_CODE (dest) == REG)
    {
      register int offset, bit, i;

      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
	 If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
	  while (--i >= 0)
	    {
	      rtx u;

	      for (u = reg_last_uses[regno + i]; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	      reg_last_uses[regno + i] = 0;
	      if (reg_last_sets[regno + i])
		add_dependence (insn, reg_last_sets[regno + i],
				REG_DEP_OUTPUT);
	      reg_last_sets[regno + i] = insn;
	      if ((call_used_regs[regno + i] || global_regs[regno + i])
		  && last_function_call)
		/* Function calls clobber all call_used regs.  */
		add_dependence (insn, last_function_call, REG_DEP_ANTI);
	    }
	}
      else
	{
	  rtx u;

	  for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	  reg_last_uses[regno] = 0;
	  if (reg_last_sets[regno])
	    add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
	  reg_last_sets[regno] = insn;

	  /* Don't let it cross a call after scheduling if it doesn't
	     already cross one.  */
	  if (reg_n_calls_crossed[regno] == 0 && last_function_call)
	    add_dependence (insn, last_function_call, REG_DEP_ANTI);
	}
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */

      if (pending_lists_length > 32)
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  The number 32 was chosen because it
	     seems like a reasonable number.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the number 32.  */
	  flush_pending_lists (insn);
	}
      else
	{
	  rtx pending, pending_mem;

	  pending = pending_read_insns;
	  pending_mem = pending_read_mems;
	  while (pending)
	    {
	      /* If a dependency already exists, don't create a new one.  */
	      if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
		if (anti_dependence (XEXP (pending_mem, 0), dest))
		  add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = pending_write_insns;
	  pending_mem = pending_write_mems;
	  while (pending)
	    {
	      /* If a dependency already exists, don't create a new one.  */
	      if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
		if (output_dependence (XEXP (pending_mem, 0), dest))
		  add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  if (last_pending_memory_flush)
	    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

	  add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
				   insn, dest);
	}
      sched_analyze_2 (XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (SET_SRC (x), insn);
  else if (GET_CODE (x) != CLOBBER)
    sched_analyze_2 (dest, insn);
}

/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (x, insn)
     rtx x;
     rtx insn;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  /* Get rid of the easy cases first.  */

  /* Ignore constants.  Note that we must handle CONST_DOUBLE here
     because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
     this does not mean that this insn is using cc0.  */
  if (code == CONST_INT || code == CONST_DOUBLE || code == SYMBOL_REF
      || code == CONST || code == LABEL_REF)
    return;

#ifdef HAVE_cc0
  else if (code == CC0)
    {
      rtx link;

      /* User of CC0 depends on immediately preceding insn.
	 All notes are removed from the list of insns to schedule before we
	 reach here, so the previous insn must be the setter of cc0.  */
      if (GET_CODE (PREV_INSN (insn)) != INSN)
	abort ();
      SCHED_GROUP_P (insn) = 1;

      /* Make a copy of all dependencies on PREV_INSN, and add to this insn.
	 This is so that all the dependencies will apply to the group.  */

      for (link = LOG_LINKS (PREV_INSN (insn)); link; link = XEXP (link, 1))
	add_dependence (insn, XEXP (link, 0), GET_MODE (link));

      return;
    }
#endif

  else if (code == REG)
    {
      int regno = REGNO (x);
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int i;

	  i = HARD_REGNO_NREGS (regno, GET_MODE (x));
	  while (--i >= 0)
	    {
	      reg_last_uses[regno + i]
		= gen_rtx (INSN_LIST, VOIDmode,
			   insn, reg_last_uses[regno + i]);
	      if (reg_last_sets[regno + i])
		add_dependence (insn, reg_last_sets[regno + i], 0);
	      if ((call_used_regs[regno + i] || global_regs[regno + i])
		  && last_function_call)
		/* Function calls clobber all call_used regs.  */
		add_dependence (insn, last_function_call, REG_DEP_ANTI);
	    }
	}
      else
	{
	  reg_last_uses[regno]
	    = gen_rtx (INSN_LIST, VOIDmode, insn, reg_last_uses[regno]);
	  if (reg_last_sets[regno])
	    add_dependence (insn, reg_last_sets[regno], 0);

	  /* If the register does not already cross any calls, then add this
	     insn to the sched_before_next_call list so that it will still
	     not cross calls after scheduling.  */
	  if (reg_n_calls_crossed[regno] == 0)
	    add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
	}
      return;
    }

  /* The interesting case.  */
  else if (code == MEM)
    {
      /* Reading memory.  */

      /* Don't create a dependence for memory references which are known to
	 be unchanging, such as constant pool accesses.  These will never
	 conflict with any other memory access.  */
      if (RTX_UNCHANGING_P (x) == 0)
	{
	  rtx pending, pending_mem;

	  pending = pending_read_insns;
	  pending_mem = pending_read_mems;
	  while (pending)
	    {
	      /* If a dependency already exists, don't create a new one.  */
	      if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
		if (read_dependence (XEXP (pending_mem, 0), x))
		  add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = pending_write_insns;
	  pending_mem = pending_write_mems;
	  while (pending)
	    {
	      /* If a dependency already exists, don't create a new one.  */
	      if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
		if (true_dependence (XEXP (pending_mem, 0), x))
		  add_dependence (insn, XEXP (pending, 0), 0);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }
	  if (last_pending_memory_flush)
	    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

	  /* Always add these dependencies to pending_reads, since
	     this insn may be followed by a write.  */
	  add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
				   insn, x);
	}
      /* Take advantage of tail recursion here.  */
      sched_analyze_2 (XEXP (x, 0), insn);
      return;
    }

  else if (code == ASM_OPERANDS || code == ASM_INPUT
	   || code == UNSPEC_VOLATILE)
    {
      rtx u;

      /* Traditional and volatile asm instructions must be considered to use
	 and clobber all hard registers and all of memory.  So must
	 UNSPEC_VOLATILE operations.  */
      if ((code == ASM_OPERANDS && MEM_VOLATILE_P (x)) || code == ASM_INPUT
	  || code == UNSPEC_VOLATILE)
	{
	  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	    {
	      for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
		if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
		  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	      reg_last_uses[i] = 0;
	      if (reg_last_sets[i]
		  && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
		add_dependence (insn, reg_last_sets[i], 0);
	      reg_last_sets[i] = insn;
	    }

	  flush_pending_lists (insn);
	}

      /* For all ASM_OPERANDS, we must traverse the vector of input
	 operands.  We can not just fall through here since then we
	 would be confused by the ASM_INPUT rtx inside ASM_OPERANDS,
	 which does not indicate a traditional asm, unlike its normal
	 usage.  */

      if (code == ASM_OPERANDS)
	{
	  for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
	    sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
	  return;
	}
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	sched_analyze_2 (XEXP (x, i), insn);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  sched_analyze_2 (XVECEXP (x, i, j), insn);
    }
}

/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (x, insn)
     rtx x, insn;
{
  register RTX_CODE code = GET_CODE (x);
  rtx link;

  if (code == SET || code == CLOBBER)
    sched_analyze_1 (x, insn);
  else if (code == PARALLEL)
    {
      register int i;
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	{
	  code = GET_CODE (XVECEXP (x, 0, i));
	  if (code == SET || code == CLOBBER)
	    sched_analyze_1 (XVECEXP (x, 0, i), insn);
	  else
	    sched_analyze_2 (XVECEXP (x, 0, i), insn);
	}
    }
  else
    sched_analyze_2 (x, insn);

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    {
      /* Any REG_INC note is a SET of the register indicated.  */
      if (REG_NOTE_KIND (link) == REG_INC)
	{
	  rtx dest = XEXP (link, 0);
	  int regno = REGNO (dest);
	  int i;

	  /* A hard reg in a wide mode may really be multiple registers.
	     If so, mark all of them just like the first.  */
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
	      while (--i >= 0)
		{
		  rtx u;

		  for (u = reg_last_uses[regno + i]; u; u = XEXP (u, 1))
		    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
		  reg_last_uses[regno + i] = 0;
		  if (reg_last_sets[regno + i])
		    add_dependence (insn, reg_last_sets[regno + i],
				    REG_DEP_OUTPUT);
		  reg_last_sets[regno + i] = insn;
		  if ((call_used_regs[regno + i] || global_regs[regno + i])
		      && last_function_call)
		    /* Function calls clobber all call_used regs.  */
		    add_dependence (insn, last_function_call, REG_DEP_ANTI);
		}
	    }
	  else
	    {
	      rtx u;

	      for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	      reg_last_uses[regno] = 0;
	      if (reg_last_sets[regno])
		add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
	      reg_last_sets[regno] = insn;

	      /* Don't let it cross a call after scheduling if it doesn't
		 already cross one.  */
	      if (reg_n_calls_crossed[regno] == 0 && last_function_call)
		add_dependence (insn, last_function_call, 0);
	    }
	}
    }

  /* Handle function calls.  */
  if (GET_CODE (insn) == CALL_INSN)
    {
      rtx dep_insn;
      rtx prev_dep_insn;

      /* When scheduling instructions, we make sure calls don't lose
	 their accompanying USE insns by marking the group with
	 SCHED_GROUP_P, so that each insn is scheduled together with
	 the one before it.  */

      prev_dep_insn = insn;
      dep_insn = PREV_INSN (insn);
      while (GET_CODE (dep_insn) == INSN
	     && GET_CODE (PATTERN (dep_insn)) == USE)
	{
	  SCHED_GROUP_P (prev_dep_insn) = 1;

	  /* Make a copy of all dependencies on dep_insn, and add to insn.
	     This is so that all of the dependencies will apply to the
	     group.  */

	  for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
	    add_dependence (insn, XEXP (link, 0), GET_MODE (link));

	  prev_dep_insn = dep_insn;
	  dep_insn = PREV_INSN (dep_insn);
	}
    }
}

/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

static int
sched_analyze (head, tail)
     rtx head, tail;
{
  register rtx insn;
  register int n_insns = 0;
  register rtx u;
  register int luid = 0;

  for (insn = head; ; insn = NEXT_INSN (insn))
    {
      INSN_LUID (insn) = luid++;

      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
	{
	  sched_analyze_insn (PATTERN (insn), insn);
	  n_insns += 1;
	}
      else if (GET_CODE (insn) == CALL_INSN)
	{
	  rtx dest = 0;
	  rtx x;
	  register int i;

	  /* Any instruction using a hard register which may get clobbered
	     by a call needs to be marked as dependent on this call.
	     This prevents a use of a hard return reg from being moved
	     past a void call (i.e. it does not explicitly set the hard
	     return reg).  */

	  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	    if (call_used_regs[i] || global_regs[i])
	      {
		for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
		  if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
		    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
		reg_last_uses[i] = 0;
		if (reg_last_sets[i]
		    && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
		  add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
		reg_last_sets[i] = insn;
		/* Insn, being a CALL_INSN, magically depends on
		   `last_function_call' already.  */
	      }

	  /* For each insn which shouldn't cross a call, add a dependence
	     between that insn and this call insn.  */
	  x = LOG_LINKS (sched_before_next_call);
	  while (x)
	    {
	      add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
	      x = XEXP (x, 1);
	    }
	  LOG_LINKS (sched_before_next_call) = 0;

	  sched_analyze_insn (PATTERN (insn), insn);

	  /* We don't need to flush memory for a function call which does
	     not involve memory.  */
	  if (! CONST_CALL_P (insn))
	    {
	      /* In the absence of interprocedural alias analysis,
		 we must flush all pending reads and writes, and
		 start new dependencies starting from here.  */
	      flush_pending_lists (insn);
	    }

	  /* Depend this function call (actually, the user of this
	     function call) on all hard register clobberage.  */
	  last_function_call = insn;
	  n_insns += 1;
	}

      if (insn == tail)
	return n_insns;
    }
}
\f
/* Called when we see a set of a register.  If death is true, then we are
   scanning backwards.  Mark that register as unborn.  If nobody says
   otherwise, that is how things will remain.  If death is false, then we
   are scanning forwards.  Mark that register as being born.  */

static void
sched_note_set (b, x, death)
     int b;
     rtx x;
     int death;
{
  register int regno, j;
  register rtx reg = SET_DEST (x);
  int subreg_p = 0;

  if (reg == 0)
    return;

  while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
	 || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
    {
      /* Must treat modification of just one hardware register of a multi-reg
	 value or just a byte field of a register exactly the same way that
	 mark_set_1 in flow.c does.  */
      if (GET_CODE (reg) == ZERO_EXTRACT
	  || GET_CODE (reg) == SIGN_EXTRACT
	  || (GET_CODE (reg) == SUBREG
	      && REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg)))
	subreg_p = 1;

      reg = SUBREG_REG (reg);
    }

  if (GET_CODE (reg) != REG)
    return;

  /* Global registers are always live, so the code below does not apply
     to them.  */

  regno = REGNO (reg);
  if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
    {
      register int offset = regno / REGSET_ELT_BITS;
      register int bit = 1 << (regno % REGSET_ELT_BITS);

      if (death)
	{
	  /* If we only set part of the register, then this set does not
	     kill it.  */
	  if (subreg_p)
	    return;

	  /* Try killing this register.  */
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
	      while (--j >= 0)
		{
		  offset = (regno + j) / REGSET_ELT_BITS;
		  bit = 1 << ((regno + j) % REGSET_ELT_BITS);

		  bb_live_regs[offset] &= ~bit;
		  bb_dead_regs[offset] |= bit;
		}
	    }
	  else
	    {
	      bb_live_regs[offset] &= ~bit;
	      bb_dead_regs[offset] |= bit;
	    }
	}
      else
	{
	  /* Make the register live again.  */
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
	      while (--j >= 0)
		{
		  offset = (regno + j) / REGSET_ELT_BITS;
		  bit = 1 << ((regno + j) % REGSET_ELT_BITS);

		  bb_live_regs[offset] |= bit;
		  bb_dead_regs[offset] &= ~bit;
		}
	    }
	  else
	    {
	      bb_live_regs[offset] |= bit;
	      bb_dead_regs[offset] &= ~bit;
	    }
	}
    }
}
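
/* Illustrative sketch (not part of this pass): the regset indexing used
   above.  A regset is an array of words; register REGNO lives at word
   REGNO / REGSET_ELT_BITS, bit REGNO % REGSET_ELT_BITS.  A minimal
   stand-alone version, assuming 32-bit words:  */
#if 0
#define DEMO_ELT_BITS 32

static void
demo_set_live (regset, regno)
     unsigned *regset;
     int regno;
{
  regset[regno / DEMO_ELT_BITS] |= (unsigned) 1 << (regno % DEMO_ELT_BITS);
}

static int
demo_is_live (regset, regno)
     unsigned *regset;
     int regno;
{
  return (regset[regno / DEMO_ELT_BITS] >> (regno % DEMO_ELT_BITS)) & 1;
}
#endif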
\f
/* Macros and functions for keeping the priority queue sorted, and
   dealing with queueing and unqueueing of instructions.  */

#define SCHED_SORT(READY, NEW_READY, OLD_READY) \
  do { if ((NEW_READY) - (OLD_READY) == 1)				\
	 swap_sort (READY, NEW_READY);					\
       else if ((NEW_READY) - (OLD_READY) > 1)				\
	 qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); }	\
  while (0)

/* Returns a positive value if y is preferred; returns a negative value if
   x is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (x, y)
     rtx *x, *y;
{
  rtx tmp = *y;
  rtx tmp2 = *x;
  rtx tmp_dep, tmp2_dep;
  int tmp_class, tmp2_class;
  int value;

  /* Choose the instruction with the highest priority, if different.  */
  if (value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2))
    return value;

  if (last_scheduled_insn)
    {
      /* Classify the instructions into three classes:
	 1) Data dependent on last scheduled insn.
	 2) Anti/Output dependent on last scheduled insn.
	 3) Independent of last scheduled insn, or has latency of one.
	 Choose the insn from the highest numbered class if different.  */
      tmp_dep = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
      if (tmp_dep == 0 || insn_cost (tmp) == 1)
	tmp_class = 3;
      else if (REG_NOTE_KIND (tmp_dep) == 0)
	tmp_class = 1;
      else
	tmp_class = 2;

      tmp2_dep = find_insn_list (tmp2, LOG_LINKS (last_scheduled_insn));
      if (tmp2_dep == 0 || insn_cost (tmp2) == 1)
	tmp2_class = 3;
      else if (REG_NOTE_KIND (tmp2_dep) == 0)
	tmp2_class = 1;
      else
	tmp2_class = 2;

      if (value = tmp_class - tmp2_class)
	return value;
    }

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}

/* Resort the array A, in which only the element at index N may be out of
   order.  */

__inline static void
swap_sort (a, n)
     rtx *a;
     int n;
{
  rtx insn = a[n-1];
  int i = n-2;

  while (i >= 0 && rank_for_schedule (a+i, &insn) >= 0)
    {
      a[i+1] = a[i];
      i -= 1;
    }
  a[i+1] = insn;
}

static int max_priority;

/* Add INSN to the insn queue so that it fires at least N_CYCLES
   before the currently executing insn.  */

__inline static void
queue_insn (insn, n_cycles)
     rtx insn;
     int n_cycles;
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  NEXT_INSN (insn) = insn_queue[next_q];
  insn_queue[next_q] = insn;
  q_size += 1;
}

/* Return nonzero if PAT is the pattern of an insn which makes a
   register live.  */

__inline static int
birthing_insn_p (pat)
     rtx pat;
{
  int j;

  if (reload_completed == 1)
    return 0;

  if (GET_CODE (pat) == SET
      && GET_CODE (SET_DEST (pat)) == REG)
    {
      rtx dest = SET_DEST (pat);
      int i = REGNO (dest);
      int offset = i / REGSET_ELT_BITS;
      int bit = 1 << (i % REGSET_ELT_BITS);

      /* It would be more accurate to use refers_to_regno_p or
	 reg_mentioned_p to determine when the dest is not live before this
	 insn.  */

      if (bb_live_regs[offset] & bit)
	return (reg_n_sets[i] == 1);

      return 0;
    }
  if (GET_CODE (pat) == PARALLEL)
    {
      for (j = 0; j < XVECLEN (pat, 0); j++)
	if (birthing_insn_p (XVECEXP (pat, 0, j)))
	  return 1;
    }
  return 0;
}

/* If PREV is an insn which is immediately ready to execute, return 1,
   otherwise return 0.  We may adjust its priority if that will help shorten
   register lifetimes.  */

static int
launch_link (prev)
     rtx prev;
{
  rtx pat = PATTERN (prev);
  rtx note;
  /* MAX of (a) number of cycles needed by prev
	 (b) number of cycles before needed resources are free.  */
  int n_cycles = insn_cost (prev);
  int n_deaths = 0;

  /* Trying to shorten register lives after reload has completed
     is useless and wrong.  It gives inaccurate schedules.  */
  if (reload_completed == 0)
    {
      for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD)
	  n_deaths += 1;

      /* Defer scheduling insns which kill registers, since that
	 shortens register lives.  Prefer scheduling insns which
	 make registers live for the same reason.  */
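      /* Each right shift below halves the remaining priority, so the
	 more registers PREV kills, the further its scheduling is
	 deferred.  */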
1813 switch (n_deaths)
1814 {
1815 default:
1816 INSN_PRIORITY (prev) >>= 3;
1817 break;
1818 case 3:
1819 INSN_PRIORITY (prev) >>= 2;
1820 break;
1821 case 2:
1822 case 1:
1823 INSN_PRIORITY (prev) >>= 1;
1824 break;
1825 case 0:
1826 if (birthing_insn_p (pat))
1827 {
1828 int max = max_priority;
1829
1830 if (max > INSN_PRIORITY (prev))
1831 INSN_PRIORITY (prev) = max;
1832 }
1833 break;
1834 }
1835 }
1836
1837 if (n_cycles <= 1)
1838 return 1;
1839 queue_insn (prev, n_cycles);
1840 return 0;
1841}
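
/* A worked example of the scaling above (the shift counts are
   heuristic tuning knobs, not derived constants): an insn with
   priority 32 keeps 32 if it kills no registers, and may even be
   raised to the current max_priority if it births one; it drops to
   16 if it kills one or two registers, to 8 for three, and to 4
   for four or more.  */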
1842
1843/* INSN is the "currently executing insn". Launch each insn which was
1844 waiting on INSN (in the backwards dataflow sense). READY is a
1845 vector of insns which are ready to fire. N_READY is the number of
1846 elements in READY. */
1847
1848static int
1849launch_links (insn, ready, n_ready)
1850 rtx insn;
1851 rtx *ready;
1852 int n_ready;
1853{
1854 rtx link;
1855 int new_ready = n_ready;
1856
1857 if (LOG_LINKS (insn) == 0)
1858 return n_ready;
1859
1860 /* This is used by the function launch_link above. */
1861 if (n_ready > 0)
1862 max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
1863 else
1864 max_priority = INSN_PRIORITY (insn);
1865
1866 for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
1867 {
1868 rtx prev = XEXP (link, 0);
1869
1870 if ((INSN_REF_COUNT (prev) -= 1) == 0 && launch_link (prev))
1871 ready[new_ready++] = prev;
1872 }
1873
1874 return new_ready;
1875}
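
/* INSN_REF_COUNT counts how many still-unscheduled insns depend on
   a given insn.  Since we schedule from the end of the block
   backwards, the decrement above may release PREV only once its
   last dependent (INSN here) has been placed; launch_link then
   decides between the ready list and the stall queue.  */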
1876
1877/* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
1878 dead_notes list. */
1879
1880static void
1881create_reg_dead_note (reg, insn)
1882 rtx reg, insn;
1883{
1884 rtx link = dead_notes;
1885
1886 if (link == 0)
1887 /* In theory, we should not end up with more REG_DEAD reg notes than we
1888 started with. In practice, this can occur as the result of bugs in
1889 flow, combine and/or sched. */
1890 {
1891#if 1
1892 abort ();
1893#else
1894 link = rtx_alloc (EXPR_LIST);
1895 PUT_REG_NOTE_KIND (link, REG_DEAD);
1896#endif
1897 }
1898 else
1899 dead_notes = XEXP (dead_notes, 1);
1900
1901 XEXP (link, 0) = reg;
1902 XEXP (link, 1) = REG_NOTES (insn);
1903 REG_NOTES (insn) = link;
1904}
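
/* DEAD_NOTES is a free list threaded through XEXP (note, 1): the
   REG_DEAD notes stripped from insns earlier in schedule_block are
   parked there and handed back out here one at a time.  The total
   number of notes is thus conserved, which is what allows
   schedule_block to abort if any remain unconsumed at the end.  */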
1905
1906/* Subroutine of attach_deaths_insn--handles the recursive search
1907   through X.  If SET_P is true, then X is being modified by the insn.  */
1908
1909static void
1910attach_deaths (x, insn, set_p)
1911 rtx x;
1912 rtx insn;
1913 int set_p;
1914{
1915 register int i;
1916 register int j;
1917 register enum rtx_code code;
1918 register char *fmt;
1919
1920 if (x == 0)
1921 return;
1922
1923 code = GET_CODE (x);
1924
1925 switch (code)
1926 {
1927 case CONST_INT:
1928 case CONST_DOUBLE:
1929 case LABEL_REF:
1930 case SYMBOL_REF:
1931 case CONST:
1932 case CODE_LABEL:
1933 case PC:
1934 case CC0:
1935 /* Get rid of the easy cases first. */
1936 return;
1937
1938 case REG:
1939 {
1940 /* If the register dies in this insn, queue that note, and mark
1941 this register as needing to die. */
1942 /* This code is very similar to mark_used_1 (if set_p is false)
1943 and mark_set_1 (if set_p is true) in flow.c. */
1944
1945 register int regno = REGNO (x);
1946 register int offset = regno / REGSET_ELT_BITS;
1947 register int bit = 1 << (regno % REGSET_ELT_BITS);
1948 int all_needed = (old_live_regs[offset] & bit);
1949 int some_needed = (old_live_regs[offset] & bit);
1950
1951 if (set_p)
1952 return;
1953
1954 if (regno < FIRST_PSEUDO_REGISTER)
1955 {
1956 int n;
1957
1958 n = HARD_REGNO_NREGS (regno, GET_MODE (x));
1959 while (--n > 0)
1960 {
1961 some_needed |= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
1962 & 1 << ((regno + n) % REGSET_ELT_BITS));
1963 all_needed &= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
1964 & 1 << ((regno + n) % REGSET_ELT_BITS));
1965 }
1966 }
1967
1968 /* If it wasn't live before we started, then add a REG_DEAD note.
1969 We must check the previous lifetime info not the current info,
1970 because we may have to execute this code several times, e.g.
1971 once for a clobber (which doesn't add a note) and later
1972 for a use (which does add a note).
1973
1974 Always make the register live. We must do this even if it was
1975 live before, because this may be an insn which sets and uses
1976 the same register, in which case the register has already been
1977 killed, so we must make it live again.
1978
1979 Global registers are always live, and should never have a REG_DEAD
1980 note added for them, so none of the code below applies to them. */
1981
1982 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
1983 {
1984 /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
1985 STACK_POINTER_REGNUM, since these are always considered to be
1986 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
1987 if (regno != FRAME_POINTER_REGNUM
1988#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
1989 && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
1990#endif
1991 && regno != STACK_POINTER_REGNUM)
1992 {
1993 if (! all_needed && ! dead_or_set_p (insn, x))
1994 {
1995 /* If none of the words in X is needed, make a REG_DEAD
1996 note. Otherwise, we must make partial REG_DEAD
1997 notes. */
1998 if (! some_needed)
1999 create_reg_dead_note (x, insn);
2000 else
2001 {
2002 int i;
2003
2004 /* Don't make a REG_DEAD note for a part of a
2005 register that is set in the insn. */
2006 for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
2007 i >= 0; i--)
2008 if ((old_live_regs[(regno + i) / REGSET_ELT_BITS]
2009	 & 1 << ((regno + i) % REGSET_ELT_BITS)) == 0
2010 && ! dead_or_set_regno_p (insn, regno + i))
2011 create_reg_dead_note (gen_rtx (REG, word_mode,
2012 regno + i),
2013 insn);
2014 }
2015 }
2016 }
2017
2018 if (regno < FIRST_PSEUDO_REGISTER)
2019 {
2020 int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
2021 while (--j >= 0)
2022 {
2023 offset = (regno + j) / REGSET_ELT_BITS;
2024 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
2025
2026 bb_dead_regs[offset] &= ~bit;
2027 bb_live_regs[offset] |= bit;
2028 }
2029 }
2030 else
2031 {
2032 bb_dead_regs[offset] &= ~bit;
2033 bb_live_regs[offset] |= bit;
2034 }
2035 }
2036 return;
2037 }
2038
2039 case MEM:
2040 /* Handle tail-recursive case. */
2041 attach_deaths (XEXP (x, 0), insn, 0);
2042 return;
2043
2044 case SUBREG:
2045 case STRICT_LOW_PART:
2046 /* These two cases preserve the value of SET_P, so handle them
2047 separately. */
2048 attach_deaths (XEXP (x, 0), insn, set_p);
2049 return;
2050
2051 case ZERO_EXTRACT:
2052 case SIGN_EXTRACT:
2053 /* This case preserves the value of SET_P for the first operand, but
2054 clears it for the other two. */
2055 attach_deaths (XEXP (x, 0), insn, set_p);
2056 attach_deaths (XEXP (x, 1), insn, 0);
2057 attach_deaths (XEXP (x, 2), insn, 0);
2058 return;
2059
2060 default:
2061 /* Other cases: walk the insn. */
2062 fmt = GET_RTX_FORMAT (code);
2063 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2064 {
2065 if (fmt[i] == 'e')
2066 attach_deaths (XEXP (x, i), insn, 0);
2067 else if (fmt[i] == 'E')
2068 for (j = 0; j < XVECLEN (x, i); j++)
2069 attach_deaths (XVECEXP (x, i, j), insn, 0);
2070 }
2071 }
2072}
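
/* A multi-word sketch of the REG case above, with illustrative
   (not target-specific) register numbers: for a DImode value in the
   hard register pair r4/r5 where only r5 was live before this insn,
   SOME_NEEDED ends up nonzero while ALL_NEEDED is zero, so the
   partial-note loop creates a REG_DEAD note for r4 alone rather
   than for the whole pair.  */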
2073
2074/* After INSN has executed, add register death notes for each register
2075 that is dead after INSN. */
2076
2077static void
2078attach_deaths_insn (insn)
2079 rtx insn;
2080{
2081 rtx x = PATTERN (insn);
2082 register RTX_CODE code = GET_CODE (x);
2083
2084 if (code == SET)
2085 {
2086 attach_deaths (SET_SRC (x), insn, 0);
2087
2088 /* A register might die here even if it is the destination, e.g.
2089 it is the target of a volatile read and is otherwise unused.
2090 Hence we must always call attach_deaths for the SET_DEST. */
2091 attach_deaths (SET_DEST (x), insn, 1);
2092 }
2093 else if (code == PARALLEL)
2094 {
2095 register int i;
2096 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2097 {
2098 code = GET_CODE (XVECEXP (x, 0, i));
2099 if (code == SET)
2100 {
2101 attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
2102
2103 attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
2104 }
2105 else if (code == CLOBBER)
2106 attach_deaths (XEXP (XVECEXP (x, 0, i), 0), insn, 1);
2107 else
2108 attach_deaths (XVECEXP (x, 0, i), insn, 0);
2109 }
2110 }
2111 else if (code == CLOBBER)
2112 attach_deaths (XEXP (x, 0), insn, 1);
2113 else
2114 attach_deaths (x, insn, 0);
2115}
2116
2117/* Delete notes beginning with INSN and maybe put them in the chain
2118 of notes ended by NOTE_LIST.
2119 Returns the insn following the notes. */
2120
2121static rtx
2122unlink_notes (insn, tail)
2123 rtx insn, tail;
2124{
2125 rtx prev = PREV_INSN (insn);
2126
2127 while (insn != tail && GET_CODE (insn) == NOTE)
2128 {
2129 rtx next = NEXT_INSN (insn);
2130 /* Delete the note from its current position. */
2131 if (prev)
2132 NEXT_INSN (prev) = next;
2133 if (next)
2134 PREV_INSN (next) = prev;
2135
2136 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
2137 /* Record line-number notes so they can be reused. */
2138 LINE_NOTE (insn) = insn;
2139 else
2140 {
2141 /* Insert the note at the end of the notes list. */
2142 PREV_INSN (insn) = note_list;
2143 if (note_list)
2144 NEXT_INSN (note_list) = insn;
2145 note_list = insn;
2146 }
2147
2148 insn = next;
2149 }
2150 return insn;
2151}
2152
2153/* Data structure for keeping track of register information
2154 during that register's life. */
2155
2156struct sometimes
2157{
2158 short offset; short bit;
2159 short live_length; short calls_crossed;
2160};
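
/* For example (a sketch, assuming REGSET_ELT_BITS is 32): a pseudo
   register numbered 70 that stays live across 5 insns and 1 call in
   this block is tracked as { offset 2, bit 6, live_length 5,
   calls_crossed 1 }, since 70 == 2 * 32 + 6.  */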
2161
2162/* Constructor for `sometimes' data structure. */
2163
2164static int
2165new_sometimes_live (regs_sometimes_live, offset, bit, sometimes_max)
2166 struct sometimes *regs_sometimes_live;
2167 int offset, bit;
2168 int sometimes_max;
2169{
2170 register struct sometimes *p;
2171 register int regno = offset * REGSET_ELT_BITS + bit;
2173
2174 /* There should never be a register greater than max_regno here. If there
2175 is, it means that a define_split has created a new pseudo reg. This
2176 is not allowed, since there will not be flow info available for any
2177 new register, so catch the error here. */
2178 if (regno >= max_regno)
2179 abort ();
2180
2181 p = &regs_sometimes_live[sometimes_max];
2182 p->offset = offset;
2183 p->bit = bit;
2184 p->live_length = 0;
2185 p->calls_crossed = 0;
2186 sometimes_max++;
2187 return sometimes_max;
2188}
2189
2190/* Record the accumulated live length and calls-crossed counts of
2191   all regs we are still tracking into the sched_reg arrays.  */
2192
2193static void
2194finish_sometimes_live (regs_sometimes_live, sometimes_max)
2195 struct sometimes *regs_sometimes_live;
2196 int sometimes_max;
2197{
2198 int i;
2199
2200 for (i = 0; i < sometimes_max; i++)
2201 {
2202 register struct sometimes *p = &regs_sometimes_live[i];
2203 int regno;
2204
2205 regno = p->offset * REGSET_ELT_BITS + p->bit;
2206
2207 sched_reg_live_length[regno] += p->live_length;
2208 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2209 }
2210}
2211
2212/* Use modified list scheduling to rearrange insns in basic block
2213 B. FILE, if nonzero, is where we dump interesting output about
2214 this pass. */
2215
2216static void
2217schedule_block (b, file)
2218 int b;
2219 FILE *file;
2220{
2221 rtx insn, last;
2222 rtx last_note = 0;
2223 rtx *ready, link;
2224 int i, j, n_ready = 0, new_ready, n_insns = 0;
2225 int sched_n_insns = 0;
2226#define NEED_NOTHING 0
2227#define NEED_HEAD 1
2228#define NEED_TAIL 2
2229 int new_needs;
2230
2231 /* HEAD and TAIL delimit the region being scheduled. */
2232 rtx head = basic_block_head[b];
2233 rtx tail = basic_block_end[b];
2234 /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
2235 being scheduled. When the insns have been ordered,
2236 these insns delimit where the new insns are to be
2237 spliced back into the insn chain. */
2238 rtx next_tail;
2239 rtx prev_head;
2240
2241 /* Keep life information accurate. */
2242 register struct sometimes *regs_sometimes_live;
2243 int sometimes_max;
2244
2245 if (file)
2246 fprintf (file, ";;\t -- basic block number %d from %d to %d --\n",
2247 b, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
2248
2249 i = max_reg_num ();
2250 reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
2251 bzero (reg_last_uses, i * sizeof (rtx));
2252 reg_last_sets = (rtx *) alloca (i * sizeof (rtx));
2253 bzero (reg_last_sets, i * sizeof (rtx));
2254
2255 /* Remove certain insns at the beginning from scheduling,
2256 by advancing HEAD. */
2257
2258 /* At the start of a function, before reload has run, don't delay getting
2259 parameters from hard registers into pseudo registers. */
2260 if (reload_completed == 0 && b == 0)
2261 {
2262 while (head != tail
2263 && GET_CODE (head) == NOTE
2264 && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
2265 head = NEXT_INSN (head);
2266 while (head != tail
2267 && GET_CODE (head) == INSN
2268 && GET_CODE (PATTERN (head)) == SET)
2269 {
2270 rtx src = SET_SRC (PATTERN (head));
2271 while (GET_CODE (src) == SUBREG
2272 || GET_CODE (src) == SIGN_EXTEND
2273 || GET_CODE (src) == ZERO_EXTEND
2274 || GET_CODE (src) == SIGN_EXTRACT
2275 || GET_CODE (src) == ZERO_EXTRACT)
2276 src = XEXP (src, 0);
2277 if (GET_CODE (src) != REG
2278 || REGNO (src) >= FIRST_PSEUDO_REGISTER)
2279 break;
2280 /* Keep this insn from ever being scheduled. */
2281 INSN_REF_COUNT (head) = 1;
2282 head = NEXT_INSN (head);
2283 }
2284 }
2285
2286 /* Don't include any notes or labels at the beginning of the
2287 basic block, or notes at the ends of basic blocks. */
2288 while (head != tail)
2289 {
2290 if (GET_CODE (head) == NOTE)
2291 head = NEXT_INSN (head);
2292 else if (GET_CODE (tail) == NOTE)
2293 tail = PREV_INSN (tail);
2294 else if (GET_CODE (head) == CODE_LABEL)
2295 head = NEXT_INSN (head);
2296 else break;
2297 }
2298 /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
2299 to schedule this block. */
2300 if (head == tail
2301 && (GET_CODE (head) == NOTE || GET_CODE (head) == CODE_LABEL))
2302 return;
2303
2304#if 0
2305 /* This short-cut doesn't work. It does not count call insns crossed by
2306 registers in reg_sometimes_live. It does not mark these registers as
2307 dead if they die in this block. It does not mark these registers live
2308 (or create new reg_sometimes_live entries if necessary) if they are born
2309 in this block.
2310
2311 The easy solution is to just always schedule a block. This block only
2312 has one insn, so this won't slow down this pass by much. */
2313
2314 if (head == tail)
2315 return;
2316#endif
2317
2318 /* Exclude certain insns at the end of the basic block by advancing TAIL. */
2319  /* This isn't correct.  Instead of advancing TAIL, we should assign very
2320 high priorities to these insns to guarantee that they get scheduled last.
2321 If these insns are ignored, as is currently done, the register life info
2322 may be incorrectly computed. */
2323 if (GET_CODE (tail) == INSN
2324 && GET_CODE (PATTERN (tail)) == USE
2325 && next_nonnote_insn (tail) == 0)
2326 {
2327 /* Don't try to reorder any USE insns at the end of a function.
2328 They must be last to ensure proper register allocation.
2329 Exclude them all from scheduling. */
2330 do
2331 {
2332 /* If we are down to one USE insn, then there are no insns to
2333 schedule. */
2334 if (head == tail)
2335 return;
2336
2337 tail = prev_nonnote_insn (tail);
2338 }
2339 while (GET_CODE (tail) == INSN
2340 && GET_CODE (PATTERN (tail)) == USE);
2341
2342#if 0
2343 /* This short-cut does not work. See comment above. */
2344 if (head == tail)
2345 return;
2346#endif
2347 }
2348 else if (GET_CODE (tail) == JUMP_INSN
2349 && SCHED_GROUP_P (tail) == 0
2350 && GET_CODE (PREV_INSN (tail)) == INSN
2351 && GET_CODE (PATTERN (PREV_INSN (tail))) == USE
2352 && REG_FUNCTION_VALUE_P (XEXP (PATTERN (PREV_INSN (tail)), 0)))
2353 {
2354 /* Don't let the setting of the function's return value register
2355 move from this jump. For the same reason we want to get the
2356 parameters into pseudo registers as quickly as possible, we
2357 want to set the function's return value register as late as
2358 possible. */
2359
2360 /* If this is the only insn in the block, then there is no need to
2361 schedule the block. */
2362 if (head == tail)
2363 return;
2364
2365 tail = PREV_INSN (tail);
2366 if (head == tail)
2367 return;
2368
2369 tail = prev_nonnote_insn (tail);
2370
2371#if 0
2372 /* This shortcut does not work. See comment above. */
2373 if (head == tail)
2374 return;
2375#endif
2376 }
2377
2378#ifdef HAVE_cc0
2379  /* This is probably wrong.  Instead of doing this, we should give this insn
2380 a very high priority to guarantee that it gets scheduled last. */
2381 /* Can not separate an insn that sets the condition code from one that
2382 uses it. So we must leave an insn that sets cc0 where it is. */
2383 if (sets_cc0_p (PATTERN (tail)))
2384 tail = PREV_INSN (tail);
2385#endif
2386
2387 /* Now HEAD through TAIL are the insns actually to be rearranged;
2388 Let PREV_HEAD and NEXT_TAIL enclose them. */
2389 prev_head = PREV_INSN (head);
2390 next_tail = NEXT_INSN (tail);
2391
2392 /* Initialize basic block data structures. */
2393 dead_notes = 0;
2394 pending_read_insns = 0;
2395 pending_read_mems = 0;
2396 pending_write_insns = 0;
2397 pending_write_mems = 0;
2398 pending_lists_length = 0;
2399 last_pending_memory_flush = 0;
2400 last_function_call = 0;
2401 last_scheduled_insn = 0;
2402
2403 LOG_LINKS (sched_before_next_call) = 0;
2404
2405 n_insns += sched_analyze (head, tail);
2406 if (n_insns == 0)
2407 {
2408 free_pending_lists ();
2409 return;
2410 }
2411
2412 /* Allocate vector to hold insns to be rearranged (except those
2413 insns which are controlled by an insn with SCHED_GROUP_P set).
2414 All these insns are included between ORIG_HEAD and ORIG_TAIL,
2415 as those variables ultimately are set up. */
2416 ready = (rtx *) alloca ((n_insns+1) * sizeof (rtx));
2417
2418 /* TAIL is now the last of the insns to be rearranged.
2419 Put those insns into the READY vector. */
2420 insn = tail;
2421
2422 /* If the last insn is a branch, force it to be the last insn after
2423     scheduling.  Also, don't try to reorder calls at the end of the basic
2424 block -- this will only lead to worse register allocation. */
2425 if (GET_CODE (tail) == CALL_INSN || GET_CODE (tail) == JUMP_INSN)
2426 {
2427 priority (tail);
2428 ready[n_ready++] = tail;
2429 INSN_PRIORITY (tail) = TAIL_PRIORITY;
2430 INSN_REF_COUNT (tail) = 0;
2431 insn = PREV_INSN (tail);
2432 }
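
  /* A note on direction: the main loop below emits insns back to
     front, linking each chosen insn in just before LAST, so whatever
     is picked first ends up last in the block.  TAIL_PRIORITY is
     chosen so that the sort always picks this branch or call first,
     which is what pins it at the end.  */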
2433
2434 /* Assign priorities to instructions. Also check whether they
2435 are in priority order already. If so then I will be nonnegative.
2436 We use this shortcut only before reloading. */
2437#if 0
2438 i = reload_completed ? DONE_PRIORITY : MAX_PRIORITY;
2439#endif
2440
2441 for (; insn != prev_head; insn = PREV_INSN (insn))
2442 {
2443 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2444 {
2445 priority (insn);
2446 if (INSN_REF_COUNT (insn) == 0)
2447 ready[n_ready++] = insn;
2448 if (SCHED_GROUP_P (insn))
2449 {
2450 while (SCHED_GROUP_P (insn))
2451 {
2452 insn = PREV_INSN (insn);
2453 while (GET_CODE (insn) == NOTE)
2454 insn = PREV_INSN (insn);
2455 priority (insn);
2456 }
2457 continue;
2458 }
2459#if 0
2460 if (i < 0)
2461 continue;
2462 if (INSN_PRIORITY (insn) < i)
2463 i = INSN_PRIORITY (insn);
2464 else if (INSN_PRIORITY (insn) > i)
2465 i = DONE_PRIORITY;
2466#endif
2467 }
2468 }
2469
2470#if 0
2471 /* This short-cut doesn't work. It does not count call insns crossed by
2472 registers in reg_sometimes_live. It does not mark these registers as
2473 dead if they die in this block. It does not mark these registers live
2474 (or create new reg_sometimes_live entries if necessary) if they are born
2475 in this block.
2476
2477 The easy solution is to just always schedule a block. These blocks tend
2478 to be very short, so this doesn't slow down this pass by much. */
2479
2480 /* If existing order is good, don't bother to reorder. */
2481 if (i != DONE_PRIORITY)
2482 {
2483 if (file)
2484 fprintf (file, ";; already scheduled\n");
2485
2486 if (reload_completed == 0)
2487 {
2488 for (i = 0; i < sometimes_max; i++)
2489 regs_sometimes_live[i].live_length += n_insns;
2490
2491 finish_sometimes_live (regs_sometimes_live, sometimes_max);
2492 }
2493 free_pending_lists ();
2494 return;
2495 }
2496#endif
2497
2498 /* Scan all the insns to be scheduled, removing NOTE insns
2499 and register death notes.
2500 Line number NOTE insns end up in NOTE_LIST.
2501 Register death notes end up in DEAD_NOTES.
2502
2503 Recreate the register life information for the end of this basic
2504 block. */
2505
2506 if (reload_completed == 0)
2507 {
2508 bcopy (basic_block_live_at_start[b], bb_live_regs, regset_bytes);
2509 bzero (bb_dead_regs, regset_bytes);
2510
2511 if (b == 0)
2512 {
2513 /* This is the first block in the function. There may be insns
2514 before head that we can't schedule. We still need to examine
2515 them though for accurate register lifetime analysis. */
2516
2517 /* We don't want to remove any REG_DEAD notes as the code below
2518 does. */
2519
2520 for (insn = basic_block_head[b]; insn != head;
2521 insn = NEXT_INSN (insn))
2522 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2523 {
2524 /* See if the register gets born here. */
2525 /* We must check for registers being born before we check for
2526 registers dying. It is possible for a register to be born
2527 and die in the same insn, e.g. reading from a volatile
2528 memory location into an otherwise unused register. Such
2529 a register must be marked as dead after this insn. */
2530 if (GET_CODE (PATTERN (insn)) == SET
2531 || GET_CODE (PATTERN (insn)) == CLOBBER)
2532 sched_note_set (b, PATTERN (insn), 0);
2533 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2534 {
2535 int j;
2536 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2537 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2538 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2539 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2540
2541 /* ??? This code is obsolete and should be deleted. It
2542 is harmless though, so we will leave it in for now. */
2543 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2544 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
2545 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2546 }
2547
2548 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2549 {
2550 if ((REG_NOTE_KIND (link) == REG_DEAD
2551 || REG_NOTE_KIND (link) == REG_UNUSED)
2552 /* Verify that the REG_NOTE has a legal value. */
2553 && GET_CODE (XEXP (link, 0)) == REG)
2554 {
2555 register int regno = REGNO (XEXP (link, 0));
2556 register int offset = regno / REGSET_ELT_BITS;
2557 register int bit = 1 << (regno % REGSET_ELT_BITS);
2558
2559 if (regno < FIRST_PSEUDO_REGISTER)
2560 {
2561 int j = HARD_REGNO_NREGS (regno,
2562 GET_MODE (XEXP (link, 0)));
2563 while (--j >= 0)
2564 {
2565 offset = (regno + j) / REGSET_ELT_BITS;
2566 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
2567
2568 bb_live_regs[offset] &= ~bit;
2569 bb_dead_regs[offset] |= bit;
2570 }
2571 }
2572 else
2573 {
2574 bb_live_regs[offset] &= ~bit;
2575 bb_dead_regs[offset] |= bit;
2576 }
2577 }
2578 }
2579 }
2580 }
2581 }
2582
2583 /* If debugging information is being produced, keep track of the line
2584 number notes for each insn. */
2585 if (write_symbols != NO_DEBUG)
2586 {
2587 /* We must use the true line number for the first insn in the block
2588 that was computed and saved at the start of this pass. We can't
2589 use the current line number, because scheduling of the previous
2590 block may have changed the current line number. */
2591 rtx line = line_note_head[b];
2592
2593 for (insn = basic_block_head[b];
2594 insn != next_tail;
2595 insn = NEXT_INSN (insn))
2596 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
2597 line = insn;
2598 else
2599 LINE_NOTE (insn) = line;
2600 }
2601
2602 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2603 {
2604 rtx prev, next, link;
2605
2606 /* Farm out notes. This is needed to keep the debugger from
2607 getting completely deranged. */
2608 if (GET_CODE (insn) == NOTE)
2609 {
2610 prev = insn;
2611 insn = unlink_notes (insn, next_tail);
2612 if (prev == tail)
2613 abort ();
2614 if (prev == head)
2615 abort ();
2616 if (insn == next_tail)
2617 abort ();
2618 }
2619
2620 if (reload_completed == 0
2621 && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2622 {
2623 /* See if the register gets born here. */
2624 /* We must check for registers being born before we check for
2625 registers dying. It is possible for a register to be born and
2626 die in the same insn, e.g. reading from a volatile memory
2627 location into an otherwise unused register. Such a register
2628 must be marked as dead after this insn. */
2629 if (GET_CODE (PATTERN (insn)) == SET
2630 || GET_CODE (PATTERN (insn)) == CLOBBER)
2631 sched_note_set (b, PATTERN (insn), 0);
2632 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2633 {
2634 int j;
2635 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2636 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2637 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2638 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2639
2640 /* ??? This code is obsolete and should be deleted. It
2641 is harmless though, so we will leave it in for now. */
2642 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2643 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
2644 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2645 }
2646
2647 /* Need to know what registers this insn kills. */
2648 for (prev = 0, link = REG_NOTES (insn); link; link = next)
2649 {
2650 int regno;
2651
2652 next = XEXP (link, 1);
2653 if ((REG_NOTE_KIND (link) == REG_DEAD
2654 || REG_NOTE_KIND (link) == REG_UNUSED)
2655 /* Verify that the REG_NOTE has a legal value. */
2656 && GET_CODE (XEXP (link, 0)) == REG)
2657 {
2658 register int regno = REGNO (XEXP (link, 0));
2659 register int offset = regno / REGSET_ELT_BITS;
2660 register int bit = 1 << (regno % REGSET_ELT_BITS);
2661
2662 /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
2663 alone. */
2664 if (REG_NOTE_KIND (link) == REG_DEAD)
2665 {
2666 if (prev)
2667 XEXP (prev, 1) = next;
2668 else
2669 REG_NOTES (insn) = next;
2670 XEXP (link, 1) = dead_notes;
2671 dead_notes = link;
2672 }
2673 else
2674 prev = link;
2675
2676 if (regno < FIRST_PSEUDO_REGISTER)
2677 {
2678 int j = HARD_REGNO_NREGS (regno,
2679 GET_MODE (XEXP (link, 0)));
2680 while (--j >= 0)
2681 {
2682 offset = (regno + j) / REGSET_ELT_BITS;
2683 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
2684
2685 bb_live_regs[offset] &= ~bit;
2686 bb_dead_regs[offset] |= bit;
2687 }
2688 }
2689 else
2690 {
2691 bb_live_regs[offset] &= ~bit;
2692 bb_dead_regs[offset] |= bit;
2693 }
2694 }
2695 else
2696 prev = link;
2697 }
2698 }
2699 }
2700
2701 if (reload_completed == 0)
2702 {
2703 /* Keep track of register lives. */
2704 old_live_regs = (regset) alloca (regset_bytes);
2705 regs_sometimes_live
2706 = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
2707 sometimes_max = 0;
2708
2709 /* Start with registers live at end. */
2710 for (j = 0; j < regset_size; j++)
2711 {
2712 int live = bb_live_regs[j];
2713 old_live_regs[j] = live;
2714 if (live)
2715 {
2716 register int bit;
2717 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
2718 if (live & (1 << bit))
2719 sometimes_max = new_sometimes_live (regs_sometimes_live, j,
2720 bit, sometimes_max);
2721 }
2722 }
2723 }
2724
2725 SCHED_SORT (ready, n_ready, 1);
2726
2727 if (file)
2728 {
2729 fprintf (file, ";; ready list initially:\n;; ");
2730 for (i = 0; i < n_ready; i++)
2731 fprintf (file, "%d ", INSN_UID (ready[i]));
2732 fprintf (file, "\n\n");
2733
2734 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2735 if (INSN_PRIORITY (insn) > 0)
2736 fprintf (file, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
2737 INSN_UID (insn), INSN_PRIORITY (insn),
2738 INSN_REF_COUNT (insn));
2739 }
2740
2741 /* Now HEAD and TAIL are going to become disconnected
2742 entirely from the insn chain. */
2743 tail = ready[0];
2744
2745  /* q_size (the counter, not the Q_SIZE macro) is always zero here.  */
2746 q_ptr = 0;
2747 bzero (insn_queue, sizeof (insn_queue));
2748
2749 /* Now, perform list scheduling. */
2750
2751 /* Where we start inserting insns is after TAIL. */
2752 last = next_tail;
2753
2754 new_needs = (NEXT_INSN (prev_head) == basic_block_head[b]
2755 ? NEED_HEAD : NEED_NOTHING);
2756 if (PREV_INSN (next_tail) == basic_block_end[b])
2757 new_needs |= NEED_TAIL;
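
  /* For example: if no insns were peeled off the front of the block
     above, NEXT_INSN (prev_head) is still basic_block_head[b], so
     the block's head pointer must be updated once the scheduled
     insns are spliced back in (NEED_HEAD); NEED_TAIL is the
     analogous case for basic_block_end[b].  */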
2758
2759 new_ready = n_ready;
2760 while (sched_n_insns < n_insns)
2761 {
2762 q_ptr = NEXT_Q (q_ptr);
2763
2764 /* Add all pending insns that can be scheduled without stalls to the
2765 ready list. */
2766 for (insn = insn_queue[q_ptr]; insn; insn = NEXT_INSN (insn))
2767 {
2768 if (file)
2769 fprintf (file, ";; launching %d before %d with no stalls\n",
2770 INSN_UID (insn), INSN_UID (last));
2771 ready[new_ready++] = insn;
2772 q_size -= 1;
2773 }
2774 insn_queue[q_ptr] = 0;
2775
2776 /* If there are no ready insns, stall until one is ready and add all
2777 of the pending insns at that point to the ready list. */
2778 if (new_ready == 0)
2779 {
2780 register int stalls;
2781
2782 for (stalls = 1; stalls < Q_SIZE; stalls++)
2783	 if ((insn = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2784 {
2785 for (; insn; insn = NEXT_INSN (insn))
2786 {
2787 if (file)
2788 fprintf (file, ";; issue insn %d before %d with %d stalls\n",
2789 INSN_UID (insn), INSN_UID (last), stalls);
2790 ready[new_ready++] = insn;
2791 q_size -= 1;
2792 }
2793 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
2794 break;
2795 }
2796
2797#if 0
2798 /* This looks logically correct, but on the SPEC benchmark set on
2799 the SPARC, I get better code without it. */
2800 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
2801#endif
2802 }
2803
2804 /* There should be some instructions waiting to fire. */
2805 if (new_ready == 0)
2806 abort ();
2807
2808 /* Sort the ready list and choose the best insn to schedule.
2809	 N_READY holds the number of insns that were ready the last time,
2810 minus the one instruction scheduled on the last loop iteration; it
2811 is not modified for any other reason in this loop. */
2812 SCHED_SORT (ready, new_ready, n_ready);
2813 n_ready = new_ready;
2814 last_scheduled_insn = insn = ready[0];
2815
2816 if (DONE_PRIORITY_P (insn))
2817 abort ();
2818
2819 if (reload_completed == 0)
2820 {
2821 /* Process this insn, and each insn linked to this one which must
2822 be immediately output after this insn. */
2823 do
2824 {
2825 /* First we kill registers set by this insn, and then we
2826 make registers used by this insn live. This is the opposite
2827 order used above because we are traversing the instructions
2828 backwards. */
2829
2830 /* Strictly speaking, we should scan REG_UNUSED notes and make
2831 every register mentioned there live, however, we will just
2832 kill them again immediately below, so there doesn't seem to
2833 be any reason why we bother to do this. */
2834
2835 /* See if this is the last notice we must take of a register. */
2836 if (GET_CODE (PATTERN (insn)) == SET
2837 || GET_CODE (PATTERN (insn)) == CLOBBER)
2838 sched_note_set (b, PATTERN (insn), 1);
2839 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2840 {
2841 int j;
2842 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2843 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2844 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2845 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 1);
2846 }
2847
2848 /* This code keeps life analysis information up to date. */
2849 if (GET_CODE (insn) == CALL_INSN)
2850 {
2851 register struct sometimes *p;
2852
2853 /* A call kills all call used and global registers, except
2854 for those mentioned in the call pattern which will be
2855 made live again later. */
2856 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2857 if (call_used_regs[i] || global_regs[i])
2858 {
2859 register int offset = i / REGSET_ELT_BITS;
2860 register int bit = 1 << (i % REGSET_ELT_BITS);
2861
2862 bb_live_regs[offset] &= ~bit;
2863 bb_dead_regs[offset] |= bit;
2864 }
2865
2866 /* Regs live at the time of a call instruction must not
2867 go in a register clobbered by calls. Record this for
2868		 all regs now live.  Note that regs which are born or
2869 die in a call do not cross a call, so this must be done
2870 after the killings (above) and before the births
2871 (below). */
2872 p = regs_sometimes_live;
2873 for (i = 0; i < sometimes_max; i++, p++)
2874 if (bb_live_regs[p->offset] & (1 << p->bit))
2875 p->calls_crossed += 1;
2876 }
2877
2878 /* Make every register used live, and add REG_DEAD notes for
2879 registers which were not live before we started. */
2880 attach_deaths_insn (insn);
2881
2882 /* Find registers now made live by that instruction. */
2883 for (i = 0; i < regset_size; i++)
2884 {
2885 int diff = bb_live_regs[i] & ~old_live_regs[i];
2886 if (diff)
2887 {
2888 register int bit;
2889 old_live_regs[i] |= diff;
2890 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
2891 if (diff & (1 << bit))
2892 sometimes_max
2893 = new_sometimes_live (regs_sometimes_live, i, bit,
2894 sometimes_max);
2895 }
2896 }
2897
2898 /* Count lengths of all regs we are worrying about now,
2899 and handle registers no longer live. */
2900
2901 for (i = 0; i < sometimes_max; i++)
2902 {
2903 register struct sometimes *p = &regs_sometimes_live[i];
2904	      int regno = p->offset * REGSET_ELT_BITS + p->bit;
2905
2906 p->live_length += 1;
2907
2908 if ((bb_live_regs[p->offset] & (1 << p->bit)) == 0)
2909 {
2910 /* This is the end of one of this register's lifetime
2911 segments. Save the lifetime info collected so far,
2912 and clear its bit in the old_live_regs entry. */
2913 sched_reg_live_length[regno] += p->live_length;
2914 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2915 old_live_regs[p->offset] &= ~(1 << p->bit);
2916
2917 /* Delete the reg_sometimes_live entry for this reg by
2918 copying the last entry over top of it. */
2919 *p = regs_sometimes_live[--sometimes_max];
2920 /* ...and decrement i so that this newly copied entry
2921 will be processed. */
2922 i--;
2923 }
2924 }
2925
2926 link = insn;
2927 insn = PREV_INSN (insn);
2928 }
2929 while (SCHED_GROUP_P (link));
2930
2931 /* Set INSN back to the insn we are scheduling now. */
2932 insn = ready[0];
2933 }
2934
2935 /* Schedule INSN. Remove it from the ready list. */
2936 ready += 1;
2937 n_ready -= 1;
2938
2939 sched_n_insns += 1;
2940 NEXT_INSN (insn) = last;
2941 PREV_INSN (last) = insn;
2942 last = insn;
2943
2944 /* Everything that precedes INSN now either becomes "ready", if
2945 it can execute immediately before INSN, or "pending", if
2946 there must be a delay. Give INSN high enough priority that
2947 at least one (maybe more) reg-killing insns can be launched
2948 ahead of all others. Mark INSN as scheduled by changing its
2949 priority to -1. */
2950 INSN_PRIORITY (insn) = LAUNCH_PRIORITY;
2951 new_ready = launch_links (insn, ready, n_ready);
2952 INSN_PRIORITY (insn) = DONE_PRIORITY;
2953
2954 /* Schedule all prior insns that must not be moved. */
2955 if (SCHED_GROUP_P (insn))
2956 {
2957 /* Disable these insns from being launched. */
2958 link = insn;
2959 while (SCHED_GROUP_P (link))
2960 {
2961 /* Disable these insns from being launched by anybody. */
2962 link = PREV_INSN (link);
2963 INSN_REF_COUNT (link) = 0;
2964 }
2965
2966 /* None of these insns can move forward into delay slots. */
2967 while (SCHED_GROUP_P (insn))
2968 {
2969 insn = PREV_INSN (insn);
2970 new_ready = launch_links (insn, ready, new_ready);
2971 INSN_PRIORITY (insn) = DONE_PRIORITY;
2972
2973 sched_n_insns += 1;
2974 NEXT_INSN (insn) = last;
2975 PREV_INSN (last) = insn;
2976 last = insn;
2977 }
2978 }
2979 }
2980 if (q_size != 0)
2981 abort ();
2982
2983 if (reload_completed == 0)
2984 finish_sometimes_live (regs_sometimes_live, sometimes_max);
2985
2986  /* HEAD is now the first insn in the chain of insns that have
2987     been scheduled by the loop above.
2988 TAIL is the last of those insns. */
2989 head = insn;
2990
2991 /* NOTE_LIST is the end of a chain of notes previously found
2992 among the insns. Insert them at the beginning of the insns. */
2993 if (note_list != 0)
2994 {
2995 rtx note_head = note_list;
2996 while (PREV_INSN (note_head))
2997 note_head = PREV_INSN (note_head);
2998
2999 PREV_INSN (head) = note_list;
3000 NEXT_INSN (note_list) = head;
3001 head = note_head;
3002 }
3003
3004 /* In theory, there should be no REG_DEAD notes leftover at the end.
3005 In practice, this can occur as the result of bugs in flow, combine.c,
3006 and/or sched.c. The values of the REG_DEAD notes remaining are
3007 meaningless, because dead_notes is just used as a free list. */
3008#if 1
3009 if (dead_notes != 0)
3010 abort ();
3011#endif
3012
3013 if (new_needs & NEED_HEAD)
3014 basic_block_head[b] = head;
3015 PREV_INSN (head) = prev_head;
3016 NEXT_INSN (prev_head) = head;
3017
3018 if (new_needs & NEED_TAIL)
3019 basic_block_end[b] = tail;
3020 NEXT_INSN (tail) = next_tail;
3021 PREV_INSN (next_tail) = tail;
3022
3023 /* Restore the line-number notes of each insn. */
3024 if (write_symbols != NO_DEBUG)
3025 {
3026 rtx line, note, prev, new;
3027 int notes = 0;
3028
3029 head = basic_block_head[b];
3030 next_tail = NEXT_INSN (basic_block_end[b]);
3031
3032 /* Determine the current line-number. We want to know the current
3033 line number of the first insn of the block here, in case it is
3034 different from the true line number that was saved earlier. If
3035 different, then we need a line number note before the first insn
3036 of this block. If it happens to be the same, then we don't want to
3037 emit another line number note here. */
3038 for (line = head; line; line = PREV_INSN (line))
3039 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3040 break;
3041
3042 /* Walk the insns keeping track of the current line-number and inserting
3043 the line-number notes as needed. */
3044 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3045 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3046 line = insn;
3047 else if (! (GET_CODE (insn) == NOTE
3048 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
3049 && (note = LINE_NOTE (insn)) != 0
3050 && note != line
3051 && (line == 0
3052 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
3053 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
3054 {
3055 line = note;
3056 prev = PREV_INSN (insn);
3057 if (LINE_NOTE (note))
3058 {
3059	    /* Re-use the original line-number note.  */
3060 LINE_NOTE (note) = 0;
3061 PREV_INSN (note) = prev;
3062 NEXT_INSN (prev) = note;
3063 PREV_INSN (insn) = note;
3064 NEXT_INSN (note) = insn;
3065 }
3066 else
3067 {
3068 notes++;
3069 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
3070 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
3071 }
3072 }
3073 if (file && notes)
3074 fprintf (file, ";; added %d line-number notes\n", notes);
3075 }
3076
3077 if (file)
3078 {
3079 fprintf (file, ";; new basic block head = %d\n;; new basic block end = %d\n\n",
3080 INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
3081 }
3082
3083 /* Yow! We're done! */
3084 free_pending_lists ();
3085
3086 return;
3087}
3088\f
3089/* Subroutine of split_hard_reg_notes. Searches X for any reference to
3090 REGNO, returning the rtx of the reference found if any. Otherwise,
3091 returns 0. */
3092
3093rtx
3094regno_use_in (regno, x)
3095 int regno;
3096 rtx x;
3097{
3098 register char *fmt;
3099 int i, j;
3100 rtx tem;
3101
3102 if (GET_CODE (x) == REG && REGNO (x) == regno)
3103 return x;
3104
3105 fmt = GET_RTX_FORMAT (GET_CODE (x));
3106 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3107 {
3108 if (fmt[i] == 'e')
3109 {
3110	  if ((tem = regno_use_in (regno, XEXP (x, i))))
3111 return tem;
3112 }
3113 else if (fmt[i] == 'E')
3114 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3115	  if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
3116 return tem;
3117 }
3118
3119 return 0;
3120}
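
/* Usage sketch: given PAT of the form
   (set (reg:SI 65) (plus:SI (reg:SI 64) (const_int 1))),
   regno_use_in (64, PAT) returns the inner (reg:SI 64) rtx, while
   regno_use_in (99, PAT) returns 0.  A destination register counts
   as a "use" here; the callers below only need to know whether the
   register is mentioned at all.  */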
3121
3122/* Subroutine of update_flow_info. Determines whether any new REG_NOTEs are
3123 needed for the hard register mentioned in the note. This can happen
3124 if the reference to the hard register in the original insn was split into
3125 several smaller hard register references in the split insns. */
3126
3127static void
3128split_hard_reg_notes (note, first, last, orig_insn)
3129 rtx note, first, last, orig_insn;
3130{
3131 rtx reg, temp, link;
3132 int n_regs, i, new_reg;
3133 rtx insn;
3134
3135 /* Assume that this is a REG_DEAD note. */
3136 if (REG_NOTE_KIND (note) != REG_DEAD)
3137 abort ();
3138
3139 reg = XEXP (note, 0);
3140
3141 n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
3142
3143  /* ??? Could add check here to see whether the hard register is referenced
3144 in the same mode as in the original insn. If so, then it has not been
3145 split, and the rest of the code below is unnecessary. */
3146
3147 for (i = 1; i < n_regs; i++)
3148 {
3149 new_reg = REGNO (reg) + i;
3150
3151 /* Check for references to new_reg in the split insns. */
3152 for (insn = last; ; insn = PREV_INSN (insn))
3153 {
3154 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3155 && (temp = regno_use_in (new_reg, PATTERN (insn))))
3156 {
3157 /* Create a new reg dead note here. */
3158 link = rtx_alloc (EXPR_LIST);
3159 PUT_REG_NOTE_KIND (link, REG_DEAD);
3160 XEXP (link, 0) = temp;
3161 XEXP (link, 1) = REG_NOTES (insn);
3162 REG_NOTES (insn) = link;
3163 break;
3164 }
3165 /* It isn't mentioned anywhere, so no new reg note is needed for
3166 this register. */
3167 if (insn == first)
3168 break;
3169 }
3170 }
3171}
3172
3173/* Subroutine of update_flow_info. Determines whether a SET or CLOBBER in an
3174 insn created by splitting needs a REG_DEAD or REG_UNUSED note added. */
3175
3176static void
3177new_insn_dead_notes (pat, insn, last, orig_insn)
3178 rtx pat, insn, last, orig_insn;
3179{
3180 rtx dest, tem, set;
3181
3182 /* PAT is either a CLOBBER or a SET here. */
3183 dest = XEXP (pat, 0);
3184
3185 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3186 || GET_CODE (dest) == STRICT_LOW_PART
3187 || GET_CODE (dest) == SIGN_EXTRACT)
3188 dest = XEXP (dest, 0);
3189
3190 if (GET_CODE (dest) == REG)
3191 {
3192 for (tem = last; tem != insn; tem = PREV_INSN (tem))
3193 {
3194 if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
3195 && reg_overlap_mentioned_p (dest, PATTERN (tem))
3196 && (set = single_set (tem)))
3197 {
3198 rtx tem_dest = SET_DEST (set);
3199
3200 while (GET_CODE (tem_dest) == ZERO_EXTRACT
3201 || GET_CODE (tem_dest) == SUBREG
3202 || GET_CODE (tem_dest) == STRICT_LOW_PART
3203 || GET_CODE (tem_dest) == SIGN_EXTRACT)
3204 tem_dest = XEXP (tem_dest, 0);
3205
3206 if (tem_dest != dest)
3207 {
3208		  /* Use the same scheme as combine.c: don't put both REG_DEAD
3209 and REG_UNUSED notes on the same insn. */
3210 if (! find_regno_note (tem, REG_UNUSED, REGNO (dest))
3211 && ! find_regno_note (tem, REG_DEAD, REGNO (dest)))
3212 {
3213 rtx note = rtx_alloc (EXPR_LIST);
3214 PUT_REG_NOTE_KIND (note, REG_DEAD);
3215 XEXP (note, 0) = dest;
3216 XEXP (note, 1) = REG_NOTES (tem);
3217 REG_NOTES (tem) = note;
3218 }
3219 /* The reg only dies in one insn, the last one that uses
3220 it. */
3221 break;
3222 }
3223 else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
3224 /* We found an instruction that both uses the register,
3225 and sets it, so no new REG_NOTE is needed for this set. */
3226 break;
3227 }
3228 }
3229 /* If this is a set, it must die somewhere, unless it is the dest of
3230 the original insn, and hence is live after the original insn. Abort
3231 if it isn't supposed to be live after the original insn.
3232
3233 If this is a clobber, then just add a REG_UNUSED note. */
3234 if (tem == insn)
3235 {
3236 int live_after_orig_insn = 0;
3237 rtx pattern = PATTERN (orig_insn);
3238 int i;
3239
3240 if (GET_CODE (pat) == CLOBBER)
3241 {
3242 rtx note = rtx_alloc (EXPR_LIST);
3243 PUT_REG_NOTE_KIND (note, REG_UNUSED);
3244 XEXP (note, 0) = dest;
3245 XEXP (note, 1) = REG_NOTES (insn);
3246 REG_NOTES (insn) = note;
3247 return;
3248 }
3249
3250 /* The original insn could have multiple sets, so search the
3251 insn for all sets. */
3252 if (GET_CODE (pattern) == SET)
3253 {
3254 if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
3255 live_after_orig_insn = 1;
3256 }
3257 else if (GET_CODE (pattern) == PARALLEL)
3258 {
3259 for (i = 0; i < XVECLEN (pattern, 0); i++)
3260 if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
3261 && reg_overlap_mentioned_p (dest,
3262 SET_DEST (XVECEXP (pattern,
3263 0, i))))
3264 live_after_orig_insn = 1;
3265 }
3266
3267 if (! live_after_orig_insn)
3268 abort ();
3269 }
3270 }
3271}
3272
3273/* Subroutine of update_flow_info. Update the value of reg_n_sets for all
3274 registers modified by X. INC is -1 if the containing insn is being deleted,
3275 and is 1 if the containing insn is a newly generated insn. */
3276
3277static void
3278update_n_sets (x, inc)
3279 rtx x;
3280 int inc;
3281{
3282 rtx dest = SET_DEST (x);
3283
3284 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
3285 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3286 dest = SUBREG_REG (dest);
3287
3288 if (GET_CODE (dest) == REG)
3289 {
3290 int regno = REGNO (dest);
3291
3292 if (regno < FIRST_PSEUDO_REGISTER)
3293 {
3294 register int i;
3295 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
3296
3297 for (i = regno; i < endregno; i++)
3298 reg_n_sets[i] += inc;
3299 }
3300 else
3301 reg_n_sets[regno] += inc;
3302 }
3303}
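
/* Bookkeeping sketch with an illustrative pseudo number: if
   ORIG_INSN set pseudo 80 once and was split into two insns that
   each set pseudo 80, update_flow_info calls this with INC of -1
   for the original SET and of 1 for each new one, leaving
   reg_n_sets[80] == 2.  Local alloc relies on this count being
   exact.  */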
3304
3305/* Updates all flow-analysis related quantities (including REG_NOTES) for
3306 the insns from FIRST to LAST inclusive that were created by splitting
3307 ORIG_INSN. NOTES are the original REG_NOTES. */
3308
3309static void
3310update_flow_info (notes, first, last, orig_insn)
3311 rtx notes;
3312 rtx first, last;
3313 rtx orig_insn;
3314{
3315 rtx insn, note;
3316 rtx next;
3317 rtx orig_dest, temp;
3318 rtx set;
3319
3320 /* Get and save the destination set by the original insn. */
3321
3322 orig_dest = single_set (orig_insn);
3323 if (orig_dest)
3324 orig_dest = SET_DEST (orig_dest);
3325
3326 /* Move REG_NOTES from the original insn to where they now belong. */
3327
3328 for (note = notes; note; note = next)
3329 {
3330 next = XEXP (note, 1);
3331 switch (REG_NOTE_KIND (note))
3332 {
3333 case REG_DEAD:
3334 case REG_UNUSED:
3335 /* Move these notes from the original insn to the last new insn where
3336 the register is now set. */
3337
3338 for (insn = last; ; insn = PREV_INSN (insn))
3339 {
3340 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3341 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3342 {
3343 XEXP (note, 1) = REG_NOTES (insn);
3344 REG_NOTES (insn) = note;
3345
3346 /* Sometimes need to convert REG_UNUSED notes to REG_DEAD
3347 notes. */
3348	      /* ??? This won't handle multiple word registers correctly,
3349 but should be good enough for now. */
3350 if (REG_NOTE_KIND (note) == REG_UNUSED
3351 && ! dead_or_set_p (insn, XEXP (note, 0)))
3352 PUT_REG_NOTE_KIND (note, REG_DEAD);
3353
3354 /* The reg only dies in one insn, the last one that uses
3355 it. */
3356 break;
3357 }
3358	  /* It must die somewhere; fail if we couldn't find where it died.
3359
3360 If this is a REG_UNUSED note, then it must be a temporary
3361 register that was not needed by this instantiation of the
3362 pattern, so we can safely ignore it. */
3363 if (insn == first)
3364 {
3365 if (REG_NOTE_KIND (note) != REG_UNUSED)
3366 abort ();
3367
3368 break;
3369 }
3370 }
3371
3372 /* If this note refers to a multiple word hard register, it may
3373 have been split into several smaller hard register references.
3374 Check to see if there are any new register references that
3375 need REG_NOTES added for them. */
3376 temp = XEXP (note, 0);
3377 if (REG_NOTE_KIND (note) == REG_DEAD
3378 && GET_CODE (temp) == REG
3379 && REGNO (temp) < FIRST_PSEUDO_REGISTER
3380	  && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) > 1)
3381 split_hard_reg_notes (note, first, last, orig_insn);
3382 break;
3383
3384 case REG_WAS_0:
3385 /* This note applies to the dest of the original insn. Find the
3386 first new insn that now has the same dest, and move the note
3387 there. */
3388
3389 if (! orig_dest)
3390 abort ();
3391
3392 for (insn = first; ; insn = NEXT_INSN (insn))
3393 {
3394 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3395 && (temp = single_set (insn))
3396 && rtx_equal_p (SET_DEST (temp), orig_dest))
3397 {
3398 XEXP (note, 1) = REG_NOTES (insn);
3399 REG_NOTES (insn) = note;
3400 /* The reg is only zero before one insn, the first that
3401 uses it. */
3402 break;
3403 }
3404 /* It must be set somewhere, fail if we couldn't find where it
3405 was set. */
3406 if (insn == last)
3407 abort ();
3408 }
3409 break;
3410
3411 case REG_EQUAL:
3412 case REG_EQUIV:
3413	  /* A REG_EQUIV or REG_EQUAL note on an insn with more than one set is
3414	     meaningless; drop it.  Otherwise fall through to the REG_NO_CONFLICT code.  */
3415 if (! orig_dest)
3416 break;
3417
3418 case REG_NO_CONFLICT:
3419 /* These notes apply to the dest of the original insn. Find the last
3420 new insn that now has the same dest, and move the note there. */
3421
3422 if (! orig_dest)
3423 abort ();
3424
3425 for (insn = last; ; insn = PREV_INSN (insn))
3426 {
3427 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3428 && (temp = single_set (insn))
3429 && rtx_equal_p (SET_DEST (temp), orig_dest))
3430 {
3431 XEXP (note, 1) = REG_NOTES (insn);
3432 REG_NOTES (insn) = note;
3433 /* Only put this note on one of the new insns. */
3434 break;
3435 }
3436
3437 /* The original dest must still be set someplace. Abort if we
3438 couldn't find it. */
3439 if (insn == first)
3440 abort ();
3441 }
3442 break;
3443
3444 case REG_LIBCALL:
3445 /* Move a REG_LIBCALL note to the first insn created, and update
3446 the corresponding REG_RETVAL note. */
3447 XEXP (note, 1) = REG_NOTES (first);
3448 REG_NOTES (first) = note;
3449
3450 insn = XEXP (note, 0);
3451 note = find_reg_note (insn, REG_RETVAL, 0);
3452 if (note)
3453 XEXP (note, 0) = first;
3454 break;
3455
3456 case REG_RETVAL:
3457 /* Move a REG_RETVAL note to the last insn created, and update
3458 the corresponding REG_LIBCALL note. */
3459 XEXP (note, 1) = REG_NOTES (last);
3460 REG_NOTES (last) = note;
3461
3462 insn = XEXP (note, 0);
3463 note = find_reg_note (insn, REG_LIBCALL, 0);
3464 if (note)
3465 XEXP (note, 0) = last;
3466 break;
3467
3468 case REG_NONNEG:
3469 /* This should be moved to whichever instruction is a JUMP_INSN. */
3470
3471 for (insn = last; ; insn = PREV_INSN (insn))
3472 {
3473 if (GET_CODE (insn) == JUMP_INSN)
3474 {
3475 XEXP (note, 1) = REG_NOTES (insn);
3476 REG_NOTES (insn) = note;
3477 /* Only put this note on one of the new insns. */
3478 break;
3479 }
3480 /* Fail if we couldn't find a JUMP_INSN. */
3481 if (insn == first)
3482 abort ();
3483 }
3484 break;
3485
3486 case REG_INC:
3487 /* This should be moved to whichever instruction now has the
3488 increment operation. */
3489 abort ();
3490
3491 case REG_LABEL:
3492 /* Should be moved to the new insn(s) which use the label. */
3493 abort ();
3494
3495 case REG_CC_SETTER:
3496 case REG_CC_USER:
3497 /* These two notes will never appear until after reorg, so we don't
3498 have to handle them here. */
3499 default:
3500 abort ();
3501 }
3502 }
3503
3504 /* Each new insn created, except the last, has a new set. If the destination
3505 is a register, then this reg is now live across several insns, whereas
3506 previously the dest reg was born and died within the same insn. To
3507 reflect this, we now need a REG_DEAD note on the insn where this
3508 dest reg dies.
3509
3510 Similarly, the new insns may have clobbers that need REG_UNUSED notes. */
3511
3512 for (insn = first; insn != last; insn = NEXT_INSN (insn))
3513 {
3514 rtx pat;
3515 int i;
3516
3517 pat = PATTERN (insn);
3518 if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
3519 new_insn_dead_notes (pat, insn, last, orig_insn);
3520 else if (GET_CODE (pat) == PARALLEL)
3521 {
3522 for (i = 0; i < XVECLEN (pat, 0); i++)
3523 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3524 || GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
3525 new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
3526 }
3527 }
3528
3529 /* If any insn, except the last, uses the register set by the last insn,
3530 then we need a new REG_DEAD note on that insn. In this case, there
3531 would not have been a REG_DEAD note for this register in the original
3532 insn because it was used and set within one insn.
3533
3534 There is no new REG_DEAD note needed if the last insn uses the register
3535 that it is setting. */
3536
3537 set = single_set (last);
3538 if (set)
3539 {
3540 rtx dest = SET_DEST (set);
3541
3542 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3543 || GET_CODE (dest) == STRICT_LOW_PART
3544 || GET_CODE (dest) == SIGN_EXTRACT)
3545 dest = XEXP (dest, 0);
3546
3547 if (GET_CODE (dest) == REG
3548 && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
3549 {
3550 for (insn = PREV_INSN (last); ; insn = PREV_INSN (insn))
3551 {
3552 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3553 && reg_mentioned_p (dest, PATTERN (insn))
3554 && (set = single_set (insn)))
3555 {
3556 rtx insn_dest = SET_DEST (set);
3557
3558 while (GET_CODE (insn_dest) == ZERO_EXTRACT
3559 || GET_CODE (insn_dest) == SUBREG
3560 || GET_CODE (insn_dest) == STRICT_LOW_PART
3561 || GET_CODE (insn_dest) == SIGN_EXTRACT)
3562 insn_dest = XEXP (insn_dest, 0);
3563
3564 if (insn_dest != dest)
3565 {
3566 note = rtx_alloc (EXPR_LIST);
3567 PUT_REG_NOTE_KIND (note, REG_DEAD);
3568 XEXP (note, 0) = dest;
3569 XEXP (note, 1) = REG_NOTES (insn);
3570 REG_NOTES (insn) = note;
3571 /* The reg only dies in one insn, the last one
3572 that uses it. */
3573 break;
3574 }
3575 }
3576 if (insn == first)
3577 break;
3578 }
3579 }
3580 }
3581
3582 /* If the original dest is modifying a multiple register target, and the
3583 original instruction was split such that the original dest is now set
3584 by two or more SUBREG sets, then the split insns no longer kill the
3585 destination of the original insn.
3586
3587 In this case, if there exists an instruction in the same basic block,
3588 before the split insn, which uses the original dest, and this use is
3589 killed by the original insn, then we must remove the REG_DEAD note on
3590 this insn, because it is now superfluous.
3591
3592 This does not apply when a hard register gets split, because the code
3593 knows how to handle overlapping hard registers properly. */
3594 if (orig_dest && GET_CODE (orig_dest) == REG)
3595 {
3596 int found_orig_dest = 0;
3597 int found_split_dest = 0;
3598
3599 for (insn = first; ; insn = NEXT_INSN (insn))
3600 {
3601 set = single_set (insn);
3602 if (set)
3603 {
3604 if (GET_CODE (SET_DEST (set)) == REG
3605 && REGNO (SET_DEST (set)) == REGNO (orig_dest))
3606 {
3607 found_orig_dest = 1;
3608 break;
3609 }
3610 else if (GET_CODE (SET_DEST (set)) == SUBREG
3611 && SUBREG_REG (SET_DEST (set)) == orig_dest)
3612 {
3613 found_split_dest = 1;
3614 break;
3615 }
3616 }
3617
3618 if (insn == last)
3619 break;
3620 }
3621
3622 if (found_split_dest)
3623 {
3624 /* Search backwards from FIRST, looking for the first insn that uses
3625 the original dest. Stop if we pass a CODE_LABEL or a JUMP_INSN.
3626 If we find an insn, and it has a REG_DEAD note, then delete the
3627 note. */
3628
3629 for (insn = first; insn; insn = PREV_INSN (insn))
3630 {
3631 if (GET_CODE (insn) == CODE_LABEL
3632 || GET_CODE (insn) == JUMP_INSN)
3633 break;
3634 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3635 && reg_mentioned_p (orig_dest, insn))
3636 {
3637 note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
3638 if (note)
3639 remove_note (insn, note);
3640 }
3641 }
3642 }
3643 else if (! found_orig_dest)
3644 {
3645 /* This should never happen. */
3646 abort ();
3647 }
3648 }
3649
3650 /* Update reg_n_sets. This is necessary to prevent local alloc from
3651     converting REG_EQUAL notes to REG_EQUIV when splitting has changed
3652     a reg from being set once to being set multiple times.  */
3653
3654 {
3655 rtx x = PATTERN (orig_insn);
3656 RTX_CODE code = GET_CODE (x);
3657
3658 if (code == SET || code == CLOBBER)
3659 update_n_sets (x, -1);
3660 else if (code == PARALLEL)
3661 {
3662 int i;
3663 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3664 {
3665 code = GET_CODE (XVECEXP (x, 0, i));
3666 if (code == SET || code == CLOBBER)
3667 update_n_sets (XVECEXP (x, 0, i), -1);
3668 }
3669 }
3670
3671 for (insn = first; ; insn = NEXT_INSN (insn))
3672 {
3673 x = PATTERN (insn);
3674 code = GET_CODE (x);
3675
3676 if (code == SET || code == CLOBBER)
3677 update_n_sets (x, 1);
3678 else if (code == PARALLEL)
3679 {
3680 int i;
3681 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3682 {
3683 code = GET_CODE (XVECEXP (x, 0, i));
3684 if (code == SET || code == CLOBBER)
3685 update_n_sets (XVECEXP (x, 0, i), 1);
3686 }
3687 }
3688
3689 if (insn == last)
3690 break;
3691 }
3692 }
3693}
3694
3695/* The one entry point in this file. DUMP_FILE is the dump file for
3696 this pass. */

void
schedule_insns (dump_file)
     FILE *dump_file;
{
  int max_uid = MAX_INSNS_PER_SPLIT * (get_max_uid () + 1);
  int i, b;
  rtx insn;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == 0)
    return;

  /* Create an insn here so that we can hang dependencies off of it later.  */
  sched_before_next_call = gen_rtx (INSN, VOIDmode, 0, 0, 0, 0, 0, 0, 0);

  /* Initialize the unused_*_lists.  We can't use the ones left over from
     the previous function, because gcc has freed that memory.  We can,
     however, reuse the ones left over from the first sched pass in the
     second pass, so only clear them on the first sched pass.  The first
     pass runs before reload if flag_schedule_insns is set, otherwise it
     runs afterwards.  */

  if (reload_completed == 0 || ! flag_schedule_insns)
    {
      unused_insn_list = 0;
      unused_expr_list = 0;
    }

  /* We create no insns here, only reorder them, so we
     remember how far we can cut back the stack on exit.  */

  /* Allocate data for this pass.  See comments, above,
     for what these vectors do.  */
  /* ??? Instruction splitting below may create new instructions, so these
     arrays must be bigger than just max_uid.  */
  insn_luid = (int *) alloca (max_uid * sizeof (int));
  insn_priority = (int *) alloca (max_uid * sizeof (int));
  insn_ref_count = (int *) alloca (max_uid * sizeof (int));

  if (reload_completed == 0)
    {
      sched_reg_n_deaths = (short *) alloca (max_regno * sizeof (short));
      sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
      sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
      bb_dead_regs = (regset) alloca (regset_bytes);
      bb_live_regs = (regset) alloca (regset_bytes);
      bzero (sched_reg_n_calls_crossed, max_regno * sizeof (int));
      bzero (sched_reg_live_length, max_regno * sizeof (int));
      bcopy (reg_n_deaths, sched_reg_n_deaths, max_regno * sizeof (short));
      init_alias_analysis ();
    }
  else
    {
      sched_reg_n_deaths = 0;
      sched_reg_n_calls_crossed = 0;
      sched_reg_live_length = 0;
      bb_dead_regs = 0;
      bb_live_regs = 0;
      if (! flag_schedule_insns)
        init_alias_analysis ();
    }

  if (write_symbols != NO_DEBUG)
    {
      rtx line;

      line_note = (rtx *) alloca (max_uid * sizeof (rtx));
      bzero (line_note, max_uid * sizeof (rtx));
      line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
      bzero (line_note_head, n_basic_blocks * sizeof (rtx));

      /* Determine the line-number at the start of each basic block.
         This must be computed and saved now, because after a basic block's
         predecessor has been scheduled, it is impossible to accurately
         determine the correct line number for the first insn of the block.  */

      for (b = 0; b < n_basic_blocks; b++)
        for (line = basic_block_head[b]; line; line = PREV_INSN (line))
          if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
            {
              line_note_head[b] = line;
              break;
            }
    }

  bzero (insn_luid, max_uid * sizeof (int));
  bzero (insn_priority, max_uid * sizeof (int));
  bzero (insn_ref_count, max_uid * sizeof (int));

  /* Schedule each basic block, block by block.  */

  if (NEXT_INSN (basic_block_end[n_basic_blocks-1]) == 0
      || (GET_CODE (basic_block_end[n_basic_blocks-1]) != NOTE
          && GET_CODE (basic_block_end[n_basic_blocks-1]) != CODE_LABEL))
    emit_note_after (NOTE_INSN_DELETED, basic_block_end[n_basic_blocks-1]);

  for (b = 0; b < n_basic_blocks; b++)
    {
      rtx insn, next;
      rtx insns;

      note_list = 0;

      for (insn = basic_block_head[b]; ; insn = next)
        {
          rtx prev;
          rtx set;

          /* Can't use `next_real_insn' because that
             might go across CODE_LABELs and thereby skip
             whole basic blocks.  */
          next = NEXT_INSN (insn);
          if (GET_CODE (insn) != INSN)
            {
              if (insn == basic_block_end[b])
                break;

              continue;
            }

          /* Don't split no-op move insns.  These should silently disappear
             later in final.  Splitting such insns would break the code
             that handles REG_NO_CONFLICT blocks.  */
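          /* (Illustrative example, not from the original sources: a no-op
             move is a single SET whose source and destination are the same
             object, e.g.

                (set (reg:SI 64) (reg:SI 64))

             single_set extracts the SET, and rtx_equal_p detects the match
             in the test just below.)  */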
          set = single_set (insn);
          if (set && rtx_equal_p (SET_SRC (set), SET_DEST (set)))
            {
              if (insn == basic_block_end[b])
                break;

              /* Nops get in the way while scheduling, so delete them now if
                 register allocation has already been done.  It is too risky
                 to try to do this before register allocation, and there are
                 unlikely to be very many nops then anyway.  */
              if (reload_completed)
                {
                  PUT_CODE (insn, NOTE);
                  NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
                  NOTE_SOURCE_FILE (insn) = 0;
                }

              continue;
            }

          /* Split insns here to get max fine-grain parallelism.  */
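          /* (Illustrative example, not from the original sources: a machine
             description's define_split might rewrite an add of a large
             constant, say

                (set (reg:SI 70) (plus:SI (reg:SI 71) (const_int 65540)))

             as two simpler adds through a scratch register, giving the
             scheduler independent pieces to interleave with other insns.)  */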
          prev = PREV_INSN (insn);
          if (reload_completed == 0)
            {
              rtx last, first = PREV_INSN (insn);
              rtx notes = REG_NOTES (insn);

              last = try_split (PATTERN (insn), insn, 1);
              if (last != insn)
                {
                  /* The split succeeded: LAST is the last insn of the new
                     sequence, and INSN itself is now dead, so turn it into
                     a deleted note.  */
                  first = NEXT_INSN (first);
                  update_flow_info (notes, first, last, insn);

                  PUT_CODE (insn, NOTE);
                  NOTE_SOURCE_FILE (insn) = 0;
                  NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
                  if (insn == basic_block_head[b])
                    basic_block_head[b] = first;
                  if (insn == basic_block_end[b])
                    {
                      basic_block_end[b] = last;
                      break;
                    }
                }
            }

          if (insn == basic_block_end[b])
            break;
        }

      schedule_block (b, dump_file);

#ifdef USE_C_ALLOCA
      alloca (0);
#endif
    }

  if (write_symbols != NO_DEBUG)
    {
      rtx line = 0;
      rtx insn = get_insns ();
      int active_insn = 0;
      int notes = 0;

      /* Walk the insns deleting redundant line-number notes.  Many of
         these were already present before scheduling; the remainder tend
         to occur at basic block boundaries.  */
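      /* (Illustrative example, not from the original sources: scanning
         backwards, a line note with no active insns after it is deleted
         outright; and of two notes for the same source line separated
         only by active insns, the later one is deleted, e.g.

                NOTE line 10
                (set ...)
                NOTE line 10    <- redundant, becomes NOTE_INSN_DELETED

         since the line number is unchanged at the second note.)  */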
      for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
        if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
          {
            /* If there are no active insns following, INSN is redundant.  */
            if (active_insn == 0)
              {
                notes++;
                NOTE_SOURCE_FILE (insn) = 0;
                NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
              }
            /* If the line number is unchanged, LINE is redundant.  */
            else if (line
                     && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
                     && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
              {
                notes++;
                NOTE_SOURCE_FILE (line) = 0;
                NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
                line = insn;
              }
            else
              line = insn;
            active_insn = 0;
          }
        else if (! ((GET_CODE (insn) == NOTE
                     && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
                    || (GET_CODE (insn) == INSN
                        && (GET_CODE (PATTERN (insn)) == USE
                            || GET_CODE (PATTERN (insn)) == CLOBBER))))
          active_insn++;

      if (dump_file && notes)
        fprintf (dump_file, ";; deleted %d line-number notes\n", notes);
    }

  if (reload_completed == 0)
    {
      int regno;
      for (regno = 0; regno < max_regno; regno++)
        if (sched_reg_live_length[regno])
          {
            if (dump_file)
              {
                if (reg_live_length[regno] > sched_reg_live_length[regno])
                  fprintf (dump_file,
                           ";; register %d life shortened from %d to %d\n",
                           regno, reg_live_length[regno],
                           sched_reg_live_length[regno]);
                /* Negative values are special; don't overwrite the current
                   reg_live_length value if it is negative.  */
                else if (reg_live_length[regno] < sched_reg_live_length[regno]
                         && reg_live_length[regno] >= 0)
                  fprintf (dump_file,
                           ";; register %d life extended from %d to %d\n",
                           regno, reg_live_length[regno],
                           sched_reg_live_length[regno]);

                if (reg_n_calls_crossed[regno]
                    && ! sched_reg_n_calls_crossed[regno])
                  fprintf (dump_file,
                           ";; register %d no longer crosses calls\n", regno);
                else if (! reg_n_calls_crossed[regno]
                         && sched_reg_n_calls_crossed[regno])
                  fprintf (dump_file,
                           ";; register %d now crosses calls\n", regno);
              }
            /* Negative values are special; don't overwrite the current
               reg_live_length value if it is negative.  */
            if (reg_live_length[regno] >= 0)
              reg_live_length[regno] = sched_reg_live_length[regno];

            reg_n_calls_crossed[regno] = sched_reg_n_calls_crossed[regno];
          }
    }
}
#endif /* INSN_SCHEDULING */