1/* Instruction scheduling pass.
2 Copyright (C) 1992 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com)
4 Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)
5
6This file is part of GNU CC.
7
8GNU CC is free software; you can redistribute it and/or modify
9it under the terms of the GNU General Public License as published by
10the Free Software Foundation; either version 2, or (at your option)
11any later version.
12
13GNU CC is distributed in the hope that it will be useful,
14but WITHOUT ANY WARRANTY; without even the implied warranty of
15MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16GNU General Public License for more details.
17
18You should have received a copy of the GNU General Public License
19along with GNU CC; see the file COPYING. If not, write to
20the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
21
22/* Instruction scheduling pass.
23
24 This pass implements list scheduling within basic blocks. It is
25 run after flow analysis, but before register allocation. The
26 scheduler works as follows:
27
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe: register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
36
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning
39 values to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
41
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we also add it to the ready list. When all insns down
50 to the lowest priority have been scheduled, the critical path of the
51 basic block has been made as short as possible. The remaining insns
52 are then scheduled in remaining slots.
53
54 The following list shows the order in which we want to break ties:
55
56 1. choose insn with lowest conflict cost, ties broken by
57 2. choose insn with the longest path to end of bb, ties broken by
58 3. choose insn that kills the most registers, ties broken by
59 4. choose insn that conflicts with the most ready insns, or finally
60 5. choose insn with lowest UID.
61
62 Memory references complicate matters. Only if we can be certain
63 that memory references are not part of the data dependency graph
64 (via true, anti, or output dependence), can we move operations past
65 memory references. To first approximation, reads can be done
66 independently, while writes introduce dependencies. Better
67 approximations will yield fewer dependencies.
68
69 Dependencies set up by memory references are treated in exactly the
70 same way as other dependencies, by using LOG_LINKS.
71
72 Having optimized the critical path, we may have also unduly
73 extended the lifetimes of some registers. If an operation requires
74 that constants be loaded into registers, it is certainly desirable
75 to load those constants as early as necessary, but no earlier.
76 I.e., it will not do to load up a bunch of registers at the
77 beginning of a basic block only to use them at the end, if they
78 could be loaded later, since this may result in excessive register
79 utilization.
80
81 Note that since branches are never in basic blocks, but only end
82 basic blocks, this pass will not do any branch scheduling. But
83 that is ok, since we can use GNU's delayed branch scheduling
84 pass to take care of this case.
85
86 Also note that no further optimizations based on algebraic identities
87 are performed, so this pass would be a good one to perform instruction
88 splitting, such as breaking up a multiply instruction into shifts
89 and adds where that is profitable.
90
91 Given the memory aliasing analysis that this pass should perform,
92 it should be possible to remove redundant stores to memory, and to
93 load values from registers instead of hitting memory.
94
95 This pass must update information that subsequent passes expect to be
96 correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
97 reg_n_calls_crossed, and reg_live_length. Also, basic_block_head,
98 basic_block_end.
99
100 The information in the line number notes is carefully retained by this
101 pass. All other NOTE insns are grouped in their same relative order at
102 the beginning of basic blocks that have been scheduled. */
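
/* Illustrative sketch (not part of this pass): the list scheduling
   loop described above behaves roughly like the following pseudo-C.
   `ready' is kept sorted by rank_for_schedule, and INSN_REF_COUNT
   counts the as-yet-unscheduled insns that depend on each insn:

       while (n_ready > 0)
         {
           rtx insn = ready[--n_ready];   /* best-ranked ready insn */
           /* Emit INSN at the front of the new chain; scheduling
              proceeds from the last insn of the block backwards.  */
           for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
             if ((INSN_REF_COUNT (XEXP (link, 0)) -= 1) == 0)
               /* All users of this predecessor are now scheduled, so
                  it goes to `ready' (or to `insn_queue', to model the
                  cycles its result needs; see launch_link below).  */
         }

   The real loop (in schedule_block) also updates the register-life
   information that later passes expect.  */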
103\f
104#include <stdio.h>
105#include "config.h"
106#include "rtl.h"
107#include "basic-block.h"
108#include "regs.h"
109#include "hard-reg-set.h"
110#include "flags.h"
111#include "insn-config.h"
112#include "insn-attr.h"
113
114/* Arrays set up by scheduling for the same respective purposes as
115 similar-named arrays set up by flow analysis. We work with these
116 arrays during the scheduling pass so we can compare values against
117 unscheduled code.
118
119 Values of these arrays are copied at the end of this pass into the
120 arrays set up by flow analysis. */
121static short *sched_reg_n_deaths;
122static int *sched_reg_n_calls_crossed;
123static int *sched_reg_live_length;
124
125/* Element N of reg_last_uses is a list of insns that last used (hard
126 or pseudo) register N; element N of reg_last_sets is the last insn
127 that set register N within the current basic block, or zero if there
128 is no such insn. Needed for new registers introduced by splitting insns. */
129static rtx *reg_last_uses;
130static rtx *reg_last_sets;
131
132/* Vector indexed by INSN_UID giving the original ordering of the insns. */
133static int *insn_luid;
134#define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])
135
136/* Vector indexed by INSN_UID giving each instruction a priority. */
137static int *insn_priority;
138#define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])
139
139
140static short *insn_costs;
141#define INSN_COST(INSN) insn_costs[INSN_UID (INSN)]
142
143#define DONE_PRIORITY -1
144#define MAX_PRIORITY 0x7fffffff
145#define TAIL_PRIORITY 0x7ffffffe
146#define LAUNCH_PRIORITY 0x7f000001
147#define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
148#define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)
149
150/* Vector indexed by INSN_UID giving number of insns referring to this insn. */
151static int *insn_ref_count;
152#define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])
153
154/* Vector indexed by INSN_UID giving line-number note in effect for each
155 insn. For line-number notes, this indicates whether the note may be
156 reused. */
157static rtx *line_note;
158#define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])
159
160/* Vector indexed by basic block number giving the starting line-number
161 for each basic block. */
162static rtx *line_note_head;
163
164/* List of important notes we must keep around. This is a pointer to the
165 last element in the list. */
166static rtx note_list;
167
168/* Regsets telling whether a given register is live or dead before the last
169 scheduled insn. Must scan the instructions once before scheduling to
170 determine what registers are live or dead at the end of the block. */
171static regset bb_dead_regs;
172static regset bb_live_regs;
173
174/* Regset telling whether a given register is live after the insn currently
175 being scheduled. Before processing an insn, this is equal to bb_live_regs
176 above. This is used so that we can find registers that are newly born/dead
177 after processing an insn. */
178static regset old_live_regs;
179
180/* The chain of REG_DEAD notes. REG_DEAD notes are removed from all insns
181 during the initial scan and reused later. If there are not exactly as
182 many REG_DEAD notes in the post scheduled code as there were in the
183 prescheduled code then we trigger an abort because this indicates a bug. */
184static rtx dead_notes;
185
186/* Queues, etc. */
187
188/* An instruction is ready to be scheduled when all insns following it
189 have already been scheduled. It is important to ensure that all
190 insns which use its result will not be executed until its result
191 has been computed. We maintain three lists (conceptually):
192
193 (1) a "Ready" list of unscheduled, uncommitted insns
194 (2) a "Scheduled" list of scheduled insns
195 (3) a "Pending" list of insns which could be scheduled, were
196 it not for pipeline stalls.
197
198 Insns move from the "Ready" list to the "Pending" list when
199 all insns following them have been scheduled.
200
201 Insns move from the "Pending" list to the "Scheduled" list
202 when there is sufficient space in the pipeline to prevent
203 stalls between the insn and scheduled insns which use it.
204
205 The "Pending" list acts as a buffer to prevent insns
206 from avalanching.
207
208 The "Ready" list is implemented by the variable `ready'.
209 The "Pending" list are the insns in the LOG_LINKS of ready insns.
210 The "Scheduled" list is the new insn chain built by this pass. */
211
212/* Implement a circular buffer from which instructions are issued. */
213#define Q_SIZE 128
214static rtx insn_queue[Q_SIZE];
215static int q_ptr = 0;
216static int q_size = 0;
217#define NEXT_Q(X) (((X)+1) & (Q_SIZE-1))
218#define NEXT_Q_AFTER(X,C) (((X)+(C)) & (Q_SIZE-1))
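
/* Illustrative note: Q_SIZE must stay a power of two for this masking
   to work.  With Q_SIZE == 128, NEXT_Q (127) == (127+1) & 127 == 0,
   so the buffer wraps around without a division.  */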
219
220/* Forward declarations. */
221static void sched_analyze_2 ();
222static void schedule_block ();
223
224/* Main entry point of this file. */
225void schedule_insns ();
226\f
227#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
228
229/* Vector indexed by N giving the initial (unchanging) value known
230 for pseudo-register N. */
231static rtx *reg_known_value;
232
233/* Indicates number of valid entries in reg_known_value. */
234static int reg_known_value_size;
235
236static rtx
237canon_rtx (x)
238 rtx x;
239{
240 if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER
241 && REGNO (x) < reg_known_value_size)
242 return reg_known_value[REGNO (x)];
243 else if (GET_CODE (x) == PLUS)
244 {
245 rtx x0 = canon_rtx (XEXP (x, 0));
246 rtx x1 = canon_rtx (XEXP (x, 1));
247
248 if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
249 {
250 /* We can tolerate LO_SUMs being offset here; these
251 rtl are used for nothing other than comparisons. */
252 if (GET_CODE (x0) == CONST_INT)
253 return plus_constant_for_output (x1, INTVAL (x0));
254 else if (GET_CODE (x1) == CONST_INT)
255 return plus_constant_for_output (x0, INTVAL (x1));
256 return gen_rtx (PLUS, GET_MODE (x), x0, x1);
257 }
258 }
259 return x;
260}
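
/* Worked example (illustrative; the register number is hypothetical):
   if pseudo 65 is known to hold the address of `x', canon_rtx maps

       (plus:SI (reg:SI 65) (const_int 4))

   to the constant form of (plus (symbol_ref "x") (const_int 4)) via
   plus_constant_for_output, so that two references made through
   different pseudos with the same known value will compare equal in
   rtx_equal_for_memref_p.  */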
261
262/* Set up all info needed to perform alias analysis on memory references. */
263
264void
265init_alias_analysis ()
266{
267 int maxreg = max_reg_num ();
268 rtx insn;
269 rtx note;
270 rtx set;
271
272 reg_known_value_size = maxreg;
273
274 reg_known_value
275 = (rtx *) oballoc ((maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx))
276 - FIRST_PSEUDO_REGISTER;
277 bzero (reg_known_value+FIRST_PSEUDO_REGISTER,
278 (maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx));
279
280 /* Fill in the entries with known constant values. */
281 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
282 if ((set = single_set (insn)) != 0
283 && GET_CODE (SET_DEST (set)) == REG
284 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
285 && (((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
286 && reg_n_sets[REGNO (SET_DEST (set))] == 1)
287 || (note = find_reg_note (insn, REG_EQUIV, 0)) != 0)
288 && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
289 reg_known_value[REGNO (SET_DEST (set))] = XEXP (note, 0);
290
291 /* Fill in the remaining entries. */
292 while (--maxreg >= FIRST_PSEUDO_REGISTER)
293 if (reg_known_value[maxreg] == 0)
294 reg_known_value[maxreg] = regno_reg_rtx[maxreg];
295}
296
297/* Return 1 if X and Y are identical-looking rtx's.
298
299 We use the data in reg_known_value above to see if two registers with
300 different numbers are, in fact, equivalent. */
301
302static int
303rtx_equal_for_memref_p (x, y)
304 rtx x, y;
305{
306 register int i;
307 register int j;
308 register enum rtx_code code;
309 register char *fmt;
310
311 if (x == 0 && y == 0)
312 return 1;
313 if (x == 0 || y == 0)
314 return 0;
315 x = canon_rtx (x);
316 y = canon_rtx (y);
317
318 if (x == y)
319 return 1;
320
321 code = GET_CODE (x);
322 /* Rtx's of different codes cannot be equal. */
323 if (code != GET_CODE (y))
324 return 0;
325
326 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
327 (REG:SI x) and (REG:HI x) are NOT equivalent. */
328
329 if (GET_MODE (x) != GET_MODE (y))
330 return 0;
331
332 /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively. */
333
334 if (code == REG)
335 return REGNO (x) == REGNO (y);
336 if (code == LABEL_REF)
337 return XEXP (x, 0) == XEXP (y, 0);
338 if (code == SYMBOL_REF)
339 return XSTR (x, 0) == XSTR (y, 0);
340
341 /* Compare the elements. If any pair of corresponding elements
342 fail to match, return 0 for the whole thing. */
343
344 fmt = GET_RTX_FORMAT (code);
345 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
346 {
347 switch (fmt[i])
348 {
349 case 'n':
350 case 'i':
351 if (XINT (x, i) != XINT (y, i))
352 return 0;
353 break;
354
355 case 'V':
356 case 'E':
357 /* Two vectors must have the same length. */
358 if (XVECLEN (x, i) != XVECLEN (y, i))
359 return 0;
360
361 /* And the corresponding elements must match. */
362 for (j = 0; j < XVECLEN (x, i); j++)
363 if (rtx_equal_for_memref_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0)
364 return 0;
365 break;
366
367 case 'e':
368 if (rtx_equal_for_memref_p (XEXP (x, i), XEXP (y, i)) == 0)
369 return 0;
370 break;
371
372 case 'S':
373 case 's':
374 if (strcmp (XSTR (x, i), XSTR (y, i)))
375 return 0;
376 break;
377
378 case 'u':
379 /* These are just backpointers, so they don't matter. */
380 break;
381
382 case '0':
383 break;
384
385 /* It is believed that rtx's at this level will never
386 contain anything but integers and other rtx's,
387 except for within LABEL_REFs and SYMBOL_REFs. */
388 default:
389 abort ();
390 }
391 }
392 return 1;
393}
394
395/* Given an rtx X, find a SYMBOL_REF or LABEL_REF within
396 X and return it, or return 0 if none found. */
397
398static rtx
399find_symbolic_term (x)
400 rtx x;
401{
402 register int i;
403 register enum rtx_code code;
404 register char *fmt;
405
406 code = GET_CODE (x);
407 if (code == SYMBOL_REF || code == LABEL_REF)
408 return x;
409 if (GET_RTX_CLASS (code) == 'o')
410 return 0;
411
412 fmt = GET_RTX_FORMAT (code);
413 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
414 {
415 rtx t;
416
417 if (fmt[i] == 'e')
418 {
419 t = find_symbolic_term (XEXP (x, i));
420 if (t != 0)
421 return t;
422 }
423 else if (fmt[i] == 'E')
424 break;
425 }
426 return 0;
427}
428
429/* Return nonzero if X and Y (memory addresses) could reference the
430 same location in memory. C is an offset accumulator. When
431 C is nonzero, we are testing aliases between X and Y + C.
432 XSIZE is the size in bytes of the X reference,
433 similarly YSIZE is the size in bytes for Y.
434
435 If XSIZE or YSIZE is zero, we do not know the amount of memory being
436 referenced (the reference was BLKmode), so make the most pessimistic
437 assumptions.
438
439 We recognize the following cases of non-conflicting memory:
440
441 (1) addresses involving the frame pointer cannot conflict
442 with addresses involving static variables.
443 (2) static variables with different addresses cannot conflict.
444
445 It would be nice to notice that varying addresses cannot conflict with
446 fp if no local variables had their addresses taken, but that's too hard now. */
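
/* A worked example of the overlap test used below (illustrative):
   once both addresses reduce to a common base, C is the offset of Y
   relative to X, and the references conflict iff

       (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0)

   So a 4-byte store at fp+8 tested against a 4-byte load at fp+12
   reduces to c == 4, xsize == 4, and 4 > 4 fails: no conflict.  A
   load at fp+10 instead gives c == 2 < xsize: the bytes overlap.  */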
447
448static int
449memrefs_conflict_p (xsize, x, ysize, y, c)
450 rtx x, y;
451 int xsize, ysize;
452 int c;
453{
454 if (GET_CODE (x) == HIGH)
455 x = XEXP (x, 0);
456 else if (GET_CODE (x) == LO_SUM)
457 x = XEXP (x, 1);
458 else
459 x = canon_rtx (x);
460 if (GET_CODE (y) == HIGH)
461 y = XEXP (y, 0);
462 else if (GET_CODE (y) == LO_SUM)
463 y = XEXP (y, 1);
464 else
465 y = canon_rtx (y);
466
467 if (rtx_equal_for_memref_p (x, y))
468 return (xsize == 0 || ysize == 0 ||
469 (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
470
471 if (y == frame_pointer_rtx || y == stack_pointer_rtx)
472 {
473 rtx t = y;
474 int tsize = ysize;
475 y = x; ysize = xsize;
476 x = t; xsize = tsize;
477 }
478
479 if (x == frame_pointer_rtx || x == stack_pointer_rtx)
480 {
481 rtx y1;
482
483 if (CONSTANT_P (y))
484 return 0;
485
486 if (GET_CODE (y) == PLUS
487 && canon_rtx (XEXP (y, 0)) == x
488 && (y1 = canon_rtx (XEXP (y, 1)))
489 && GET_CODE (y1) == CONST_INT)
490 {
491 c += INTVAL (y1);
492 return (xsize == 0 || ysize == 0
493 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
494 }
495
496 if (GET_CODE (y) == PLUS
497 && (y1 = canon_rtx (XEXP (y, 0)))
498 && CONSTANT_P (y1))
499 return 0;
500
501 return 1;
502 }
503
504 if (GET_CODE (x) == PLUS)
505 {
506 /* The fact that X is canonicalized means that this
507 PLUS rtx is canonicalized. */
508 rtx x0 = XEXP (x, 0);
509 rtx x1 = XEXP (x, 1);
510
511 if (GET_CODE (y) == PLUS)
512 {
513 /* The fact that Y is canonicalized means that this
514 PLUS rtx is canonicalized. */
515 rtx y0 = XEXP (y, 0);
516 rtx y1 = XEXP (y, 1);
517
518 if (rtx_equal_for_memref_p (x1, y1))
519 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
520 if (rtx_equal_for_memref_p (x0, y0))
521 return memrefs_conflict_p (xsize, x1, ysize, y1, c);
522 if (GET_CODE (x1) == CONST_INT)
523 if (GET_CODE (y1) == CONST_INT)
524 return memrefs_conflict_p (xsize, x0, ysize, y0,
525 c - INTVAL (x1) + INTVAL (y1));
526 else
527 return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
528 else if (GET_CODE (y1) == CONST_INT)
529 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
530
531 /* Handle case where we cannot understand iteration operators,
532 but we notice that the base addresses are distinct objects. */
533 x = find_symbolic_term (x);
534 if (x == 0)
535 return 1;
536 y = find_symbolic_term (y);
537 if (y == 0)
538 return 1;
539 return rtx_equal_for_memref_p (x, y);
540 }
541 else if (GET_CODE (x1) == CONST_INT)
542 return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
543 }
544 else if (GET_CODE (y) == PLUS)
545 {
546 /* The fact that Y is canonicalized means that this
547 PLUS rtx is canonicalized. */
548 rtx y0 = XEXP (y, 0);
549 rtx y1 = XEXP (y, 1);
550
551 if (GET_CODE (y1) == CONST_INT)
552 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
553 else
554 return 1;
555 }
556
557 if (GET_CODE (x) == GET_CODE (y))
558 switch (GET_CODE (x))
559 {
560 case MULT:
561 {
562 /* Handle cases where we expect the second operands to be the
563 same, and check only whether the first operand would conflict
564 or not. */
565 rtx x0, y0;
566 rtx x1 = canon_rtx (XEXP (x, 1));
567 rtx y1 = canon_rtx (XEXP (y, 1));
568 if (! rtx_equal_for_memref_p (x1, y1))
569 return 1;
570 x0 = canon_rtx (XEXP (x, 0));
571 y0 = canon_rtx (XEXP (y, 0));
572 if (rtx_equal_for_memref_p (x0, y0))
573 return (xsize == 0 || ysize == 0
574 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
575
576 /* Can't properly adjust our sizes. */
577 if (GET_CODE (x1) != CONST_INT)
578 return 1;
579 xsize /= INTVAL (x1);
580 ysize /= INTVAL (x1);
581 c /= INTVAL (x1);
582 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
583 }
584 }
585
586 if (CONSTANT_P (x))
587 {
588 if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
589 {
590 c += (INTVAL (y) - INTVAL (x));
591 return (xsize == 0 || ysize == 0
592 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
593 }
594
595 if (GET_CODE (x) == CONST)
596 {
597 if (GET_CODE (y) == CONST)
598 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
599 ysize, canon_rtx (XEXP (y, 0)), c);
600 else
601 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
602 ysize, y, c);
603 }
604 if (GET_CODE (y) == CONST)
605 return memrefs_conflict_p (xsize, x, ysize,
606 canon_rtx (XEXP (y, 0)), c);
607
608 if (CONSTANT_P (y))
609 return (rtx_equal_for_memref_p (x, y)
610 && (xsize == 0 || ysize == 0
611 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)));
612
613 return 1;
614 }
615 return 1;
616}
617
618/* Functions to compute memory dependencies.
619
620 Since we process the insns in execution order, we can build tables
621 to keep track of what registers are fixed (and not aliased), what registers
622 are varying in known ways, and what registers are varying in unknown
623 ways.
624
625 If both memory references are volatile, then there must always be a
626 dependence between the two references, since their order can not be
627 changed. A volatile and non-volatile reference can be interchanged
628 though.
629
630 A MEM_IN_STRUCT reference at a varying address can never conflict with a
631 non-MEM_IN_STRUCT reference at a fixed address. */
632
633/* Read dependence: X is read after read in MEM takes place. There can
634 only be a dependence here if both reads are volatile. */
635
636int
637read_dependence (mem, x)
638 rtx mem;
639 rtx x;
640{
641 return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
642}
643
644/* True dependence: X is read after store in MEM takes place. */
645
646int
647true_dependence (mem, x)
648 rtx mem;
649 rtx x;
650{
651 if (RTX_UNCHANGING_P (x))
652 return 0;
653
654 return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
655 || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
656 SIZE_FOR_MODE (x), XEXP (x, 0), 0)
657 && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
658 && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
659 && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
660 && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
661}
662
663/* Anti dependence: X is written after read in MEM takes place. */
664
665int
666anti_dependence (mem, x)
667 rtx mem;
668 rtx x;
669{
670 if (RTX_UNCHANGING_P (mem))
671 return 0;
672
673 return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
674 || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
675 SIZE_FOR_MODE (x), XEXP (x, 0), 0)
676 && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
677 && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
678 && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
679 && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
680}
681
682/* Output dependence: X is written after store in MEM takes place. */
683
684int
685output_dependence (mem, x)
686 rtx mem;
687 rtx x;
688{
689 return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
690 || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
691 SIZE_FOR_MODE (x), XEXP (x, 0), 0)
692 && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
693 && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
694 && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
695 && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
696}
697\f
698/* Helper functions for instruction scheduling. */
699
700/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
701 LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the type
702 of dependence that this link represents. */
703
704void
705add_dependence (insn, elem, dep_type)
706 rtx insn;
707 rtx elem;
708 enum reg_note dep_type;
709{
710 rtx link, next;
711
712 /* Don't depend an insn on itself. */
713 if (insn == elem)
714 return;
715
716 /* If elem is part of a sequence that must be scheduled together, then
717 make the dependence point to the last insn of the sequence.
718 When HAVE_cc0, it is possible for NOTEs to exist between users and
719 setters of the condition codes, so we must skip past notes here.
720 Otherwise, NOTEs are impossible here. */
721
722 next = NEXT_INSN (elem);
723
724#ifdef HAVE_cc0
725 while (next && GET_CODE (next) == NOTE)
726 next = NEXT_INSN (next);
727#endif
728
729 if (next && SCHED_GROUP_P (next))
730 {
731 /* Notes will never intervene here though, so don't bother checking
732 for them. */
733 while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next)))
734 next = NEXT_INSN (next);
735
736 /* Again, don't depend an insn on itself. */
737 if (insn == next)
738 return;
739
740 /* Make the dependence to NEXT, the last insn of the group, instead
741 of the original ELEM. */
742 elem = next;
743 }
744
745 /* Check that we don't already have this dependence. */
746 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
747 if (XEXP (link, 0) == elem)
748 {
749 /* If this is a more restrictive type of dependence than the existing
750 one, then change the existing dependence to this type. */
751 if ((int) dep_type < (int) REG_NOTE_KIND (link))
752 PUT_REG_NOTE_KIND (link, dep_type);
753 return;
754 }
755 /* Might want to check one level of transitivity to save conses. */
756
757 link = rtx_alloc (INSN_LIST);
758 /* Insn dependency, not data dependency. */
759 PUT_REG_NOTE_KIND (link, dep_type);
760 XEXP (link, 0) = elem;
761 XEXP (link, 1) = LOG_LINKS (insn);
762 LOG_LINKS (insn) = link;
763}
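
/* Usage sketch (illustrative): when the analysis below finds that
   INSN reads a register last set by PREV, it records

       add_dependence (insn, prev, 0);            /* data dependence */

   and for a write-after-read hazard

       add_dependence (insn, prev, REG_DEP_ANTI);

   Re-adding an existing link only tightens its kind, since kind 0
   (plain data dependence) is the most restrictive.  */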
764
765/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
766 of INSN. Abort if not found. */
767void
768remove_dependence (insn, elem)
769 rtx insn;
770 rtx elem;
771{
772 rtx prev, link;
773 int found = 0;
774
775 for (prev = 0, link = LOG_LINKS (insn); link;
776 prev = link, link = XEXP (link, 1))
777 {
778 if (XEXP (link, 0) == elem)
779 {
780 if (prev)
781 XEXP (prev, 1) = XEXP (link, 1);
782 else
783 LOG_LINKS (insn) = XEXP (link, 1);
784 found = 1;
785 }
786 }
787
788 if (! found)
789 abort ();
790 return;
791}
792\f
793#ifndef INSN_SCHEDULING
794void schedule_insns () {}
795#else
796#ifndef __GNUC__
797#define __inline
798#endif
799
800/* Computation of memory dependencies. */
801
802/* The *_insns and *_mems are paired lists. Each pending memory operation
803 will have a pointer to the MEM rtx on one list and a pointer to the
804 containing insn on the other list in the same place in the list. */
805
806/* We can't use add_dependence like the old code did, because a single insn
807 may have multiple memory accesses, and hence needs to be on the list
808 once for each memory access. Add_dependence won't let you add an insn
809 to a list more than once. */
810
811/* An INSN_LIST containing all insns with pending read operations. */
812static rtx pending_read_insns;
813
814/* An EXPR_LIST containing all MEM rtx's which are pending reads. */
815static rtx pending_read_mems;
816
817/* An INSN_LIST containing all insns with pending write operations. */
818static rtx pending_write_insns;
819
820/* An EXPR_LIST containing all MEM rtx's which are pending writes. */
821static rtx pending_write_mems;
822
823/* Indicates the combined length of the two pending lists. We must prevent
824 these lists from ever growing too large since the number of dependencies
825 produced, and hence the execution time, grows quadratically as
826 a function of the length of these pending lists. */
827
828static int pending_lists_length;
829
830/* An INSN_LIST containing all INSN_LISTs allocated but currently unused. */
831
832static rtx unused_insn_list;
833
834/* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused. */
835
836static rtx unused_expr_list;
837
838/* The last insn upon which all memory references must depend.
839 This is an insn which flushed the pending lists, creating a dependency
840 between it and all previously pending memory references. This creates
841 a barrier (or a checkpoint) which no memory reference is allowed to cross.
842
843 This includes all non constant CALL_INSNs. When we do interprocedural
844 alias analysis, this restriction can be relaxed.
845 This may also be an INSN that writes memory if the pending lists grow
846 too large. */
847
848static rtx last_pending_memory_flush;
849
850/* The last function call we have seen. All hard regs, and, of course,
851 the last function call, must depend on this. */
852
853static rtx last_function_call;
854
855/* The LOG_LINKS field of this is a list of insns which use a pseudo register
856 that does not already cross a call. We create dependencies between each
857 of those insns and the next call insn, to ensure that they won't cross a call
858 after scheduling is done. */
859
860static rtx sched_before_next_call;
861
862/* Pointer to the last instruction scheduled. Used by rank_for_schedule,
863 so that insns independent of the last scheduled insn will be preferred
864 over dependent instructions. */
865
866static rtx last_scheduled_insn;
867
868/* Process an insn's memory dependencies. There are four kinds of
869 dependencies:
870
871 (0) read dependence: read follows read
872 (1) true dependence: read follows write
873 (2) anti dependence: write follows read
874 (3) output dependence: write follows write
875
876 We are careful to build only dependencies which actually exist, and
877 use transitivity to avoid building too many links. */
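
/* A concrete example of the four kinds (illustrative):

       1: mem[p] = r1
       2: r2 = mem[p]      true dependence on 1 (read after write)
       3: r3 = mem[p]      read dependence on 2, only if both reads
                           are volatile
       4: mem[p] = r4      output dependence on 1 (write after write);
                           anti dependence on 2 and 3 (write after read)  */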
878\f
879/* Return the INSN_LIST containing INSN in LIST, or NULL
880 if LIST does not contain INSN. */
881
882__inline static rtx
883find_insn_list (insn, list)
884 rtx insn;
885 rtx list;
886{
887 while (list)
888 {
889 if (XEXP (list, 0) == insn)
890 return list;
891 list = XEXP (list, 1);
892 }
893 return 0;
894}
895
896/* Compute cost of executing INSN. This is the number of virtual
897 cycles taken between instruction issue and instruction results. */
898
899__inline static int
900insn_cost (insn)
901 rtx insn;
902{
903 register int cost;
904
905 if (INSN_COST (insn))
906 return INSN_COST (insn);
907
908 recog_memoized (insn);
909
910 /* A USE insn, or something else we don't need to understand.
911 We can't pass these directly to result_ready_cost because it will trigger
912 a fatal error for unrecognizable insns. */
913 if (INSN_CODE (insn) < 0)
914 {
915 INSN_COST (insn) = 1;
916 return 1;
917 }
918 else
919 {
920 cost = result_ready_cost (insn);
921
922 if (cost < 1)
923 cost = 1;
924
925 INSN_COST (insn) = cost;
926 return cost;
927 }
928}
929
930/* Compute the priority number for INSN. */
931
932static int
933priority (insn)
934 rtx insn;
935{
936 if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
937 {
938 int prev_priority;
939 int max_priority;
940 int this_priority = INSN_PRIORITY (insn);
941 rtx prev;
942
943 if (this_priority > 0)
944 return this_priority;
945
946 max_priority = 1;
947
948 /* Nonzero if these insns must be scheduled together. */
949 if (SCHED_GROUP_P (insn))
950 {
951 prev = insn;
952 while (SCHED_GROUP_P (prev))
953 {
954 prev = PREV_INSN (prev);
955 INSN_REF_COUNT (prev) += 1;
956 }
957 }
958
959 for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
960 {
961 rtx x = XEXP (prev, 0);
962
963 /* A dependence pointing to a note is always obsolete, because
964 sched_analyze_insn will have created any necessary new dependences
965 which replace it. Notes can be created when instructions are
966 deleted by insn splitting, or by register allocation. */
967 if (GET_CODE (x) == NOTE)
968 {
969 remove_dependence (insn, x);
970 continue;
971 }
972
973 /* This priority calculation was chosen because it results in the
974 least instruction movement, and does not hurt the performance
975 of the resulting code compared to the old algorithm.
976 This makes the sched algorithm more stable, which results
977 in better code, because there is less register pressure,
978 cross jumping is more likely to work, and debugging is easier.
979
980 When all instructions have a latency of 1, there is no need to
981 move any instructions. Subtracting one here ensures that in such
982 cases all instructions will end up with a priority of one, and
983 hence no scheduling will be done.
984
985 The original code did not subtract the one, and added the
986 insn_cost of the current instruction to its priority (e.g.
987 move the insn_cost call down to the end). */
988
989 if (REG_NOTE_KIND (prev) == 0)
990 /* Data dependence. */
991 prev_priority = priority (x) + insn_cost (x) - 1;
992 else
993 /* Anti or output dependence. Don't add the latency of this
994 insn's result, because it isn't being used. */
995 prev_priority = priority (x);
996
997 if (prev_priority > max_priority)
998 max_priority = prev_priority;
999 INSN_REF_COUNT (x) += 1;
1000 }
1001
1002 INSN_PRIORITY (insn) = max_priority;
1003 return INSN_PRIORITY (insn);
1004 }
1005 return 0;
1006}
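
/* Worked example of the recurrence above (illustrative): for a chain
   in which C depends on B and B depends on A, with insn_cost (A) == 3
   and unit cost elsewhere,

       priority (A) = 1
       priority (B) = priority (A) + insn_cost (A) - 1 = 3
       priority (C) = priority (B) + insn_cost (B) - 1 = 3

   With unit costs throughout, every insn gets priority 1 and, as the
   comment above notes, no reordering is done.  */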
1007\f
1008/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
1009 them to the unused_*_list variables, so that they can be reused. */
1010
1011static void
1012free_pending_lists ()
1013{
1014 register rtx link, prev_link;
1015
1016 if (pending_read_insns)
1017 {
1018 prev_link = pending_read_insns;
1019 link = XEXP (prev_link, 1);
1020
1021 while (link)
1022 {
1023 prev_link = link;
1024 link = XEXP (link, 1);
1025 }
1026
1027 XEXP (prev_link, 1) = unused_insn_list;
1028 unused_insn_list = pending_read_insns;
1029 pending_read_insns = 0;
1030 }
1031
1032 if (pending_write_insns)
1033 {
1034 prev_link = pending_write_insns;
1035 link = XEXP (prev_link, 1);
1036
1037 while (link)
1038 {
1039 prev_link = link;
1040 link = XEXP (link, 1);
1041 }
1042
1043 XEXP (prev_link, 1) = unused_insn_list;
1044 unused_insn_list = pending_write_insns;
1045 pending_write_insns = 0;
1046 }
1047
1048 if (pending_read_mems)
1049 {
1050 prev_link = pending_read_mems;
1051 link = XEXP (prev_link, 1);
1052
1053 while (link)
1054 {
1055 prev_link = link;
1056 link = XEXP (link, 1);
1057 }
1058
1059 XEXP (prev_link, 1) = unused_expr_list;
1060 unused_expr_list = pending_read_mems;
1061 pending_read_mems = 0;
1062 }
1063
1064 if (pending_write_mems)
1065 {
1066 prev_link = pending_write_mems;
1067 link = XEXP (prev_link, 1);
1068
1069 while (link)
1070 {
1071 prev_link = link;
1072 link = XEXP (link, 1);
1073 }
1074
1075 XEXP (prev_link, 1) = unused_expr_list;
1076 unused_expr_list = pending_write_mems;
1077 pending_write_mems = 0;
1078 }
1079}
1080
1081/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1082 The MEM is a memory reference contained within INSN, which we are saving
1083 so that we can do memory aliasing on it. */
1084
1085static void
1086add_insn_mem_dependence (insn_list, mem_list, insn, mem)
1087 rtx *insn_list, *mem_list, insn, mem;
1088{
1089 register rtx link;
1090
1091 if (unused_insn_list)
1092 {
1093 link = unused_insn_list;
1094 unused_insn_list = XEXP (link, 1);
1095 }
1096 else
1097 link = rtx_alloc (INSN_LIST);
1098 XEXP (link, 0) = insn;
1099 XEXP (link, 1) = *insn_list;
1100 *insn_list = link;
1101
1102 if (unused_expr_list)
1103 {
1104 link = unused_expr_list;
1105 unused_expr_list = XEXP (link, 1);
1106 }
1107 else
1108 link = rtx_alloc (EXPR_LIST);
1109 XEXP (link, 0) = mem;
1110 XEXP (link, 1) = *mem_list;
1111 *mem_list = link;
1112
1113 pending_lists_length++;
1114}
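
/* Sketch of the invariant maintained here (illustrative): after

       add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
                                insn, x);

   the two lists stay aligned element for element, so walking them in
   lock step (as sched_analyze_1 and sched_analyze_2 do) pairs each
   pending MEM with the insn that contains it.  */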
1115\f
1116/* Make a dependency between every memory reference on the pending lists
1117 and INSN, thus flushing the pending lists. */
1118
1119static void
1120flush_pending_lists (insn)
1121 rtx insn;
1122{
1123 rtx link;
1124
1125 while (pending_read_insns)
1126 {
1127 add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);
1128
1129 link = pending_read_insns;
1130 pending_read_insns = XEXP (pending_read_insns, 1);
1131 XEXP (link, 1) = unused_insn_list;
1132 unused_insn_list = link;
1133
1134 link = pending_read_mems;
1135 pending_read_mems = XEXP (pending_read_mems, 1);
1136 XEXP (link, 1) = unused_expr_list;
1137 unused_expr_list = link;
1138 }
1139 while (pending_write_insns)
1140 {
1141 add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);
1142
1143 link = pending_write_insns;
1144 pending_write_insns = XEXP (pending_write_insns, 1);
1145 XEXP (link, 1) = unused_insn_list;
1146 unused_insn_list = link;
1147
1148 link = pending_write_mems;
1149 pending_write_mems = XEXP (pending_write_mems, 1);
1150 XEXP (link, 1) = unused_expr_list;
1151 unused_expr_list = link;
1152 }
1153 pending_lists_length = 0;
1154
1155 if (last_pending_memory_flush)
1156 add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
1157
1158 last_pending_memory_flush = insn;
1159}
1160
1161/* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
1162 by the write to the destination of X, and reads of everything mentioned. */
1163
1164static void
1165sched_analyze_1 (x, insn)
1166 rtx x;
1167 rtx insn;
1168{
1169 register int regno;
1170 register rtx dest = SET_DEST (x);
1171
1172 if (dest == 0)
1173 return;
1174
1175 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
1176 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
1177 {
1178 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
1179 {
1180 /* The second and third arguments are values read by this insn. */
1181 sched_analyze_2 (XEXP (dest, 1), insn);
1182 sched_analyze_2 (XEXP (dest, 2), insn);
1183 }
1184 dest = SUBREG_REG (dest);
1185 }
1186
1187 if (GET_CODE (dest) == REG)
1188 {
1189 register int offset, bit, i;
1190
1191 regno = REGNO (dest);
1192
1193 /* A hard reg in a wide mode may really be multiple registers.
1194 If so, mark all of them just like the first. */
1195 if (regno < FIRST_PSEUDO_REGISTER)
1196 {
1197 i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
1198 while (--i >= 0)
1199 {
1200 rtx u;
1201
1202 for (u = reg_last_uses[regno+i]; u; u = XEXP (u, 1))
1203 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1204 reg_last_uses[regno + i] = 0;
1205 if (reg_last_sets[regno + i])
1206 add_dependence (insn, reg_last_sets[regno + i],
1207 REG_DEP_OUTPUT);
1208 reg_last_sets[regno + i] = insn;
1209 if ((call_used_regs[regno + i] || global_regs[regno + i])
1210 && last_function_call)
1211 /* Function calls clobber all call_used regs. */
1212 add_dependence (insn, last_function_call, REG_DEP_ANTI);
1213 }
1214 }
1215 else
1216 {
1217 rtx u;
1218
1219 for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
1220 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1221 reg_last_uses[regno] = 0;
1222 if (reg_last_sets[regno])
1223 add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
1224 reg_last_sets[regno] = insn;
1225
1226 /* Don't let it cross a call after scheduling if it doesn't
1227 already cross one. */
1228 if (reg_n_calls_crossed[regno] == 0 && last_function_call)
1229 add_dependence (insn, last_function_call, REG_DEP_ANTI);
1230 }
1231 }
1232 else if (GET_CODE (dest) == MEM)
1233 {
1234 /* Writing memory. */
1235
1236 if (pending_lists_length > 32)
1237 {
1238 /* Flush all pending reads and writes to prevent the pending lists
1239 from getting any larger. Insn scheduling runs too slowly when
1240 these lists get long. The number 32 was chosen because it
1241 seems like a reasonable number. When compiling GCC with itself,
1242 this flush occurs 8 times for sparc, and 10 times for m88k using
1243 the number 32. */
1244 flush_pending_lists (insn);
1245 }
1246 else
1247 {
1248 rtx pending, pending_mem;
1249
1250 pending = pending_read_insns;
1251 pending_mem = pending_read_mems;
1252 while (pending)
1253 {
1254 /* If a dependency already exists, don't create a new one. */
1255 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1256 if (anti_dependence (XEXP (pending_mem, 0), dest, insn))
1257 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
1258
1259 pending = XEXP (pending, 1);
1260 pending_mem = XEXP (pending_mem, 1);
1261 }
1262
1263 pending = pending_write_insns;
1264 pending_mem = pending_write_mems;
1265 while (pending)
1266 {
1267 /* If a dependency already exists, don't create a new one. */
1268 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1269 if (output_dependence (XEXP (pending_mem, 0), dest))
1270 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1271
1272 pending = XEXP (pending, 1);
1273 pending_mem = XEXP (pending_mem, 1);
1274 }
1275
1276 if (last_pending_memory_flush)
1277 add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
1278
1279 add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
1280 insn, dest);
1281 }
1282 sched_analyze_2 (XEXP (dest, 0), insn);
1283 }
1284
1285 /* Analyze reads. */
1286 if (GET_CODE (x) == SET)
1287 sched_analyze_2 (SET_SRC (x), insn);
1288}
1289
1290/* Analyze the uses of memory and registers in rtx X in INSN. */
1291
1292static void
1293sched_analyze_2 (x, insn)
1294 rtx x;
1295 rtx insn;
1296{
1297 register int i;
1298 register int j;
1299 register enum rtx_code code;
1300 register char *fmt;
1301
1302 if (x == 0)
1303 return;
1304
1305 code = GET_CODE (x);
1306
1307 switch (code)
1308 {
1309 case CONST_INT:
1310 case CONST_DOUBLE:
1311 case SYMBOL_REF:
1312 case CONST:
1313 case LABEL_REF:
1314 /* Ignore constants. Note that we must handle CONST_DOUBLE here
1315 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
1316 this does not mean that this insn is using cc0. */
1317 return;
1318
1319#ifdef HAVE_cc0
1320 case CC0:
1321 {
1322 rtx link, prev;
1323
1324 /* There may be a note before this insn now, but all notes will
1325 be removed before we actually try to schedule the insns, so
1326 it won't cause a problem later. We must avoid it here though. */
1327
1328 /* User of CC0 depends on immediately preceding insn. */
1329 SCHED_GROUP_P (insn) = 1;
1330
1331 /* Make a copy of all dependencies on the immediately previous insn,
1332 and add to this insn. This is so that all the dependencies will
1333 apply to the group. */
1334
1335 prev = PREV_INSN (insn);
1336 while (GET_CODE (prev) == NOTE)
1337 prev = PREV_INSN (prev);
1338
1339 for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
1340 add_dependence (insn, XEXP (link, 0), GET_MODE (link));
1341
1342 return;
1343 }
1344#endif
1345
1346 case REG:
1347 {
1348 int regno = REGNO (x);
1349 if (regno < FIRST_PSEUDO_REGISTER)
1350 {
1351 int i;
1352
1353 i = HARD_REGNO_NREGS (regno, GET_MODE (x));
1354 while (--i >= 0)
1355 {
1356 reg_last_uses[regno + i]
1357 = gen_rtx (INSN_LIST, VOIDmode,
1358 insn, reg_last_uses[regno + i]);
1359 if (reg_last_sets[regno + i])
1360 add_dependence (insn, reg_last_sets[regno + i], 0);
1361 if ((call_used_regs[regno + i] || global_regs[regno + i])
1362 && last_function_call)
1363 /* Function calls clobber all call_used regs. */
1364 add_dependence (insn, last_function_call, REG_DEP_ANTI);
1365 }
1366 }
1367 else
1368 {
1369 reg_last_uses[regno]
1370 = gen_rtx (INSN_LIST, VOIDmode, insn, reg_last_uses[regno]);
1371 if (reg_last_sets[regno])
1372 add_dependence (insn, reg_last_sets[regno], 0);
1373
1374 /* If the register does not already cross any calls, then add this
1375 insn to the sched_before_next_call list so that it will still
1376 not cross calls after scheduling. */
1377 if (reg_n_calls_crossed[regno] == 0)
1378 add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
1379 }
1380 return;
1381 }
1382
1383 case MEM:
1384 {
1385 /* Reading memory. */
1386
1387 /* Don't create a dependence for memory references which are known to
1388 be unchanging, such as constant pool accesses. These will never
1389 conflict with any other memory access. */
1390 if (RTX_UNCHANGING_P (x) == 0)
1391 {
1392 rtx pending, pending_mem;
1393
1394 pending = pending_read_insns;
1395 pending_mem = pending_read_mems;
1396 while (pending)
1397 {
1398 /* If a dependency already exists, don't create a new one. */
1399 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1400 if (read_dependence (XEXP (pending_mem, 0), x))
1401 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
1402
1403 pending = XEXP (pending, 1);
1404 pending_mem = XEXP (pending_mem, 1);
1405 }
1406
1407 pending = pending_write_insns;
1408 pending_mem = pending_write_mems;
1409 while (pending)
1410 {
1411 /* If a dependency already exists, don't create a new one. */
1412 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1413 if (true_dependence (XEXP (pending_mem, 0), x))
1414 add_dependence (insn, XEXP (pending, 0), 0);
1415
1416 pending = XEXP (pending, 1);
1417 pending_mem = XEXP (pending_mem, 1);
1418 }
1419 if (last_pending_memory_flush)
1420 add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
1421
1422 /* Always add these dependencies to pending_reads, since
1423 this insn may be followed by a write. */
1424 add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
1425 insn, x);
1426 }
1427 /* Take advantage of tail recursion here. */
1428 sched_analyze_2 (XEXP (x, 0), insn);
1429 return;
1430 }
1431
1432 case ASM_OPERANDS:
1433 case ASM_INPUT:
1434 case UNSPEC_VOLATILE:
1435 case TRAP_IF:
1436 {
1437 rtx u;
1438
1439 /* Traditional and volatile asm instructions must be considered to use
1440 and clobber all hard registers and all of memory. So must
1441 TRAP_IF and UNSPEC_VOLATILE operations. */
1442 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
1443 {
1444 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1445 {
1446 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1447 if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
1448 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1449 reg_last_uses[i] = 0;
1450 if (reg_last_sets[i]
1451 && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
1452 add_dependence (insn, reg_last_sets[i], 0);
1453 reg_last_sets[i] = insn;
1454 }
1455
1456 flush_pending_lists (insn);
1457 }
1458
1459 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
1460 We cannot just fall through here since then we would be confused
1461 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
1462 a traditional asm, unlike its usual usage. */
1463
1464 if (code == ASM_OPERANDS)
1465 {
1466 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
1467 sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
1468 return;
1469 }
1470 break;
1471 }
1472
1473 case PRE_DEC:
1474 case POST_DEC:
1475 case PRE_INC:
1476 case POST_INC:
1477 /* These read and modify the result; just consider them writes. */
1478 sched_analyze_1 (x, insn);
1479 return;
1480 }
1481
1482 /* Other cases: walk the insn. */
1483 fmt = GET_RTX_FORMAT (code);
1484 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1485 {
1486 if (fmt[i] == 'e')
1487 sched_analyze_2 (XEXP (x, i), insn);
1488 else if (fmt[i] == 'E')
1489 for (j = 0; j < XVECLEN (x, i); j++)
1490 sched_analyze_2 (XVECEXP (x, i, j), insn);
1491 }
1492}
1493
1494/* Analyze an INSN with pattern X to find all dependencies. */
1495
1496static void
1497sched_analyze_insn (x, insn)
1498 rtx x, insn;
1499{
1500 register RTX_CODE code = GET_CODE (x);
1501 rtx link;
1502
1503 if (code == SET || code == CLOBBER)
1504 sched_analyze_1 (x, insn);
1505 else if (code == PARALLEL)
1506 {
1507 register int i;
1508 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1509 {
1510 code = GET_CODE (XVECEXP (x, 0, i));
1511 if (code == SET || code == CLOBBER)
1512 sched_analyze_1 (XVECEXP (x, 0, i), insn);
1513 else
1514 sched_analyze_2 (XVECEXP (x, 0, i), insn);
1515 }
1516 }
1517 else
1518 sched_analyze_2 (x, insn);
1519
1520 /* Handle function calls. */
1521 if (GET_CODE (insn) == CALL_INSN)
1522 {
1523 rtx dep_insn;
1524 rtx prev_dep_insn;
1525
1526 /* When scheduling instructions, we make sure calls don't lose their
1527 accompanying USE insns by making them depend on one another in order. */
1528
1529 prev_dep_insn = insn;
1530 dep_insn = PREV_INSN (insn);
1531 while (GET_CODE (dep_insn) == INSN
1532 && GET_CODE (PATTERN (dep_insn)) == USE)
1533 {
1534 SCHED_GROUP_P (prev_dep_insn) = 1;
1535
1536 /* Make a copy of all dependencies on dep_insn, and add to insn.
1537 This is so that all of the dependencies will apply to the
1538 group. */
1539
1540 for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
1541 add_dependence (insn, XEXP (link, 0), GET_MODE (link));
1542
1543 prev_dep_insn = dep_insn;
1544 dep_insn = PREV_INSN (dep_insn);
1545 }
1546 }
1547}
1548
1549/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1550 for every dependency. */
1551
1552static int
1553sched_analyze (head, tail)
1554 rtx head, tail;
1555{
1556 register rtx insn;
1557 register int n_insns = 0;
1558 register rtx u;
1559 register int luid = 0;
1560
1561 for (insn = head; ; insn = NEXT_INSN (insn))
1562 {
1563 INSN_LUID (insn) = luid++;
1564
1565 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
1566 {
1567 sched_analyze_insn (PATTERN (insn), insn);
1568 n_insns += 1;
1569 }
1570 else if (GET_CODE (insn) == CALL_INSN)
1571 {
1572 rtx dest = 0;
1573 rtx x;
1574 register int i;
1575
1576 /* Any instruction using a hard register which may get clobbered
1577 by a call needs to be marked as dependent on this call.
1578 This prevents a use of a hard return reg from being moved
1579 past a void call (i.e. it does not explicitly set the hard
1580 return reg). */
1581
1582 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1583 if (call_used_regs[i] || global_regs[i])
1584 {
1585 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1586 if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
1587 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1588 reg_last_uses[i] = 0;
1589 if (reg_last_sets[i]
1590 && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
1591 add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
1592 reg_last_sets[i] = insn;
1593 /* Insn, being a CALL_INSN, magically depends on
1594 `last_function_call' already. */
1595 }
1596
1597 /* For each insn which shouldn't cross a call, add a dependence
1598 between that insn and this call insn. */
1599 x = LOG_LINKS (sched_before_next_call);
1600 while (x)
1601 {
1602 add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
1603 x = XEXP (x, 1);
1604 }
1605 LOG_LINKS (sched_before_next_call) = 0;
1606
1607 sched_analyze_insn (PATTERN (insn), insn);
1608
1609 /* We don't need to flush memory for a function call which does
1610 not involve memory. */
1611 if (! CONST_CALL_P (insn))
1612 {
1613 /* In the absence of interprocedural alias analysis,
1614 we must flush all pending reads and writes, and
1615 start new dependencies starting from here. */
1616 flush_pending_lists (insn);
1617 }
1618
1619 /* Depend this function call (actually, the user of this
1620 function call) on all hard register clobberage. */
1621 last_function_call = insn;
1622 n_insns += 1;
1623 }
1624
1625 if (insn == tail)
1626 return n_insns;
1627 }
1628}
1629\f
1630/* Called when we see a set of a register. If death is true, then we are
1631 scanning backwards. Mark that register as unborn. If nobody says
1632 otherwise, that is how things will remain. If death is false, then we
1633 are scanning forwards. Mark that register as being born. */
1634
1635static void
1636sched_note_set (b, x, death)
1637 int b;
1638 rtx x;
1639 int death;
1640{
1641 register int regno, j;
1642 register rtx reg = SET_DEST (x);
1643 int subreg_p = 0;
1644
1645 if (reg == 0)
1646 return;
1647
1648 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
1649 || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
1650 {
1651 /* Must treat modification of just one hardware register of a multi-reg
1652 value or just a byte field of a register exactly the same way that
1653 mark_set_1 in flow.c does. */
1654 if (GET_CODE (reg) == ZERO_EXTRACT
1655 || GET_CODE (reg) == SIGN_EXTRACT
1656 || (GET_CODE (reg) == SUBREG
1657 && REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg)))
1658 subreg_p = 1;
1659
1660 reg = SUBREG_REG (reg);
1661 }
1662
1663 if (GET_CODE (reg) != REG)
1664 return;
1665
1666 /* Global registers are always live, so the code below does not apply
1667 to them. */
1668
1669 regno = REGNO (reg);
1670 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
1671 {
1672 register int offset = regno / REGSET_ELT_BITS;
1673 register int bit = 1 << (regno % REGSET_ELT_BITS);
1674
1675 if (death)
1676 {
1677 /* If we only set part of the register, then this set does not
1678 kill it. */
1679 if (subreg_p)
1680 return;
1681
1682 /* Try killing this register. */
1683 if (regno < FIRST_PSEUDO_REGISTER)
1684 {
1685 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1686 while (--j >= 0)
1687 {
1688 offset = (regno + j) / REGSET_ELT_BITS;
1689 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
1690
1691 bb_live_regs[offset] &= ~bit;
1692 bb_dead_regs[offset] |= bit;
1693 }
1694 }
1695 else
1696 {
1697 bb_live_regs[offset] &= ~bit;
1698 bb_dead_regs[offset] |= bit;
1699 }
1700 }
1701 else
1702 {
1703 /* Make the register live again. */
1704 if (regno < FIRST_PSEUDO_REGISTER)
1705 {
1706 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1707 while (--j >= 0)
1708 {
1709 offset = (regno + j) / REGSET_ELT_BITS;
1710 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
1711
1712 bb_live_regs[offset] |= bit;
1713 bb_dead_regs[offset] &= ~bit;
1714 }
1715 }
1716 else
1717 {
1718 bb_live_regs[offset] |= bit;
1719 bb_dead_regs[offset] &= ~bit;
1720 }
1721 }
1722 }
1723}
1724\f
1725/* Macros and functions for keeping the priority queue sorted, and
1726 dealing with queueing and unqueueing of instructions. */
1727
1728#define SCHED_SORT(READY, NEW_READY, OLD_READY) \
1729 do { if ((NEW_READY) - (OLD_READY) == 1) \
1730 swap_sort (READY, NEW_READY); \
1731 else if ((NEW_READY) - (OLD_READY) > 1) \
1732 qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); } \
1733 while (0)
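
/* Usage note (illustrative): a caller that has grown the ready list
   from n_ready to new_ready entries re-establishes the sort with

       SCHED_SORT (ready, new_ready, n_ready);

   so a single new arrival is sifted into place by swap_sort, while
   several fall back to a full qsort with rank_for_schedule.  */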
1734
1735/* Returns a positive value if y is preferred; returns a negative value if
1736 x is preferred. Should never return 0, since that will make the sort
1737 unstable. */
1738
1739static int
1740rank_for_schedule (x, y)
1741 rtx *x, *y;
1742{
1743 rtx tmp = *y;
1744 rtx tmp2 = *x;
1745 rtx tmp_dep, tmp2_dep;
1746 int tmp_class, tmp2_class;
1747 int value;
1748
1749 /* Choose the instruction with the highest priority, if different. */
1750 if (value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2))
1751 return value;
1752
1753 if (last_scheduled_insn)
1754 {
1755 /* Classify the instructions into three classes:
1756 1) Data dependent on last scheduled insn.
1757 2) Anti/Output dependent on last scheduled insn.
1758 3) Independent of last scheduled insn, or has latency of one.
1759 Choose the insn from the highest numbered class if different. */
1760 tmp_dep = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
1761 if (tmp_dep == 0 || insn_cost (tmp) == 1)
1762 tmp_class = 3;
1763 else if (REG_NOTE_KIND (tmp_dep) == 0)
1764 tmp_class = 1;
1765 else
1766 tmp_class = 2;
1767
1768 tmp2_dep = find_insn_list (tmp2, LOG_LINKS (last_scheduled_insn));
1769 if (tmp2_dep == 0 || insn_cost (tmp2) == 1)
1770 tmp2_class = 3;
1771 else if (REG_NOTE_KIND (tmp2_dep) == 0)
1772 tmp2_class = 1;
1773 else
1774 tmp2_class = 2;
1775
1776 if (value = tmp_class - tmp2_class)
1777 return value;
1778 }
1779
1780 /* If insns are equally good, sort by INSN_LUID (original insn order),
1781 so that we make the sort stable. This minimizes instruction movement,
1782 thus minimizing sched's effect on debugging and cross-jumping. */
1783 return INSN_LUID (tmp) - INSN_LUID (tmp2);
1784}
1785
1786/* Resort the array A in which only the last element (A[N-1]) may be out of order. */
1787
1788__inline static void
1789swap_sort (a, n)
1790 rtx *a;
1791 int n;
1792{
1793 rtx insn = a[n-1];
1794 int i = n-2;
1795
1796 while (i >= 0 && rank_for_schedule (a+i, &insn) >= 0)
1797 {
1798 a[i+1] = a[i];
1799 i -= 1;
1800 }
1801 a[i+1] = insn;
1802}
1803
1804static int max_priority;
1805
1806/* Add INSN to the insn queue so that it fires at least N_CYCLES
1807 before the currently executing insn. */
1808
1809__inline static void
1810queue_insn (insn, n_cycles)
1811 rtx insn;
1812 int n_cycles;
1813{
1814 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1815 NEXT_INSN (insn) = insn_queue[next_q];
1816 insn_queue[next_q] = insn;
1817 q_size += 1;
1818}
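
/* For instance (illustrative), with q_ptr at slot 5, queue_insn (insn, 2)
   links INSN into insn_queue[(5 + 2) & (Q_SIZE - 1)], i.e. slot 7; it
   becomes ready again when the scheduler's clock reaches that slot.  */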
1819
1820/* Return nonzero if PAT is the pattern of an insn which makes a
1821 register live. */
1822
1823__inline static int
1824birthing_insn_p (pat)
1825 rtx pat;
1826{
1827 int j;
1828
1829 if (reload_completed == 1)
1830 return 0;
1831
1832 if (GET_CODE (pat) == SET
1833 && GET_CODE (SET_DEST (pat)) == REG)
1834 {
1835 rtx dest = SET_DEST (pat);
1836 int i = REGNO (dest);
1837 int offset = i / REGSET_ELT_BITS;
1838 int bit = 1 << (i % REGSET_ELT_BITS);
1839
1840 /* It would be more accurate to use refers_to_regno_p or
1841 reg_mentioned_p to determine when the dest is not live before this
1842 insn. */
1843
1844 if (bb_live_regs[offset] & bit)
1845 return (reg_n_sets[i] == 1);
1846
1847 return 0;
1848 }
1849 if (GET_CODE (pat) == PARALLEL)
1850 {
1851 for (j = 0; j < XVECLEN (pat, 0); j++)
1852 if (birthing_insn_p (XVECEXP (pat, 0, j)))
1853 return 1;
1854 }
1855 return 0;
1856}
1857
1858/* If PREV is an insn which is immediately ready to execute, return 1,
1859 otherwise return 0. We may adjust its priority if that will help shorten
1860 register lifetimes. */
1861
1862static int
1863launch_link (prev)
1864 rtx prev;
1865{
1866 rtx pat = PATTERN (prev);
1867 rtx note;
1868 /* MAX of (a) number of cycles needed by prev
1869 (b) number of cycles before needed resources are free. */
1870 int n_cycles = insn_cost (prev);
1871 int n_deaths = 0;
1872
1873 /* Trying to shorten register lives after reload has completed
1874 is useless and wrong. It gives inaccurate schedules. */
1875 if (reload_completed == 0)
1876 {
1877 for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
1878 if (REG_NOTE_KIND (note) == REG_DEAD)
1879 n_deaths += 1;
1880
1881 /* Defer scheduling insns which kill registers, since that
1882 shortens register lives. Prefer scheduling insns which
1883 make registers live for the same reason. */
1884 switch (n_deaths)
1885 {
1886 default:
1887 INSN_PRIORITY (prev) >>= 3;
1888 break;
1889 case 3:
1890 INSN_PRIORITY (prev) >>= 2;
1891 break;
1892 case 2:
1893 case 1:
1894 INSN_PRIORITY (prev) >>= 1;
1895 break;
1896 case 0:
1897 if (birthing_insn_p (pat))
1898 {
1899 int max = max_priority;
1900
1901 if (max > INSN_PRIORITY (prev))
1902 INSN_PRIORITY (prev) = max;
1903 }
1904 break;
1905 }
1906 }
1907
1908 if (n_cycles <= 1)
1909 return 1;
1910 queue_insn (prev, n_cycles);
1911 return 0;
1912}
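/* A worked example of the priority scaling above: starting from
   INSN_PRIORITY == 40, an insn with one or two REG_DEAD notes drops to
   20, one with three drops to 10, and one with four or more drops to 5,
   so register-killing insns are deferred.  Conversely, an insn that
   kills nothing but makes a register live (birthing_insn_p) is raised to
   max_priority, so it tends to be scheduled early.  */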
1913
1914/* INSN is the "currently executing insn". Launch each insn which was
1915 waiting on INSN (in the backwards dataflow sense). READY is a
1916 vector of insns which are ready to fire. N_READY is the number of
1917 elements in READY. */
1918
1919static int
1920launch_links (insn, ready, n_ready)
1921 rtx insn;
1922 rtx *ready;
1923 int n_ready;
1924{
1925 rtx link;
1926 int new_ready = n_ready;
1927
1928 if (LOG_LINKS (insn) == 0)
1929 return n_ready;
1930
1931 /* This is used by the function launch_link above. */
1932 if (n_ready > 0)
1933 max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
1934 else
1935 max_priority = INSN_PRIORITY (insn);
1936
1937 for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
1938 {
1939 rtx prev = XEXP (link, 0);
1940
1941 if ((INSN_REF_COUNT (prev) -= 1) == 0 && launch_link (prev))
1942 ready[new_ready++] = prev;
1943 }
1944
1945 return new_ready;
1946}
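/* For example: INSN_REF_COUNT counts the not-yet-scheduled consumers of
   each insn.  If insn P feeds two later insns I1 and I2, P starts with a
   count of 2; scheduling I1 drops it to 1 and scheduling I2 to 0, at
   which point launch_link either returns 1 (cost <= 1, so P joins READY
   at once) or queues P to fire at least insn_cost (P) cycles earlier
   than the insn now being scheduled.  */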
1947
1948/* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
1949 dead_notes list. */
1950
1951static void
1952create_reg_dead_note (reg, insn)
1953 rtx reg, insn;
1954{
1955 rtx link = dead_notes;
1956
1957 if (link == 0)
1958 /* In theory, we should not end up with more REG_DEAD reg notes than we
1959 started with. In practice, this can occur as the result of bugs in
1960 flow, combine and/or sched. */
1961 {
1962#if 1
1963 abort ();
1964#else
1965 link = rtx_alloc (EXPR_LIST);
1966 PUT_REG_NOTE_KIND (link, REG_DEAD);
1967#endif
1968 }
1969 else
1970 dead_notes = XEXP (dead_notes, 1);
1971
1972 XEXP (link, 0) = reg;
1973 XEXP (link, 1) = REG_NOTES (insn);
1974 REG_NOTES (insn) = link;
1975}
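/* In rtl terms, the splice above pops one EXPR_LIST node off the free
   list and pushes it onto INSN's note chain, so (sketching the lists)

	dead_notes:	  (expr_list:REG_DEAD (reg X) rest)
	REG_NOTES (insn): old

   becomes

	dead_notes:	  rest
	REG_NOTES (insn): (expr_list:REG_DEAD REG old)

   with the recycled node now naming REG instead of its old register.  */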
1976
1977/* Subroutine of attach_deaths_insn--handles the recursive search
1978 through X. If SET_P is true, then X is being modified by the insn. */
1979
1980static void
1981attach_deaths (x, insn, set_p)
1982 rtx x;
1983 rtx insn;
1984 int set_p;
1985{
1986 register int i;
1987 register int j;
1988 register enum rtx_code code;
1989 register char *fmt;
1990
1991 if (x == 0)
1992 return;
1993
1994 code = GET_CODE (x);
1995
1996 switch (code)
1997 {
1998 case CONST_INT:
1999 case CONST_DOUBLE:
2000 case LABEL_REF:
2001 case SYMBOL_REF:
2002 case CONST:
2003 case CODE_LABEL:
2004 case PC:
2005 case CC0:
2006 /* Get rid of the easy cases first. */
2007 return;
2008
2009 case REG:
2010 {
2011 /* If the register dies in this insn, queue that note, and mark
2012 this register as needing to die. */
2013 /* This code is very similar to mark_used_1 (if set_p is false)
2014 and mark_set_1 (if set_p is true) in flow.c. */
2015
2016 register int regno = REGNO (x);
2017 register int offset = regno / REGSET_ELT_BITS;
2018 register int bit = 1 << (regno % REGSET_ELT_BITS);
2019 int all_needed = (old_live_regs[offset] & bit);
2020 int some_needed = (old_live_regs[offset] & bit);
2021
2022 if (set_p)
2023 return;
2024
2025 if (regno < FIRST_PSEUDO_REGISTER)
2026 {
2027 int n;
2028
2029 n = HARD_REGNO_NREGS (regno, GET_MODE (x));
2030 while (--n > 0)
2031 {
2032 some_needed |= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
2033 & 1 << ((regno + n) % REGSET_ELT_BITS));
2034 all_needed &= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
2035 & 1 << ((regno + n) % REGSET_ELT_BITS));
2036 }
2037 }
2038
2039 /* If it wasn't live before we started, then add a REG_DEAD note.
2040 We must check the previous lifetime info not the current info,
2041 because we may have to execute this code several times, e.g.
2042 once for a clobber (which doesn't add a note) and later
2043 for a use (which does add a note).
2044
2045 Always make the register live. We must do this even if it was
2046 live before, because this may be an insn which sets and uses
2047 the same register, in which case the register has already been
2048 killed, so we must make it live again.
2049
2050 Global registers are always live, and should never have a REG_DEAD
2051 note added for them, so none of the code below applies to them. */
2052
2053 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
2054 {
2055 /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2056 STACK_POINTER_REGNUM, since these are always considered to be
2057 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2058 if (regno != FRAME_POINTER_REGNUM
2059#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2060 && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
2061#endif
2062 && regno != STACK_POINTER_REGNUM)
2063 {
2064 if (! all_needed && ! dead_or_set_p (insn, x))
2065 {
2066 /* If none of the words in X is needed, make a REG_DEAD
2067 note. Otherwise, we must make partial REG_DEAD
2068 notes. */
2069 if (! some_needed)
2070 create_reg_dead_note (x, insn);
2071 else
2072 {
2073 int i;
2074
2075 /* Don't make a REG_DEAD note for a part of a
2076 register that is set in the insn. */
2077 for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
2078 i >= 0; i--)
2079 if ((old_live_regs[(regno + i) / REGSET_ELT_BITS]
2080 & 1 << ((regno + i) % REGSET_ELT_BITS)) == 0
2081 && ! dead_or_set_regno_p (insn, regno + i))
2082 create_reg_dead_note (gen_rtx (REG, word_mode,
2083 regno + i),
2084 insn);
2085 }
2086 }
2087 }
2088
2089 if (regno < FIRST_PSEUDO_REGISTER)
2090 {
2091 int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
2092 while (--j >= 0)
2093 {
2094 offset = (regno + j) / REGSET_ELT_BITS;
2095 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
2096
2097 bb_dead_regs[offset] &= ~bit;
2098 bb_live_regs[offset] |= bit;
2099 }
2100 }
2101 else
2102 {
2103 bb_dead_regs[offset] &= ~bit;
2104 bb_live_regs[offset] |= bit;
2105 }
2106 }
2107 return;
2108 }
2109
2110 case MEM:
2111 /* Handle tail-recursive case. */
2112 attach_deaths (XEXP (x, 0), insn, 0);
2113 return;
2114
2115 case SUBREG:
2116 case STRICT_LOW_PART:
2117 /* These two cases preserve the value of SET_P, so handle them
2118 separately. */
2119 attach_deaths (XEXP (x, 0), insn, set_p);
2120 return;
2121
2122 case ZERO_EXTRACT:
2123 case SIGN_EXTRACT:
2124 /* This case preserves the value of SET_P for the first operand, but
2125 clears it for the other two. */
2126 attach_deaths (XEXP (x, 0), insn, set_p);
2127 attach_deaths (XEXP (x, 1), insn, 0);
2128 attach_deaths (XEXP (x, 2), insn, 0);
2129 return;
2130
2131 default:
2132 /* Other cases: walk the insn. */
2133 fmt = GET_RTX_FORMAT (code);
2134 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2135 {
2136 if (fmt[i] == 'e')
2137 attach_deaths (XEXP (x, i), insn, 0);
2138 else if (fmt[i] == 'E')
2139 for (j = 0; j < XVECLEN (x, i); j++)
2140 attach_deaths (XVECEXP (x, i, j), insn, 0);
2141 }
2142 }
2143}
2144
2145/* After INSN has executed, add register death notes for each register
2146 that is dead after INSN. */
2147
2148static void
2149attach_deaths_insn (insn)
2150 rtx insn;
2151{
2152 rtx x = PATTERN (insn);
2153 register RTX_CODE code = GET_CODE (x);
2154
2155 if (code == SET)
2156 {
2157 attach_deaths (SET_SRC (x), insn, 0);
2158
2159 /* A register might die here even if it is the destination, e.g.
2160 it is the target of a volatile read and is otherwise unused.
2161 Hence we must always call attach_deaths for the SET_DEST. */
2162 attach_deaths (SET_DEST (x), insn, 1);
2163 }
2164 else if (code == PARALLEL)
2165 {
2166 register int i;
2167 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2168 {
2169 code = GET_CODE (XVECEXP (x, 0, i));
2170 if (code == SET)
2171 {
2172 attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
2173
2174 attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
2175 }
2176 else if (code == CLOBBER)
2177 attach_deaths (XEXP (XVECEXP (x, 0, i), 0), insn, 1);
2178 else
2179 attach_deaths (XVECEXP (x, 0, i), insn, 0);
2180 }
2181 }
2182 else if (code == CLOBBER)
2183 attach_deaths (XEXP (x, 0), insn, 1);
2184 else
2185 attach_deaths (x, insn, 0);
2186}
2187
2188/* Delete notes beginning with INSN and maybe put them in the chain
2189 of notes ended by NOTE_LIST.
2190 Returns the insn following the notes. */
2191
2192static rtx
2193unlink_notes (insn, tail)
2194 rtx insn, tail;
2195{
2196 rtx prev = PREV_INSN (insn);
2197
2198 while (insn != tail && GET_CODE (insn) == NOTE)
2199 {
2200 rtx next = NEXT_INSN (insn);
2201 /* Delete the note from its current position. */
2202 if (prev)
2203 NEXT_INSN (prev) = next;
2204 if (next)
2205 PREV_INSN (next) = prev;
2206
2207 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
2208 /* Record line-number notes so they can be reused. */
2209 LINE_NOTE (insn) = insn;
2210 else
2211 {
2212 /* Insert the note at the end of the notes list. */
2213 PREV_INSN (insn) = note_list;
2214 if (note_list)
2215 NEXT_INSN (note_list) = insn;
2216 note_list = insn;
2217 }
2218
2219 insn = next;
2220 }
2221 return insn;
2222}
2223
2224/* Data structure for keeping track of register information
2225 during that register's life. */
2226
2227struct sometimes
2228{
2229 short offset; short bit;
2230 short live_length; short calls_crossed;
2231};
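/* For example: OFFSET and BIT locate a register's bit within a regset,
   so regno == offset * REGSET_ELT_BITS + bit.  With REGSET_ELT_BITS of
   32 (it is host-dependent), register 70 is tracked as offset 2, bit 6.
   LIVE_LENGTH and CALLS_CROSSED accumulate over the current lifetime
   segment and are flushed into sched_reg_live_length and
   sched_reg_n_calls_crossed when the segment ends.  */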
2232
2233/* Constructor for `sometimes' data structure. */
2234
2235static int
2236new_sometimes_live (regs_sometimes_live, offset, bit, sometimes_max)
2237 struct sometimes *regs_sometimes_live;
2238 int offset, bit;
2239 int sometimes_max;
2240{
2241 register struct sometimes *p;
2242 register int regno = offset * REGSET_ELT_BITS + bit;
2243 int i;
2244
2245 /* There should never be a register greater than max_regno here. If there
2246 is, it means that a define_split has created a new pseudo reg. This
2247 is not allowed, since there will not be flow info available for any
2248 new register, so catch the error here. */
2249 if (regno >= max_regno)
2250 abort ();
2251
2252 p = &regs_sometimes_live[sometimes_max];
2253 p->offset = offset;
2254 p->bit = bit;
2255 p->live_length = 0;
2256 p->calls_crossed = 0;
2257 sometimes_max++;
2258 return sometimes_max;
2259}
2260
2261/* Record the accumulated lifetime length and call-crossing count
2262 of every reg we are still tracking at the end of the block. */
2263
2264static void
2265finish_sometimes_live (regs_sometimes_live, sometimes_max)
2266 struct sometimes *regs_sometimes_live;
2267 int sometimes_max;
2268{
2269 int i;
2270
2271 for (i = 0; i < sometimes_max; i++)
2272 {
2273 register struct sometimes *p = &regs_sometimes_live[i];
2274 int regno;
2275
2276 regno = p->offset * REGSET_ELT_BITS + p->bit;
2277
2278 sched_reg_live_length[regno] += p->live_length;
2279 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2280 }
2281}
2282
2283/* Use modified list scheduling to rearrange insns in basic block
2284 B. FILE, if nonzero, is where we dump interesting output about
2285 this pass. */
2286
2287static void
2288schedule_block (b, file)
2289 int b;
2290 FILE *file;
2291{
2292 rtx insn, last;
2293 rtx last_note = 0;
2294 rtx *ready, link;
2295 int i, j, n_ready = 0, new_ready, n_insns = 0;
2296 int sched_n_insns = 0;
2297#define NEED_NOTHING 0
2298#define NEED_HEAD 1
2299#define NEED_TAIL 2
2300 int new_needs;
2301
2302 /* HEAD and TAIL delimit the region being scheduled. */
2303 rtx head = basic_block_head[b];
2304 rtx tail = basic_block_end[b];
2305 /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
2306 being scheduled. When the insns have been ordered,
2307 these insns delimit where the new insns are to be
2308 spliced back into the insn chain. */
2309 rtx next_tail;
2310 rtx prev_head;
2311
2312 /* Keep life information accurate. */
2313 register struct sometimes *regs_sometimes_live;
2314 int sometimes_max;
2315
2316 if (file)
2317 fprintf (file, ";;\t -- basic block number %d from %d to %d --\n",
2318 b, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
2319
2320 i = max_reg_num ();
2321 reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
2322 bzero (reg_last_uses, i * sizeof (rtx));
2323 reg_last_sets = (rtx *) alloca (i * sizeof (rtx));
2324 bzero (reg_last_sets, i * sizeof (rtx));
2325
2326 /* Remove certain insns at the beginning from scheduling,
2327 by advancing HEAD. */
2328
2329 /* At the start of a function, before reload has run, don't delay getting
2330 parameters from hard registers into pseudo registers. */
2331 if (reload_completed == 0 && b == 0)
2332 {
2333 while (head != tail
2334 && GET_CODE (head) == NOTE
2335 && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
2336 head = NEXT_INSN (head);
2337 while (head != tail
2338 && GET_CODE (head) == INSN
2339 && GET_CODE (PATTERN (head)) == SET)
2340 {
2341 rtx src = SET_SRC (PATTERN (head));
2342 while (GET_CODE (src) == SUBREG
2343 || GET_CODE (src) == SIGN_EXTEND
2344 || GET_CODE (src) == ZERO_EXTEND
2345 || GET_CODE (src) == SIGN_EXTRACT
2346 || GET_CODE (src) == ZERO_EXTRACT)
2347 src = XEXP (src, 0);
2348 if (GET_CODE (src) != REG
2349 || REGNO (src) >= FIRST_PSEUDO_REGISTER)
2350 break;
2351 /* Keep this insn from ever being scheduled. */
2352 INSN_REF_COUNT (head) = 1;
2353 head = NEXT_INSN (head);
2354 }
2355 }
2356
2357 /* Don't include any notes or labels at the beginning of the
2358 basic block, or notes at the ends of basic blocks. */
2359 while (head != tail)
2360 {
2361 if (GET_CODE (head) == NOTE)
2362 head = NEXT_INSN (head);
2363 else if (GET_CODE (tail) == NOTE)
2364 tail = PREV_INSN (tail);
2365 else if (GET_CODE (head) == CODE_LABEL)
2366 head = NEXT_INSN (head);
2367 else break;
2368 }
2369 /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
2370 to schedule this block. */
2371 if (head == tail
2372 && (GET_CODE (head) == NOTE || GET_CODE (head) == CODE_LABEL))
2373 return;
2374
2375#if 0
2376 /* This short-cut doesn't work. It does not count call insns crossed by
2377 registers in reg_sometimes_live. It does not mark these registers as
2378 dead if they die in this block. It does not mark these registers live
2379 (or create new reg_sometimes_live entries if necessary) if they are born
2380 in this block.
2381
2382 The easy solution is to just always schedule a block. This block only
2383 has one insn, so this won't slow down this pass by much. */
2384
2385 if (head == tail)
2386 return;
2387#endif
2388
2389 /* Exclude certain insns at the end of the basic block by advancing TAIL. */
2390 /* This isn't correct. Instead of advancing TAIL, should assign very
2391 high priorities to these insns to guarantee that they get scheduled last.
2392 If these insns are ignored, as is currently done, the register life info
2393 may be incorrectly computed. */
2394 if (GET_CODE (tail) == INSN && GET_CODE (PATTERN (tail)) == USE)
2395 {
2396 /* Don't try to reorder any USE insns at the end of any block.
2397 They must be last to ensure proper register allocation.
2398 Exclude them all from scheduling. */
2399 do
2400 {
2401 /* If we are down to one USE insn, then there are no insns to
2402 schedule. */
2403 if (head == tail)
2404 return;
2405
2406 tail = prev_nonnote_insn (tail);
2407 }
2408 while (GET_CODE (tail) == INSN
2409 && GET_CODE (PATTERN (tail)) == USE);
2410
2411#if 0
2412 /* This short-cut does not work. See comment above. */
2413 if (head == tail)
2414 return;
2415#endif
2416 }
2417 else if (GET_CODE (tail) == JUMP_INSN
2418 && SCHED_GROUP_P (tail) == 0
2419 && GET_CODE (PREV_INSN (tail)) == INSN
2420 && GET_CODE (PATTERN (PREV_INSN (tail))) == USE
2421 && REG_FUNCTION_VALUE_P (XEXP (PATTERN (PREV_INSN (tail)), 0)))
2422 {
2423 /* Don't let the setting of the function's return value register
2424 move from this jump. For the same reason we want to get the
2425 parameters into pseudo registers as quickly as possible, we
2426 want to set the function's return value register as late as
2427 possible. */
2428
2429 /* If this is the only insn in the block, then there is no need to
2430 schedule the block. */
2431 if (head == tail)
2432 return;
2433
2434 tail = PREV_INSN (tail);
2435 if (head == tail)
2436 return;
2437
2438 tail = prev_nonnote_insn (tail);
2439
2440#if 0
2441 /* This shortcut does not work. See comment above. */
2442 if (head == tail)
2443 return;
2444#endif
2445 }
2446
2447#ifdef HAVE_cc0
2448 /* This is probably wrong. Instead of doing this, should give this insn
2449 a very high priority to guarantee that it gets scheduled last. */
2450 /* Can not separate an insn that sets the condition code from one that
2451 uses it. So we must leave an insn that sets cc0 where it is. */
2452 if (sets_cc0_p (PATTERN (tail)))
2453 tail = PREV_INSN (tail);
2454#endif
2455
2456 /* Now HEAD through TAIL are the insns actually to be rearranged;
2457 Let PREV_HEAD and NEXT_TAIL enclose them. */
2458 prev_head = PREV_INSN (head);
2459 next_tail = NEXT_INSN (tail);
2460
2461 /* Initialize basic block data structures. */
2462 dead_notes = 0;
2463 pending_read_insns = 0;
2464 pending_read_mems = 0;
2465 pending_write_insns = 0;
2466 pending_write_mems = 0;
2467 pending_lists_length = 0;
2468 last_pending_memory_flush = 0;
2469 last_function_call = 0;
2470 last_scheduled_insn = 0;
2471
2472 LOG_LINKS (sched_before_next_call) = 0;
2473
2474 n_insns += sched_analyze (head, tail);
2475 if (n_insns == 0)
2476 {
2477 free_pending_lists ();
2478 return;
2479 }
2480
2481 /* Allocate vector to hold insns to be rearranged (except those
2482 insns which are controlled by an insn with SCHED_GROUP_P set).
2483 All these insns are included between ORIG_HEAD and ORIG_TAIL,
2484 as those variables ultimately are set up. */
2485 ready = (rtx *) alloca ((n_insns+1) * sizeof (rtx));
2486
2487 /* TAIL is now the last of the insns to be rearranged.
2488 Put those insns into the READY vector. */
2489 insn = tail;
2490
2491 /* If the last insn is a branch, force it to be the last insn after
2492 scheduling. Also, don't try to reorder calls at the end of the basic
2493 block -- this will only lead to worse register allocation. */
2494 if (GET_CODE (tail) == CALL_INSN || GET_CODE (tail) == JUMP_INSN)
2495 {
2496 priority (tail);
2497 ready[n_ready++] = tail;
2498 INSN_PRIORITY (tail) = TAIL_PRIORITY;
2499 INSN_REF_COUNT (tail) = 0;
2500 insn = PREV_INSN (tail);
2501 }
2502
2503 /* Assign priorities to instructions. Also check whether they
2504 are in priority order already. If so, then I will be nonnegative.
2505 We use this shortcut only before reloading. */
2506#if 0
2507 i = reload_completed ? DONE_PRIORITY : MAX_PRIORITY;
2508#endif
2509
2510 for (; insn != prev_head; insn = PREV_INSN (insn))
2511 {
2512 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2513 {
2514 priority (insn);
2515 if (INSN_REF_COUNT (insn) == 0)
2516 ready[n_ready++] = insn;
2517 if (SCHED_GROUP_P (insn))
2518 {
2519 while (SCHED_GROUP_P (insn))
2520 {
2521 insn = PREV_INSN (insn);
2522 while (GET_CODE (insn) == NOTE)
2523 insn = PREV_INSN (insn);
2524 priority (insn);
2525 }
2526 continue;
2527 }
2528#if 0
2529 if (i < 0)
2530 continue;
2531 if (INSN_PRIORITY (insn) < i)
2532 i = INSN_PRIORITY (insn);
2533 else if (INSN_PRIORITY (insn) > i)
2534 i = DONE_PRIORITY;
2535#endif
2536 }
2537 }
2538
2539#if 0
2540 /* This short-cut doesn't work. It does not count call insns crossed by
2541 registers in reg_sometimes_live. It does not mark these registers as
2542 dead if they die in this block. It does not mark these registers live
2543 (or create new reg_sometimes_live entries if necessary) if they are born
2544 in this block.
2545
2546 The easy solution is to just always schedule a block. These blocks tend
2547 to be very short, so this doesn't slow down this pass by much. */
2548
2549 /* If existing order is good, don't bother to reorder. */
2550 if (i != DONE_PRIORITY)
2551 {
2552 if (file)
2553 fprintf (file, ";; already scheduled\n");
2554
2555 if (reload_completed == 0)
2556 {
2557 for (i = 0; i < sometimes_max; i++)
2558 regs_sometimes_live[i].live_length += n_insns;
2559
2560 finish_sometimes_live (regs_sometimes_live, sometimes_max);
2561 }
2562 free_pending_lists ();
2563 return;
2564 }
2565#endif
2566
2567 /* Scan all the insns to be scheduled, removing NOTE insns
2568 and register death notes.
2569 Line number NOTE insns end up in NOTE_LIST.
2570 Register death notes end up in DEAD_NOTES.
2571
2572 Recreate the register life information for the end of this basic
2573 block. */
2574
2575 if (reload_completed == 0)
2576 {
2577 bcopy (basic_block_live_at_start[b], bb_live_regs, regset_bytes);
2578 bzero (bb_dead_regs, regset_bytes);
2579
2580 if (b == 0)
2581 {
2582 /* This is the first block in the function. There may be insns
2583 before head that we can't schedule. We still need to examine
2584 them though for accurate register lifetime analysis. */
2585
2586 /* We don't want to remove any REG_DEAD notes as the code below
2587 does. */
2588
2589 for (insn = basic_block_head[b]; insn != head;
2590 insn = NEXT_INSN (insn))
2591 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2592 {
2593 /* See if the register gets born here. */
2594 /* We must check for registers being born before we check for
2595 registers dying. It is possible for a register to be born
2596 and die in the same insn, e.g. reading from a volatile
2597 memory location into an otherwise unused register. Such
2598 a register must be marked as dead after this insn. */
2599 if (GET_CODE (PATTERN (insn)) == SET
2600 || GET_CODE (PATTERN (insn)) == CLOBBER)
2601 sched_note_set (b, PATTERN (insn), 0);
2602 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2603 {
2604 int j;
2605 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2606 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2607 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2608 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2609
2610 /* ??? This code is obsolete and should be deleted. It
2611 is harmless though, so we will leave it in for now. */
2612 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2613 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
2614 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2615 }
2616
2617 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2618 {
2619 if ((REG_NOTE_KIND (link) == REG_DEAD
2620 || REG_NOTE_KIND (link) == REG_UNUSED)
2621 /* Verify that the REG_NOTE has a legal value. */
2622 && GET_CODE (XEXP (link, 0)) == REG)
2623 {
2624 register int regno = REGNO (XEXP (link, 0));
2625 register int offset = regno / REGSET_ELT_BITS;
2626 register int bit = 1 << (regno % REGSET_ELT_BITS);
2627
2628 if (regno < FIRST_PSEUDO_REGISTER)
2629 {
2630 int j = HARD_REGNO_NREGS (regno,
2631 GET_MODE (XEXP (link, 0)));
2632 while (--j >= 0)
2633 {
2634 offset = (regno + j) / REGSET_ELT_BITS;
2635 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
2636
2637 bb_live_regs[offset] &= ~bit;
2638 bb_dead_regs[offset] |= bit;
2639 }
2640 }
2641 else
2642 {
2643 bb_live_regs[offset] &= ~bit;
2644 bb_dead_regs[offset] |= bit;
2645 }
2646 }
2647 }
2648 }
2649 }
2650 }
2651
2652 /* If debugging information is being produced, keep track of the line
2653 number notes for each insn. */
2654 if (write_symbols != NO_DEBUG)
2655 {
2656 /* We must use the true line number for the first insn in the block
2657 that was computed and saved at the start of this pass. We can't
2658 use the current line number, because scheduling of the previous
2659 block may have changed the current line number. */
2660 rtx line = line_note_head[b];
2661
2662 for (insn = basic_block_head[b];
2663 insn != next_tail;
2664 insn = NEXT_INSN (insn))
2665 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
2666 line = insn;
2667 else
2668 LINE_NOTE (insn) = line;
2669 }
2670
2671 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2672 {
2673 rtx prev, next, link;
2674
2675 /* Farm out notes. This is needed to keep the debugger from
2676 getting completely deranged. */
2677 if (GET_CODE (insn) == NOTE)
2678 {
2679 prev = insn;
2680 insn = unlink_notes (insn, next_tail);
2681 if (prev == tail)
2682 abort ();
2683 if (prev == head)
2684 abort ();
2685 if (insn == next_tail)
2686 abort ();
2687 }
2688
2689 if (reload_completed == 0
2690 && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2691 {
2692 /* See if the register gets born here. */
2693 /* We must check for registers being born before we check for
2694 registers dying. It is possible for a register to be born and
2695 die in the same insn, e.g. reading from a volatile memory
2696 location into an otherwise unused register. Such a register
2697 must be marked as dead after this insn. */
2698 if (GET_CODE (PATTERN (insn)) == SET
2699 || GET_CODE (PATTERN (insn)) == CLOBBER)
2700 sched_note_set (b, PATTERN (insn), 0);
2701 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2702 {
2703 int j;
2704 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2705 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2706 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2707 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2708
2709 /* ??? This code is obsolete and should be deleted. It
2710 is harmless though, so we will leave it in for now. */
2711 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2712 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
2713 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2714 }
2715
2716 /* Need to know what registers this insn kills. */
2717 for (prev = 0, link = REG_NOTES (insn); link; link = next)
2718 {
2719 int regno;
2720
2721 next = XEXP (link, 1);
2722 if ((REG_NOTE_KIND (link) == REG_DEAD
2723 || REG_NOTE_KIND (link) == REG_UNUSED)
2724 /* Verify that the REG_NOTE has a legal value. */
2725 && GET_CODE (XEXP (link, 0)) == REG)
2726 {
2727 register int regno = REGNO (XEXP (link, 0));
2728 register int offset = regno / REGSET_ELT_BITS;
2729 register int bit = 1 << (regno % REGSET_ELT_BITS);
2730
2731 /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
2732 alone. */
2733 if (REG_NOTE_KIND (link) == REG_DEAD)
2734 {
2735 if (prev)
2736 XEXP (prev, 1) = next;
2737 else
2738 REG_NOTES (insn) = next;
2739 XEXP (link, 1) = dead_notes;
2740 dead_notes = link;
2741 }
2742 else
2743 prev = link;
2744
2745 if (regno < FIRST_PSEUDO_REGISTER)
2746 {
2747 int j = HARD_REGNO_NREGS (regno,
2748 GET_MODE (XEXP (link, 0)));
2749 while (--j >= 0)
2750 {
2751 offset = (regno + j) / REGSET_ELT_BITS;
2752 bit = 1 << ((regno + j) % REGSET_ELT_BITS);
2753
2754 bb_live_regs[offset] &= ~bit;
2755 bb_dead_regs[offset] |= bit;
2756 }
2757 }
2758 else
2759 {
2760 bb_live_regs[offset] &= ~bit;
2761 bb_dead_regs[offset] |= bit;
2762 }
2763 }
2764 else
2765 prev = link;
2766 }
2767 }
2768 }
2769
2770 if (reload_completed == 0)
2771 {
2772 /* Keep track of register lives. */
2773 old_live_regs = (regset) alloca (regset_bytes);
2774 regs_sometimes_live
2775 = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
2776 sometimes_max = 0;
2777
2778 /* Start with registers live at end. */
2779 for (j = 0; j < regset_size; j++)
2780 {
2781 int live = bb_live_regs[j];
2782 old_live_regs[j] = live;
2783 if (live)
2784 {
2785 register int bit;
2786 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
2787 if (live & (1 << bit))
2788 sometimes_max = new_sometimes_live (regs_sometimes_live, j,
2789 bit, sometimes_max);
2790 }
2791 }
2792 }
2793
2794 SCHED_SORT (ready, n_ready, 1);
2795
2796 if (file)
2797 {
2798 fprintf (file, ";; ready list initially:\n;; ");
2799 for (i = 0; i < n_ready; i++)
2800 fprintf (file, "%d ", INSN_UID (ready[i]));
2801 fprintf (file, "\n\n");
2802
2803 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2804 if (INSN_PRIORITY (insn) > 0)
2805 fprintf (file, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
2806 INSN_UID (insn), INSN_PRIORITY (insn),
2807 INSN_REF_COUNT (insn));
2808 }
2809
2810 /* Now HEAD and TAIL are going to become disconnected
2811 entirely from the insn chain. */
2812 tail = ready[0];
2813
2814 /* Q_SIZE will always be zero here. */
2815 q_ptr = 0;
2816 bzero (insn_queue, sizeof (insn_queue));
2817
2818 /* Now, perform list scheduling. */
2819
2820 /* Where we start inserting insns is after TAIL. */
2821 last = next_tail;
2822
2823 new_needs = (NEXT_INSN (prev_head) == basic_block_head[b]
2824 ? NEED_HEAD : NEED_NOTHING);
2825 if (PREV_INSN (next_tail) == basic_block_end[b])
2826 new_needs |= NEED_TAIL;
2827
2828 new_ready = n_ready;
2829 while (sched_n_insns < n_insns)
2830 {
2831 q_ptr = NEXT_Q (q_ptr);
2832
2833 /* Add all pending insns that can be scheduled without stalls to the
2834 ready list. */
2835 for (insn = insn_queue[q_ptr]; insn; insn = NEXT_INSN (insn))
2836 {
2837 if (file)
2838 fprintf (file, ";; launching %d before %d with no stalls\n",
2839 INSN_UID (insn), INSN_UID (last));
2840 ready[new_ready++] = insn;
2841 q_size -= 1;
2842 }
2843 insn_queue[q_ptr] = 0;
2844
2845 /* If there are no ready insns, stall until one is ready and add all
2846 of the pending insns at that point to the ready list. */
2847 if (new_ready == 0)
2848 {
2849 register int stalls;
2850
2851 for (stalls = 1; stalls < Q_SIZE; stalls++)
2852 if (insn = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)])
2853 {
2854 for (; insn; insn = NEXT_INSN (insn))
2855 {
2856 if (file)
2857 fprintf (file, ";; issue insn %d before %d with %d stalls\n",
2858 INSN_UID (insn), INSN_UID (last), stalls);
2859 ready[new_ready++] = insn;
2860 q_size -= 1;
2861 }
2862 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
2863 break;
2864 }
2865
2866#if 0
2867 /* This looks logically correct, but on the SPEC benchmark set on
2868 the SPARC, I get better code without it. */
2869 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
2870#endif
2871 }
2872
2873 /* There should be some instructions waiting to fire. */
2874 if (new_ready == 0)
2875 abort ();
2876
2877 /* Sort the ready list and choose the best insn to schedule.
2878 N_READY holds the number of items that were scheduled the last time,
2879 minus the one instruction scheduled on the last loop iteration; it
2880 is not modified for any other reason in this loop. */
2881 SCHED_SORT (ready, new_ready, n_ready);
2882 n_ready = new_ready;
2883 last_scheduled_insn = insn = ready[0];
2884
2885 if (DONE_PRIORITY_P (insn))
2886 abort ();
2887
2888 if (reload_completed == 0)
2889 {
2890 /* Process this insn, and each insn linked to this one which must
2891 be immediately output after this insn. */
2892 do
2893 {
2894 /* First we kill registers set by this insn, and then we
2895 make registers used by this insn live. This is the opposite
2896 order used above because we are traversing the instructions
2897 backwards. */
2898
2899 /* Strictly speaking, we should scan REG_UNUSED notes and make
2900 every register mentioned there live, however, we will just
2901 kill them again immediately below, so there doesn't seem to
2902 be any reason why we bother to do this. */
2903
2904 /* See if this is the last notice we must take of a register. */
2905 if (GET_CODE (PATTERN (insn)) == SET
2906 || GET_CODE (PATTERN (insn)) == CLOBBER)
2907 sched_note_set (b, PATTERN (insn), 1);
2908 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2909 {
2910 int j;
2911 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2912 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2913 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2914 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 1);
2915 }
2916
2917 /* This code keeps life analysis information up to date. */
2918 if (GET_CODE (insn) == CALL_INSN)
2919 {
2920 register struct sometimes *p;
2921
2922 /* A call kills all call used and global registers, except
2923 for those mentioned in the call pattern which will be
2924 made live again later. */
2925 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2926 if (call_used_regs[i] || global_regs[i])
2927 {
2928 register int offset = i / REGSET_ELT_BITS;
2929 register int bit = 1 << (i % REGSET_ELT_BITS);
2930
2931 bb_live_regs[offset] &= ~bit;
2932 bb_dead_regs[offset] |= bit;
2933 }
2934
2935 /* Regs live at the time of a call instruction must not
2936 go in a register clobbered by calls. Record this for
2937 all regs now live. Note that insns which are born or
2938 die in a call do not cross a call, so this must be done
2939 after the killings (above) and before the births
2940 (below). */
2941 p = regs_sometimes_live;
2942 for (i = 0; i < sometimes_max; i++, p++)
2943 if (bb_live_regs[p->offset] & (1 << p->bit))
2944 p->calls_crossed += 1;
2945 }
2946
2947 /* Make every register used live, and add REG_DEAD notes for
2948 registers which were not live before we started. */
2949 attach_deaths_insn (insn);
2950
2951 /* Find registers now made live by that instruction. */
2952 for (i = 0; i < regset_size; i++)
2953 {
2954 int diff = bb_live_regs[i] & ~old_live_regs[i];
2955 if (diff)
2956 {
2957 register int bit;
2958 old_live_regs[i] |= diff;
2959 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
2960 if (diff & (1 << bit))
2961 sometimes_max
2962 = new_sometimes_live (regs_sometimes_live, i, bit,
2963 sometimes_max);
2964 }
2965 }
2966
2967 /* Count lengths of all regs we are worrying about now,
2968 and handle registers no longer live. */
2969
2970 for (i = 0; i < sometimes_max; i++)
2971 {
2972 register struct sometimes *p = &regs_sometimes_live[i];
2973 int regno = p->offset*REGSET_ELT_BITS + p->bit;
2974
2975 p->live_length += 1;
2976
2977 if ((bb_live_regs[p->offset] & (1 << p->bit)) == 0)
2978 {
2979 /* This is the end of one of this register's lifetime
2980 segments. Save the lifetime info collected so far,
2981 and clear its bit in the old_live_regs entry. */
2982 sched_reg_live_length[regno] += p->live_length;
2983 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2984 old_live_regs[p->offset] &= ~(1 << p->bit);
2985
2986 /* Delete the reg_sometimes_live entry for this reg by
2987 copying the last entry over top of it. */
2988 *p = regs_sometimes_live[--sometimes_max];
2989 /* ...and decrement i so that this newly copied entry
2990 will be processed. */
2991 i--;
2992 }
2993 }
2994
2995 link = insn;
2996 insn = PREV_INSN (insn);
2997 }
2998 while (SCHED_GROUP_P (link));
2999
3000 /* Set INSN back to the insn we are scheduling now. */
3001 insn = ready[0];
3002 }
3003
3004 /* Schedule INSN. Remove it from the ready list. */
3005 ready += 1;
3006 n_ready -= 1;
3007
3008 sched_n_insns += 1;
3009 NEXT_INSN (insn) = last;
3010 PREV_INSN (last) = insn;
3011 last = insn;
3012
3013 /* Everything that precedes INSN now either becomes "ready", if
3014 it can execute immediately before INSN, or "pending", if
3015 there must be a delay. Give INSN high enough priority that
3016 at least one (maybe more) reg-killing insns can be launched
3017 ahead of all others. Mark INSN as scheduled by changing its
3018 priority to -1. */
3019 INSN_PRIORITY (insn) = LAUNCH_PRIORITY;
3020 new_ready = launch_links (insn, ready, n_ready);
3021 INSN_PRIORITY (insn) = DONE_PRIORITY;
3022
3023 /* Schedule all prior insns that must not be moved. */
3024 if (SCHED_GROUP_P (insn))
3025 {
3026 /* Disable these insns from being launched. */
3027 link = insn;
3028 while (SCHED_GROUP_P (link))
3029 {
3030 /* Disable these insns from being launched by anybody. */
3031 link = PREV_INSN (link);
3032 INSN_REF_COUNT (link) = 0;
3033 }
3034
3035 /* None of these insns can move forward into delay slots. */
3036 while (SCHED_GROUP_P (insn))
3037 {
3038 insn = PREV_INSN (insn);
3039 new_ready = launch_links (insn, ready, new_ready);
3040 INSN_PRIORITY (insn) = DONE_PRIORITY;
3041
3042 sched_n_insns += 1;
3043 NEXT_INSN (insn) = last;
3044 PREV_INSN (last) = insn;
3045 last = insn;
3046 }
3047 }
3048 }
3049 if (q_size != 0)
3050 abort ();
3051
3052 if (reload_completed == 0)
3053 finish_sometimes_live (regs_sometimes_live, sometimes_max);
3054
3055 /* HEAD is now the first insn in the chain of insns that have
3056 been scheduled by the loop above.
3057 TAIL is the last of those insns. */
3058 head = insn;
3059
3060 /* NOTE_LIST is the end of a chain of notes previously found
3061 among the insns. Insert them at the beginning of the insns. */
3062 if (note_list != 0)
3063 {
3064 rtx note_head = note_list;
3065 while (PREV_INSN (note_head))
3066 note_head = PREV_INSN (note_head);
3067
3068 PREV_INSN (head) = note_list;
3069 NEXT_INSN (note_list) = head;
3070 head = note_head;
3071 }
3072
3073 /* In theory, there should be no REG_DEAD notes leftover at the end.
3074 In practice, this can occur as the result of bugs in flow, combine.c,
3075 and/or sched.c. The values of the REG_DEAD notes remaining are
3076 meaningless, because dead_notes is just used as a free list. */
3077#if 1
3078 if (dead_notes != 0)
3079 abort ();
3080#endif
3081
3082 if (new_needs & NEED_HEAD)
3083 basic_block_head[b] = head;
3084 PREV_INSN (head) = prev_head;
3085 NEXT_INSN (prev_head) = head;
3086
3087 if (new_needs & NEED_TAIL)
3088 basic_block_end[b] = tail;
3089 NEXT_INSN (tail) = next_tail;
3090 PREV_INSN (next_tail) = tail;
3091
3092 /* Restore the line-number notes of each insn. */
3093 if (write_symbols != NO_DEBUG)
3094 {
3095 rtx line, note, prev, new;
3096 int notes = 0;
3097
3098 head = basic_block_head[b];
3099 next_tail = NEXT_INSN (basic_block_end[b]);
3100
3101 /* Determine the current line-number. We want to know the current
3102 line number of the first insn of the block here, in case it is
3103 different from the true line number that was saved earlier. If
3104 different, then we need a line number note before the first insn
3105 of this block. If it happens to be the same, then we don't want to
3106 emit another line number note here. */
3107 for (line = head; line; line = PREV_INSN (line))
3108 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3109 break;
3110
3111 /* Walk the insns keeping track of the current line-number and inserting
3112 the line-number notes as needed. */
3113 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3114 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3115 line = insn;
3116 else if (! (GET_CODE (insn) == NOTE
3117 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
3118 && (note = LINE_NOTE (insn)) != 0
3119 && note != line
3120 && (line == 0
3121 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
3122 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
3123 {
3124 line = note;
3125 prev = PREV_INSN (insn);
3126 if (LINE_NOTE (note))
3127 {
3128 /* Re-use the original line-number note. */
3129 LINE_NOTE (note) = 0;
3130 PREV_INSN (note) = prev;
3131 NEXT_INSN (prev) = note;
3132 PREV_INSN (insn) = note;
3133 NEXT_INSN (note) = insn;
3134 }
3135 else
3136 {
3137 notes++;
3138 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
3139 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
3140 }
3141 }
3142 if (file && notes)
3143 fprintf (file, ";; added %d line-number notes\n", notes);
3144 }
3145
3146 if (file)
3147 {
3148 fprintf (file, ";; new basic block head = %d\n;; new basic block end = %d\n\n",
3149 INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
3150 }
3151
3152 /* Yow! We're done! */
3153 free_pending_lists ();
3154
3155 return;
3156}
3157\f
3158/* Subroutine of split_hard_reg_notes. Searches X for any reference to
3159 REGNO, returning the rtx of the reference found if any. Otherwise,
3160 returns 0. */
3161
3162rtx
3163regno_use_in (regno, x)
3164 int regno;
3165 rtx x;
3166{
3167 register char *fmt;
3168 int i, j;
3169 rtx tem;
3170
3171 if (GET_CODE (x) == REG && REGNO (x) == regno)
3172 return x;
3173
3174 fmt = GET_RTX_FORMAT (GET_CODE (x));
3175 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3176 {
3177 if (fmt[i] == 'e')
3178 {
3179 if (tem = regno_use_in (regno, XEXP (x, i)))
3180 return tem;
3181 }
3182 else if (fmt[i] == 'E')
3183 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3184 if (tem = regno_use_in (regno , XVECEXP (x, i, j)))
3185 return tem;
3186 }
3187
3188 return 0;
3189}
3190
3191/* Subroutine of update_flow_info. Determines whether any new REG_NOTEs are
3192 needed for the hard register mentioned in the note. This can happen
3193 if the reference to the hard register in the original insn was split into
3194 several smaller hard register references in the split insns. */
3195
3196static void
3197split_hard_reg_notes (note, first, last, orig_insn)
3198 rtx note, first, last, orig_insn;
3199{
3200 rtx reg, temp, link;
3201 int n_regs, i, new_reg;
3202 rtx insn;
3203
3204 /* Assume that this is a REG_DEAD note. */
3205 if (REG_NOTE_KIND (note) != REG_DEAD)
3206 abort ();
3207
3208 reg = XEXP (note, 0);
3209
3210 n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
3211
3212 /* ??? Could add a check here to see whether the hard register is referenced
3213 in the same mode as in the original insn. If so, then it has not been
3214 split, and the rest of the code below is unnecessary. */
3215
3216 for (i = 1; i < n_regs; i++)
3217 {
3218 new_reg = REGNO (reg) + i;
3219
3220 /* Check for references to new_reg in the split insns. */
3221 for (insn = last; ; insn = PREV_INSN (insn))
3222 {
3223 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3224 && (temp = regno_use_in (new_reg, PATTERN (insn))))
3225 {
3226 /* Create a new reg dead note here. */
3227 link = rtx_alloc (EXPR_LIST);
3228 PUT_REG_NOTE_KIND (link, REG_DEAD);
3229 XEXP (link, 0) = temp;
3230 XEXP (link, 1) = REG_NOTES (insn);
3231 REG_NOTES (insn) = link;
3232 break;
3233 }
3234 /* It isn't mentioned anywhere, so no new reg note is needed for
3235 this register. */
3236 if (insn == first)
3237 break;
3238 }
3239 }
3240}
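/* A sketch of the case handled above: suppose ORIG_INSN had a REG_DEAD
   note for a DImode hard register occupying regs 2 and 3, and splitting
   produced one insn referencing SImode reg 2 and another referencing
   SImode reg 3.  The original note (covering reg 2) is moved by
   update_flow_info; the loop above then adds a fresh REG_DEAD note for
   reg 3 on the last split insn that mentions it.  */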
3241
3242/* Subroutine of update_flow_info. Determines whether a SET or CLOBBER in an
3243 insn created by splitting needs a REG_DEAD or REG_UNUSED note added. */
3244
3245static void
3246new_insn_dead_notes (pat, insn, last, orig_insn)
3247 rtx pat, insn, last, orig_insn;
3248{
3249 rtx dest, tem, set;
3250
3251 /* PAT is either a CLOBBER or a SET here. */
3252 dest = XEXP (pat, 0);
3253
3254 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3255 || GET_CODE (dest) == STRICT_LOW_PART
3256 || GET_CODE (dest) == SIGN_EXTRACT)
3257 dest = XEXP (dest, 0);
3258
3259 if (GET_CODE (dest) == REG)
3260 {
3261 for (tem = last; tem != insn; tem = PREV_INSN (tem))
3262 {
3263 if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
3264 && reg_overlap_mentioned_p (dest, PATTERN (tem))
3265 && (set = single_set (tem)))
3266 {
3267 rtx tem_dest = SET_DEST (set);
3268
3269 while (GET_CODE (tem_dest) == ZERO_EXTRACT
3270 || GET_CODE (tem_dest) == SUBREG
3271 || GET_CODE (tem_dest) == STRICT_LOW_PART
3272 || GET_CODE (tem_dest) == SIGN_EXTRACT)
3273 tem_dest = XEXP (tem_dest, 0);
3274
3275 if (tem_dest != dest)
3276 {
3277 /* Use the same scheme as combine.c, don't put both REG_DEAD
3278 and REG_UNUSED notes on the same insn. */
3279 if (! find_regno_note (tem, REG_UNUSED, REGNO (dest))
3280 && ! find_regno_note (tem, REG_DEAD, REGNO (dest)))
3281 {
3282 rtx note = rtx_alloc (EXPR_LIST);
3283 PUT_REG_NOTE_KIND (note, REG_DEAD);
3284 XEXP (note, 0) = dest;
3285 XEXP (note, 1) = REG_NOTES (tem);
3286 REG_NOTES (tem) = note;
3287 }
3288 /* The reg only dies in one insn, the last one that uses
3289 it. */
3290 break;
3291 }
3292 else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
3293 /* We found an instruction that both uses the register,
3294 and sets it, so no new REG_NOTE is needed for this set. */
3295 break;
3296 }
3297 }
3298 /* If this is a set, it must die somewhere, unless it is the dest of
3299 the original insn, and hence is live after the original insn. Abort
3300 if it isn't supposed to be live after the original insn.
3301
3302 If this is a clobber, then just add a REG_UNUSED note. */
3303 if (tem == insn)
3304 {
3305 int live_after_orig_insn = 0;
3306 rtx pattern = PATTERN (orig_insn);
3307 int i;
3308
3309 if (GET_CODE (pat) == CLOBBER)
3310 {
3311 rtx note = rtx_alloc (EXPR_LIST);
3312 PUT_REG_NOTE_KIND (note, REG_UNUSED);
3313 XEXP (note, 0) = dest;
3314 XEXP (note, 1) = REG_NOTES (insn);
3315 REG_NOTES (insn) = note;
3316 return;
3317 }
3318
3319 /* The original insn could have multiple sets, so search the
3320 insn for all sets. */
3321 if (GET_CODE (pattern) == SET)
3322 {
3323 if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
3324 live_after_orig_insn = 1;
3325 }
3326 else if (GET_CODE (pattern) == PARALLEL)
3327 {
3328 for (i = 0; i < XVECLEN (pattern, 0); i++)
3329 if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
3330 && reg_overlap_mentioned_p (dest,
3331 SET_DEST (XVECEXP (pattern,
3332 0, i))))
3333 live_after_orig_insn = 1;
3334 }
3335
3336 if (! live_after_orig_insn)
3337 abort ();
3338 }
3339 }
3340}
3341
3342/* Subroutine of update_flow_info. Update the value of reg_n_sets for all
3343 registers modified by X. INC is -1 if the containing insn is being deleted,
3344 and is 1 if the containing insn is a newly generated insn. */
3345
3346static void
3347update_n_sets (x, inc)
3348 rtx x;
3349 int inc;
3350{
3351 rtx dest = SET_DEST (x);
3352
3353 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
3354 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3355 dest = SUBREG_REG (dest);
3356
3357 if (GET_CODE (dest) == REG)
3358 {
3359 int regno = REGNO (dest);
3360
3361 if (regno < FIRST_PSEUDO_REGISTER)
3362 {
3363 register int i;
3364 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
3365
3366 for (i = regno; i < endregno; i++)
3367 reg_n_sets[i] += inc;
3368 }
3369 else
3370 reg_n_sets[regno] += inc;
3371 }
3372}
3373
3374/* Updates all flow-analysis related quantities (including REG_NOTES) for
3375 the insns from FIRST to LAST inclusive that were created by splitting
3376 ORIG_INSN. NOTES are the original REG_NOTES. */
3377
3378static void
3379update_flow_info (notes, first, last, orig_insn)
3380 rtx notes;
3381 rtx first, last;
3382 rtx orig_insn;
3383{
3384 rtx insn, note;
3385 rtx next;
3386 rtx orig_dest, temp;
3387 rtx set;
3388
3389 /* Get and save the destination set by the original insn. */
3390
3391 orig_dest = single_set (orig_insn);
3392 if (orig_dest)
3393 orig_dest = SET_DEST (orig_dest);
3394
3395 /* Move REG_NOTES from the original insn to where they now belong. */
3396
3397 for (note = notes; note; note = next)
3398 {
3399 next = XEXP (note, 1);
3400 switch (REG_NOTE_KIND (note))
3401 {
3402 case REG_DEAD:
3403 case REG_UNUSED:
3404 /* Move these notes from the original insn to the last new insn where
3405 the register is now set. */
3406
3407 for (insn = last; ; insn = PREV_INSN (insn))
3408 {
3409 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3410 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3411 {
3412 XEXP (note, 1) = REG_NOTES (insn);
3413 REG_NOTES (insn) = note;
3414
3415 /* Sometimes need to convert REG_UNUSED notes to REG_DEAD
3416 notes. */
3417 /* ??? This won't handle multiple word registers correctly,
3418 but should be good enough for now. */
3419 if (REG_NOTE_KIND (note) == REG_UNUSED
3420 && ! dead_or_set_p (insn, XEXP (note, 0)))
3421 PUT_REG_NOTE_KIND (note, REG_DEAD);
3422
3423 /* The reg only dies in one insn, the last one that uses
3424 it. */
3425 break;
3426 }
3427 /* It must die somewhere; fail if we couldn't find where it died.
3428
3429 If this is a REG_UNUSED note, then it must be a temporary
3430 register that was not needed by this instantiation of the
3431 pattern, so we can safely ignore it. */
3432 if (insn == first)
3433 {
3434 if (REG_NOTE_KIND (note) != REG_UNUSED)
3435 abort ();
3436
3437 break;
3438 }
3439 }
3440
3441 /* If this note refers to a multiple word hard register, it may
3442 have been split into several smaller hard register references.
3443 Check to see if there are any new register references that
3444 need REG_NOTES added for them. */
3445 temp = XEXP (note, 0);
3446 if (REG_NOTE_KIND (note) == REG_DEAD
3447 && GET_CODE (temp) == REG
3448 && REGNO (temp) < FIRST_PSEUDO_REGISTER
3449 && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) > 1)
3450 split_hard_reg_notes (note, first, last, orig_insn);
3451 break;
3452
3453 case REG_WAS_0:
3454 /* This note applies to the dest of the original insn. Find the
3455 first new insn that now has the same dest, and move the note
3456 there. */
3457
3458 if (! orig_dest)
3459 abort ();
3460
3461 for (insn = first; ; insn = NEXT_INSN (insn))
3462 {
3463 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3464 && (temp = single_set (insn))
3465 && rtx_equal_p (SET_DEST (temp), orig_dest))
3466 {
3467 XEXP (note, 1) = REG_NOTES (insn);
3468 REG_NOTES (insn) = note;
3469 /* The reg is only zero before one insn, the first that
3470 uses it. */
3471 break;
3472 }
3473 /* It must be set somewhere; fail if we couldn't find where it
3474 was set. */
3475 if (insn == last)
3476 abort ();
3477 }
3478 break;
3479
3480 case REG_EQUAL:
3481 case REG_EQUIV:
3482 /* A REG_EQUIV or REG_EQUAL note on an insn with more than one
3483 set is meaningless. Just drop the note. */
3484 if (! orig_dest)
3485 break;
3486
3487 case REG_NO_CONFLICT:
3488 /* These notes apply to the dest of the original insn. Find the last
3489 new insn that now has the same dest, and move the note there. */
3490
3491 if (! orig_dest)
3492 abort ();
3493
3494 for (insn = last; ; insn = PREV_INSN (insn))
3495 {
3496 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3497 && (temp = single_set (insn))
3498 && rtx_equal_p (SET_DEST (temp), orig_dest))
3499 {
3500 XEXP (note, 1) = REG_NOTES (insn);
3501 REG_NOTES (insn) = note;
3502 /* Only put this note on one of the new insns. */
3503 break;
3504 }
3505
3506 /* The original dest must still be set someplace. Abort if we
3507 couldn't find it. */
3508 if (insn == first)
3509 abort ();
3510 }
3511 break;
3512
3513 case REG_LIBCALL:
3514 /* Move a REG_LIBCALL note to the first insn created, and update
3515 the corresponding REG_RETVAL note. */
3516 XEXP (note, 1) = REG_NOTES (first);
3517 REG_NOTES (first) = note;
3518
3519 insn = XEXP (note, 0);
3520 note = find_reg_note (insn, REG_RETVAL, 0);
3521 if (note)
3522 XEXP (note, 0) = first;
3523 break;
3524
3525 case REG_RETVAL:
3526 /* Move a REG_RETVAL note to the last insn created, and update
3527 the corresponding REG_LIBCALL note. */
3528 XEXP (note, 1) = REG_NOTES (last);
3529 REG_NOTES (last) = note;
3530
3531 insn = XEXP (note, 0);
3532 note = find_reg_note (insn, REG_LIBCALL, 0);
3533 if (note)
3534 XEXP (note, 0) = last;
3535 break;
3536
3537 case REG_NONNEG:
3538 /* This should be moved to whichever instruction is a JUMP_INSN. */
3539
3540 for (insn = last; ; insn = PREV_INSN (insn))
3541 {
3542 if (GET_CODE (insn) == JUMP_INSN)
3543 {
3544 XEXP (note, 1) = REG_NOTES (insn);
3545 REG_NOTES (insn) = note;
3546 /* Only put this note on one of the new insns. */
3547 break;
3548 }
3549 /* Fail if we couldn't find a JUMP_INSN. */
3550 if (insn == first)
3551 abort ();
3552 }
3553 break;
3554
3555 case REG_INC:
3556 /* This should be moved to whichever instruction now has the
3557 increment operation. */
3558 abort ();
3559
3560 case REG_LABEL:
3561 /* Should be moved to the new insn(s) which use the label. */
3562 for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
3563 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3564 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3565 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL,
3566 XEXP (note, 0), REG_NOTES (insn));
3567 break;
3568
3569 case REG_CC_SETTER:
3570 case REG_CC_USER:
3571 /* These two notes will never appear until after reorg, so we don't
3572 have to handle them here. */
3573 default:
3574 abort ();
3575 }
3576 }
3577
3578 /* Each new insn created, except the last, has a new set. If the destination
3579 is a register, then this reg is now live across several insns, whereas
3580 previously the dest reg was born and died within the same insn. To
3581 reflect this, we now need a REG_DEAD note on the insn where this
3582 dest reg dies.
3583
3584 Similarly, the new insns may have clobbers that need REG_UNUSED notes. */
3585
3586 for (insn = first; insn != last; insn = NEXT_INSN (insn))
3587 {
3588 rtx pat;
3589 int i;
3590
3591 pat = PATTERN (insn);
3592 if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
3593 new_insn_dead_notes (pat, insn, last, orig_insn);
3594 else if (GET_CODE (pat) == PARALLEL)
3595 {
3596 for (i = 0; i < XVECLEN (pat, 0); i++)
3597 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3598 || GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
3599 new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
3600 }
3601 }
3602
3603 /* If any insn, except the last, uses the register set by the last insn,
3604 then we need a new REG_DEAD note on that insn. In this case, there
3605 would not have been a REG_DEAD note for this register in the original
3606 insn because it was used and set within one insn.
3607
3608 There is no new REG_DEAD note needed if the last insn uses the register
3609 that it is setting. */
3610
3611 set = single_set (last);
3612 if (set)
3613 {
3614 rtx dest = SET_DEST (set);
3615
3616 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3617 || GET_CODE (dest) == STRICT_LOW_PART
3618 || GET_CODE (dest) == SIGN_EXTRACT)
3619 dest = XEXP (dest, 0);
3620
3621 if (GET_CODE (dest) == REG
3622 && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
3623 {
3624 for (insn = PREV_INSN (last); ; insn = PREV_INSN (insn))
3625 {
3626 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3627 && reg_mentioned_p (dest, PATTERN (insn))
3628 && (set = single_set (insn)))
3629 {
3630 rtx insn_dest = SET_DEST (set);
3631
3632 while (GET_CODE (insn_dest) == ZERO_EXTRACT
3633 || GET_CODE (insn_dest) == SUBREG
3634 || GET_CODE (insn_dest) == STRICT_LOW_PART
3635 || GET_CODE (insn_dest) == SIGN_EXTRACT)
3636 insn_dest = XEXP (insn_dest, 0);
3637
3638 if (insn_dest != dest)
3639 {
3640 note = rtx_alloc (EXPR_LIST);
3641 PUT_REG_NOTE_KIND (note, REG_DEAD);
3642 XEXP (note, 0) = dest;
3643 XEXP (note, 1) = REG_NOTES (insn);
3644 REG_NOTES (insn) = note;
3645 /* The reg only dies in one insn, the last one
3646 that uses it. */
3647 break;
3648 }
3649 }
3650 if (insn == first)
3651 break;
3652 }
3653 }
3654 }
3655
3656 /* If the original dest is modifying a multiple register target, and the
3657 original instruction was split such that the original dest is now set
3658 by two or more SUBREG sets, then the split insns no longer kill the
3659 destination of the original insn.
3660
3661 In this case, if there exists an instruction in the same basic block,
3662 before the split insn, which uses the original dest, and this use is
3663 killed by the original insn, then we must remove the REG_DEAD note on
3664 this insn, because it is now superfluous.
3665
3666 This does not apply when a hard register gets split, because the code
3667 knows how to handle overlapping hard registers properly. */
3668 if (orig_dest && GET_CODE (orig_dest) == REG)
3669 {
3670 int found_orig_dest = 0;
3671 int found_split_dest = 0;
3672
3673 for (insn = first; ; insn = NEXT_INSN (insn))
3674 {
3675 set = single_set (insn);
3676 if (set)
3677 {
3678 if (GET_CODE (SET_DEST (set)) == REG
3679 && REGNO (SET_DEST (set)) == REGNO (orig_dest))
3680 {
3681 found_orig_dest = 1;
3682 break;
3683 }
3684 else if (GET_CODE (SET_DEST (set)) == SUBREG
3685 && SUBREG_REG (SET_DEST (set)) == orig_dest)
3686 {
3687 found_split_dest = 1;
3688 break;
3689 }
3690 }
3691
3692 if (insn == last)
3693 break;
3694 }
3695
3696 if (found_split_dest)
3697 {
3698 /* Search backwards from FIRST, looking for the first insn that uses
3699 the original dest. Stop if we pass a CODE_LABEL or a JUMP_INSN.
3700 If we find an insn, and it has a REG_DEAD note, then delete the
3701 note. */
3702
3703 for (insn = first; insn; insn = PREV_INSN (insn))
3704 {
3705 if (GET_CODE (insn) == CODE_LABEL
3706 || GET_CODE (insn) == JUMP_INSN)
3707 break;
3708 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3709 && reg_mentioned_p (orig_dest, insn))
3710 {
3711 note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
3712 if (note)
3713 remove_note (insn, note);
3714 }
3715 }
3716 }
3717 else if (! found_orig_dest)
3718 {
3719 /* This should never happen. */
3720 abort ();
3721 }
3722 }
3723
3724 /* Update reg_n_sets. This is necessary to prevent local alloc from
3725 converting REG_EQUAL notes to REG_EQUIV when splitting has changed
3726 a reg from being set once to being set multiple times. */
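/* A hypothetical example: if ORIG_INSN was the only set of reg 100 and
   carried a REG_EQUAL note, and splitting replaced it with two insns
   that each set reg 100, then the count for reg 100 must go from 1 to
   2, since a REG_EQUAL note may only be treated as REG_EQUIV for a
   register that is set exactly once.  */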
3727
3728 {
3729 rtx x = PATTERN (orig_insn);
3730 RTX_CODE code = GET_CODE (x);
3731
3732 if (code == SET || code == CLOBBER)
3733 update_n_sets (x, -1);
3734 else if (code == PARALLEL)
3735 {
3736 int i;
3737 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3738 {
3739 code = GET_CODE (XVECEXP (x, 0, i));
3740 if (code == SET || code == CLOBBER)
3741 update_n_sets (XVECEXP (x, 0, i), -1);
3742 }
3743 }
3744
3745 for (insn = first; ; insn = NEXT_INSN (insn))
3746 {
3747 x = PATTERN (insn);
3748 code = GET_CODE (x);
3749
3750 if (code == SET || code == CLOBBER)
3751 update_n_sets (x, 1);
3752 else if (code == PARALLEL)
3753 {
3754 int i;
3755 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3756 {
3757 code = GET_CODE (XVECEXP (x, 0, i));
3758 if (code == SET || code == CLOBBER)
3759 update_n_sets (XVECEXP (x, 0, i), 1);
3760 }
3761 }
3762
3763 if (insn == last)
3764 break;
3765 }
3766 }
3767}
3768
3769/* The one entry point in this file. DUMP_FILE is the dump file for
3770 this pass. */
3771
3772void
3773schedule_insns (dump_file)
3774 FILE *dump_file;
3775{
3776 int max_uid = MAX_INSNS_PER_SPLIT * (get_max_uid () + 1);
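/* Splitting may replace each original insn by up to MAX_INSNS_PER_SPLIT
   new insns, each with a fresh uid, so every uid-indexed array below is
   allocated for that worst case.  */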
3777 int i, b;
3778 rtx insn;
3779
3780 /* Taking care of this degenerate case makes the rest of
3781 this code simpler. */
3782 if (n_basic_blocks == 0)
3783 return;
3784
3785 /* Create an insn here so that we can hang dependencies off of it later. */
3786 sched_before_next_call = gen_rtx (INSN, VOIDmode, 0, 0, 0, 0, 0, 0, 0);
3787
3788 /* Initialize the unused_*_lists. We can't use the ones left over from
3789 the previous function, because gcc has freed that memory. However, we
3790 can reuse the ones left over from the first sched pass in the second
3791 pass, so only clear them on the first sched pass. The first pass runs
3792 before reload if flag_schedule_insns is set; otherwise it runs afterwards. */
3793
3794 if (reload_completed == 0 || ! flag_schedule_insns)
3795 {
3796 unused_insn_list = 0;
3797 unused_expr_list = 0;
3798 }
3799
3800 /* We create no insns here, only reorder them, so we
3801 remember how far we can cut back the stack on exit. */
3802
3803 /* Allocate data for this pass. See comments, above,
3804 for what these vectors do. */
3805 insn_luid = (int *) alloca (max_uid * sizeof (int));
3806 insn_priority = (int *) alloca (max_uid * sizeof (int));
3807 insn_costs = (short *) alloca (max_uid * sizeof (short));
3808 insn_ref_count = (int *) alloca (max_uid * sizeof (int));
3809
3810 if (reload_completed == 0)
3811 {
3812 sched_reg_n_deaths = (short *) alloca (max_regno * sizeof (short));
3813 sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
3814 sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
3815 bb_dead_regs = (regset) alloca (regset_bytes);
3816 bb_live_regs = (regset) alloca (regset_bytes);
3817 bzero (sched_reg_n_calls_crossed, max_regno * sizeof (int));
3818 bzero (sched_reg_live_length, max_regno * sizeof (int));
3819 bcopy (reg_n_deaths, sched_reg_n_deaths, max_regno * sizeof (short));
3820 init_alias_analysis ();
3821 }
3822 else
3823 {
3824 sched_reg_n_deaths = 0;
3825 sched_reg_n_calls_crossed = 0;
3826 sched_reg_live_length = 0;
3827 bb_dead_regs = 0;
3828 bb_live_regs = 0;
3829 if (! flag_schedule_insns)
3830 init_alias_analysis ();
3831 }
3832
3833 if (write_symbols != NO_DEBUG)
3834 {
3835 rtx line;
3836
3837 line_note = (rtx *) alloca (max_uid * sizeof (rtx));
3838 bzero (line_note, max_uid * sizeof (rtx));
3839 line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
3840 bzero (line_note_head, n_basic_blocks * sizeof (rtx));
3841
3842 /* Determine the line-number at the start of each basic block.
3843 This must be computed and saved now, because after a basic block's
3844 predecessor has been scheduled, it is impossible to accurately
3845 determine the correct line number for the first insn of the block. */
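/* That is, for each block remember the nearest line-number note at or
   before the block head; scanning backwards with PREV_INSN finds it
   even when the note precedes the CODE_LABEL that starts the block.  */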
3846
3847 for (b = 0; b < n_basic_blocks; b++)
3848 for (line = basic_block_head[b]; line; line = PREV_INSN (line))
3849 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3850 {
3851 line_note_head[b] = line;
3852 break;
3853 }
3854 }
3855
3856 bzero (insn_luid, max_uid * sizeof (int));
3857 bzero (insn_priority, max_uid * sizeof (int));
3858 bzero (insn_costs, max_uid * sizeof (short));
3859 bzero (insn_ref_count, max_uid * sizeof (int));
3860
3861 /* Schedule each basic block, block by block. */
3862
3863 if (NEXT_INSN (basic_block_end[n_basic_blocks-1]) == 0
3864 || (GET_CODE (basic_block_end[n_basic_blocks-1]) != NOTE
3865 && GET_CODE (basic_block_end[n_basic_blocks-1]) != CODE_LABEL))
3866 emit_note_after (NOTE_INSN_DELETED, basic_block_end[n_basic_blocks-1]);
3867
3868 for (b = 0; b < n_basic_blocks; b++)
3869 {
3870 rtx insn, next;
3871 rtx insns;
3872
3873 note_list = 0;
3874
3875 for (insn = basic_block_head[b]; ; insn = next)
3876 {
3877 rtx prev;
3878 rtx set;
3879
3880 /* Can't use `next_real_insn' because that
3881 might cross CODE_LABELs and skip over entire basic blocks. */
3882 next = NEXT_INSN (insn);
3883 if (GET_CODE (insn) != INSN)
3884 {
3885 if (insn == basic_block_end[b])
3886 break;
3887
3888 continue;
3889 }
3890
3891 /* Don't split no-op move insns. These should silently disappear
3892 later in final. Splitting such insns would break the code
3893 that handles REG_NO_CONFLICT blocks. */
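/* E.g. (set (reg 5) (reg 5)) (a hypothetical register number), or any
   set whose source and destination are rtx_equal_p.  */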
3894 set = single_set (insn);
3895 if (set && rtx_equal_p (SET_SRC (set), SET_DEST (set)))
3896 {
3897 if (insn == basic_block_end[b])
3898 break;
3899
3900 /* Nops get in the way while scheduling, so delete them now if
3901 register allocation has already been done. It is too risky
3902 to try to do this before register allocation, and there are
3903 unlikely to be very many nops then anyway. */
3904 if (reload_completed)
3905 {
3906 PUT_CODE (insn, NOTE);
3907 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
3908 NOTE_SOURCE_FILE (insn) = 0;
3909 }
3910
3911 continue;
3912 }
3913
3914 /* Split insns here to get maximum fine-grained parallelism. */
3915 prev = PREV_INSN (insn);
3916 if (reload_completed == 0)
3917 {
3918 rtx last, first = PREV_INSN (insn);
3919 rtx notes = REG_NOTES (insn);
3920
3921 last = try_split (PATTERN (insn), insn, 1);
3922 if (last != insn)
3923 {
3924 /* try_split returns the NOTE that INSN became. */
3925 first = NEXT_INSN (first);
3926 update_flow_info (notes, first, last, insn);
3927
3928 PUT_CODE (insn, NOTE);
3929 NOTE_SOURCE_FILE (insn) = 0;
3930 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
3931 if (insn == basic_block_head[b])
3932 basic_block_head[b] = first;
3933 if (insn == basic_block_end[b])
3934 {
3935 basic_block_end[b] = last;
3936 break;
3937 }
3938 }
3939 }
3940
3941 if (insn == basic_block_end[b])
3942 break;
3943 }
3944
3945 schedule_block (b, dump_file);
3946
3947#ifdef USE_C_ALLOCA
3948 alloca (0);
3949#endif
3950 }
3951
3952 if (write_symbols != NO_DEBUG)
3953 {
3954 rtx line = 0;
3955 rtx insn = get_insns ();
3956 int active_insn = 0;
3957 int notes = 0;
3958
3959 /* Walk the insns deleting redundant line-number notes. Many of these
3960 were already present before scheduling; the remainder tend to occur
3961 at basic block boundaries. */
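/* Two illustrative cases: a line note with no active insns between it
   and the next line note is redundant, and of two successive notes
   giving the same line and file, the later one is redundant.  (The
   scan below walks backwards from the last insn.)  */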
3962 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
3963 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3964 {
3965 /* If there are no active insns following, INSN is redundant. */
3966 if (active_insn == 0)
3967 {
3968 notes++;
3969 NOTE_SOURCE_FILE (insn) = 0;
3970 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
3971 }
3972 /* If the line number is unchanged, LINE is redundant. */
3973 else if (line
3974 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
3975 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
3976 {
3977 notes++;
3978 NOTE_SOURCE_FILE (line) = 0;
3979 NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
3980 line = insn;
3981 }
3982 else
3983 line = insn;
3984 active_insn = 0;
3985 }
3986 else if (! ((GET_CODE (insn) == NOTE
3987 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
3988 || (GET_CODE (insn) == INSN
3989 && (GET_CODE (PATTERN (insn)) == USE
3990 || GET_CODE (PATTERN (insn)) == CLOBBER))))
3991 active_insn++;
3992
3993 if (dump_file && notes)
3994 fprintf (dump_file, ";; deleted %d line-number notes\n", notes);
3995 }
3996
3997 if (reload_completed == 0)
3998 {
3999 int regno;
4000 for (regno = 0; regno < max_regno; regno++)
4001 if (sched_reg_live_length[regno])
4002 {
4003 if (dump_file)
4004 {
4005 if (reg_live_length[regno] > sched_reg_live_length[regno])
4006 fprintf (dump_file,
4007 ";; register %d life shortened from %d to %d\n",
4008 regno, reg_live_length[regno],
4009 sched_reg_live_length[regno]);
4010 /* Negative values are special; don't overwrite the current
4011 reg_live_length value if it is negative. */
4012 else if (reg_live_length[regno] < sched_reg_live_length[regno]
4013 && reg_live_length[regno] >= 0)
4014 fprintf (dump_file,
4015 ";; register %d life extended from %d to %d\n",
4016 regno, reg_live_length[regno],
4017 sched_reg_live_length[regno]);
4018
4019 if (reg_n_calls_crossed[regno]
4020 && ! sched_reg_n_calls_crossed[regno])
4021 fprintf (dump_file,
4022 ";; register %d no longer crosses calls\n", regno);
4023 else if (! reg_n_calls_crossed[regno]
4024 && sched_reg_n_calls_crossed[regno])
4025 fprintf (dump_file,
4026 ";; register %d now crosses calls\n", regno);
4027 }
4028 /* Negative values are special; don't overwrite the current
4029 reg_live_length value if it is negative. */
4030 if (reg_live_length[regno] >= 0)
4031 reg_live_length[regno] = sched_reg_live_length[regno];
4032 reg_n_calls_crossed[regno] = sched_reg_n_calls_crossed[regno];
4033 }
4034 }
4035}
4036#endif /* INSN_SCHEDULING */