/* Instruction scheduling pass.
   Copyright (C) 1992 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

/* Instruction scheduling pass.

   This pass implements list scheduling within basic blocks.  It is
   run after flow analysis, but before register allocation.  The
   scheduler works as follows:

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning
   values to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   Function unit conflicts are resolved during reverse list scheduling
   by tracking the time when each insn is committed to the schedule
   and from that, the time the function units it uses must be free.
   As insns on the ready list are considered for scheduling, those
   that would result in a blockage of the already committed insns are
   queued until no blockage will result.  Among the remaining insns on
   the ready list to be considered, the first one with the largest
   potential for causing a subsequent blockage is chosen.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with lowest conflict cost, ties broken by
   2.  choose insn with the longest path to end of bb, ties broken by
   3.  choose insn that kills the most registers, ties broken by
   4.  choose insn that conflicts with the most ready insns, or finally
   5.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not do any branch scheduling.  But
   that is ok, since we can use GNU's delayed branch scheduling
   pass to take care of this case.

   Also note that no further optimizations based on algebraic identities
   are performed, so this pass would be a good one to perform instruction
   splitting, such as breaking up a multiply instruction into shifts
   and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   This pass must update information that subsequent passes expect to be
   correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, basic_block_head,
   basic_block_end.

   The information in the line number notes is carefully retained by this
   pass.  All other NOTE insns are grouped in their same relative order at
   the beginning of basic blocks that have been scheduled.  */
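
/* An editorial illustration, not part of the original commentary: on a toy
   block i1: r1 = mem[a]; i2: r2 = r1 + r3; i3: mem[b] = r2, assuming
   (hypothetically) that the load costs 2 cycles and the other insns 1,
   the backward walk described above computes

	priority (i3) = 1
	priority (i2) = priority (i3) + insn_cost (i2) - 1 = 1
	priority (i1) = priority (i2) + insn_cost (i1) - 1 = 2

   so i1, which heads the longest latency-weighted path to the end of the
   block, sorts ahead of its dependents when the ready list is formed.  */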
\f
#include <stdio.h>
#include "config.h"
#include "rtl.h"
#include "basic-block.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"

#ifdef INSN_SCHEDULING
/* Arrays set up by scheduling for the same respective purposes as
   similar-named arrays set up by flow analysis.  We work with these
   arrays during the scheduling pass so we can compare values against
   unscheduled code.

   Values of these arrays are copied at the end of this pass into the
   arrays set up by flow analysis.  */
static short *sched_reg_n_deaths;
static int *sched_reg_n_calls_crossed;
static int *sched_reg_live_length;

/* Element N is the next insn that sets (hard or pseudo) register
   N within the current basic block; or zero, if there is no
   such insn.  Needed for new registers which may be introduced
   by splitting insns.  */
static rtx *reg_last_uses;
static rtx *reg_last_sets;

/* Vector indexed by INSN_UID giving the original ordering of the insns.  */
static int *insn_luid;
#define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving each instruction a priority.  */
static int *insn_priority;
#define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving the cost (issue-to-result latency)
   of the insn, as computed and cached by insn_cost.  */
static short *insn_costs;
#define INSN_COST(INSN) insn_costs[INSN_UID (INSN)]

/* Vector indexed by INSN_UID giving an encoding of the function units
   used.  */
static short *insn_units;
#define INSN_UNIT(INSN) insn_units[INSN_UID (INSN)]

/* Vector indexed by INSN_UID giving an encoding of the blockage range
   function.  The unit and the range are encoded.  */
static unsigned int *insn_blockage;
#define INSN_BLOCKAGE(INSN) insn_blockage[INSN_UID (INSN)]
#define UNIT_BITS 5
#define BLOCKAGE_MASK ((1 << BLOCKAGE_BITS) - 1)
#define ENCODE_BLOCKAGE(U,R)			\
  (((U) << BLOCKAGE_BITS			\
    | MIN_BLOCKAGE_COST (R)) << BLOCKAGE_BITS	\
   | MAX_BLOCKAGE_COST (R))
#define UNIT_BLOCKED(B) ((B) >> (2 * BLOCKAGE_BITS))
#define BLOCKAGE_RANGE(B) \
  (((((B) >> BLOCKAGE_BITS) & BLOCKAGE_MASK) << (HOST_BITS_PER_INT / 2)) \
   | ((B) & BLOCKAGE_MASK))

/* Encodings of the `<name>_unit_blockage_range' function.  */
#define MIN_BLOCKAGE_COST(R) ((R) >> (HOST_BITS_PER_INT / 2))
#define MAX_BLOCKAGE_COST(R) ((R) & ((1 << (HOST_BITS_PER_INT / 2)) - 1))

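/* An editorial worked example of the encoding above, assuming a target
   where genattr emits BLOCKAGE_BITS == 4 and HOST_BITS_PER_INT == 32.
   The layout is the unit number plus one in the bits above
   2 * BLOCKAGE_BITS, the minimum cost in the middle BLOCKAGE_BITS, and
   the maximum cost in the low BLOCKAGE_BITS.  A range R with minimum 1
   and maximum 3 arrives as (1 << 16) | 3, so caching it for unit 2 gives

	ENCODE_BLOCKAGE (3, R) = ((3 << 4 | 1) << 4) | 3 = 0x313

   and decoding recovers everything: UNIT_BLOCKED (0x313) = 3 (the unit
   plus one), and BLOCKAGE_RANGE (0x313) = (1 << 16) | 3 = R again.  */
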
#define DONE_PRIORITY -1
#define MAX_PRIORITY 0x7fffffff
#define TAIL_PRIORITY 0x7ffffffe
#define LAUNCH_PRIORITY 0x7f000001
#define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
#define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)
/* Vector indexed by INSN_UID giving number of insns referring to this insn.  */
static int *insn_ref_count;
#define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving line-number note in effect for each
   insn.  For line-number notes, this indicates whether the note may be
   reused.  */
static rtx *line_note;
#define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

/* Regsets telling whether a given register is live or dead before the last
   scheduled insn.  Must scan the instructions once before scheduling to
   determine what registers are live or dead at the end of the block.  */
static regset bb_dead_regs;
static regset bb_live_regs;

/* Regset telling whether a given register is live after the insn currently
   being scheduled.  Before processing an insn, this is equal to bb_live_regs
   above.  This is used so that we can find registers that are newly born/dead
   after processing an insn.  */
static regset old_live_regs;

/* The chain of REG_DEAD notes.  REG_DEAD notes are removed from all insns
   during the initial scan and reused later.  If there are not exactly as
   many REG_DEAD notes in the post-scheduled code as there were in the
   prescheduled code, then we trigger an abort because this indicates a bug.  */
static rtx dead_notes;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns following it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.  Insns may
   move from the "Ready" list to the "Queued" set if they are blocked
   due to a function unit conflict.

   The "Pending" list (P) consists of the insns in the LOG_LINKS of the
   unscheduled insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transition (R->Q) is implemented in `schedule_select' when an
   insn is found to have a function unit conflict with the already
   committed insns.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented at the top of the scheduling
   loop in `schedule_block' as time passes or stalls are introduced.  */

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  INSN_QUEUE_SIZE is a power of two larger than
   MAX_BLOCKAGE and MAX_READY_COST computed by genattr.c.  This is the
   longest time an insn may be queued.  */
static rtx insn_queue[INSN_QUEUE_SIZE];
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & (INSN_QUEUE_SIZE-1))
#define NEXT_Q_AFTER(X,C) (((X)+C) & (INSN_QUEUE_SIZE-1))

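/* An editorial sketch, not part of the pass proper, of how the queue
   macros above are meant to be used: an insn that must wait N_CYCLES is
   linked onto the bucket that the clock pointer will reach after N_CYCLES
   ticks; advancing `q_ptr' with NEXT_Q each tick then dumps ripe buckets
   back onto the ready list.  The helper name is hypothetical.  */

static void
queue_insn_sketch (insn, n_cycles)
     rtx insn;
     int n_cycles;
{
  /* Wrap around the power-of-two buffer with NEXT_Q_AFTER.  */
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);

  insn_queue[next_q] = gen_rtx (INSN_LIST, VOIDmode, insn,
				insn_queue[next_q]);
  q_size += 1;
}
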
/* Vector indexed by INSN_UID giving the minimum clock tick at which
   the insn becomes ready.  This is used to note timing constraints for
   insns in the pending list.  */
static int *insn_tick;
#define INSN_TICK(INSN) (insn_tick[INSN_UID (INSN)])

/* Forward declarations.  */
static void sched_analyze_2 ();
static void schedule_block ();

/* Main entry point of this file.  */
void schedule_insns ();
#endif /* INSN_SCHEDULING */
\f
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Vector indexed by N giving the initial (unchanging) value known
   for pseudo-register N.  */
static rtx *reg_known_value;

/* Indicates number of valid entries in reg_known_value.  */
static int reg_known_value_size;

static rtx
canon_rtx (x)
     rtx x;
{
  if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER
      && REGNO (x) <= reg_known_value_size)
    return reg_known_value[REGNO (x)];
  else if (GET_CODE (x) == PLUS)
    {
      rtx x0 = canon_rtx (XEXP (x, 0));
      rtx x1 = canon_rtx (XEXP (x, 1));

      if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
	{
	  /* We can tolerate LO_SUMs being offset here; these
	     rtl are used for nothing other than comparisons.  */
	  if (GET_CODE (x0) == CONST_INT)
	    return plus_constant_for_output (x1, INTVAL (x0));
	  else if (GET_CODE (x1) == CONST_INT)
	    return plus_constant_for_output (x0, INTVAL (x1));
	  return gen_rtx (PLUS, GET_MODE (x), x0, x1);
	}
    }
  return x;
}

/* Set up all info needed to perform alias analysis on memory references.  */

void
init_alias_analysis ()
{
  int maxreg = max_reg_num ();
  rtx insn;
  rtx note;
  rtx set;

  reg_known_value_size = maxreg;

  reg_known_value
    = (rtx *) oballoc ((maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx))
      - FIRST_PSEUDO_REGISTER;
  bzero (reg_known_value+FIRST_PSEUDO_REGISTER,
	 (maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx));

  /* Fill in the entries with known constant values.  */
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if ((set = single_set (insn)) != 0
	&& GET_CODE (SET_DEST (set)) == REG
	&& REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
	&& (((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
	     && reg_n_sets[REGNO (SET_DEST (set))] == 1)
	    || (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != 0)
	&& GET_CODE (XEXP (note, 0)) != EXPR_LIST)
      reg_known_value[REGNO (SET_DEST (set))] = XEXP (note, 0);

  /* Fill in the remaining entries.  */
  while (--maxreg >= FIRST_PSEUDO_REGISTER)
    if (reg_known_value[maxreg] == 0)
      reg_known_value[maxreg] = regno_reg_rtx[maxreg];
}

/* Return 1 if X and Y are identical-looking rtx's.

   We use the data in reg_known_value above to see if two registers with
   different numbers are, in fact, equivalent.  */

static int
rtx_equal_for_memref_p (x, y)
     rtx x, y;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0 && y == 0)
    return 1;
  if (x == 0 || y == 0)
    return 0;
  x = canon_rtx (x);
  y = canon_rtx (y);

  if (x == y)
    return 1;

  code = GET_CODE (x);
  /* Rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively.  */

  if (code == REG)
    return REGNO (x) == REGNO (y);
  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case 'n':
	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'V':
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_memref_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_memref_p (XEXP (x, i), XEXP (y, i)) == 0)
	    return 0;
	  break;

	case 'S':
	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;

	case '0':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }
  return 1;
}

/* Given an rtx X, find a SYMBOL_REF or LABEL_REF within
   X and return it, or return 0 if none found.  */

static rtx
find_symbolic_term (x)
     rtx x;
{
  register int i;
  register enum rtx_code code;
  register char *fmt;

  code = GET_CODE (x);
  if (code == SYMBOL_REF || code == LABEL_REF)
    return x;
  if (GET_RTX_CLASS (code) == 'o')
    return 0;

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      rtx t;

      if (fmt[i] == 'e')
	{
	  t = find_symbolic_term (XEXP (x, i));
	  if (t != 0)
	    return t;
	}
      else if (fmt[i] == 'E')
	break;
    }
  return 0;
}

/* Return nonzero if X and Y (memory addresses) could reference the
   same location in memory.  C is an offset accumulator.  When
   C is nonzero, we are testing aliases between X and Y + C.
   XSIZE is the size in bytes of the X reference,
   similarly YSIZE is the size in bytes for Y.

   If XSIZE or YSIZE is zero, we do not know the amount of memory being
   referenced (the reference was BLKmode), so make the most pessimistic
   assumptions.

   We recognize the following cases of non-conflicting memory:

   (1) addresses involving the frame pointer cannot conflict
       with addresses involving static variables.
   (2) static variables with different addresses cannot conflict.

   It would be nice to notice that varying addresses cannot conflict
   with fp if no local variables had their addresses taken, but that's
   too hard now.  */

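/* An editorial note illustrating the overlap test used repeatedly below:
   after canonicalization, X is treated as the byte range [0, XSIZE) and
   Y as [C, C+YSIZE), so the references conflict when

	xsize == 0 || ysize == 0
	|| (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)

   For example, xsize = 4, ysize = 4, c = 4 describes two adjacent words:
   c >= 0 and xsize == c, hence no conflict; with c = 2 the ranges share
   bytes [2, 4) and xsize > c reports a conflict.  */
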
static int
memrefs_conflict_p (xsize, x, ysize, y, c)
     rtx x, y;
     int xsize, ysize;
     HOST_WIDE_INT c;
{
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);
  else if (GET_CODE (x) == LO_SUM)
    x = XEXP (x, 1);
  else
    x = canon_rtx (x);
  if (GET_CODE (y) == HIGH)
    y = XEXP (y, 0);
  else if (GET_CODE (y) == LO_SUM)
    y = XEXP (y, 1);
  else
    y = canon_rtx (y);

  if (rtx_equal_for_memref_p (x, y))
    return (xsize == 0 || ysize == 0 ||
	    (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));

  if (y == frame_pointer_rtx || y == stack_pointer_rtx)
    {
      rtx t = y;
      int tsize = ysize;
      y = x; ysize = xsize;
      x = t; xsize = tsize;
    }

  if (x == frame_pointer_rtx || x == stack_pointer_rtx)
    {
      rtx y1;

      if (CONSTANT_P (y))
	return 0;

      if (GET_CODE (y) == PLUS
	  && canon_rtx (XEXP (y, 0)) == x
	  && (y1 = canon_rtx (XEXP (y, 1)))
	  && GET_CODE (y1) == CONST_INT)
	{
	  c += INTVAL (y1);
	  return (xsize == 0 || ysize == 0
		  || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
	}

      if (GET_CODE (y) == PLUS
	  && (y1 = canon_rtx (XEXP (y, 0)))
	  && CONSTANT_P (y1))
	return 0;

      return 1;
    }

  if (GET_CODE (x) == PLUS)
    {
      /* The fact that X is canonicalized means that this
	 PLUS rtx is canonicalized.  */
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (GET_CODE (y) == PLUS)
	{
	  /* The fact that Y is canonicalized means that this
	     PLUS rtx is canonicalized.  */
	  rtx y0 = XEXP (y, 0);
	  rtx y1 = XEXP (y, 1);

	  if (rtx_equal_for_memref_p (x1, y1))
	    return memrefs_conflict_p (xsize, x0, ysize, y0, c);
	  if (rtx_equal_for_memref_p (x0, y0))
	    return memrefs_conflict_p (xsize, x1, ysize, y1, c);
	  if (GET_CODE (x1) == CONST_INT)
	    if (GET_CODE (y1) == CONST_INT)
	      return memrefs_conflict_p (xsize, x0, ysize, y0,
					 c - INTVAL (x1) + INTVAL (y1));
	    else
	      return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
	  else if (GET_CODE (y1) == CONST_INT)
	    return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));

	  /* Handle case where we cannot understand iteration operators,
	     but we notice that the base addresses are distinct objects.  */
	  x = find_symbolic_term (x);
	  if (x == 0)
	    return 1;
	  y = find_symbolic_term (y);
	  if (y == 0)
	    return 1;
	  return rtx_equal_for_memref_p (x, y);
	}
      else if (GET_CODE (x1) == CONST_INT)
	return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
    }
  else if (GET_CODE (y) == PLUS)
    {
      /* The fact that Y is canonicalized means that this
	 PLUS rtx is canonicalized.  */
      rtx y0 = XEXP (y, 0);
      rtx y1 = XEXP (y, 1);

      if (GET_CODE (y1) == CONST_INT)
	return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
      else
	return 1;
    }

  if (GET_CODE (x) == GET_CODE (y))
    switch (GET_CODE (x))
      {
      case MULT:
	{
	  /* Handle cases where we expect the second operands to be the
	     same, and check only whether the first operand would conflict
	     or not.  */
	  rtx x0, y0;
	  rtx x1 = canon_rtx (XEXP (x, 1));
	  rtx y1 = canon_rtx (XEXP (y, 1));
	  if (! rtx_equal_for_memref_p (x1, y1))
	    return 1;
	  x0 = canon_rtx (XEXP (x, 0));
	  y0 = canon_rtx (XEXP (y, 0));
	  if (rtx_equal_for_memref_p (x0, y0))
	    return (xsize == 0 || ysize == 0
		    || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));

	  /* Can't properly adjust our sizes.  */
	  if (GET_CODE (x1) != CONST_INT)
	    return 1;
	  xsize /= INTVAL (x1);
	  ysize /= INTVAL (x1);
	  c /= INTVAL (x1);
	  return memrefs_conflict_p (xsize, x0, ysize, y0, c);
	}
      }

  if (CONSTANT_P (x))
    {
      if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
	{
	  c += (INTVAL (y) - INTVAL (x));
	  return (xsize == 0 || ysize == 0
		  || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
	}

      if (GET_CODE (x) == CONST)
	{
	  if (GET_CODE (y) == CONST)
	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
				       ysize, canon_rtx (XEXP (y, 0)), c);
	  else
	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
				       ysize, y, c);
	}
      if (GET_CODE (y) == CONST)
	return memrefs_conflict_p (xsize, x, ysize,
				   canon_rtx (XEXP (y, 0)), c);

      if (CONSTANT_P (y))
	return (rtx_equal_for_memref_p (x, y)
		&& (xsize == 0 || ysize == 0
		    || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)));

      return 1;
    }
  return 1;
}

/* Functions to compute memory dependencies.

   Since we process the insns in execution order, we can build tables
   to keep track of what registers are fixed (and not aliased), what registers
   are varying in known ways, and what registers are varying in unknown
   ways.

   If both memory references are volatile, then there must always be a
   dependence between the two references, since their order cannot be
   changed.  A volatile and non-volatile reference can be interchanged
   though.

   A MEM_IN_STRUCT reference at a varying address can never conflict with a
   non-MEM_IN_STRUCT reference at a fixed address.  */

/* Read dependence: X is read after read in MEM takes place.  There can
   only be a dependence here if both reads are volatile.  */

int
read_dependence (mem, x)
     rtx mem;
     rtx x;
{
  return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
}

/* True dependence: X is read after store in MEM takes place.  */

int
true_dependence (mem, x)
     rtx mem;
     rtx x;
{
  /* If X is an unchanging read, then it can't possibly conflict with any
     non-unchanging store.  It may conflict with an unchanging write though,
     because there may be a single store to this address to initialize it.
     Just fall through to the code below to resolve the case where we have
     both an unchanging read and an unchanging write.  This won't handle all
     cases optimally, but the possible performance loss should be
     negligible.  */
  if (RTX_UNCHANGING_P (x) && ! RTX_UNCHANGING_P (mem))
    return 0;

  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
	  || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
				  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
	      && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
		    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
	      && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
		    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}

/* Anti dependence: X is written after read in MEM takes place.  */

int
anti_dependence (mem, x)
     rtx mem;
     rtx x;
{
  /* If MEM is an unchanging read, then it can't possibly conflict with
     the store to X, because there is at most one store to MEM, and it must
     have occurred somewhere before MEM.  */
  if (RTX_UNCHANGING_P (mem))
    return 0;

  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
	  || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
				  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
	      && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
		    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
	      && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
		    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}

/* Output dependence: X is written after store in MEM takes place.  */

int
output_dependence (mem, x)
     rtx mem;
     rtx x;
{
  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
	  || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
				  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
	      && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
		    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
	      && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
		    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}
\f
/* Helper functions for instruction scheduling.  */

/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */

  next = NEXT_INSN (elem);

#ifdef HAVE_cc0
  while (next && GET_CODE (next) == NOTE)
    next = NEXT_INSN (next);
#endif

  if (next && SCHED_GROUP_P (next))
    {
      /* Notes will never intervene here though, so don't bother checking
	 for them.  */
      while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next)))
	next = NEXT_INSN (next);

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
	return;

      /* Make the dependence to NEXT, the last insn of the group, instead
	 of the original ELEM.  */
      elem = next;
    }

  /* Check that we don't already have this dependence.  */
  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    if (XEXP (link, 0) == elem)
      {
	/* If this is a more restrictive type of dependence than the existing
	   one, then change the existing dependence to this type.  */
	if ((int) dep_type < (int) REG_NOTE_KIND (link))
	  PUT_REG_NOTE_KIND (link, dep_type);
	return;
      }
  /* Might want to check one level of transitivity to save conses.  */

  link = rtx_alloc (INSN_LIST);
  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);
  XEXP (link, 0) = elem;
  XEXP (link, 1) = LOG_LINKS (insn);
  LOG_LINKS (insn) = link;
}
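
/* An editorial example of the convention above: the dependent insn comes
   first and the insn depended on second, so a write-after-read hazard in
   which WRITER must stay after READER is recorded as

	add_dependence (writer, reader, REG_DEP_ANTI);

   while a read-after-write (true) dependence passes 0 as the kind, as in
   the calls made from sched_analyze_1 and sched_analyze_2 below.  */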

/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */
void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link;
       prev = link, link = XEXP (link, 1))
    {
      if (XEXP (link, 0) == elem)
	{
	  if (prev)
	    XEXP (prev, 1) = XEXP (link, 1);
	  else
	    LOG_LINKS (insn) = XEXP (link, 1);
	  found = 1;
	}
    }

  if (! found)
    abort ();
  return;
}
\f
#ifndef INSN_SCHEDULING
void schedule_insns () {}
#else
#ifndef __GNUC__
#define __inline
#endif

/* Computation of memory dependencies.  */

/* The *_insns and *_mems are paired lists.  Each pending memory operation
   will have a pointer to the MEM rtx on one list and a pointer to the
   containing insn on the other list in the same place in the list.  */

/* We can't use add_dependence like the old code did, because a single insn
   may have multiple memory accesses, and hence needs to be on the list
   once for each memory access.  Add_dependence won't let you add an insn
   to a list more than once.  */

/* An INSN_LIST containing all insns with pending read operations.  */
static rtx pending_read_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
static rtx pending_read_mems;

/* An INSN_LIST containing all insns with pending write operations.  */
static rtx pending_write_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
static rtx pending_write_mems;

/* Indicates the combined length of the two pending lists.  We must prevent
   these lists from ever growing too large since the number of dependencies
   produced is at least O(N*N), and execution time is at least O(4*N*N), as
   a function of the length of these pending lists.  */

static int pending_lists_length;

/* An INSN_LIST containing all INSN_LISTs allocated but currently unused.  */

static rtx unused_insn_list;

/* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused.  */

static rtx unused_expr_list;

/* The last insn upon which all memory references must depend.
   This is an insn which flushed the pending lists, creating a dependency
   between it and all previously pending memory references.  This creates
   a barrier (or a checkpoint) which no memory reference is allowed to cross.

   This includes all non-constant CALL_INSNs.  When we do interprocedural
   alias analysis, this restriction can be relaxed.
   This may also be an INSN that writes memory if the pending lists grow
   too large.  */

static rtx last_pending_memory_flush;

/* The last function call we have seen.  All hard regs, and, of course,
   the last function call, must depend on this.  */

static rtx last_function_call;

/* The LOG_LINKS field of this is a list of insns which use a pseudo register
   that does not already cross a call.  We create dependencies between each
   of those insns and the next call insn, to ensure that they won't cross a
   call after scheduling is done.  */

static rtx sched_before_next_call;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;

/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
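
/* An editorial illustration of the four kinds on a toy sequence

	i1: r1 = mem[a]
	i2: mem[a] = r2
	i3: mem[a] = r3
	i4: r4 = mem[a]

   i2 anti-depends on i1 (write follows read), i3 output-depends on i2
   (write follows write), and i4 truly depends on i3 (read follows write);
   the two reads i1 and i4 only conflict if both are volatile.  */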
\f
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

__inline static rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
	return list;
      list = XEXP (list, 1);
    }
  return 0;
}

/* Compute the function units used by INSN.  This caches the value
   returned by function_units_used.  A function unit is encoded as the
   unit number if the value is non-negative and the complement of a
   mask if the value is negative.  A function unit index is the
   non-negative encoding.  */

__inline static int
insn_unit (insn)
     rtx insn;
{
  register int unit = INSN_UNIT (insn);

  if (unit == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.
	 We can't pass these directly to function_units_used because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (INSN_CODE (insn) < 0)
	unit = -1;
      else
	{
	  unit = function_units_used (insn);
	  /* Increment non-negative values so we can cache zero.  */
	  if (unit >= 0) unit++;
	}
      /* We only cache 16 bits of the result, so if the value is out of
	 range, don't cache it.  */
      if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT
	  || unit >= 0
	  || (~unit & ((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0)
	INSN_UNIT (insn) = unit;
    }
  return (unit > 0 ? unit - 1 : unit);
}

/* Compute the blockage range for executing INSN on UNIT.  This caches
   the value returned by the blockage_range_function for the unit.
   These values are encoded in an int where the upper half gives the
   minimum value and the lower half gives the maximum value.  */

__inline static unsigned int
blockage_range (unit, insn)
     int unit;
     rtx insn;
{
  unsigned int blockage = INSN_BLOCKAGE (insn);
  unsigned int range;

  if (UNIT_BLOCKED (blockage) != unit + 1)
    {
      range = function_units[unit].blockage_range_function (insn);
      /* We only cache the blockage range for one unit and then only if
	 the values fit.  */
      if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS)
	INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range);
    }
  else
    range = BLOCKAGE_RANGE (blockage);

  return range;
}

/* A vector indexed by function unit instance giving the last insn to use
   the unit.  The value of the function unit instance index for unit U
   instance I is (U + I * FUNCTION_UNITS_SIZE).  */
static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit instance giving the minimum time when
   the unit will unblock based on the maximum blockage cost.  */
static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit number giving the number of insns
   that remain to use the unit.  */
static int unit_n_insns[FUNCTION_UNITS_SIZE];

/* Reset the function unit state to the null state.  */

static void
clear_units ()
{
  int unit;

  bzero (unit_last_insn, sizeof (unit_last_insn));
  bzero (unit_tick, sizeof (unit_tick));
  bzero (unit_n_insns, sizeof (unit_n_insns));
}

/* Record an insn as one that will use the units encoded by UNIT.  */

__inline static void
prepare_unit (unit)
     int unit;
{
  int i;

  if (unit >= 0)
    unit_n_insns[unit]++;
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
	prepare_unit (i);
}

/* Return the actual hazard cost of executing INSN on the unit UNIT,
   instance INSTANCE at time CLOCK if the previous actual hazard cost
   was COST.  */

__inline static int
actual_hazard_this_instance (unit, instance, insn, clock, cost)
     int unit, instance, clock, cost;
     rtx insn;
{
  int i;
  int tick = unit_tick[instance];

  if (tick - clock > cost)
    {
      /* The scheduler is operating in reverse, so INSN is the executing
	 insn and the unit's last insn is the candidate insn.  We want a
	 more exact measure of the blockage if we execute INSN at CLOCK
	 given when we committed the execution of the unit's last insn.

	 The blockage value is given by either the unit's max blockage
	 constant, blockage range function, or blockage function.  Use
	 the most exact form for the given unit.  */

      if (function_units[unit].blockage_range_function)
	{
	  if (function_units[unit].blockage_function)
	    tick += (function_units[unit].blockage_function
		     (insn, unit_last_insn[instance])
		     - function_units[unit].max_blockage);
	  else
	    tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn))
		     - function_units[unit].max_blockage);
	}
      if (tick - clock > cost)
	cost = tick - clock;
    }
  return cost;
}

/* Record INSN as having begun execution on the units encoded by UNIT at
   time CLOCK.  */

__inline static void
schedule_unit (unit, insn, clock)
     int unit, clock;
     rtx insn;
{
  int i;

  if (unit >= 0)
    {
      int instance = unit;
#if MAX_MULTIPLICITY > 1
      /* Find the first free instance of the function unit and use that
	 one.  We assume that one is free.  */
      for (i = function_units[unit].multiplicity - 1; i > 0; i--)
	{
	  if (! actual_hazard_this_instance (unit, instance, insn, clock, 0))
	    break;
	  instance += FUNCTION_UNITS_SIZE;
	}
#endif
      unit_last_insn[instance] = insn;
      unit_tick[instance] = (clock + function_units[unit].max_blockage);
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
	schedule_unit (i, insn, clock);
}

/* Return the actual hazard cost of executing INSN on the units encoded by
   UNIT at time CLOCK if the previous actual hazard cost was COST.  */

__inline static int
actual_hazard (unit, insn, clock, cost)
     int unit, clock, cost;
     rtx insn;
{
  int i;

  if (unit >= 0)
    {
      /* Find the instance of the function unit with the minimum hazard.  */
      int instance = unit;
      int best = instance;
      int best_cost = actual_hazard_this_instance (unit, instance, insn,
						   clock, cost);
      int this_cost;

#if MAX_MULTIPLICITY > 1
      if (best_cost > cost)
	{
	  for (i = function_units[unit].multiplicity - 1; i > 0; i--)
	    {
	      instance += FUNCTION_UNITS_SIZE;
	      this_cost = actual_hazard_this_instance (unit, instance, insn,
						       clock, cost);
	      if (this_cost < best_cost)
		{
		  best = instance;
		  best_cost = this_cost;
		  if (this_cost <= cost)
		    break;
		}
	    }
	}
#endif
      cost = MAX (cost, best_cost);
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
	cost = actual_hazard (i, insn, clock, cost);

  return cost;
}

/* Return the potential hazard cost of executing an instruction on the
   units encoded by UNIT if the previous potential hazard cost was COST.
   An insn with a large blockage time is chosen in preference to one
   with a smaller time; an insn that uses a unit that is more likely
   to be used is chosen in preference to one with a unit that is less
   used.  We are trying to minimize a subsequent actual hazard.  */

__inline static int
potential_hazard (unit, insn, cost)
     int unit, cost;
     rtx insn;
{
  int i, ncost;
  unsigned int minb, maxb;

  if (unit >= 0)
    {
      minb = maxb = function_units[unit].max_blockage;
      if (maxb > 1)
	{
	  if (function_units[unit].blockage_range_function)
	    {
	      maxb = minb = blockage_range (unit, insn);
	      maxb = MAX_BLOCKAGE_COST (maxb);
	      minb = MIN_BLOCKAGE_COST (minb);
	    }

	  if (maxb > 1)
	    {
	      /* Make the number of instructions left dominate.  Make the
		 minimum delay dominate the maximum delay.  If all these
		 are the same, use the unit number to add an arbitrary
		 ordering.  Other terms can be added.  */
	      ncost = minb * 0x40 + maxb;
	      ncost *= (unit_n_insns[unit] - 1) * 0x1000 + unit;
	      if (ncost > cost)
		cost = ncost;
	    }
	}
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
	cost = potential_hazard (i, insn, cost);

  return cost;
}

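/* An editorial worked example of the weighting above, with assumed
   numbers: for minb = 2, maxb = 3, and five insns left to use unit 1,

	ncost = (2 * 0x40 + 3) * ((5 - 1) * 0x1000 + 1)
	      = 0x83 * 0x4001 = 0x20c083

   so the count of remaining insns dominates, then the minimum blockage,
   then the maximum, with the unit number as a final arbitrary tie-break.  */
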
/* Compute cost of executing INSN given the dependence LINK on the insn USED.
   This is the number of virtual cycles taken between instruction issue and
   instruction results.  */

__inline static int
insn_cost (insn, link, used)
     rtx insn, link, used;
{
  register int cost = INSN_COST (insn);

  if (cost == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.
	 We can't pass these directly to result_ready_cost because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (INSN_CODE (insn) < 0)
	{
	  INSN_COST (insn) = 1;
	  return 1;
	}
      else
	{
	  cost = result_ready_cost (insn);

	  if (cost < 1)
	    cost = 1;

	  INSN_COST (insn) = cost;
	}
    }

  /* A USE insn should never require the value used to be computed.  This
     allows the computation of a function's result and parameter values to
     overlap the return and call.  */
  recog_memoized (used);
  if (INSN_CODE (used) < 0)
    LINK_COST_FREE (link) = 1;

  /* If some dependencies vary the cost, compute the adjustment.  Most
     commonly, the adjustment is complete: either the cost is ignored
     (in the case of an output- or anti-dependence), or the cost is
     unchanged.  These values are cached in the link as LINK_COST_FREE
     and LINK_COST_ZERO.  */

  if (LINK_COST_FREE (link))
    cost = 1;
#ifdef ADJUST_COST
  else if (! LINK_COST_ZERO (link))
    {
      int ncost = cost;

      ADJUST_COST (used, link, insn, ncost);
      if (ncost <= 1)
	LINK_COST_FREE (link) = ncost = 1;
      if (cost == ncost)
	LINK_COST_ZERO (link) = 1;
      cost = ncost;
    }
#endif
  return cost;
}

/* Compute the priority number for INSN.  */

static int
priority (insn)
     rtx insn;
{
  if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
    {
      int prev_priority;
      int max_priority;
      int this_priority = INSN_PRIORITY (insn);
      rtx prev;

      if (this_priority > 0)
	return this_priority;

      max_priority = 1;

      /* Nonzero if these insns must be scheduled together.  */
      if (SCHED_GROUP_P (insn))
	{
	  prev = insn;
	  while (SCHED_GROUP_P (prev))
	    {
	      prev = PREV_INSN (prev);
	      INSN_REF_COUNT (prev) += 1;
	    }
	}

      for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
	{
	  rtx x = XEXP (prev, 0);

	  /* A dependence pointing to a note is always obsolete, because
	     sched_analyze_insn will have created any necessary new dependences
	     which replace it.  Notes can be created when instructions are
	     deleted by insn splitting, or by register allocation.  */
	  if (GET_CODE (x) == NOTE)
	    {
	      remove_dependence (insn, x);
	      continue;
	    }

	  /* Clear the link cost adjustment bits.  */
	  LINK_COST_FREE (prev) = 0;
#ifdef ADJUST_COST
	  LINK_COST_ZERO (prev) = 0;
#endif

	  /* This priority calculation was chosen because it results in the
	     least instruction movement, and does not hurt the performance
	     of the resulting code compared to the old algorithm.
	     This makes the sched algorithm more stable, which results
	     in better code, because there is less register pressure,
	     cross jumping is more likely to work, and debugging is easier.

	     When all instructions have a latency of 1, there is no need to
	     move any instructions.  Subtracting one here ensures that in such
	     cases all instructions will end up with a priority of one, and
	     hence no scheduling will be done.

	     The original code did not subtract the one, and added the
	     insn_cost of the current instruction to its priority (e.g.
	     move the insn_cost call down to the end).  */

	  if (REG_NOTE_KIND (prev) == 0)
	    /* Data dependence.  */
	    prev_priority = priority (x) + insn_cost (x, prev, insn) - 1;
	  else
	    /* Anti or output dependence.  Don't add the latency of this
	       insn's result, because it isn't being used.  */
	    prev_priority = priority (x);

	  if (prev_priority > max_priority)
	    max_priority = prev_priority;
	  INSN_REF_COUNT (x) += 1;
	}

      prepare_unit (insn_unit (insn));
      INSN_PRIORITY (insn) = max_priority;
      return INSN_PRIORITY (insn);
    }
  return 0;
}
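
/* An editorial note on the "subtract one" above: if i1 -> i2 -> i3 is a
   dependence chain in which every insn has cost 1, then

	priority (i3) = 1
	priority (i2) = priority (i3) + 1 - 1 = 1
	priority (i1) = priority (i2) + 1 - 1 = 1

   so all priorities collapse to one and the pass reorders nothing, exactly
   as the comment inside priority () promises.  */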
\f
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
   them to the unused_*_list variables, so that they can be reused.  */

static void
free_pending_lists ()
{
  register rtx link, prev_link;

  if (pending_read_insns)
    {
      prev_link = pending_read_insns;
      link = XEXP (prev_link, 1);

      while (link)
	{
	  prev_link = link;
	  link = XEXP (link, 1);
	}

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_read_insns;
      pending_read_insns = 0;
    }

  if (pending_write_insns)
    {
      prev_link = pending_write_insns;
      link = XEXP (prev_link, 1);

      while (link)
	{
	  prev_link = link;
	  link = XEXP (link, 1);
	}

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_write_insns;
      pending_write_insns = 0;
    }

  if (pending_read_mems)
    {
      prev_link = pending_read_mems;
      link = XEXP (prev_link, 1);

      while (link)
	{
	  prev_link = link;
	  link = XEXP (link, 1);
	}

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_read_mems;
      pending_read_mems = 0;
    }

  if (pending_write_mems)
    {
      prev_link = pending_write_mems;
      link = XEXP (prev_link, 1);

      while (link)
	{
	  prev_link = link;
	  link = XEXP (link, 1);
	}

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_write_mems;
      pending_write_mems = 0;
    }
}

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (insn_list, mem_list, insn, mem)
     rtx *insn_list, *mem_list, insn, mem;
{
  register rtx link;

  if (unused_insn_list)
    {
      link = unused_insn_list;
      unused_insn_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (INSN_LIST);
  XEXP (link, 0) = insn;
  XEXP (link, 1) = *insn_list;
  *insn_list = link;

  if (unused_expr_list)
    {
      link = unused_expr_list;
      unused_expr_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (EXPR_LIST);
  XEXP (link, 0) = mem;
  XEXP (link, 1) = *mem_list;
  *mem_list = link;

  pending_lists_length++;
}
\f
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  */

static void
flush_pending_lists (insn)
     rtx insn;
{
  rtx link;

  while (pending_read_insns)
    {
      add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);

      link = pending_read_insns;
      pending_read_insns = XEXP (pending_read_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_read_mems;
      pending_read_mems = XEXP (pending_read_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  while (pending_write_insns)
    {
      add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);

      link = pending_write_insns;
      pending_write_insns = XEXP (pending_write_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_write_mems;
      pending_write_mems = XEXP (pending_write_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  pending_lists_length = 0;

  if (last_pending_memory_flush)
    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

  last_pending_memory_flush = insn;
}

/* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
   by the write to the destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (x, insn)
     rtx x;
     rtx insn;
{
  register int regno;
  register rtx dest = SET_DEST (x);

  if (dest == 0)
    return;

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (XEXP (dest, 1), insn);
	  sched_analyze_2 (XEXP (dest, 2), insn);
	}
      dest = SUBREG_REG (dest);
    }

  if (GET_CODE (dest) == REG)
    {
      register int offset, bit, i;

      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
	 If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
	  while (--i >= 0)
	    {
	      rtx u;

	      for (u = reg_last_uses[regno+i]; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	      reg_last_uses[regno + i] = 0;
	      if (reg_last_sets[regno + i])
		add_dependence (insn, reg_last_sets[regno + i],
				REG_DEP_OUTPUT);
	      reg_last_sets[regno + i] = insn;
	      if ((call_used_regs[regno + i] || global_regs[regno + i])
		  && last_function_call)
		/* Function calls clobber all call_used regs.  */
		add_dependence (insn, last_function_call, REG_DEP_ANTI);
	    }
	}
      else
	{
	  rtx u;

	  for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	  reg_last_uses[regno] = 0;
	  if (reg_last_sets[regno])
	    add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
	  reg_last_sets[regno] = insn;

	  /* Don't let it cross a call after scheduling if it doesn't
	     already cross one.  */
	  if (reg_n_calls_crossed[regno] == 0 && last_function_call)
	    add_dependence (insn, last_function_call, REG_DEP_ANTI);
	}
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */

      if (pending_lists_length > 32)
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  The number 32 was chosen because it
	     seems like a reasonable number.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the number 32.  */
	  flush_pending_lists (insn);
	}
      else
	{
	  rtx pending, pending_mem;

	  pending = pending_read_insns;
	  pending_mem = pending_read_mems;
	  while (pending)
	    {
	      /* If a dependency already exists, don't create a new one.  */
	      if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
		if (anti_dependence (XEXP (pending_mem, 0), dest))
		  add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = pending_write_insns;
	  pending_mem = pending_write_mems;
	  while (pending)
	    {
	      /* If a dependency already exists, don't create a new one.  */
	      if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
		if (output_dependence (XEXP (pending_mem, 0), dest))
		  add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  if (last_pending_memory_flush)
	    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

	  add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
				   insn, dest);
	}
      sched_analyze_2 (XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (SET_SRC (x), insn);
}
1676
1677/* Analyze the uses of memory and registers in rtx X in INSN. */
1678
1679static void
1680sched_analyze_2 (x, insn)
1681 rtx x;
1682 rtx insn;
1683{
1684 register int i;
1685 register int j;
1686 register enum rtx_code code;
1687 register char *fmt;
1688
1689 if (x == 0)
1690 return;
1691
1692 code = GET_CODE (x);
1693
b85b8af2 1694 switch (code)
bd58420c 1695 {
b85b8af2
JW
1696 case CONST_INT:
1697 case CONST_DOUBLE:
1698 case SYMBOL_REF:
1699 case CONST:
1700 case LABEL_REF:
1701 /* Ignore constants. Note that we must handle CONST_DOUBLE here
1702 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
1703 this does not mean that this insn is using cc0. */
bd58420c 1704 return;
bd58420c 1705
b85b8af2
JW
1706#ifdef HAVE_cc0
1707 case CC0:
1708 {
a3ee5899 1709 rtx link, prev;
bd58420c 1710
a3ee5899 1711 /* There may be a note before this insn now, but all notes will
ea3fa5f7 1712 be removed before we actually try to schedule the insns, so
a3ee5899
JW
1713 it won't cause a problem later. We must avoid it here though. */
1714
d21d2fb8 1715 /* User of CC0 depends on immediately preceding insn. */
b85b8af2 1716 SCHED_GROUP_P (insn) = 1;
bd58420c 1717
a3ee5899
JW
1718 /* Make a copy of all dependencies on the immediately previous insn,
1719 and add to this insn. This is so that all the dependencies will
11222b61
JW
1720 apply to the group. Remove an explicit dependence on this insn
1721 as SCHED_GROUP_P now represents it. */
a3ee5899
JW
1722
1723 prev = PREV_INSN (insn);
1724 while (GET_CODE (prev) == NOTE)
1725 prev = PREV_INSN (prev);
bd58420c 1726
11222b61
JW
1727 if (find_insn_list (prev, LOG_LINKS (insn)))
1728 remove_dependence (insn, prev);
1729
1730 for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
1731 add_dependence (insn, XEXP (link, 0), GET_MODE (link));
1732
1733 return;
1734 }
1735#endif
1736
1737 case REG:
1738 {
1739 int regno = REGNO (x);
1740 if (regno < FIRST_PSEUDO_REGISTER)
1741 {
1742 int i;
1743
1744 i = HARD_REGNO_NREGS (regno, GET_MODE (x));
1745 while (--i >= 0)
1746 {
1747 reg_last_uses[regno + i]
1748 = gen_rtx (INSN_LIST, VOIDmode,
1749 insn, reg_last_uses[regno + i]);
1750 if (reg_last_sets[regno + i])
1751 add_dependence (insn, reg_last_sets[regno + i], 0);
1752 if ((call_used_regs[regno + i] || global_regs[regno + i])
1753 && last_function_call)
1754 /* Function calls clobber all call_used regs. */
1755 add_dependence (insn, last_function_call, REG_DEP_ANTI);
1756 }
1757 }
1758 else
1759 {
1760 reg_last_uses[regno]
1761 = gen_rtx (INSN_LIST, VOIDmode, insn, reg_last_uses[regno]);
1762 if (reg_last_sets[regno])
1763 add_dependence (insn, reg_last_sets[regno], 0);
1764
1765 /* If the register does not already cross any calls, then add this
1766 insn to the sched_before_next_call list so that it will still
1767 not cross calls after scheduling. */
1768 if (reg_n_calls_crossed[regno] == 0)
1769 add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
1770 }
1771 return;
1772 }
1773
1774 case MEM:
1775 {
1776 /* Reading memory. */
1777
1778 rtx pending, pending_mem;
1779
1780 pending = pending_read_insns;
1781 pending_mem = pending_read_mems;
1782 while (pending)
1783 {
1784 /* If a dependency already exists, don't create a new one. */
1785 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1786 if (read_dependence (XEXP (pending_mem, 0), x))
1787 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
1788
1789 pending = XEXP (pending, 1);
1790 pending_mem = XEXP (pending_mem, 1);
1791 }
1792
1793 pending = pending_write_insns;
1794 pending_mem = pending_write_mems;
1795 while (pending)
1796 {
1797 /* If a dependency already exists, don't create a new one. */
1798 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1799 if (true_dependence (XEXP (pending_mem, 0), x))
1800 add_dependence (insn, XEXP (pending, 0), 0);
1801
1802 pending = XEXP (pending, 1);
1803 pending_mem = XEXP (pending_mem, 1);
1804 }
1805 if (last_pending_memory_flush)
1806 add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
1807
1808 /* Always add these dependencies to pending_reads, since
1809 this insn may be followed by a write. */
1810 add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
1811 insn, x);
1812
1813 /* Take advantage of tail recursion here. */
1814 sched_analyze_2 (XEXP (x, 0), insn);
1815 return;
1816 }
1817
1818 case ASM_OPERANDS:
1819 case ASM_INPUT:
1820 case UNSPEC_VOLATILE:
1821 case TRAP_IF:
1822 {
1823 rtx u;
1824
1825 /* Traditional and volatile asm instructions must be considered to use
1826 and clobber all hard registers and all of memory. So must
1827 TRAP_IF and UNSPEC_VOLATILE operations. */
1828 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
1829 {
1830 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1831 {
1832 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1833 if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
1834 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1835 reg_last_uses[i] = 0;
1836 if (reg_last_sets[i]
1837 && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
1838 add_dependence (insn, reg_last_sets[i], 0);
1839 reg_last_sets[i] = insn;
1840 }
1841
1842 flush_pending_lists (insn);
1843 }
1844
1845 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
1846 We cannot just fall through here since then we would be confused
1847 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
1848 a traditional asm, unlike its normal usage. */
1849
1850 if (code == ASM_OPERANDS)
1851 {
1852 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
1853 sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
1854 return;
1855 }
1856 break;
1857 }
1858
1859 case PRE_DEC:
1860 case POST_DEC:
1861 case PRE_INC:
1862 case POST_INC:
1863 /* These read and modify the result; just consider them writes. */
1864 sched_analyze_1 (x, insn);
1865 return;
1866 }
1867
1868 /* Other cases: walk the insn. */
1869 fmt = GET_RTX_FORMAT (code);
1870 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1871 {
1872 if (fmt[i] == 'e')
1873 sched_analyze_2 (XEXP (x, i), insn);
1874 else if (fmt[i] == 'E')
1875 for (j = 0; j < XVECLEN (x, i); j++)
1876 sched_analyze_2 (XVECEXP (x, i, j), insn);
1877 }
1878}
1879
1880/* Analyze an INSN with pattern X to find all dependencies. */
1881
1882static void
1883sched_analyze_insn (x, insn)
1884 rtx x, insn;
1885{
1886 register RTX_CODE code = GET_CODE (x);
1887 rtx link;
1888
1889 if (code == SET || code == CLOBBER)
1890 sched_analyze_1 (x, insn);
1891 else if (code == PARALLEL)
1892 {
1893 register int i;
1894 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1895 {
1896 code = GET_CODE (XVECEXP (x, 0, i));
1897 if (code == SET || code == CLOBBER)
1898 sched_analyze_1 (XVECEXP (x, 0, i), insn);
1899 else
1900 sched_analyze_2 (XVECEXP (x, 0, i), insn);
1901 }
1902 }
1903 else
1904 sched_analyze_2 (x, insn);
1905
1906 /* Handle function calls. */
1907 if (GET_CODE (insn) == CALL_INSN)
1908 {
1909 rtx dep_insn;
1910 rtx prev_dep_insn;
1911
1912 /* When scheduling instructions, we make sure calls don't lose their
1913 accompanying USE insns by making them depend on one another in order. */
1914
1915 prev_dep_insn = insn;
1916 dep_insn = PREV_INSN (insn);
1917 while (GET_CODE (dep_insn) == INSN
1918 && GET_CODE (PATTERN (dep_insn)) == USE)
1919 {
1920 SCHED_GROUP_P (prev_dep_insn) = 1;
1921
1922 /* Make a copy of all dependencies on dep_insn, and add to insn.
1923 This is so that all of the dependencies will apply to the
1924 group. */
1925
1926 for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
1927 add_dependence (insn, XEXP (link, 0), GET_MODE (link));
1928
1929 prev_dep_insn = dep_insn;
1930 dep_insn = PREV_INSN (dep_insn);
1931 }
1932 }
1933}
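/* A worked sketch (added; not part of the original source): for a
   typical call sequence in the insn stream,

       i1:  (insn (use (reg:SI 25)))
       i2:  (insn (use (reg:SI 24)))
       i3:  (call_insn ...)

   the loop above walks backward from i3, setting SCHED_GROUP_P on i3
   and then on i2 -- every member of the group except the outermost
   USE -- and copies the LOG_LINKS of each USE onto the call, so the
   three insns move through the scheduler as one indivisible unit.  */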
1934
1935/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1936 for every dependency. */
1937
1938static int
1939sched_analyze (head, tail)
1940 rtx head, tail;
1941{
1942 register rtx insn;
1943 register int n_insns = 0;
1944 register rtx u;
1945 register int luid = 0;
1946
1947 for (insn = head; ; insn = NEXT_INSN (insn))
1948 {
1949 INSN_LUID (insn) = luid++;
1950
1951 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
1952 {
1953 sched_analyze_insn (PATTERN (insn), insn);
1954 n_insns += 1;
1955 }
1956 else if (GET_CODE (insn) == CALL_INSN)
1957 {
1958 rtx dest = 0;
1959 rtx x;
1960 register int i;
1961
1962 /* Any instruction using a hard register which may get clobbered
1963 by a call needs to be marked as dependent on this call.
1964 This prevents a use of a hard return reg from being moved
1965 past a void call (i.e. it does not explicitly set the hard
1966 return reg). */
1967
1968 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1969 if (call_used_regs[i] || global_regs[i])
1970 {
1971 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1972 if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
1973 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1974 reg_last_uses[i] = 0;
1975 if (reg_last_sets[i]
1976 && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
1977 add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
1978 reg_last_sets[i] = insn;
1979 /* Insn, being a CALL_INSN, magically depends on
1980 `last_function_call' already. */
1981 }
1982
1983 /* For each insn which shouldn't cross a call, add a dependence
1984 between that insn and this call insn. */
1985 x = LOG_LINKS (sched_before_next_call);
1986 while (x)
1987 {
1988 add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
1989 x = XEXP (x, 1);
1990 }
1991 LOG_LINKS (sched_before_next_call) = 0;
1992
1993 sched_analyze_insn (PATTERN (insn), insn);
1994
1995 /* We don't need to flush memory for a function call which does
1996 not involve memory. */
1997 if (! CONST_CALL_P (insn))
1998 {
1999 /* In the absence of interprocedural alias analysis,
2000 we must flush all pending reads and writes, and
2001 start new dependencies starting from here. */
2002 flush_pending_lists (insn);
2003 }
2004
2005 /* Depend this function call (actually, the user of this
2006 function call) on all hard register clobberage. */
2007 last_function_call = insn;
2008 n_insns += 1;
2009 }
2010
2011 if (insn == tail)
2012 return n_insns;
2013 }
2014}
2015\f
2016/* Called when we see a set of a register. If death is true, then we are
2017 scanning backwards. Mark that register as unborn. If nobody says
2018 otherwise, that is how things will remain. If death is false, then we
2019 are scanning forwards. Mark that register as being born. */
2020
2021static void
2022sched_note_set (b, x, death)
2023 int b;
2024 rtx x;
2025 int death;
2026{
2027 register int regno, j;
2028 register rtx reg = SET_DEST (x);
2029 int subreg_p = 0;
2030
2031 if (reg == 0)
2032 return;
2033
2034 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
2035 || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
2036 {
2037 /* Must treat modification of just one hardware register of a multi-reg
2038 value or just a byte field of a register exactly the same way that
2039 mark_set_1 in flow.c does. */
2040 if (GET_CODE (reg) == ZERO_EXTRACT
2041 || GET_CODE (reg) == SIGN_EXTRACT
2042 || (GET_CODE (reg) == SUBREG
2043 && REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg)))
2044 subreg_p = 1;
2045
2046 reg = SUBREG_REG (reg);
2047 }
2048
2049 if (GET_CODE (reg) != REG)
2050 return;
2051
2052 /* Global registers are always live, so the code below does not apply
2053 to them. */
2054
2055 regno = REGNO (reg);
2056 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
2057 {
2058 register int offset = regno / REGSET_ELT_BITS;
2059 register REGSET_ELT_TYPE bit
2060 = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
2061
2062 if (death)
2063 {
2064 /* If we only set part of the register, then this set does not
2065 kill it. */
2066 if (subreg_p)
2067 return;
2068
2069 /* Try killing this register. */
2070 if (regno < FIRST_PSEUDO_REGISTER)
2071 {
2072 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
2073 while (--j >= 0)
2074 {
2075 offset = (regno + j) / REGSET_ELT_BITS;
2076 bit = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);
2077
2078 bb_live_regs[offset] &= ~bit;
2079 bb_dead_regs[offset] |= bit;
2080 }
2081 }
2082 else
2083 {
2084 bb_live_regs[offset] &= ~bit;
2085 bb_dead_regs[offset] |= bit;
2086 }
2087 }
2088 else
2089 {
2090 /* Make the register live again. */
2091 if (regno < FIRST_PSEUDO_REGISTER)
2092 {
2093 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
2094 while (--j >= 0)
2095 {
2096 offset = (regno + j) / REGSET_ELT_BITS;
2097 bit = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);
2098
2099 bb_live_regs[offset] |= bit;
2100 bb_dead_regs[offset] &= ~bit;
2101 }
2102 }
2103 else
2104 {
2105 bb_live_regs[offset] |= bit;
2106 bb_dead_regs[offset] &= ~bit;
2107 }
2108 }
2109 }
2110}
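/* A small worked example (added; REGSET_ELT_BITS is host-dependent, so
   the value 32 below is an assumption): killing pseudo register 70
   computes

       offset = 70 / 32;                          offset is 2
       bit    = (REGSET_ELT_TYPE) 1 << (70 % 32);    i.e. 1 << 6

   and then clears bb_live_regs[2] and sets bb_dead_regs[2] under that
   mask, exactly as the death branch above does.  */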
2111\f
2112/* Macros and functions for keeping the priority queue sorted, and
2113 dealing with queueing and unqueueing of instructions. */
2114
2115#define SCHED_SORT(READY, NEW_READY, OLD_READY) \
2116 do { if ((NEW_READY) - (OLD_READY) == 1) \
2117 swap_sort (READY, NEW_READY); \
2118 else if ((NEW_READY) - (OLD_READY) > 1) \
2119 qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); } \
2120 while (0)
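/* Usage note (added; not in the original source): NEW_READY is the
   current length of READY and OLD_READY the number of entries already
   in rank order, so when exactly one insn was appended, e.g.

       SCHED_SORT (ready, 4, 3);

   a single swap_sort insertion pass suffices; qsort with
   rank_for_schedule as comparator is paid for only when several insns
   became ready at once.  */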
2121
2122/* Returns a positive value if y is preferred; returns a negative value if
2123 x is preferred. Should never return 0, since that will make the sort
2124 unstable. */
2125
2126static int
2127rank_for_schedule (x, y)
2128 rtx *x, *y;
2129{
2130 rtx tmp = *y;
2131 rtx tmp2 = *x;
2132 rtx link;
2133 int tmp_class, tmp2_class;
2134 int value;
2135
2136 /* Choose the instruction with the highest priority, if different. */
2137 if (value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2))
2138 return value;
2139
2140 if (last_scheduled_insn)
2141 {
2142 /* Classify the instructions into three classes:
2143 1) Data dependent on last scheduled insn.
2144 2) Anti/Output dependent on last scheduled insn.
2145 3) Independent of last scheduled insn, or has latency of one.
2146 Choose the insn from the highest numbered class if different. */
2147 link = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
2148 if (link == 0 || insn_cost (tmp, link, last_scheduled_insn) == 1)
2149 tmp_class = 3;
2150 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
2151 tmp_class = 1;
2152 else
2153 tmp_class = 2;
2154
2155 link = find_insn_list (tmp2, LOG_LINKS (last_scheduled_insn));
2156 if (link == 0 || insn_cost (tmp2, link, last_scheduled_insn) == 1)
2157 tmp2_class = 3;
2158 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
2159 tmp2_class = 1;
2160 else
2161 tmp2_class = 2;
2162
2163 if (value = tmp_class - tmp2_class)
2164 return value;
2165 }
2166
2167 /* If insns are equally good, sort by INSN_LUID (original insn order),
2168 so that we make the sort stable. This minimizes instruction movement,
2169 thus minimizing sched's effect on debugging and cross-jumping. */
2170 return INSN_LUID (tmp) - INSN_LUID (tmp2);
2171}
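/* A worked comparison (added; not in the original source): suppose *y
   is independent of last_scheduled_insn (class 3) while *x is data
   dependent on it with multi-cycle latency (class 1).  Then
   tmp_class - tmp2_class == 3 - 1 == 2 > 0, so *y is preferred: the
   sort favors insns that will not stall behind the insn just
   scheduled.  */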
2172
2173/* Resort the array A in which only element at index N may be out of order. */
2174
2175__inline static void
2176swap_sort (a, n)
2177 rtx *a;
2178 int n;
2179{
2180 rtx insn = a[n-1];
2181 int i = n-2;
2182
2183 while (i >= 0 && rank_for_schedule (a+i, &insn) >= 0)
2184 {
2185 a[i+1] = a[i];
2186 i -= 1;
2187 }
2188 a[i+1] = insn;
2189}
2190
2191static int max_priority;
2192
2193/* Add INSN to the insn queue so that it fires at least N_CYCLES
2194 before the currently executing insn. */
2195
2196__inline static void
2197queue_insn (insn, n_cycles)
2198 rtx insn;
2199 int n_cycles;
2200{
2201 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
2202 NEXT_INSN (insn) = insn_queue[next_q];
2203 insn_queue[next_q] = insn;
2204 q_size += 1;
2205}
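/* For illustration (added; not in the original source): with the queue
   pointer at slot Q and a latency of two cycles,

       queue_insn (insn, 2);

   links INSN into insn_queue[NEXT_Q_AFTER (q_ptr, 2)], wrapping modulo
   the queue size; two advances of q_ptr later the main loop dumps that
   slot back onto the ready list.  Chaining through NEXT_INSN is safe
   here because INSN's final position in the chain is not yet fixed.  */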
2206
2207/* Return nonzero if PAT is the pattern of an insn which makes a
2208 register live. */
2209
2210__inline static int
2211birthing_insn_p (pat)
2212 rtx pat;
2213{
2214 int j;
2215
2216 if (reload_completed == 1)
2217 return 0;
2218
2219 if (GET_CODE (pat) == SET
2220 && GET_CODE (SET_DEST (pat)) == REG)
2221 {
2222 rtx dest = SET_DEST (pat);
2223 int i = REGNO (dest);
2224 int offset = i / REGSET_ELT_BITS;
2225 REGSET_ELT_TYPE bit = (REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS);
2226
2227 /* It would be more accurate to use refers_to_regno_p or
2228 reg_mentioned_p to determine when the dest is not live before this
2229 insn. */
2230
2231 if (bb_live_regs[offset] & bit)
2232 return (reg_n_sets[i] == 1);
2233
2234 return 0;
2235 }
2236 if (GET_CODE (pat) == PARALLEL)
2237 {
2238 for (j = 0; j < XVECLEN (pat, 0); j++)
2239 if (birthing_insn_p (XVECEXP (pat, 0, j)))
2240 return 1;
2241 }
2242 return 0;
2243}
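/* Example (added; not in the original source): before reload, a
   pattern (set (reg 70) ...) where bit 70 is on in bb_live_regs and
   reg_n_sets[70] == 1 makes this function return 1 -- the insn gives
   pseudo 70 its only birth.  After reload_completed it always returns
   0, since stretching register lifetimes is no longer useful then.  */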
2244
2245/* PREV is an insn that is ready to execute. Adjust its priority if that
2246 will help shorten register lifetimes. */
2247
2248__inline static void
2249adjust_priority (prev)
2250 rtx prev;
2251{
2252 /* Trying to shorten register lives after reload has completed
2253 is useless and wrong. It gives inaccurate schedules. */
2254 if (reload_completed == 0)
2255 {
2256 rtx note;
2257 int n_deaths = 0;
2258
2259 for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
2260 if (REG_NOTE_KIND (note) == REG_DEAD)
2261 n_deaths += 1;
2262
2263 /* Defer scheduling insns which kill registers, since that
2264 shortens register lives. Prefer scheduling insns which
2265 make registers live for the same reason. */
2266 switch (n_deaths)
2267 {
2268 default:
2269 INSN_PRIORITY (prev) >>= 3;
2270 break;
2271 case 3:
2272 INSN_PRIORITY (prev) >>= 2;
2273 break;
2274 case 2:
2275 case 1:
2276 INSN_PRIORITY (prev) >>= 1;
2277 break;
2278 case 0:
2279 if (birthing_insn_p (PATTERN (prev)))
2280 {
2281 int max = max_priority;
2282
2283 if (max > INSN_PRIORITY (prev))
2284 INSN_PRIORITY (prev) = max;
2285 }
2286 break;
2287 }
2288 }
2289}
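/* Worked numbers (added; not in the original source): an insn whose
   INSN_PRIORITY is 16 is cut to 8 if it has one or two REG_DEAD notes
   (>> 1), to 4 with three (>> 2), and to 2 with four or more (>> 3);
   with no deaths at all, a register-birthing insn is instead raised to
   max_priority so it issues as early as possible.  */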
2290
2291/* INSN is the "currently executing insn". Launch each insn which was
2292 waiting on INSN (in the backwards dataflow sense). READY is a
2293 vector of insns which are ready to fire. N_READY is the number of
de19227a 2294 elements in READY. CLOCK is the current virtual cycle. */
2295
2296static int
2297 schedule_insn (insn, ready, n_ready, clock)
2298 rtx insn;
2299 rtx *ready;
2300 int n_ready;
2301 int clock;
2302{
2303 rtx link;
2304 int new_ready = n_ready;
2305
2306 if (MAX_BLOCKAGE > 1)
2307 schedule_unit (insn_unit (insn), insn, clock);
2308
2309 if (LOG_LINKS (insn) == 0)
2310 return n_ready;
2311
2312 /* This is used by the function adjust_priority above. */
2313 if (n_ready > 0)
2314 max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
2315 else
2316 max_priority = INSN_PRIORITY (insn);
2317
2318 for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
2319 {
2320 rtx prev = XEXP (link, 0);
2321 int cost = insn_cost (prev, link, insn);
2322
2323 if ((INSN_REF_COUNT (prev) -= 1) != 0)
2324 {
2325 /* We satisfied one requirement to fire PREV. Record the earliest
2326 time when PREV can fire. No need to do this if the cost is 1,
2327 because PREV can fire no sooner than the next cycle. */
2328 if (cost > 1)
2329 INSN_TICK (prev) = MAX (INSN_TICK (prev), clock + cost);
2330 }
2331 else
2332 {
2333 /* We satisfied the last requirement to fire PREV. Ensure that all
2334 timing requirements are satisfied. */
2335 if (INSN_TICK (prev) - clock > cost)
2336 cost = INSN_TICK (prev) - clock;
2337
2338 /* Adjust the priority of PREV and either put it on the ready
2339 list or queue it. */
2340 adjust_priority (prev);
2341 if (cost <= 1)
2342 ready[new_ready++] = prev;
2343 else
2344 queue_insn (prev, cost);
2345 }
2346 }
2347
2348 return new_ready;
2349}
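/* A timing sketch (added; not in the original source): say INSN issues
   at clock 10 and PREV depends on it through a link of cost 3.  If
   PREV still has other unscheduled successors, we only record
   INSN_TICK (prev) = MAX (old tick, 13); once its reference count
   reaches zero, the residual wait INSN_TICK (prev) - clock decides
   whether PREV goes straight onto READY (cost <= 1) or into the queue
   for the remaining cycles.  */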
2350
2351/* Given N_READY insns in the ready list READY at time CLOCK, queue
2352 those that are blocked due to function unit hazards and rearrange
2353 the remaining ones to minimize subsequent function unit hazards. */
2354
2355static int
2356schedule_select (ready, n_ready, clock, file)
2357 rtx *ready;
2358 int n_ready, clock;
2359 FILE *file;
2360{
2361 int pri = INSN_PRIORITY (ready[0]);
2362 int i, j, k, q, cost, best_cost, best_insn = 0, new_ready = n_ready;
2363 rtx insn;
2364
2365 /* Work down the ready list in groups of instructions with the same
2366 priority value. Queue insns in the group that are blocked and
2367 select among those that remain for the one with the largest
2368 potential hazard. */
2369 for (i = 0; i < n_ready; i = j)
2370 {
2371 int opri = pri;
2372 for (j = i + 1; j < n_ready; j++)
2373 if ((pri = INSN_PRIORITY (ready[j])) != opri)
2374 break;
2375
2376 /* Queue insns in the group that are blocked. */
2377 for (k = i, q = 0; k < j; k++)
2378 {
2379 insn = ready[k];
2380 if ((cost = actual_hazard (insn_unit (insn), insn, clock, 0)) != 0)
2381 {
2382 q++;
2383 ready[k] = 0;
2384 queue_insn (insn, cost);
2385 if (file)
2386 fprintf (file, "\n;; blocking insn %d for %d cycles",
2387 INSN_UID (insn), cost);
2388 }
2389 }
2390 new_ready -= q;
2391
2392 /* Check the next group if all insns were queued. */
2393 if (j - i - q == 0)
2394 continue;
2395
2396 /* If more than one remains, select the first one with the largest
2397 potential hazard. */
2398 else if (j - i - q > 1)
2399 {
2400 best_cost = -1;
2401 for (k = i; k < j; k++)
2402 {
2403 if ((insn = ready[k]) == 0)
2404 continue;
2405 if ((cost = potential_hazard (insn_unit (insn), insn, 0))
2406 > best_cost)
2407 {
2408 best_cost = cost;
2409 best_insn = k;
2410 }
2411 }
2412 }
2413 /* We have found a suitable insn to schedule. */
2414 break;
2415 }
2416
2417 /* Move the best insn to the front of the ready list. */
2418 if (best_insn != 0)
2419 {
2420 if (file)
2421 {
2422 fprintf (file, ", now");
2423 for (i = 0; i < n_ready; i++)
2424 if (ready[i])
2425 fprintf (file, " %d", INSN_UID (ready[i]));
2426 fprintf (file, "\n;; insn %d has a greater potential hazard",
2427 INSN_UID (ready[best_insn]));
2428 }
2429 for (i = best_insn; i > 0; i--)
2430 {
2431 insn = ready[i-1];
2432 ready[i-1] = ready[i];
2433 ready[i] = insn;
2434 }
2435 }
2436
2437 /* Compact the ready list. */
2438 if (new_ready < n_ready)
2439 for (i = j = 0; i < n_ready; i++)
2440 if (ready[i])
2441 ready[j++] = ready[i];
2442
2443 return new_ready;
2444}
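/* Illustration (added; not in the original source): with ready
   priorities { 5, 5, 3 }, the first group is the two insns of priority
   5.  If actual_hazard reports the first blocked for two cycles, it is
   queued and removed, and among the survivors the one whose
   potential_hazard is largest is moved to ready[0] so the most
   constraining insn issues first.  */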
2445
2446/* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
2447 dead_notes list. */
2448
2449static void
2450create_reg_dead_note (reg, insn)
2451 rtx reg, insn;
2452{
2453 rtx link = dead_notes;
2454
2455 if (link == 0)
2456 /* In theory, we should not end up with more REG_DEAD reg notes than we
2457 started with. In practice, this can occur as the result of bugs in
2458 flow, combine and/or sched. */
2459 {
2460#if 1
2461 abort ();
2462#else
2463 link = rtx_alloc (EXPR_LIST);
2464 PUT_REG_NOTE_KIND (link, REG_DEAD);
2465#endif
2466 }
2467 else
2468 dead_notes = XEXP (dead_notes, 1);
2469
2470 XEXP (link, 0) = reg;
2471 XEXP (link, 1) = REG_NOTES (insn);
2472 REG_NOTES (insn) = link;
2473}
2474
2475/* Subroutine of attach_deaths_insn--handles the recursive search
2476 through INSN. If SET_P is true, then x is being modified by the insn. */
2477
2478static void
2479attach_deaths (x, insn, set_p)
2480 rtx x;
2481 rtx insn;
2482 int set_p;
2483{
2484 register int i;
2485 register int j;
2486 register enum rtx_code code;
2487 register char *fmt;
2488
2489 if (x == 0)
2490 return;
2491
2492 code = GET_CODE (x);
2493
2494 switch (code)
2495 {
2496 case CONST_INT:
2497 case CONST_DOUBLE:
2498 case LABEL_REF:
2499 case SYMBOL_REF:
2500 case CONST:
2501 case CODE_LABEL:
2502 case PC:
2503 case CC0:
2504 /* Get rid of the easy cases first. */
2505 return;
2506
2507 case REG:
2508 {
2509 /* If the register dies in this insn, queue that note, and mark
2510 this register as needing to die. */
2511 /* This code is very similar to mark_used_1 (if set_p is false)
2512 and mark_set_1 (if set_p is true) in flow.c. */
2513
2514 register int regno = REGNO (x);
2515 register int offset = regno / REGSET_ELT_BITS;
2516 register REGSET_ELT_TYPE bit
2517 = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
2518 REGSET_ELT_TYPE all_needed = (old_live_regs[offset] & bit);
2519 REGSET_ELT_TYPE some_needed = (old_live_regs[offset] & bit);
2520
2521 if (set_p)
2522 return;
2523
2524 if (regno < FIRST_PSEUDO_REGISTER)
2525 {
2526 int n;
2527
2528 n = HARD_REGNO_NREGS (regno, GET_MODE (x));
2529 while (--n > 0)
2530 {
2531 some_needed |= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
2532 & ((REGSET_ELT_TYPE) 1
2533 << ((regno + n) % REGSET_ELT_BITS)));
2534 all_needed &= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
2535 & ((REGSET_ELT_TYPE) 1
2536 << ((regno + n) % REGSET_ELT_BITS)));
2537 }
2538 }
2539
2540 /* If it wasn't live before we started, then add a REG_DEAD note.
2541 We must check the previous lifetime info not the current info,
2542 because we may have to execute this code several times, e.g.
2543 once for a clobber (which doesn't add a note) and later
2544 for a use (which does add a note).
2545
2546 Always make the register live. We must do this even if it was
2547 live before, because this may be an insn which sets and uses
2548 the same register, in which case the register has already been
2549 killed, so we must make it live again.
2550
2551 Global registers are always live, and should never have a REG_DEAD
2552 note added for them, so none of the code below applies to them. */
2553
2554 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
2555 {
2556 /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2557 STACK_POINTER_REGNUM, since these are always considered to be
2558 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2559 if (regno != FRAME_POINTER_REGNUM
2560#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2561 && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
2562#endif
2563 && regno != STACK_POINTER_REGNUM)
2564 {
2565 if (! all_needed && ! dead_or_set_p (insn, x))
2566 {
2567 /* If none of the words in X is needed, make a REG_DEAD
2568 note. Otherwise, we must make partial REG_DEAD
2569 notes. */
2570 if (! some_needed)
2571 create_reg_dead_note (x, insn);
2572 else
2573 {
2574 int i;
2575
2576 /* Don't make a REG_DEAD note for a part of a
2577 register that is set in the insn. */
2578 for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
2579 i >= 0; i--)
2580 if ((old_live_regs[(regno + i) / REGSET_ELT_BITS]
2581 & ((REGSET_ELT_TYPE) 1
2582 << ((regno + i) % REGSET_ELT_BITS))) == 0
2583 && ! dead_or_set_regno_p (insn, regno + i))
2584 create_reg_dead_note (gen_rtx (REG, word_mode,
2585 regno + i),
2586 insn);
2587 }
2588 }
2589 }
2590
2591 if (regno < FIRST_PSEUDO_REGISTER)
2592 {
2593 int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
2594 while (--j >= 0)
2595 {
2596 offset = (regno + j) / REGSET_ELT_BITS;
2597 bit
2598 = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);
2599
2600 bb_dead_regs[offset] &= ~bit;
2601 bb_live_regs[offset] |= bit;
2602 }
2603 }
2604 else
2605 {
2606 bb_dead_regs[offset] &= ~bit;
2607 bb_live_regs[offset] |= bit;
2608 }
2609 }
2610 return;
2611 }
2612
2613 case MEM:
2614 /* Handle tail-recursive case. */
2615 attach_deaths (XEXP (x, 0), insn, 0);
2616 return;
2617
2618 case SUBREG:
2619 case STRICT_LOW_PART:
2620 /* These two cases preserve the value of SET_P, so handle them
2621 separately. */
2622 attach_deaths (XEXP (x, 0), insn, set_p);
2623 return;
2624
2625 case ZERO_EXTRACT:
2626 case SIGN_EXTRACT:
2627 /* This case preserves the value of SET_P for the first operand, but
2628 clears it for the other two. */
2629 attach_deaths (XEXP (x, 0), insn, set_p);
2630 attach_deaths (XEXP (x, 1), insn, 0);
2631 attach_deaths (XEXP (x, 2), insn, 0);
2632 return;
2633
2634 default:
2635 /* Other cases: walk the insn. */
2636 fmt = GET_RTX_FORMAT (code);
2637 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2638 {
2639 if (fmt[i] == 'e')
2640 attach_deaths (XEXP (x, i), insn, 0);
2641 else if (fmt[i] == 'E')
2642 for (j = 0; j < XVECLEN (x, i); j++)
2643 attach_deaths (XVECEXP (x, i, j), insn, 0);
2644 }
2645 }
2646}
2647
2648/* After INSN has executed, add register death notes for each register
2649 that is dead after INSN. */
2650
2651static void
2652attach_deaths_insn (insn)
2653 rtx insn;
2654{
2655 rtx x = PATTERN (insn);
2656 register RTX_CODE code = GET_CODE (x);
2657
2658 if (code == SET)
2659 {
2660 attach_deaths (SET_SRC (x), insn, 0);
2661
2662 /* A register might die here even if it is the destination, e.g.
2663 it is the target of a volatile read and is otherwise unused.
2664 Hence we must always call attach_deaths for the SET_DEST. */
2665 attach_deaths (SET_DEST (x), insn, 1);
2666 }
2667 else if (code == PARALLEL)
2668 {
2669 register int i;
2670 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2671 {
2672 code = GET_CODE (XVECEXP (x, 0, i));
2673 if (code == SET)
2674 {
2675 attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
2676
2677 attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
2678 }
2679 /* Flow does not add REG_DEAD notes to registers that die in
2680 clobbers, so we can't either. */
2681 else if (code != CLOBBER)
2682 attach_deaths (XVECEXP (x, 0, i), insn, 0);
2683 }
2684 }
2685 /* Flow does not add REG_DEAD notes to registers that die in
2686 clobbers, so we can't either. */
2687 else if (code != CLOBBER)
2688 attach_deaths (x, insn, 0);
2689}
2690
2691/* Delete notes beginning with INSN and maybe put them in the chain
2692 of notes ended by NOTE_LIST.
2693 Returns the insn following the notes. */
2694
2695static rtx
2696unlink_notes (insn, tail)
2697 rtx insn, tail;
2698{
2699 rtx prev = PREV_INSN (insn);
2700
2701 while (insn != tail && GET_CODE (insn) == NOTE)
2702 {
2703 rtx next = NEXT_INSN (insn);
2704 /* Delete the note from its current position. */
2705 if (prev)
2706 NEXT_INSN (prev) = next;
2707 if (next)
2708 PREV_INSN (next) = prev;
2709
2710 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
2711 /* Record line-number notes so they can be reused. */
2712 LINE_NOTE (insn) = insn;
2713 else
2714 {
2715 /* Insert the note at the end of the notes list. */
2716 PREV_INSN (insn) = note_list;
2717 if (note_list)
2718 NEXT_INSN (note_list) = insn;
2719 note_list = insn;
2720 }
2721
2722 insn = next;
2723 }
2724 return insn;
2725}
2726
2727/* Data structure for keeping track of register information
2728 during that register's life. */
2729
2730struct sometimes
2731{
2732 short offset; short bit;
2733 short live_length; short calls_crossed;
2734};
2735
2736/* Constructor for `sometimes' data structure. */
2737
2738static int
2739new_sometimes_live (regs_sometimes_live, offset, bit, sometimes_max)
2740 struct sometimes *regs_sometimes_live;
2741 int offset, bit;
2742 int sometimes_max;
2743{
2744 register struct sometimes *p;
2745 register int regno = offset * REGSET_ELT_BITS + bit;
2746 int i;
2747
2748 /* There should never be a register greater than max_regno here. If there
2749 is, it means that a define_split has created a new pseudo reg. This
2750 is not allowed, since there will not be flow info available for any
2751 new register, so catch the error here. */
2752 if (regno >= max_regno)
2753 abort ();
2754
2755 p = &regs_sometimes_live[sometimes_max];
2756 p->offset = offset;
2757 p->bit = bit;
2758 p->live_length = 0;
2759 p->calls_crossed = 0;
2760 sometimes_max++;
2761 return sometimes_max;
2762}
2763
2764/* Count lengths of all regs we are currently tracking,
2765 and find new registers no longer live. */
2766
2767static void
2768finish_sometimes_live (regs_sometimes_live, sometimes_max)
2769 struct sometimes *regs_sometimes_live;
2770 int sometimes_max;
2771{
2772 int i;
2773
2774 for (i = 0; i < sometimes_max; i++)
2775 {
2776 register struct sometimes *p = &regs_sometimes_live[i];
2777 int regno;
2778
2779 regno = p->offset * REGSET_ELT_BITS + p->bit;
2780
2781 sched_reg_live_length[regno] += p->live_length;
2782 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2783 }
2784}
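/* Accounting example (added; not in the original source): a pseudo
   that was live across seven insns of this block and one CALL_INSN
   ends up contributing 7 to sched_reg_live_length and 1 to
   sched_reg_n_calls_crossed for its regno, whether its segment was
   closed mid-block or swept up here at the end.  */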
2785
2786/* Use modified list scheduling to rearrange insns in basic block
2787 B. FILE, if nonzero, is where we dump interesting output about
2788 this pass. */
2789
2790static void
2791schedule_block (b, file)
2792 int b;
2793 FILE *file;
2794{
2795 rtx insn, last;
2796 rtx last_note = 0;
2797 rtx *ready, link;
2798 int i, j, n_ready = 0, new_ready, n_insns = 0;
2799 int sched_n_insns = 0;
2800 int clock;
2801#define NEED_NOTHING 0
2802#define NEED_HEAD 1
2803#define NEED_TAIL 2
2804 int new_needs;
2805
2806 /* HEAD and TAIL delimit the region being scheduled. */
2807 rtx head = basic_block_head[b];
2808 rtx tail = basic_block_end[b];
2809 /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
2810 being scheduled. When the insns have been ordered,
2811 these insns delimit where the new insns are to be
2812 spliced back into the insn chain. */
2813 rtx next_tail;
2814 rtx prev_head;
2815
2816 /* Keep life information accurate. */
2817 register struct sometimes *regs_sometimes_live;
2818 int sometimes_max;
2819
2820 if (file)
2821 fprintf (file, ";;\t -- basic block number %d from %d to %d --\n",
2822 b, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
2823
2824 i = max_reg_num ();
2825 reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
2826 bzero (reg_last_uses, i * sizeof (rtx));
2827 reg_last_sets = (rtx *) alloca (i * sizeof (rtx));
2828 bzero (reg_last_sets, i * sizeof (rtx));
2829 clear_units ();
2830
2831 /* Remove certain insns at the beginning from scheduling,
2832 by advancing HEAD. */
2833
2834 /* At the start of a function, before reload has run, don't delay getting
2835 parameters from hard registers into pseudo registers. */
2836 if (reload_completed == 0 && b == 0)
2837 {
2838 while (head != tail
2839 && GET_CODE (head) == NOTE
2840 && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
2841 head = NEXT_INSN (head);
2842 while (head != tail
2843 && GET_CODE (head) == INSN
2844 && GET_CODE (PATTERN (head)) == SET)
2845 {
2846 rtx src = SET_SRC (PATTERN (head));
2847 while (GET_CODE (src) == SUBREG
2848 || GET_CODE (src) == SIGN_EXTEND
2849 || GET_CODE (src) == ZERO_EXTEND
2850 || GET_CODE (src) == SIGN_EXTRACT
2851 || GET_CODE (src) == ZERO_EXTRACT)
2852 src = XEXP (src, 0);
2853 if (GET_CODE (src) != REG
2854 || REGNO (src) >= FIRST_PSEUDO_REGISTER)
2855 break;
2856 /* Keep this insn from ever being scheduled. */
2857 INSN_REF_COUNT (head) = 1;
2858 head = NEXT_INSN (head);
2859 }
2860 }
2861
2862 /* Don't include any notes or labels at the beginning of the
2863 basic block, or notes at the ends of basic blocks. */
2864 while (head != tail)
2865 {
2866 if (GET_CODE (head) == NOTE)
2867 head = NEXT_INSN (head);
2868 else if (GET_CODE (tail) == NOTE)
2869 tail = PREV_INSN (tail);
2870 else if (GET_CODE (head) == CODE_LABEL)
2871 head = NEXT_INSN (head);
2872 else break;
2873 }
2874 /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
2875 to schedule this block. */
2876 if (head == tail
2877 && (GET_CODE (head) == NOTE || GET_CODE (head) == CODE_LABEL))
2878 return;
2879
2880#if 0
2881 /* This short-cut doesn't work. It does not count call insns crossed by
2882 registers in reg_sometimes_live. It does not mark these registers as
2883 dead if they die in this block. It does not mark these registers live
2884 (or create new reg_sometimes_live entries if necessary) if they are born
2885 in this block.
2886
2887 The easy solution is to just always schedule a block. This block only
2888 has one insn, so this won't slow down this pass by much. */
2889
2890 if (head == tail)
2891 return;
2892#endif
2893
2894 /* Now HEAD through TAIL are the insns actually to be rearranged;
2895 Let PREV_HEAD and NEXT_TAIL enclose them. */
2896 prev_head = PREV_INSN (head);
2897 next_tail = NEXT_INSN (tail);
2898
2899 /* Initialize basic block data structures. */
2900 dead_notes = 0;
2901 pending_read_insns = 0;
2902 pending_read_mems = 0;
2903 pending_write_insns = 0;
2904 pending_write_mems = 0;
2905 pending_lists_length = 0;
2906 last_pending_memory_flush = 0;
2907 last_function_call = 0;
2908 last_scheduled_insn = 0;
2909
2910 LOG_LINKS (sched_before_next_call) = 0;
2911
2912 n_insns += sched_analyze (head, tail);
2913 if (n_insns == 0)
2914 {
2915 free_pending_lists ();
2916 return;
2917 }
2918
2919 /* Allocate vector to hold insns to be rearranged (except those
2920 insns which are controlled by an insn with SCHED_GROUP_P set).
2921 All these insns are included between ORIG_HEAD and ORIG_TAIL,
2922 as those variables ultimately are set up. */
2923 ready = (rtx *) alloca ((n_insns+1) * sizeof (rtx));
2924
2925 /* TAIL is now the last of the insns to be rearranged.
2926 Put those insns into the READY vector. */
2927 insn = tail;
2928
2929 /* For all branches, calls, uses, and cc0 setters, force them to remain
2930 in order at the end of the block by adding dependencies and giving
2931 the last a high priority. There may be notes present, and prev_head
2932 may also be a note.
2933
2934 Branches must obviously remain at the end. Calls should remain at the
2935 end since moving them results in worse register allocation. Uses remain
2936 at the end to ensure proper register allocation. cc0 setters remain
2937 at the end because they can't be moved away from their cc0 user. */
2938 last = 0;
2939 while (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
2940 || (GET_CODE (insn) == INSN
2941 && (GET_CODE (PATTERN (insn)) == USE
2942#ifdef HAVE_cc0
2943 || sets_cc0_p (PATTERN (insn))
2944#endif
2945 ))
2946 || GET_CODE (insn) == NOTE)
2947 {
2948 if (GET_CODE (insn) != NOTE)
2949 {
2950 priority (insn);
2951 if (last == 0)
2952 {
2953 ready[n_ready++] = insn;
2954 INSN_PRIORITY (insn) = TAIL_PRIORITY - i;
2955 INSN_REF_COUNT (insn) = 0;
2956 }
2957 else if (! find_insn_list (insn, LOG_LINKS (last)))
2958 {
2959 add_dependence (last, insn, REG_DEP_ANTI);
2960 INSN_REF_COUNT (insn)++;
2961 }
2962 last = insn;
2963
2964 /* Skip over insns that are part of a group. */
2965 while (SCHED_GROUP_P (insn))
2966 {
2967 insn = prev_nonnote_insn (insn);
2968 priority (insn);
2969 }
2970 }
2971
2972 insn = PREV_INSN (insn);
2973 /* Don't overrun the bounds of the basic block. */
2974 if (insn == prev_head)
2975 break;
2976 }
2977
2978 /* Assign priorities to instructions. Also check whether they
2979 are in priority order already. If so then I will be nonnegative.
2980 We use this shortcut only before reloading. */
2981#if 0
2982 i = reload_completed ? DONE_PRIORITY : MAX_PRIORITY;
2983#endif
2984
2985 for (; insn != prev_head; insn = PREV_INSN (insn))
2986 {
2987 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2988 {
2989 priority (insn);
2990 if (INSN_REF_COUNT (insn) == 0)
2991 {
2992 if (last == 0)
2993 ready[n_ready++] = insn;
2994 else
2995 {
2996 /* Make this dependent on the last of the instructions
2997 that must remain in order at the end of the block. */
2998 add_dependence (last, insn, REG_DEP_ANTI);
2999 INSN_REF_COUNT (insn) = 1;
3000 }
3001 }
3002 if (SCHED_GROUP_P (insn))
3003 {
3004 while (SCHED_GROUP_P (insn))
3005 {
3006 insn = PREV_INSN (insn);
3007 while (GET_CODE (insn) == NOTE)
3008 insn = PREV_INSN (insn);
3009 priority (insn);
3010 }
3011 continue;
3012 }
3013#if 0
3014 if (i < 0)
3015 continue;
3016 if (INSN_PRIORITY (insn) < i)
3017 i = INSN_PRIORITY (insn);
3018 else if (INSN_PRIORITY (insn) > i)
3019 i = DONE_PRIORITY;
3020#endif
3021 }
3022 }
3023
3024#if 0
3025 /* This short-cut doesn't work. It does not count call insns crossed by
3026 registers in reg_sometimes_live. It does not mark these registers as
3027 dead if they die in this block. It does not mark these registers live
3028 (or create new reg_sometimes_live entries if necessary) if they are born
3029 in this block.
3030
3031 The easy solution is to just always schedule a block. These blocks tend
3032 to be very short, so this doesn't slow down this pass by much. */
3033
3034 /* If existing order is good, don't bother to reorder. */
3035 if (i != DONE_PRIORITY)
3036 {
3037 if (file)
3038 fprintf (file, ";; already scheduled\n");
3039
3040 if (reload_completed == 0)
3041 {
3042 for (i = 0; i < sometimes_max; i++)
3043 regs_sometimes_live[i].live_length += n_insns;
3044
3045 finish_sometimes_live (regs_sometimes_live, sometimes_max);
3046 }
3047 free_pending_lists ();
3048 return;
3049 }
3050#endif
3051
3052 /* Scan all the insns to be scheduled, removing NOTE insns
3053 and register death notes.
3054 Line number NOTE insns end up in NOTE_LIST.
3055 Register death notes end up in DEAD_NOTES.
3056
3057 Recreate the register life information for the end of this basic
3058 block. */
3059
3060 if (reload_completed == 0)
3061 {
3062 bcopy (basic_block_live_at_start[b], bb_live_regs, regset_bytes);
3063 bzero (bb_dead_regs, regset_bytes);
3064
3065 if (b == 0)
3066 {
3067 /* This is the first block in the function. There may be insns
3068 before head that we can't schedule. We still need to examine
3069 them though for accurate register lifetime analysis. */
3070
3071 /* We don't want to remove any REG_DEAD notes as the code below
3072 does. */
3073
3074 for (insn = basic_block_head[b]; insn != head;
3075 insn = NEXT_INSN (insn))
3076 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3077 {
3078 /* See if the register gets born here. */
3079 /* We must check for registers being born before we check for
3080 registers dying. It is possible for a register to be born
3081 and die in the same insn, e.g. reading from a volatile
3082 memory location into an otherwise unused register. Such
3083 a register must be marked as dead after this insn. */
3084 if (GET_CODE (PATTERN (insn)) == SET
3085 || GET_CODE (PATTERN (insn)) == CLOBBER)
3086 sched_note_set (b, PATTERN (insn), 0);
3087 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3088 {
3089 int j;
3090 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3091 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3092 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3093 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3094
3095 /* ??? This code is obsolete and should be deleted. It
3096 is harmless though, so we will leave it in for now. */
3097 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3098 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
3099 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3100 }
3101
3102 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
3103 {
3104 if ((REG_NOTE_KIND (link) == REG_DEAD
3105 || REG_NOTE_KIND (link) == REG_UNUSED)
3106 /* Verify that the REG_NOTE has a legal value. */
3107 && GET_CODE (XEXP (link, 0)) == REG)
3108 {
3109 register int regno = REGNO (XEXP (link, 0));
3110 register int offset = regno / REGSET_ELT_BITS;
3111 register REGSET_ELT_TYPE bit
3112 = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
3113
3114 if (regno < FIRST_PSEUDO_REGISTER)
3115 {
3116 int j = HARD_REGNO_NREGS (regno,
3117 GET_MODE (XEXP (link, 0)));
3118 while (--j >= 0)
3119 {
3120 offset = (regno + j) / REGSET_ELT_BITS;
3121 bit = ((REGSET_ELT_TYPE) 1
3122 << ((regno + j) % REGSET_ELT_BITS));
3123
3124 bb_live_regs[offset] &= ~bit;
3125 bb_dead_regs[offset] |= bit;
3126 }
3127 }
3128 else
3129 {
3130 bb_live_regs[offset] &= ~bit;
3131 bb_dead_regs[offset] |= bit;
3132 }
3133 }
3134 }
3135 }
3136 }
3137 }
3138
3139 /* If debugging information is being produced, keep track of the line
3140 number notes for each insn. */
3141 if (write_symbols != NO_DEBUG)
3142 {
3143 /* We must use the true line number for the first insn in the block
3144 that was computed and saved at the start of this pass. We can't
3145 use the current line number, because scheduling of the previous
3146 block may have changed the current line number. */
3147 rtx line = line_note_head[b];
3148
3149 for (insn = basic_block_head[b];
3150 insn != next_tail;
3151 insn = NEXT_INSN (insn))
3152 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3153 line = insn;
3154 else
3155 LINE_NOTE (insn) = line;
3156 }
3157
3158 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3159 {
3160 rtx prev, next, link;
3161
3162 /* Farm out notes. This is needed to keep the debugger from
3163 getting completely deranged. */
3164 if (GET_CODE (insn) == NOTE)
3165 {
3166 prev = insn;
3167 insn = unlink_notes (insn, next_tail);
3168 if (prev == tail)
3169 abort ();
3170 if (prev == head)
3171 abort ();
3172 if (insn == next_tail)
3173 abort ();
3174 }
3175
3176 if (reload_completed == 0
3177 && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3178 {
3179 /* See if the register gets born here. */
3180 /* We must check for registers being born before we check for
3181 registers dying. It is possible for a register to be born and
3182 die in the same insn, e.g. reading from a volatile memory
3183 location into an otherwise unused register. Such a register
3184 must be marked as dead after this insn. */
3185 if (GET_CODE (PATTERN (insn)) == SET
3186 || GET_CODE (PATTERN (insn)) == CLOBBER)
3187 sched_note_set (b, PATTERN (insn), 0);
3188 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3189 {
3190 int j;
3191 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3192 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3193 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3194 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3195
3196 /* ??? This code is obsolete and should be deleted. It
3197 is harmless though, so we will leave it in for now. */
3198 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3199 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
3200 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3201 }
3202
3203 /* Need to know what registers this insn kills. */
3204 for (prev = 0, link = REG_NOTES (insn); link; link = next)
3205 {
3206 int regno;
3207
3208 next = XEXP (link, 1);
3209 if ((REG_NOTE_KIND (link) == REG_DEAD
3210 || REG_NOTE_KIND (link) == REG_UNUSED)
3211 /* Verify that the REG_NOTE has a legal value. */
3212 && GET_CODE (XEXP (link, 0)) == REG)
3213 {
3214 register int regno = REGNO (XEXP (link, 0));
3215 register int offset = regno / REGSET_ELT_BITS;
3216 register REGSET_ELT_TYPE bit
3217 = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
3218
3219 /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
3220 alone. */
3221 if (REG_NOTE_KIND (link) == REG_DEAD)
3222 {
3223 if (prev)
3224 XEXP (prev, 1) = next;
3225 else
3226 REG_NOTES (insn) = next;
3227 XEXP (link, 1) = dead_notes;
3228 dead_notes = link;
3229 }
3230 else
3231 prev = link;
3232
3233 if (regno < FIRST_PSEUDO_REGISTER)
3234 {
3235 int j = HARD_REGNO_NREGS (regno,
3236 GET_MODE (XEXP (link, 0)));
3237 while (--j >= 0)
3238 {
3239 offset = (regno + j) / REGSET_ELT_BITS;
3240 bit = ((REGSET_ELT_TYPE) 1
3241 << ((regno + j) % REGSET_ELT_BITS));
3242
3243 bb_live_regs[offset] &= ~bit;
3244 bb_dead_regs[offset] |= bit;
3245 }
3246 }
3247 else
3248 {
3249 bb_live_regs[offset] &= ~bit;
3250 bb_dead_regs[offset] |= bit;
3251 }
3252 }
3253 else
3254 prev = link;
3255 }
3256 }
3257 }
3258
3259 if (reload_completed == 0)
3260 {
3261 /* Keep track of register lives. */
3262 old_live_regs = (regset) alloca (regset_bytes);
3263 regs_sometimes_live
3264 = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
3265 sometimes_max = 0;
3266
3267 /* Start with registers live at end. */
3268 for (j = 0; j < regset_size; j++)
3269 {
3270 REGSET_ELT_TYPE live = bb_live_regs[j];
3271 old_live_regs[j] = live;
3272 if (live)
3273 {
3274 register REGSET_ELT_TYPE bit;
bd58420c 3275 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
3276 if (live & ((REGSET_ELT_TYPE) 1 << bit))
3277 sometimes_max = new_sometimes_live (regs_sometimes_live, j,
3278 bit, sometimes_max);
3279 }
3280 }
3281 }
3282
3283 SCHED_SORT (ready, n_ready, 1);
3284
3285 if (file)
3286 {
3287 fprintf (file, ";; ready list initially:\n;; ");
3288 for (i = 0; i < n_ready; i++)
3289 fprintf (file, "%d ", INSN_UID (ready[i]));
3290 fprintf (file, "\n\n");
3291
3292 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3293 if (INSN_PRIORITY (insn) > 0)
3294 fprintf (file, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
3295 INSN_UID (insn), INSN_PRIORITY (insn),
3296 INSN_REF_COUNT (insn));
3297 }
3298
3299 /* Now HEAD and TAIL are going to become disconnected
3300 entirely from the insn chain. */
3301 tail = 0;
3302
3303 /* Q_SIZE will always be zero here. */
3304 q_ptr = 0; clock = 0;
3305 bzero (insn_queue, sizeof (insn_queue));
3306
3307 /* Now, perform list scheduling. */
3308
3309 /* Where we start inserting insns is after TAIL. */
3310 last = next_tail;
3311
3312 new_needs = (NEXT_INSN (prev_head) == basic_block_head[b]
3313 ? NEED_HEAD : NEED_NOTHING);
3314 if (PREV_INSN (next_tail) == basic_block_end[b])
3315 new_needs |= NEED_TAIL;
3316
3317 new_ready = n_ready;
3318 while (sched_n_insns < n_insns)
3319 {
3320 q_ptr = NEXT_Q (q_ptr); clock++;
3321
3322 /* Add all pending insns that can be scheduled without stalls to the
3323 ready list. */
3324 for (insn = insn_queue[q_ptr]; insn; insn = NEXT_INSN (insn))
3325 {
3326 if (file)
3327 fprintf (file, ";; launching %d before %d with no stalls at T-%d\n",
3328 INSN_UID (insn), INSN_UID (last), clock);
3329 ready[new_ready++] = insn;
3330 q_size -= 1;
3331 }
3332 insn_queue[q_ptr] = 0;
3333
3334 /* If there are no ready insns, stall until one is ready and add all
3335 of the pending insns at that point to the ready list. */
3336 if (new_ready == 0)
3337 {
3338 register int stalls;
3339
3340 for (stalls = 1; stalls < INSN_QUEUE_SIZE; stalls++)
3341 if (insn = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)])
3342 {
3343 for (; insn; insn = NEXT_INSN (insn))
3344 {
3345 if (file)
3346 fprintf (file, ";; launching %d before %d with %d stalls at T-%d\n",
3347 INSN_UID (insn), INSN_UID (last), stalls, clock);
3348 ready[new_ready++] = insn;
3349 q_size -= 1;
3350 }
3351 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
3352 break;
3353 }
3354
3355 q_ptr = NEXT_Q_AFTER (q_ptr, stalls); clock += stalls;
3356 }
3357
3358 /* There should be some instructions waiting to fire. */
3359 if (new_ready == 0)
3360 abort ();
3361
3362 if (file)
3363 {
3364 fprintf (file, ";; ready list at T-%d:", clock);
3365 for (i = 0; i < new_ready; i++)
3366 fprintf (file, " %d (%x)",
3367 INSN_UID (ready[i]), INSN_PRIORITY (ready[i]));
3368 }
3369
3370 /* Sort the ready list and choose the best insn to schedule. Select
3371 which insn should issue in this cycle and queue those that are
3372 blocked by function unit hazards.
3373
3374 N_READY holds the number of items that were scheduled the last time,
3375 minus the one instruction scheduled on the last loop iteration; it
3376 is not modified for any other reason in this loop. */
3377
bd58420c 3378 SCHED_SORT (ready, new_ready, n_ready);
3379 if (MAX_BLOCKAGE > 1)
3380 {
3381 new_ready = schedule_select (ready, new_ready, clock, file);
3382 if (new_ready == 0)
3383 {
3384 if (file)
3385 fprintf (file, "\n");
3386 continue;
3387 }
3388 }
3389 n_ready = new_ready;
3390 last_scheduled_insn = insn = ready[0];
3391
3392 /* The first insn scheduled becomes the new tail. */
3393 if (tail == 0)
3394 tail = insn;
3395
3396 if (file)
3397 {
3398 fprintf (file, ", now");
3399 for (i = 0; i < n_ready; i++)
3400 fprintf (file, " %d", INSN_UID (ready[i]));
3401 fprintf (file, "\n");
3402 }
3403
3404 if (DONE_PRIORITY_P (insn))
3405 abort ();
3406
3407 if (reload_completed == 0)
3408 {
3409 /* Process this insn, and each insn linked to this one which must
3410 be immediately output after this insn. */
3411 do
3412 {
3413 /* First we kill registers set by this insn, and then we
3414 make registers used by this insn live. This is the opposite
3415 order used above because we are traversing the instructions
3416 backwards. */
3417
3418 /* Strictly speaking, we should scan REG_UNUSED notes and make
3419 every register mentioned there live, however, we will just
3420 kill them again immediately below, so there doesn't seem to
3421 be any reason why we bother to do this. */
3422
3423 /* See if this is the last notice we must take of a register. */
3424 if (GET_CODE (PATTERN (insn)) == SET
3425 || GET_CODE (PATTERN (insn)) == CLOBBER)
3426 sched_note_set (b, PATTERN (insn), 1);
3427 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3428 {
3429 int j;
3430 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3431 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3432 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3433 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 1);
3434 }
3435
3436 /* This code keeps life analysis information up to date. */
3437 if (GET_CODE (insn) == CALL_INSN)
3438 {
3439 register struct sometimes *p;
3440
3441 /* A call kills all call used and global registers, except
3442 for those mentioned in the call pattern which will be
3443 made live again later. */
3444 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3445 if (call_used_regs[i] || global_regs[i])
3446 {
3447 register int offset = i / REGSET_ELT_BITS;
3448 register REGSET_ELT_TYPE bit
3449 = (REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS);
3450
3451 bb_live_regs[offset] &= ~bit;
3452 bb_dead_regs[offset] |= bit;
3453 }
3454
3455 /* Regs live at the time of a call instruction must not
3456 go in a register clobbered by calls. Record this for
3457 all regs now live. Note that insns which are born or
3458 die in a call do not cross a call, so this must be done
3459 after the killings (above) and before the births
3460 (below). */
3461 p = regs_sometimes_live;
3462 for (i = 0; i < sometimes_max; i++, p++)
3463 if (bb_live_regs[p->offset]
3464 & ((REGSET_ELT_TYPE) 1 << p->bit))
3465 p->calls_crossed += 1;
3466 }
3467
3468 /* Make every register used live, and add REG_DEAD notes for
3469 registers which were not live before we started. */
3470 attach_deaths_insn (insn);
3471
3472 /* Find registers now made live by that instruction. */
3473 for (i = 0; i < regset_size; i++)
3474 {
3475 REGSET_ELT_TYPE diff = bb_live_regs[i] & ~old_live_regs[i];
3476 if (diff)
3477 {
3478 register int bit;
3479 old_live_regs[i] |= diff;
3480 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
3481 if (diff & ((REGSET_ELT_TYPE) 1 << bit))
3482 sometimes_max
3483 = new_sometimes_live (regs_sometimes_live, i, bit,
3484 sometimes_max);
3485 }
3486 }
3487
3488 /* Count lengths of all regs we are worrying about now,
3489 and handle registers no longer live. */
3490
3491 for (i = 0; i < sometimes_max; i++)
3492 {
3493 register struct sometimes *p = &regs_sometimes_live[i];
3494 int regno = p->offset*REGSET_ELT_BITS + p->bit;
3495
3496 p->live_length += 1;
3497
3498 if ((bb_live_regs[p->offset]
3499 & ((REGSET_ELT_TYPE) 1 << p->bit)) == 0)
3500 {
3501 /* This is the end of one of this register's lifetime
3502 segments. Save the lifetime info collected so far,
3503 and clear its bit in the old_live_regs entry. */
3504 sched_reg_live_length[regno] += p->live_length;
3505 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
3506 old_live_regs[p->offset]
3507 &= ~((REGSET_ELT_TYPE) 1 << p->bit);
3508
3509 /* Delete the reg_sometimes_live entry for this reg by
3510 copying the last entry over top of it. */
3511 *p = regs_sometimes_live[--sometimes_max];
3512 /* ...and decrement i so that this newly copied entry
3513 will be processed. */
3514 i--;
3515 }
3516 }
3517
3518 link = insn;
3519 insn = PREV_INSN (insn);
3520 }
3521 while (SCHED_GROUP_P (link));
3522
3523 /* Set INSN back to the insn we are scheduling now. */
3524 insn = ready[0];
3525 }
3526
3527 /* Schedule INSN. Remove it from the ready list. */
3528 ready += 1;
3529 n_ready -= 1;
3530
3531 sched_n_insns += 1;
3532 NEXT_INSN (insn) = last;
3533 PREV_INSN (last) = insn;
3534 last = insn;
3535
3536 /* Everything that precedes INSN now either becomes "ready", if
3537 it can execute immediately before INSN, or "pending", if
3538 there must be a delay. Give INSN high enough priority that
3539 at least one (maybe more) reg-killing insns can be launched
3540 ahead of all others. Mark INSN as scheduled by changing its
3541 priority to -1. */
3542 INSN_PRIORITY (insn) = LAUNCH_PRIORITY;
3543 new_ready = schedule_insn (insn, ready, n_ready, clock);
3544 INSN_PRIORITY (insn) = DONE_PRIORITY;
3545
3546 /* Schedule all prior insns that must not be moved. */
3547 if (SCHED_GROUP_P (insn))
3548 {
3549 /* Disable these insns from being launched. */
3550 link = insn;
3551 while (SCHED_GROUP_P (link))
3552 {
3553 /* Disable these insns from being launched by anybody. */
3554 link = PREV_INSN (link);
3555 INSN_REF_COUNT (link) = 0;
3556 }
3557
3558 /* None of these insns can move forward into delay slots. */
3559 while (SCHED_GROUP_P (insn))
3560 {
3561 insn = PREV_INSN (insn);
3562 new_ready = schedule_insn (insn, ready, new_ready, clock);
3563 INSN_PRIORITY (insn) = DONE_PRIORITY;
3564
3565 sched_n_insns += 1;
3566 NEXT_INSN (insn) = last;
3567 PREV_INSN (last) = insn;
3568 last = insn;
3569 }
3570 }
3571 }
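
  /* All insns placed in the stall queue should have been drained back
     into the ready list and scheduled by now; a nonzero queue size at
     this point would indicate a scheduling bug.  */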
  if (q_size != 0)
    abort ();

  if (reload_completed == 0)
    finish_sometimes_live (regs_sometimes_live, sometimes_max);

  /* HEAD is now the first insn in the chain of insns that have
     been scheduled by the loop above.
     TAIL is the last of those insns.  */
  head = insn;

  /* NOTE_LIST is the end of a chain of notes previously found
     among the insns.  Insert them at the beginning of the insns.  */
  if (note_list != 0)
    {
      rtx note_head = note_list;
      while (PREV_INSN (note_head))
        note_head = PREV_INSN (note_head);

      PREV_INSN (head) = note_list;
      NEXT_INSN (note_list) = head;
      head = note_head;
    }

  /* In theory, there should be no REG_DEAD notes leftover at the end.
     In practice, this can occur as the result of bugs in flow.c,
     combine.c, and/or sched.c.  The values of the REG_DEAD notes
     remaining are meaningless, because dead_notes is just used as a
     free list.  */
#if 1
  if (dead_notes != 0)
    abort ();
#endif

  if (new_needs & NEED_HEAD)
    basic_block_head[b] = head;
  PREV_INSN (head) = prev_head;
  NEXT_INSN (prev_head) = head;

  if (new_needs & NEED_TAIL)
    basic_block_end[b] = tail;
  NEXT_INSN (tail) = next_tail;
  PREV_INSN (next_tail) = tail;

  /* Restore the line-number notes of each insn.  */
  if (write_symbols != NO_DEBUG)
    {
      rtx line, note, prev, new;
      int notes = 0;

      head = basic_block_head[b];
      next_tail = NEXT_INSN (basic_block_end[b]);

      /* Determine the current line-number.  We want to know the current
         line number of the first insn of the block here, in case it is
         different from the true line number that was saved earlier.  If
         different, then we need a line number note before the first insn
         of this block.  If it happens to be the same, then we don't want to
         emit another line number note here.  */
      for (line = head; line; line = PREV_INSN (line))
        if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
          break;

      /* Walk the insns keeping track of the current line-number and inserting
         the line-number notes as needed.  */
      for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
        if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
          line = insn;
        else if (! (GET_CODE (insn) == NOTE
                    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
                 && (note = LINE_NOTE (insn)) != 0
                 && note != line
                 && (line == 0
                     || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
                     || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
          {
            line = note;
            prev = PREV_INSN (insn);
            if (LINE_NOTE (note))
              {
                /* Re-use the original line-number note.  */
                LINE_NOTE (note) = 0;
                PREV_INSN (note) = prev;
                NEXT_INSN (prev) = note;
                PREV_INSN (insn) = note;
                NEXT_INSN (note) = insn;
              }
            else
              {
                notes++;
                new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
                NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
              }
          }
      if (file && notes)
        fprintf (file, ";; added %d line-number notes\n", notes);
    }

  if (file)
    {
      fprintf (file, ";; total time = %d\n;; new basic block head = %d\n;; new basic block end = %d\n\n",
               clock, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
    }

  /* Yow!  We're done!  */
  free_pending_lists ();

  return;
}
\f
/* Subroutine of split_hard_reg_notes.  Searches X for any reference to
   REGNO, returning the rtx of the reference found if any.  Otherwise,
   returns 0.  */

rtx
regno_use_in (regno, x)
     int regno;
     rtx x;
{
  register char *fmt;
  int i, j;
  rtx tem;

  if (GET_CODE (x) == REG && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (tem = regno_use_in (regno, XEXP (x, i)))
            return tem;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (tem = regno_use_in (regno, XVECEXP (x, i, j)))
            return tem;
    }

  return 0;
}
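
/* For example, given the (hypothetical) pattern
     (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4)))
   regno_use_in (101, pat) returns the inner (reg:SI 101) expression,
   while regno_use_in (102, pat) returns 0.  */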

/* Subroutine of update_flow_info.  Determines whether any new REG_NOTEs are
   needed for the hard register mentioned in the note.  This can happen
   if the reference to the hard register in the original insn was split into
   several smaller hard register references in the split insns.  */

static void
split_hard_reg_notes (note, first, last, orig_insn)
     rtx note, first, last, orig_insn;
{
  rtx reg, temp, link;
  int n_regs, i, new_reg;
  rtx insn;

  /* Assume that this is a REG_DEAD note.  */
  if (REG_NOTE_KIND (note) != REG_DEAD)
    abort ();

  reg = XEXP (note, 0);

  n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));

  /* ??? Could add a check here to see whether the hard register is
     referenced in the same mode as in the original insn.  If so, then it
     has not been split, and the rest of the code below is unnecessary.  */

  for (i = 1; i < n_regs; i++)
    {
      new_reg = REGNO (reg) + i;

      /* Check for references to new_reg in the split insns.  */
      for (insn = last; ; insn = PREV_INSN (insn))
        {
          if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
              && (temp = regno_use_in (new_reg, PATTERN (insn))))
            {
              /* Create a new reg dead note here.  */
              link = rtx_alloc (EXPR_LIST);
              PUT_REG_NOTE_KIND (link, REG_DEAD);
              XEXP (link, 0) = temp;
              XEXP (link, 1) = REG_NOTES (insn);
              REG_NOTES (insn) = link;
              break;
            }
          /* It isn't mentioned anywhere, so no new reg note is needed for
             this register.  */
          if (insn == first)
            break;
        }
    }
}
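
/* Illustration (hypothetical 32-bit target): if the original insn set a
   DImode value in hard regs 0-1 and carried a (REG_DEAD (reg:DI 0)) note,
   and splitting produced two SImode insns, the loop above finds the last
   split insn that mentions reg 1 and gives it its own REG_DEAD note; the
   note for reg 0 itself is moved separately by update_flow_info.  */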

/* Subroutine of update_flow_info.  Determines whether a SET or CLOBBER in an
   insn created by splitting needs a REG_DEAD or REG_UNUSED note added.  */

static void
new_insn_dead_notes (pat, insn, last, orig_insn)
     rtx pat, insn, last, orig_insn;
{
  rtx dest, tem, set;

  /* PAT is either a CLOBBER or a SET here.  */
  dest = XEXP (pat, 0);

  while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == STRICT_LOW_PART
         || GET_CODE (dest) == SIGN_EXTRACT)
    dest = XEXP (dest, 0);

  if (GET_CODE (dest) == REG)
    {
      for (tem = last; tem != insn; tem = PREV_INSN (tem))
        {
          if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
              && reg_overlap_mentioned_p (dest, PATTERN (tem))
              && (set = single_set (tem)))
            {
              rtx tem_dest = SET_DEST (set);

              while (GET_CODE (tem_dest) == ZERO_EXTRACT
                     || GET_CODE (tem_dest) == SUBREG
                     || GET_CODE (tem_dest) == STRICT_LOW_PART
                     || GET_CODE (tem_dest) == SIGN_EXTRACT)
                tem_dest = XEXP (tem_dest, 0);

              if (tem_dest != dest)
                {
                  /* Use the same scheme as combine.c: don't put both
                     REG_DEAD and REG_UNUSED notes on the same insn.  */
                  if (! find_regno_note (tem, REG_UNUSED, REGNO (dest))
                      && ! find_regno_note (tem, REG_DEAD, REGNO (dest)))
                    {
                      rtx note = rtx_alloc (EXPR_LIST);
                      PUT_REG_NOTE_KIND (note, REG_DEAD);
                      XEXP (note, 0) = dest;
                      XEXP (note, 1) = REG_NOTES (tem);
                      REG_NOTES (tem) = note;
                    }
                  /* The reg only dies in one insn, the last one that uses
                     it.  */
                  break;
                }
              else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
                /* We found an instruction that both uses the register and
                   sets it, so no new REG_NOTE is needed for this set.  */
                break;
            }
        }
      /* If this is a set, it must die somewhere, unless it is the dest of
         the original insn, and hence is live after the original insn.  Abort
         if it isn't supposed to be live after the original insn.

         If this is a clobber, then just add a REG_UNUSED note.  */
      if (tem == insn)
        {
          int live_after_orig_insn = 0;
          rtx pattern = PATTERN (orig_insn);
          int i;

          if (GET_CODE (pat) == CLOBBER)
            {
              rtx note = rtx_alloc (EXPR_LIST);
              PUT_REG_NOTE_KIND (note, REG_UNUSED);
              XEXP (note, 0) = dest;
              XEXP (note, 1) = REG_NOTES (insn);
              REG_NOTES (insn) = note;
              return;
            }

          /* The original insn could have multiple sets, so search the
             insn for all sets.  */
          if (GET_CODE (pattern) == SET)
            {
              if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
                live_after_orig_insn = 1;
            }
          else if (GET_CODE (pattern) == PARALLEL)
            {
              for (i = 0; i < XVECLEN (pattern, 0); i++)
                if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
                    && reg_overlap_mentioned_p (dest,
                                                SET_DEST (XVECEXP (pattern,
                                                                   0, i))))
                  live_after_orig_insn = 1;
            }

          if (! live_after_orig_insn)
            abort ();
        }
    }
}
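
/* Illustration (hypothetical): if splitting turned one insn into
     (set (reg 50) (plus (reg 51) (const_int 1)))
     (set (reg 52) (mult (reg 50) (reg 53)))
   then reg 50, which was born and died inside the original insn, now
   needs a (REG_DEAD (reg 50)) note on the second insn; the walk above
   finds the last insn between INSN and LAST that uses the set value.  */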

/* Subroutine of update_flow_info.  Update the value of reg_n_sets for all
   registers modified by X.  INC is -1 if the containing insn is being
   deleted, and is 1 if the containing insn is a newly generated insn.  */

static void
update_n_sets (x, inc)
     rtx x;
     int inc;
{
  rtx dest = SET_DEST (x);

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    dest = SUBREG_REG (dest);

  if (GET_CODE (dest) == REG)
    {
      int regno = REGNO (dest);

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          register int i;
          int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));

          for (i = regno; i < endregno; i++)
            reg_n_sets[i] += inc;
        }
      else
        reg_n_sets[regno] += inc;
    }
}
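
/* For example, if a deleted original insn had the pattern
     (set (reg:DI 2) ...)
   on a target where DImode occupies two hard registers, update_n_sets
   is called with INC == -1 and decrements reg_n_sets[2] and
   reg_n_sets[3]; each newly generated insn then increments the counts
   for whatever it sets.  */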

/* Updates all flow-analysis related quantities (including REG_NOTES) for
   the insns from FIRST to LAST inclusive that were created by splitting
   ORIG_INSN.  NOTES are the original REG_NOTES.  */

static void
update_flow_info (notes, first, last, orig_insn)
     rtx notes;
     rtx first, last;
     rtx orig_insn;
{
  rtx insn, note;
  rtx next;
  rtx orig_dest, temp;
  rtx set;

  /* Get and save the destination set by the original insn.  */

  orig_dest = single_set (orig_insn);
  if (orig_dest)
    orig_dest = SET_DEST (orig_dest);

  /* Move REG_NOTES from the original insn to where they now belong.  */

  for (note = notes; note; note = next)
    {
      next = XEXP (note, 1);
      switch (REG_NOTE_KIND (note))
        {
        case REG_DEAD:
        case REG_UNUSED:
          /* Move these notes from the original insn to the last new insn
             where the register is now set.  */

          for (insn = last; ; insn = PREV_INSN (insn))
            {
              if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
                  && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
                {
                  XEXP (note, 1) = REG_NOTES (insn);
                  REG_NOTES (insn) = note;

                  /* Sometimes need to convert REG_UNUSED notes to REG_DEAD
                     notes.  */
                  /* ??? This won't handle multiple word registers correctly,
                     but should be good enough for now.  */
                  if (REG_NOTE_KIND (note) == REG_UNUSED
                      && ! dead_or_set_p (insn, XEXP (note, 0)))
                    PUT_REG_NOTE_KIND (note, REG_DEAD);

                  /* The reg only dies in one insn, the last one that uses
                     it.  */
                  break;
                }
              /* It must die somewhere; fail if we couldn't find where it
                 died.

                 If this is a REG_UNUSED note, then it must be a temporary
                 register that was not needed by this instantiation of the
                 pattern, so we can safely ignore it.  */
              if (insn == first)
                {
                  if (REG_NOTE_KIND (note) != REG_UNUSED)
                    abort ();

                  break;
                }
            }

          /* If this note refers to a multiple word hard register, it may
             have been split into several smaller hard register references.
             Check to see if there are any new register references that
             need REG_NOTES added for them.  */
          temp = XEXP (note, 0);
          if (REG_NOTE_KIND (note) == REG_DEAD
              && GET_CODE (temp) == REG
              && REGNO (temp) < FIRST_PSEUDO_REGISTER
              && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) > 1)
            split_hard_reg_notes (note, first, last, orig_insn);
          break;

        case REG_WAS_0:
          /* This note applies to the dest of the original insn.  Find the
             first new insn that now has the same dest, and move the note
             there.  */

          if (! orig_dest)
            abort ();

          for (insn = first; ; insn = NEXT_INSN (insn))
            {
              if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
                  && (temp = single_set (insn))
                  && rtx_equal_p (SET_DEST (temp), orig_dest))
                {
                  XEXP (note, 1) = REG_NOTES (insn);
                  REG_NOTES (insn) = note;
                  /* The reg is only zero before one insn, the first that
                     uses it.  */
                  break;
                }
              /* It must be set somewhere; fail if we couldn't find where it
                 was set.  */
              if (insn == last)
                abort ();
            }
          break;

        case REG_EQUAL:
        case REG_EQUIV:
          /* A REG_EQUIV or REG_EQUAL note on an insn with more than one
             set is meaningless.  Just drop the note.  */
          if (! orig_dest)
            break;

        case REG_NO_CONFLICT:
          /* These notes apply to the dest of the original insn.  Find the
             last new insn that now has the same dest, and move the note
             there.  */

          if (! orig_dest)
            abort ();

          for (insn = last; ; insn = PREV_INSN (insn))
            {
              if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
                  && (temp = single_set (insn))
                  && rtx_equal_p (SET_DEST (temp), orig_dest))
                {
                  XEXP (note, 1) = REG_NOTES (insn);
                  REG_NOTES (insn) = note;
                  /* Only put this note on one of the new insns.  */
                  break;
                }

              /* The original dest must still be set someplace.  Abort if we
                 couldn't find it.  */
              if (insn == first)
                abort ();
            }
          break;

        case REG_LIBCALL:
          /* Move a REG_LIBCALL note to the first insn created, and update
             the corresponding REG_RETVAL note.  */
          XEXP (note, 1) = REG_NOTES (first);
          REG_NOTES (first) = note;

          insn = XEXP (note, 0);
          note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
          if (note)
            XEXP (note, 0) = first;
          break;

        case REG_RETVAL:
          /* Move a REG_RETVAL note to the last insn created, and update
             the corresponding REG_LIBCALL note.  */
          XEXP (note, 1) = REG_NOTES (last);
          REG_NOTES (last) = note;

          insn = XEXP (note, 0);
          note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
          if (note)
            XEXP (note, 0) = last;
          break;

        case REG_NONNEG:
          /* This should be moved to whichever instruction is a JUMP_INSN.  */

          for (insn = last; ; insn = PREV_INSN (insn))
            {
              if (GET_CODE (insn) == JUMP_INSN)
                {
                  XEXP (note, 1) = REG_NOTES (insn);
                  REG_NOTES (insn) = note;
                  /* Only put this note on one of the new insns.  */
                  break;
                }
              /* Fail if we couldn't find a JUMP_INSN.  */
              if (insn == first)
                abort ();
            }
          break;

        case REG_INC:
          /* This should be moved to whichever instruction now has the
             increment operation.  */
          abort ();

        case REG_LABEL:
          /* Should be moved to the new insn(s) which use the label.  */
          for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
            if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
                && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
              REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL,
                                          XEXP (note, 0), REG_NOTES (insn));
          break;

        case REG_CC_SETTER:
        case REG_CC_USER:
          /* These two notes will never appear until after reorg, so we don't
             have to handle them here.  */
        default:
          abort ();
        }
    }

  /* Each new insn created, except the last, has a new set.  If the
     destination is a register, then this reg is now live across several
     insns, whereas previously the dest reg was born and died within the
     same insn.  To reflect this, we now need a REG_DEAD note on the insn
     where this dest reg dies.

     Similarly, the new insns may have clobbers that need REG_UNUSED
     notes.  */

  for (insn = first; insn != last; insn = NEXT_INSN (insn))
    {
      rtx pat;
      int i;

      pat = PATTERN (insn);
      if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
        new_insn_dead_notes (pat, insn, last, orig_insn);
      else if (GET_CODE (pat) == PARALLEL)
        {
          for (i = 0; i < XVECLEN (pat, 0); i++)
            if (GET_CODE (XVECEXP (pat, 0, i)) == SET
                || GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
              new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
        }
    }

  /* If any insn, except the last, uses the register set by the last insn,
     then we need a new REG_DEAD note on that insn.  In this case, there
     would not have been a REG_DEAD note for this register in the original
     insn because it was used and set within one insn.

     There is no new REG_DEAD note needed if the last insn uses the register
     that it is setting.  */

  set = single_set (last);
  if (set)
    {
      rtx dest = SET_DEST (set);

      while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
             || GET_CODE (dest) == STRICT_LOW_PART
             || GET_CODE (dest) == SIGN_EXTRACT)
        dest = XEXP (dest, 0);

      if (GET_CODE (dest) == REG
          && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
        {
          for (insn = PREV_INSN (last); ; insn = PREV_INSN (insn))
            {
              if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
                  && reg_mentioned_p (dest, PATTERN (insn))
                  && (set = single_set (insn)))
                {
                  rtx insn_dest = SET_DEST (set);

                  while (GET_CODE (insn_dest) == ZERO_EXTRACT
                         || GET_CODE (insn_dest) == SUBREG
                         || GET_CODE (insn_dest) == STRICT_LOW_PART
                         || GET_CODE (insn_dest) == SIGN_EXTRACT)
                    insn_dest = XEXP (insn_dest, 0);

                  if (insn_dest != dest)
                    {
                      note = rtx_alloc (EXPR_LIST);
                      PUT_REG_NOTE_KIND (note, REG_DEAD);
                      XEXP (note, 0) = dest;
                      XEXP (note, 1) = REG_NOTES (insn);
                      REG_NOTES (insn) = note;
                      /* The reg only dies in one insn, the last one
                         that uses it.  */
                      break;
                    }
                }
              if (insn == first)
                break;
            }
        }
    }

  /* If the original dest is modifying a multiple register target, and the
     original instruction was split such that the original dest is now set
     by two or more SUBREG sets, then the split insns no longer kill the
     destination of the original insn.

     In this case, if there exists an instruction in the same basic block,
     before the split insn, which uses the original dest, and this use is
     killed by the original insn, then we must remove the REG_DEAD note on
     this insn, because it is now superfluous.

     This does not apply when a hard register gets split, because the code
     knows how to handle overlapping hard registers properly.  */
  if (orig_dest && GET_CODE (orig_dest) == REG)
    {
      int found_orig_dest = 0;
      int found_split_dest = 0;

      for (insn = first; ; insn = NEXT_INSN (insn))
        {
          set = single_set (insn);
          if (set)
            {
              if (GET_CODE (SET_DEST (set)) == REG
                  && REGNO (SET_DEST (set)) == REGNO (orig_dest))
                {
                  found_orig_dest = 1;
                  break;
                }
              else if (GET_CODE (SET_DEST (set)) == SUBREG
                       && SUBREG_REG (SET_DEST (set)) == orig_dest)
                {
                  found_split_dest = 1;
                  break;
                }
            }

          if (insn == last)
            break;
        }

      if (found_split_dest)
        {
          /* Search backwards from FIRST, looking for the first insn that uses
             the original dest.  Stop if we pass a CODE_LABEL or a JUMP_INSN.
             If we find an insn, and it has a REG_DEAD note, then delete the
             note.  */

          for (insn = first; insn; insn = PREV_INSN (insn))
            {
              if (GET_CODE (insn) == CODE_LABEL
                  || GET_CODE (insn) == JUMP_INSN)
                break;
              else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
                       && reg_mentioned_p (orig_dest, insn))
                {
                  note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
                  if (note)
                    remove_note (insn, note);
                }
            }
        }
      else if (! found_orig_dest)
        {
          /* This should never happen.  */
          abort ();
        }
    }

  /* Update reg_n_sets.  This is necessary to prevent local alloc from
     converting REG_EQUAL notes to REG_EQUIV when splitting has modified
     a reg from set once to set multiple times.  */

  {
    rtx x = PATTERN (orig_insn);
    RTX_CODE code = GET_CODE (x);

    if (code == SET || code == CLOBBER)
      update_n_sets (x, -1);
    else if (code == PARALLEL)
      {
        int i;
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          {
            code = GET_CODE (XVECEXP (x, 0, i));
            if (code == SET || code == CLOBBER)
              update_n_sets (XVECEXP (x, 0, i), -1);
          }
      }

    for (insn = first; ; insn = NEXT_INSN (insn))
      {
        x = PATTERN (insn);
        code = GET_CODE (x);

        if (code == SET || code == CLOBBER)
          update_n_sets (x, 1);
        else if (code == PARALLEL)
          {
            int i;
            for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
              {
                code = GET_CODE (XVECEXP (x, 0, i));
                if (code == SET || code == CLOBBER)
                  update_n_sets (XVECEXP (x, 0, i), 1);
              }
          }

        if (insn == last)
          break;
      }
  }
}
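
/* Worked example (hypothetical): suppose ORIG_INSN was
     (set (reg 60) (plus (reg 61) (mem ...)))   with (REG_DEAD (reg 61))
   and try_split replaced it with
     FIRST: (set (reg 70) (mem ...))
     LAST:  (set (reg 60) (plus (reg 61) (reg 70)))
   The loop over NOTES moves (REG_DEAD (reg 61)) onto LAST, the last insn
   that mentions reg 61, and new_insn_dead_notes then gives the new
   temporary reg 70 a REG_DEAD note on LAST, where it dies.  */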

/* The one entry point in this file.  DUMP_FILE is the dump file for
   this pass.  */

void
schedule_insns (dump_file)
     FILE *dump_file;
{
  int max_uid = MAX_INSNS_PER_SPLIT * (get_max_uid () + 1);
  int i, b;
  rtx insn;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == 0)
    return;

  /* Create an insn here so that we can hang dependencies off of it later.  */
  sched_before_next_call
    = gen_rtx (INSN, VOIDmode, 0, NULL_RTX, NULL_RTX,
               NULL_RTX, 0, NULL_RTX, 0);

  /* Initialize the unused_*_lists.  We can't use the ones left over from
     the previous function, because gcc has freed that memory.  We can use
     the ones left over from the first sched pass in the second pass however,
     so only clear them on the first sched pass.  The first pass is before
     reload if flag_schedule_insns is set, otherwise it is afterwards.  */

  if (reload_completed == 0 || ! flag_schedule_insns)
    {
      unused_insn_list = 0;
      unused_expr_list = 0;
    }

  /* We create no insns here, only reorder them, so we
     remember how far we can cut back the stack on exit.  */

  /* Allocate data for this pass.  See comments, above,
     for what these vectors do.  */
  insn_luid = (int *) alloca (max_uid * sizeof (int));
  insn_priority = (int *) alloca (max_uid * sizeof (int));
  insn_tick = (int *) alloca (max_uid * sizeof (int));
  insn_costs = (short *) alloca (max_uid * sizeof (short));
  insn_units = (short *) alloca (max_uid * sizeof (short));
  insn_blockage = (unsigned int *) alloca (max_uid * sizeof (unsigned int));
  insn_ref_count = (int *) alloca (max_uid * sizeof (int));

  if (reload_completed == 0)
    {
      sched_reg_n_deaths = (short *) alloca (max_regno * sizeof (short));
      sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
      sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
      bb_dead_regs = (regset) alloca (regset_bytes);
      bb_live_regs = (regset) alloca (regset_bytes);
      bzero (sched_reg_n_calls_crossed, max_regno * sizeof (int));
      bzero (sched_reg_live_length, max_regno * sizeof (int));
      bcopy (reg_n_deaths, sched_reg_n_deaths, max_regno * sizeof (short));
      init_alias_analysis ();
    }
  else
    {
      sched_reg_n_deaths = 0;
      sched_reg_n_calls_crossed = 0;
      sched_reg_live_length = 0;
      bb_dead_regs = 0;
      bb_live_regs = 0;
      if (! flag_schedule_insns)
        init_alias_analysis ();
    }

  if (write_symbols != NO_DEBUG)
    {
      rtx line;

      line_note = (rtx *) alloca (max_uid * sizeof (rtx));
      bzero (line_note, max_uid * sizeof (rtx));
      line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
      bzero (line_note_head, n_basic_blocks * sizeof (rtx));

      /* Determine the line-number at the start of each basic block.
         This must be computed and saved now, because after a basic block's
         predecessor has been scheduled, it is impossible to accurately
         determine the correct line number for the first insn of the block.  */

      for (b = 0; b < n_basic_blocks; b++)
        for (line = basic_block_head[b]; line; line = PREV_INSN (line))
          if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
            {
              line_note_head[b] = line;
              break;
            }
    }

  bzero (insn_luid, max_uid * sizeof (int));
  bzero (insn_priority, max_uid * sizeof (int));
  bzero (insn_tick, max_uid * sizeof (int));
  bzero (insn_costs, max_uid * sizeof (short));
  bzero (insn_units, max_uid * sizeof (short));
  bzero (insn_blockage, max_uid * sizeof (unsigned int));
  bzero (insn_ref_count, max_uid * sizeof (int));

  /* Schedule each basic block, block by block.  */

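  /* Make sure the insn chain does not end exactly at the last basic
     block's boundary: schedule_block splices through
     NEXT_INSN (basic_block_end[b]), so something must follow the final
     block's last real insn.  */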
  if (NEXT_INSN (basic_block_end[n_basic_blocks-1]) == 0
      || (GET_CODE (basic_block_end[n_basic_blocks-1]) != NOTE
          && GET_CODE (basic_block_end[n_basic_blocks-1]) != CODE_LABEL))
    emit_note_after (NOTE_INSN_DELETED, basic_block_end[n_basic_blocks-1]);

  for (b = 0; b < n_basic_blocks; b++)
    {
      rtx insn, next;
      rtx insns;

      note_list = 0;

      for (insn = basic_block_head[b]; ; insn = next)
        {
          rtx prev;
          rtx set;

          /* Can't use `next_real_insn' because that
             might go across CODE_LABELS and short-out basic blocks.  */
          next = NEXT_INSN (insn);
          if (GET_CODE (insn) != INSN)
            {
              if (insn == basic_block_end[b])
                break;

              continue;
            }

          /* Don't split no-op move insns.  These should silently disappear
             later in final.  Splitting such insns would break the code
             that handles REG_NO_CONFLICT blocks.  */
          set = single_set (insn);
          if (set && rtx_equal_p (SET_SRC (set), SET_DEST (set)))
            {
              if (insn == basic_block_end[b])
                break;

              /* Nops get in the way while scheduling, so delete them now if
                 register allocation has already been done.  It is too risky
                 to try to do this before register allocation, and there are
                 unlikely to be very many nops then anyway.  */
              if (reload_completed)
                {
                  PUT_CODE (insn, NOTE);
                  NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
                  NOTE_SOURCE_FILE (insn) = 0;
                }

              continue;
            }

          /* Split insns here to get max fine-grain parallelism.  */
          prev = PREV_INSN (insn);
          if (reload_completed == 0)
            {
              rtx last, first = PREV_INSN (insn);
              rtx notes = REG_NOTES (insn);

              last = try_split (PATTERN (insn), insn, 1);
              if (last != insn)
                {
                  /* try_split returns the NOTE that INSN became.  */
                  first = NEXT_INSN (first);
                  update_flow_info (notes, first, last, insn);

                  PUT_CODE (insn, NOTE);
                  NOTE_SOURCE_FILE (insn) = 0;
                  NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
                  if (insn == basic_block_head[b])
                    basic_block_head[b] = first;
                  if (insn == basic_block_end[b])
                    {
                      basic_block_end[b] = last;
                      break;
                    }
                }
            }

          if (insn == basic_block_end[b])
            break;
        }

      schedule_block (b, dump_file);

#ifdef USE_C_ALLOCA
      alloca (0);
#endif
    }

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes (get_insns ());

  if (write_symbols != NO_DEBUG)
    {
      rtx line = 0;
      rtx insn = get_insns ();
      int active_insn = 0;
      int notes = 0;

      /* Walk the insns deleting redundant line-number notes.  Many of these
         are already present.  The remainder tend to occur at basic
         block boundaries.  */
      for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
        if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
          {
            /* If there are no active insns following, INSN is redundant.  */
            if (active_insn == 0)
              {
                notes++;
                NOTE_SOURCE_FILE (insn) = 0;
                NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
              }
            /* If the line number is unchanged, LINE is redundant.  */
            else if (line
                     && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
                     && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
              {
                notes++;
                NOTE_SOURCE_FILE (line) = 0;
                NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
                line = insn;
              }
            else
              line = insn;
            active_insn = 0;
          }
        else if (! ((GET_CODE (insn) == NOTE
                     && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
                    || (GET_CODE (insn) == INSN
                        && (GET_CODE (PATTERN (insn)) == USE
                            || GET_CODE (PATTERN (insn)) == CLOBBER))))
          active_insn++;

      if (dump_file && notes)
        fprintf (dump_file, ";; deleted %d line-number notes\n", notes);
    }

  if (reload_completed == 0)
    {
      int regno;
      for (regno = 0; regno < max_regno; regno++)
        if (sched_reg_live_length[regno])
          {
            if (dump_file)
              {
                if (reg_live_length[regno] > sched_reg_live_length[regno])
                  fprintf (dump_file,
                           ";; register %d life shortened from %d to %d\n",
                           regno, reg_live_length[regno],
                           sched_reg_live_length[regno]);
                /* Negative values are special; don't overwrite the current
                   reg_live_length value if it is negative.  */
                else if (reg_live_length[regno] < sched_reg_live_length[regno]
                         && reg_live_length[regno] >= 0)
                  fprintf (dump_file,
                           ";; register %d life extended from %d to %d\n",
                           regno, reg_live_length[regno],
                           sched_reg_live_length[regno]);

                if (reg_n_calls_crossed[regno]
                    && ! sched_reg_n_calls_crossed[regno])
                  fprintf (dump_file,
                           ";; register %d no longer crosses calls\n", regno);
                else if (! reg_n_calls_crossed[regno]
                         && sched_reg_n_calls_crossed[regno])
                  fprintf (dump_file,
                           ";; register %d now crosses calls\n", regno);
              }
            /* Negative values are special; don't overwrite the current
               reg_live_length value if it is negative.  */
            if (reg_live_length[regno] >= 0)
              reg_live_length[regno] = sched_reg_live_length[regno];
            reg_n_calls_crossed[regno] = sched_reg_n_calls_crossed[regno];
          }
    }
}
#endif /* INSN_SCHEDULING */