/* Instruction scheduling pass.
   Copyright (C) 1992 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

/* Instruction scheduling pass.

   This pass implements list scheduling within basic blocks.  It is
   run after flow analysis, but before register allocation.  The
   scheduler works as follows:

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning
   values to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   Function unit conflicts are resolved during reverse list scheduling
   by tracking the time when each insn is committed to the schedule
   and from that, the time the function units it uses must be free.
   As insns on the ready list are considered for scheduling, those
   that would result in a blockage of the already committed insns are
   queued until no blockage will result.  Among the remaining insns on
   the ready list to be considered, the first one with the largest
   potential for causing a subsequent blockage is chosen.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with lowest conflict cost, ties broken by
   2.  choose insn with the longest path to end of bb, ties broken by
   3.  choose insn that kills the most registers, ties broken by
   4.  choose insn that conflicts with the most ready insns, or finally
   5.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence) can we move operations past
   memory references.  To a first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not do any branch scheduling.  But
   that is OK, since we can use GNU's delayed branch scheduling
   pass to take care of this case.

   Also note that no further optimizations based on algebraic identities
   are performed, so this pass would be a good one in which to perform
   instruction splitting, such as breaking up a multiply instruction into
   shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   This pass must update information that subsequent passes expect to be
   correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, basic_block_head,
   basic_block_end.

   The information in the line number notes is carefully retained by this
   pass.  All other NOTE insns are grouped in their same relative order at
   the beginning of basic blocks that have been scheduled.  */
\f
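/* As a purely illustrative sketch of the scheme above (these insns are
   hypothetical, not taken from any port), consider a block whose data
   dependence graph is

       i1: r1 = mem[a]         ; load with a result latency of 2
       i2: r2 = r1 + r3        ; uses r1
       i3: mem[b] = r2         ; uses r2

   Scheduling proceeds in reverse: i3 is ready first; committing it
   satisfies i2's dependence, and committing i2 satisfies i1's.  An insn
   independent of this chain can be placed into the stall cycle implied
   by the load's latency instead of lengthening the critical path.  */
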
#include <stdio.h>
#include "config.h"
#include "rtl.h"
#include "basic-block.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"

/* Arrays set up by scheduling for the same respective purposes as
   similar-named arrays set up by flow analysis.  We work with these
   arrays during the scheduling pass so we can compare values against
   unscheduled code.

   Values of these arrays are copied at the end of this pass into the
   arrays set up by flow analysis.  */
static short *sched_reg_n_deaths;
static int *sched_reg_n_calls_crossed;
static int *sched_reg_live_length;

/* Element N is the next insn that sets (hard or pseudo) register
   N within the current basic block; or zero, if there is no
   such insn.  Needed for new registers which may be introduced
   by splitting insns.  */
static rtx *reg_last_uses;
static rtx *reg_last_sets;

/* Vector indexed by INSN_UID giving the original ordering of the insns.  */
static int *insn_luid;
#define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving each instruction a priority.  */
static int *insn_priority;
#define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])

static short *insn_costs;
#define INSN_COST(INSN) insn_costs[INSN_UID (INSN)]

/* Vector indexed by INSN_UID giving an encoding of the function units
   used.  */
static short *insn_units;
#define INSN_UNIT(INSN) insn_units[INSN_UID (INSN)]

/* Vector indexed by INSN_UID giving an encoding of the blockage range
   function.  The unit and the range are encoded.  */
static unsigned int *insn_blockage;
#define INSN_BLOCKAGE(INSN) insn_blockage[INSN_UID (INSN)]
#define UNIT_BITS 5
#define BLOCKAGE_MASK ((1 << BLOCKAGE_BITS) - 1)
#define ENCODE_BLOCKAGE(U,R) \
  ((((U) << UNIT_BITS) << BLOCKAGE_BITS \
    | MIN_BLOCKAGE_COST (R)) << BLOCKAGE_BITS \
   | MAX_BLOCKAGE_COST (R))
#define UNIT_BLOCKED(B) ((B) >> (2 * BLOCKAGE_BITS))
#define BLOCKAGE_RANGE(B) \
  (((((B) >> BLOCKAGE_BITS) & BLOCKAGE_MASK) << (HOST_BITS_PER_INT / 2)) \
   | ((B) & BLOCKAGE_MASK))

/* Encodings of the `<name>_unit_blockage_range' function.  */
#define MIN_BLOCKAGE_COST(R) ((R) >> (HOST_BITS_PER_INT / 2))
#define MAX_BLOCKAGE_COST(R) ((R) & ((1 << (HOST_BITS_PER_INT / 2)) - 1))
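
/* Illustratively, the encoded word is laid out, from most to least
   significant bits, as

       [ unit tag ][ minimum blockage ][ maximum blockage ]
                     BLOCKAGE_BITS       BLOCKAGE_BITS

   so UNIT_BLOCKED recovers the tag by shifting off both cost fields,
   and BLOCKAGE_RANGE rebuilds the min/max pair in the same format that
   the `<name>_unit_blockage_range' functions return: the minimum in
   the upper half of the int and the maximum in the lower half.  */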

#define DONE_PRIORITY -1
#define MAX_PRIORITY 0x7fffffff
#define TAIL_PRIORITY 0x7ffffffe
#define LAUNCH_PRIORITY 0x7f000001
#define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
#define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)

/* Vector indexed by INSN_UID giving number of insns referring to this insn.  */
static int *insn_ref_count;
#define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving line-number note in effect for each
   insn.  For line-number notes, this indicates whether the note may be
   reused.  */
static rtx *line_note;
#define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

/* Regsets telling whether a given register is live or dead before the last
   scheduled insn.  Must scan the instructions once before scheduling to
   determine what registers are live or dead at the end of the block.  */
static regset bb_dead_regs;
static regset bb_live_regs;

/* Regset telling whether a given register is live after the insn currently
   being scheduled.  Before processing an insn, this is equal to bb_live_regs
   above.  This is used so that we can find registers that are newly born/dead
   after processing an insn.  */
static regset old_live_regs;

/* The chain of REG_DEAD notes.  REG_DEAD notes are removed from all insns
   during the initial scan and reused later.  If there are not exactly as
   many REG_DEAD notes in the post-scheduled code as there were in the
   prescheduled code then we trigger an abort because this indicates a bug.  */
static rtx dead_notes;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns following it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
       their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
       time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.  Insns may
   move from the "Ready" list to the "Queued" set if they are blocked
   due to a function unit conflict.

   The "Pending" list (P) consists of the insns in the LOG_LINKS of the
   unscheduled insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transition (R->Q) is implemented in `schedule_select' when an
   insn is found to have a function unit conflict with the already
   committed insns.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented at the top of the scheduling
   loop in `schedule_block' as time passes or stalls are introduced.  */

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  INSN_QUEUE_SIZE is a power of two larger than
   MAX_BLOCKAGE and MAX_READY_COST computed by genattr.c.  This is the
   longest time an insn may be queued.  */
#ifndef INSN_QUEUE_SIZE
#define INSN_QUEUE_SIZE 10
#endif
static rtx insn_queue[INSN_QUEUE_SIZE];
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & (INSN_QUEUE_SIZE-1))
#define NEXT_Q_AFTER(X,C) (((X)+C) & (INSN_QUEUE_SIZE-1))
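
/* For example (with a hypothetical INSN_QUEUE_SIZE of 16): NEXT_Q (15)
   wraps around to 0, and NEXT_Q_AFTER (14, 3) yields 1.  The masking
   trick is why INSN_QUEUE_SIZE must be a power of two.  */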

/* Vector indexed by INSN_UID giving the minimum clock tick at which
   the insn becomes ready.  This is used to note timing constraints for
   insns in the pending list.  */
static int *insn_tick;
#define INSN_TICK(INSN) (insn_tick[INSN_UID (INSN)])

/* Forward declarations.  */
static void sched_analyze_2 ();
static void schedule_block ();

/* Main entry point of this file.  */
void schedule_insns ();
\f
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Vector indexed by N giving the initial (unchanging) value known
   for pseudo-register N.  */
static rtx *reg_known_value;

/* Indicates number of valid entries in reg_known_value.  */
static int reg_known_value_size;

static rtx
canon_rtx (x)
     rtx x;
{
  if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER
      && REGNO (x) < reg_known_value_size)
    return reg_known_value[REGNO (x)];
  else if (GET_CODE (x) == PLUS)
    {
      rtx x0 = canon_rtx (XEXP (x, 0));
      rtx x1 = canon_rtx (XEXP (x, 1));

      if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
        {
          /* We can tolerate LO_SUMs being offset here; these
             rtl are used for nothing other than comparisons.  */
          if (GET_CODE (x0) == CONST_INT)
            return plus_constant_for_output (x1, INTVAL (x0));
          else if (GET_CODE (x1) == CONST_INT)
            return plus_constant_for_output (x0, INTVAL (x1));
          return gen_rtx (PLUS, GET_MODE (x), x0, x1);
        }
    }
  return x;
}
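
/* For instance (illustrative register numbers): if pseudo 65 is known
   to hold (symbol_ref "x"), canon_rtx rewrites (plus (reg 65)
   (const_int 4)) in terms of (symbol_ref "x") plus 4, so that two
   references made through different pseudos can be recognized as the
   same address by rtx_equal_for_memref_p below.  */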

/* Set up all info needed to perform alias analysis on memory references.  */

void
init_alias_analysis ()
{
  int maxreg = max_reg_num ();
  rtx insn;
  rtx note;
  rtx set;

  reg_known_value_size = maxreg;

  reg_known_value
    = (rtx *) oballoc ((maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx))
      - FIRST_PSEUDO_REGISTER;
  bzero (reg_known_value+FIRST_PSEUDO_REGISTER,
         (maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx));

  /* Fill in the entries with known constant values.  */
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if ((set = single_set (insn)) != 0
        && GET_CODE (SET_DEST (set)) == REG
        && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
        && (((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
             && reg_n_sets[REGNO (SET_DEST (set))] == 1)
            || (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != 0)
        && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
      reg_known_value[REGNO (SET_DEST (set))] = XEXP (note, 0);

  /* Fill in the remaining entries.  */
  while (--maxreg >= FIRST_PSEUDO_REGISTER)
    if (reg_known_value[maxreg] == 0)
      reg_known_value[maxreg] = regno_reg_rtx[maxreg];
}

/* Return 1 if X and Y are identical-looking rtx's.

   We use the data in reg_known_value above to see if two registers with
   different numbers are, in fact, equivalent.  */

static int
rtx_equal_for_memref_p (x, y)
     rtx x, y;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0 && y == 0)
    return 1;
  if (x == 0 || y == 0)
    return 0;
  x = canon_rtx (x);
  y = canon_rtx (y);

  if (x == y)
    return 1;

  code = GET_CODE (x);
  /* Rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */
  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively.  */
  if (code == REG)
    return REGNO (x) == REGNO (y);
  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fails to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'w':
          if (XWINT (x, i) != XWINT (y, i))
            return 0;
          break;

        case 'n':
        case 'i':
          if (XINT (x, i) != XINT (y, i))
            return 0;
          break;

        case 'V':
        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return 0;

          /* And the corresponding elements must match.  */
          for (j = 0; j < XVECLEN (x, i); j++)
            if (rtx_equal_for_memref_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0)
              return 0;
          break;

        case 'e':
          if (rtx_equal_for_memref_p (XEXP (x, i), XEXP (y, i)) == 0)
            return 0;
          break;

        case 'S':
        case 's':
          if (strcmp (XSTR (x, i), XSTR (y, i)))
            return 0;
          break;

        case 'u':
          /* These are just backpointers, so they don't matter.  */
          break;

        case '0':
          break;

          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's,
             except for within LABEL_REFs and SYMBOL_REFs.  */
        default:
          abort ();
        }
    }
  return 1;
}

/* Given an rtx X, find a SYMBOL_REF or LABEL_REF within
   X and return it, or return 0 if none found.  */

static rtx
find_symbolic_term (x)
     rtx x;
{
  register int i;
  register enum rtx_code code;
  register char *fmt;

  code = GET_CODE (x);
  if (code == SYMBOL_REF || code == LABEL_REF)
    return x;
  if (GET_RTX_CLASS (code) == 'o')
    return 0;

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      rtx t;

      if (fmt[i] == 'e')
        {
          t = find_symbolic_term (XEXP (x, i));
          if (t != 0)
            return t;
        }
      else if (fmt[i] == 'E')
        break;
    }
  return 0;
}

/* Return nonzero if X and Y (memory addresses) could reference the
   same location in memory.  C is an offset accumulator.  When
   C is nonzero, we are testing aliases between X and Y + C.
   XSIZE is the size in bytes of the X reference,
   similarly YSIZE is the size in bytes for Y.

   If XSIZE or YSIZE is zero, we do not know the amount of memory being
   referenced (the reference was BLKmode), so make the most pessimistic
   assumptions.

   We recognize the following cases of non-conflicting memory:

   (1) addresses involving the frame pointer cannot conflict
       with addresses involving static variables.
   (2) static variables with different addresses cannot conflict.

   It would be nice to notice that varying addresses cannot conflict
   with the frame pointer if no local variables had their addresses
   taken, but that's too hard now.  */

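/* As a worked example of the overlap test used below: with xsize == 4,
   ysize == 4, and Y == X + C, the references conflict for C == 2
   (since xsize > c) but not for C == 4, where Y starts exactly where X
   ends.  Negative C is the mirror image, tested against ysize.  */
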
static int
memrefs_conflict_p (xsize, x, ysize, y, c)
     rtx x, y;
     int xsize, ysize;
     HOST_WIDE_INT c;
{
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);
  else if (GET_CODE (x) == LO_SUM)
    x = XEXP (x, 1);
  else
    x = canon_rtx (x);
  if (GET_CODE (y) == HIGH)
    y = XEXP (y, 0);
  else if (GET_CODE (y) == LO_SUM)
    y = XEXP (y, 1);
  else
    y = canon_rtx (y);

  if (rtx_equal_for_memref_p (x, y))
    return (xsize == 0 || ysize == 0
            || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));

  if (y == frame_pointer_rtx || y == stack_pointer_rtx)
    {
      rtx t = y;
      int tsize = ysize;
      y = x; ysize = xsize;
      x = t; xsize = tsize;
    }

  if (x == frame_pointer_rtx || x == stack_pointer_rtx)
    {
      rtx y1;

      if (CONSTANT_P (y))
        return 0;

      if (GET_CODE (y) == PLUS
          && canon_rtx (XEXP (y, 0)) == x
          && (y1 = canon_rtx (XEXP (y, 1)))
          && GET_CODE (y1) == CONST_INT)
        {
          c += INTVAL (y1);
          return (xsize == 0 || ysize == 0
                  || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
        }

      if (GET_CODE (y) == PLUS
          && (y1 = canon_rtx (XEXP (y, 0)))
          && CONSTANT_P (y1))
        return 0;

      return 1;
    }

  if (GET_CODE (x) == PLUS)
    {
      /* The fact that X is canonicalized means that this
         PLUS rtx is canonicalized.  */
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (GET_CODE (y) == PLUS)
        {
          /* The fact that Y is canonicalized means that this
             PLUS rtx is canonicalized.  */
          rtx y0 = XEXP (y, 0);
          rtx y1 = XEXP (y, 1);

          if (rtx_equal_for_memref_p (x1, y1))
            return memrefs_conflict_p (xsize, x0, ysize, y0, c);
          if (rtx_equal_for_memref_p (x0, y0))
            return memrefs_conflict_p (xsize, x1, ysize, y1, c);
          if (GET_CODE (x1) == CONST_INT)
            {
              if (GET_CODE (y1) == CONST_INT)
                return memrefs_conflict_p (xsize, x0, ysize, y0,
                                           c - INTVAL (x1) + INTVAL (y1));
              else
                return memrefs_conflict_p (xsize, x0, ysize, y,
                                           c - INTVAL (x1));
            }
          else if (GET_CODE (y1) == CONST_INT)
            return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));

          /* Handle case where we cannot understand iteration operators,
             but we notice that the base addresses are distinct objects.  */
          x = find_symbolic_term (x);
          if (x == 0)
            return 1;
          y = find_symbolic_term (y);
          if (y == 0)
            return 1;
          return rtx_equal_for_memref_p (x, y);
        }
      else if (GET_CODE (x1) == CONST_INT)
        return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
    }
  else if (GET_CODE (y) == PLUS)
    {
      /* The fact that Y is canonicalized means that this
         PLUS rtx is canonicalized.  */
      rtx y0 = XEXP (y, 0);
      rtx y1 = XEXP (y, 1);

      if (GET_CODE (y1) == CONST_INT)
        return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
      else
        return 1;
    }

  if (GET_CODE (x) == GET_CODE (y))
    switch (GET_CODE (x))
      {
      case MULT:
        {
          /* Handle cases where we expect the second operands to be the
             same, and check only whether the first operand would conflict
             or not.  */
          rtx x0, y0;
          rtx x1 = canon_rtx (XEXP (x, 1));
          rtx y1 = canon_rtx (XEXP (y, 1));
          if (! rtx_equal_for_memref_p (x1, y1))
            return 1;
          x0 = canon_rtx (XEXP (x, 0));
          y0 = canon_rtx (XEXP (y, 0));
          if (rtx_equal_for_memref_p (x0, y0))
            return (xsize == 0 || ysize == 0
                    || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));

          /* Can't properly adjust our sizes.  */
          if (GET_CODE (x1) != CONST_INT)
            return 1;
          xsize /= INTVAL (x1);
          ysize /= INTVAL (x1);
          c /= INTVAL (x1);
          return memrefs_conflict_p (xsize, x0, ysize, y0, c);
        }
      }

  if (CONSTANT_P (x))
    {
      if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
        {
          c += (INTVAL (y) - INTVAL (x));
          return (xsize == 0 || ysize == 0
                  || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
        }

      if (GET_CODE (x) == CONST)
        {
          if (GET_CODE (y) == CONST)
            return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
                                       ysize, canon_rtx (XEXP (y, 0)), c);
          else
            return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
                                       ysize, y, c);
        }
      if (GET_CODE (y) == CONST)
        return memrefs_conflict_p (xsize, x, ysize,
                                   canon_rtx (XEXP (y, 0)), c);

      if (CONSTANT_P (y))
        return (rtx_equal_for_memref_p (x, y)
                && (xsize == 0 || ysize == 0
                    || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)));

      return 1;
    }
  return 1;
}

/* Functions to compute memory dependencies.

   Since we process the insns in execution order, we can build tables
   to keep track of what registers are fixed (and not aliased), what registers
   are varying in known ways, and what registers are varying in unknown
   ways.

   If both memory references are volatile, then there must always be a
   dependence between the two references, since their order can not be
   changed.  A volatile and non-volatile reference can be interchanged
   though.

   A MEM_IN_STRUCT reference at a varying address can never conflict with a
   non-MEM_IN_STRUCT reference at a fixed address.  */

/* Read dependence: X is read after read in MEM takes place.  There can
   only be a dependence here if both reads are volatile.  */

int
read_dependence (mem, x)
     rtx mem;
     rtx x;
{
  return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
}

/* True dependence: X is read after store in MEM takes place.  */

int
true_dependence (mem, x)
     rtx mem;
     rtx x;
{
  /* If X is an unchanging read, then it can't possibly conflict with any
     non-unchanging store.  It may conflict with an unchanging write though,
     because there may be a single store to this address to initialize it.
     Just fall through to the code below to resolve the case where we have
     both an unchanging read and an unchanging write.  This won't handle all
     cases optimally, but the possible performance loss should be
     negligible.  */
  if (RTX_UNCHANGING_P (x) && ! RTX_UNCHANGING_P (mem))
    return 0;

  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
          || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
                                  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
              && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
                    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
              && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
                    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}

/* Anti dependence: X is written after read in MEM takes place.  */

int
anti_dependence (mem, x)
     rtx mem;
     rtx x;
{
  /* If MEM is an unchanging read, then it can't possibly conflict with
     the store to X, because there is at most one store to MEM, and it must
     have occurred somewhere before MEM.  */
  if (RTX_UNCHANGING_P (mem))
    return 0;

  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
          || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
                                  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
              && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
                    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
              && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
                    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}

/* Output dependence: X is written after store in MEM takes place.  */

int
output_dependence (mem, x)
     rtx mem;
     rtx x;
{
  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
          || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
                                  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
              && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
                    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
              && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
                    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}
\f
/* Helper functions for instruction scheduling.  */

/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */

  next = NEXT_INSN (elem);

#ifdef HAVE_cc0
  while (next && GET_CODE (next) == NOTE)
    next = NEXT_INSN (next);
#endif

  if (next && SCHED_GROUP_P (next))
    {
      /* Notes will never intervene here though, so don't bother checking
         for them.  */
      while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next)))
        next = NEXT_INSN (next);

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
        return;

      /* Make the dependence to NEXT, the last insn of the group, instead
         of the original ELEM.  */
      elem = next;
    }

  /* Check that we don't already have this dependence.  */
  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    if (XEXP (link, 0) == elem)
      {
        /* If this is a more restrictive type of dependence than the existing
           one, then change the existing dependence to this type.  */
        if ((int) dep_type < (int) REG_NOTE_KIND (link))
          PUT_REG_NOTE_KIND (link, dep_type);
        return;
      }
  /* Might want to check one level of transitivity to save conses.  */

  link = rtx_alloc (INSN_LIST);
  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);
  XEXP (link, 0) = elem;
  XEXP (link, 1) = LOG_LINKS (insn);
  LOG_LINKS (insn) = link;
}
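
/* Illustrative usage of the replacement rule above: a first call
       add_dependence (i3, i1, REG_DEP_ANTI);
   records an anti-dependence link from i3 to i1.  A later call
       add_dependence (i3, i1, 0);
   does not add a second link; since 0 (a true data dependence) compares
   lower than REG_DEP_ANTI, the existing note is changed to the more
   restrictive kind.  (i1 and i3 are hypothetical insns.)  */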

/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */
void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link;
       prev = link, link = XEXP (link, 1))
    {
      if (XEXP (link, 0) == elem)
        {
          if (prev)
            XEXP (prev, 1) = XEXP (link, 1);
          else
            LOG_LINKS (insn) = XEXP (link, 1);
          found = 1;
        }
    }

  if (! found)
    abort ();
  return;
}
\f
#ifndef INSN_SCHEDULING
void schedule_insns () {}
#else
#ifndef __GNUC__
#define __inline
#endif

/* Computation of memory dependencies.  */

/* The *_insns and *_mems are paired lists.  Each pending memory operation
   will have a pointer to the MEM rtx on one list and a pointer to the
   containing insn on the other list in the same place in the list.  */

/* We can't use add_dependence like the old code did, because a single insn
   may have multiple memory accesses, and hence needs to be on the list
   once for each memory access.  Add_dependence won't let you add an insn
   to a list more than once.  */

/* An INSN_LIST containing all insns with pending read operations.  */
static rtx pending_read_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
static rtx pending_read_mems;

/* An INSN_LIST containing all insns with pending write operations.  */
static rtx pending_write_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
static rtx pending_write_mems;

/* Indicates the combined length of the two pending lists.  We must prevent
   these lists from ever growing too large since the number of dependencies
   produced is at least O(N*N), and execution time is at least O(4*N*N), as
   a function of the length of these pending lists.  */

static int pending_lists_length;

/* An INSN_LIST containing all INSN_LISTs allocated but currently unused.  */

static rtx unused_insn_list;

/* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused.  */

static rtx unused_expr_list;

/* The last insn upon which all memory references must depend.
   This is an insn which flushed the pending lists, creating a dependency
   between it and all previously pending memory references.  This creates
   a barrier (or a checkpoint) which no memory reference is allowed to cross.

   This includes all non-constant CALL_INSNs.  When we do interprocedural
   alias analysis, this restriction can be relaxed.
   This may also be an INSN that writes memory if the pending lists grow
   too large.  */

static rtx last_pending_memory_flush;

/* The last function call we have seen.  All hard regs, and, of course,
   the last function call, must depend on this.  */

static rtx last_function_call;

/* The LOG_LINKS field of this is a list of insns which use a pseudo register
   that does not already cross a call.  We create dependencies between each
   of those insns and the next call insn, to ensure that they won't cross a
   call after scheduling is done.  */
939
940static rtx sched_before_next_call;
941
942/* Pointer to the last instruction scheduled. Used by rank_for_schedule,
943 so that insns independent of the last scheduled insn will be preferred
944 over dependent instructions. */
945
946static rtx last_scheduled_insn;
947
948/* Process an insn's memory dependencies. There are four kinds of
949 dependencies:
950
951 (0) read dependence: read follows read
952 (1) true dependence: read follows write
953 (2) anti dependence: write follows read
954 (3) output dependence: write follows write
955
956 We are careful to build only dependencies which actually exist, and
957 use transitivity to avoid building too many links. */
958\f
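/* For instance, in the hypothetical sequence

       i1: r1 = mem[a]
       i2: mem[a] = r2
       i3: mem[a] = r3
       i4: r4 = mem[a]

   i2 is anti-dependent on i1 (write follows read), i3 is
   output-dependent on i2 (write follows write), and i4 is truly
   dependent on i3 (read follows write).  Two reads such as i1 and a
   later load from `a' need no link unless both are volatile.  */
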
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

__inline static rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
        return list;
      list = XEXP (list, 1);
    }
  return 0;
}

/* Compute the function units used by INSN.  This caches the value
   returned by function_units_used.  A function unit is encoded as the
   unit number if the value is non-negative and the complement of a
   mask if the value is negative.  A function unit index is the
   non-negative encoding.  */

__inline static int
insn_unit (insn)
     rtx insn;
{
  register int unit = INSN_UNIT (insn);

  if (unit == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.
         We can't pass these directly to function_units_used because it will
         trigger a fatal error for unrecognizable insns.  */
      if (INSN_CODE (insn) < 0)
        unit = -1;
      else
        {
          unit = function_units_used (insn);
          /* Increment non-negative values so we can cache zero.  */
          if (unit >= 0)
            unit++;
        }
      /* We only cache 16 bits of the result, so if the value is out of
         range, don't cache it.  */
      if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT
          || unit >= 0
          || (~unit & ~((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0)
        INSN_UNIT (insn) = unit;
    }
  return (unit > 0 ? unit - 1 : unit);
}
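
/* For example, an insn that needs only unit 2 is cached as 3 and
   returned as 2, while one that needs units 0 and 2 is encoded as the
   complement of the mask (1 << 0) | (1 << 2), i.e. ~5 == -6, which the
   callers below decode bit by bit.  */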

/* Compute the blockage range for executing INSN on UNIT.  This caches
   the value returned by the blockage_range_function for the unit.
   These values are encoded in an int where the upper half gives the
   minimum value and the lower half gives the maximum value.  */

__inline static unsigned int
blockage_range (unit, insn)
     int unit;
     rtx insn;
{
  unsigned int blockage = INSN_BLOCKAGE (insn);
  unsigned int range;

  if (UNIT_BLOCKED (blockage) != unit + 1)
    {
      range = function_units[unit].blockage_range_function (insn);
      /* We only cache the blockage range for one unit and then only if
         the values fit.  */
      if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS)
        INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range);
    }
  else
    range = BLOCKAGE_RANGE (blockage);

  return range;
}

/* A vector indexed by function unit instance giving the last insn to use
   the unit.  The value of the function unit instance index for unit U
   instance I is (U + I * FUNCTION_UNITS_SIZE).  */
static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit instance giving the minimum time when
   the unit will unblock based on the maximum blockage cost.  */
static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit number giving the number of insns
   that remain to use the unit.  */
static int unit_n_insns[FUNCTION_UNITS_SIZE];

/* Reset the function unit state to the null state.  */

static void
clear_units ()
{
  int unit;

  bzero (unit_last_insn, sizeof (unit_last_insn));
  bzero (unit_tick, sizeof (unit_tick));
  bzero (unit_n_insns, sizeof (unit_n_insns));
}

/* Record an insn as one that will use the units encoded by UNIT.  */

__inline static void
prepare_unit (unit)
     int unit;
{
  int i;

  if (unit >= 0)
    unit_n_insns[unit]++;
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        prepare_unit (i);
}

/* Return the actual hazard cost of executing INSN on the unit UNIT,
   instance INSTANCE at time CLOCK if the previous actual hazard cost
   was COST.  */

__inline static int
actual_hazard_this_instance (unit, instance, insn, clock, cost)
     int unit, instance, clock, cost;
     rtx insn;
{
  int i;
  int tick = unit_tick[instance];

  if (tick - clock > cost)
    {
      /* The scheduler is operating in reverse, so INSN is the executing
         insn and the unit's last insn is the candidate insn.  We want a
         more exact measure of the blockage if we execute INSN at CLOCK
         given when we committed the execution of the unit's last insn.

         The blockage value is given by either the unit's max blockage
         constant, blockage range function, or blockage function.  Use
         the most exact form for the given unit.  */

      if (function_units[unit].blockage_range_function)
        {
          if (function_units[unit].blockage_function)
            tick += (function_units[unit].blockage_function
                     (insn, unit_last_insn[instance])
                     - function_units[unit].max_blockage);
          else
            tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn))
                     - function_units[unit].max_blockage);
        }
      if (tick - clock > cost)
        cost = tick - clock;
    }
  return cost;
}

/* Record INSN as having begun execution on the units encoded by UNIT at
   time CLOCK.  */

__inline static void
schedule_unit (unit, insn, clock)
     int unit, clock;
     rtx insn;
{
  int i;

  if (unit >= 0)
    {
      int instance = unit;
#if MAX_MULTIPLICITY > 1
      /* Find the first free instance of the function unit and use that
         one.  We assume that one is free.  */
      for (i = function_units[unit].multiplicity - 1; i > 0; i--)
        {
          if (! actual_hazard_this_instance (unit, instance, insn, clock, 0))
            break;
          instance += FUNCTION_UNITS_SIZE;
        }
#endif
      unit_last_insn[instance] = insn;
      unit_tick[instance] = (clock + function_units[unit].max_blockage);
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        schedule_unit (i, insn, clock);
}

/* Return the actual hazard cost of executing INSN on the units encoded by
   UNIT at time CLOCK if the previous actual hazard cost was COST.  */

__inline static int
actual_hazard (unit, insn, clock, cost)
     int unit, clock, cost;
     rtx insn;
{
  int i;

  if (unit >= 0)
    {
      /* Find the instance of the function unit with the minimum hazard.  */
      int instance = unit;
      int best = instance;
      int best_cost = actual_hazard_this_instance (unit, instance, insn,
                                                   clock, cost);
      int this_cost;

#if MAX_MULTIPLICITY > 1
      if (best_cost > cost)
        {
          for (i = function_units[unit].multiplicity - 1; i > 0; i--)
            {
              instance += FUNCTION_UNITS_SIZE;
              this_cost = actual_hazard_this_instance (unit, instance, insn,
                                                       clock, cost);
              if (this_cost < best_cost)
                {
                  best = instance;
                  best_cost = this_cost;
                  if (this_cost <= cost)
                    break;
                }
            }
        }
#endif
      cost = MAX (cost, best_cost);
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        cost = actual_hazard (i, insn, clock, cost);

  return cost;
}

/* Return the potential hazard cost of executing an instruction on the
   units encoded by UNIT if the previous potential hazard cost was COST.
   An insn with a large blockage time is chosen in preference to one
   with a smaller time; an insn that uses a unit that is more likely
   to be used is chosen in preference to one with a unit that is less
   used.  We are trying to minimize a subsequent actual hazard.  */

__inline static int
potential_hazard (unit, insn, cost)
     int unit, cost;
     rtx insn;
{
  int i, ncost;
  unsigned int minb, maxb;

  if (unit >= 0)
    {
      minb = maxb = function_units[unit].max_blockage;
      if (maxb > 1)
        {
          if (function_units[unit].blockage_range_function)
            {
              maxb = minb = blockage_range (unit, insn);
              maxb = MAX_BLOCKAGE_COST (maxb);
              minb = MIN_BLOCKAGE_COST (minb);
            }

          if (maxb > 1)
            {
              /* Make the number of instructions left dominate.  Make the
                 minimum delay dominate the maximum delay.  If all these
                 are the same, use the unit number to add an arbitrary
                 ordering.  Other terms can be added.  */
              ncost = minb * 0x40 + maxb;
              ncost *= (unit_n_insns[unit] - 1) * 0x1000 + unit;
              if (ncost > cost)
                cost = ncost;
            }
        }
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        cost = potential_hazard (i, insn, cost);

  return cost;
}
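
/* A worked example of the ranking above (numbers purely illustrative):
   for a unit with minimum blockage 2 and maximum blockage 3, ncost
   starts at 2 * 0x40 + 3 == 0x83; with 4 insns left to issue on unit 1
   the total becomes 0x83 * (3 * 0x1000 + 1).  An insn whose unit has
   more pending users or a longer minimum delay therefore ranks as the
   greater potential hazard.  */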

/* Compute cost of executing INSN given the dependence LINK on the insn USED.
   This is the number of virtual cycles taken between instruction issue and
   instruction results.  */

__inline static int
insn_cost (insn, link, used)
     rtx insn, link, used;
{
  register int cost = INSN_COST (insn);

  if (cost == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.
         We can't pass these directly to result_ready_cost because it will
         trigger a fatal error for unrecognizable insns.  */
      if (INSN_CODE (insn) < 0)
        {
          INSN_COST (insn) = 1;
          return 1;
        }
      else
        {
          cost = result_ready_cost (insn);

          if (cost < 1)
            cost = 1;

          INSN_COST (insn) = cost;
        }
    }

  /* A USE insn should never require the value used to be computed.  This
     allows the computation of a function's result and parameter values to
     overlap the return and call.  */
  recog_memoized (used);
  if (INSN_CODE (used) < 0)
    LINK_COST_FREE (link) = 1;

  /* If some dependencies vary the cost, compute the adjustment.  Most
     commonly, the adjustment is complete: either the cost is ignored
     (in the case of an output- or anti-dependence), or the cost is
     unchanged.  These values are cached in the link as LINK_COST_FREE
     and LINK_COST_ZERO.  */

  if (LINK_COST_FREE (link))
    cost = 1;
#ifdef ADJUST_COST
  else if (! LINK_COST_ZERO (link))
    {
      int ncost = cost;

      ADJUST_COST (used, link, insn, ncost);
      if (ncost <= 1)
        LINK_COST_FREE (link) = ncost = 1;
      if (cost == ncost)
        LINK_COST_ZERO (link) = 1;
      cost = ncost;
    }
#endif
  return cost;
}

/* Compute the priority number for INSN.  */

static int
priority (insn)
     rtx insn;
{
  if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
    {
      int prev_priority;
      int max_priority;
      int this_priority = INSN_PRIORITY (insn);
      rtx prev;

      if (this_priority > 0)
        return this_priority;

      max_priority = 1;

      /* Nonzero if these insns must be scheduled together.  */
      if (SCHED_GROUP_P (insn))
        {
          prev = insn;
          while (SCHED_GROUP_P (prev))
            {
              prev = PREV_INSN (prev);
              INSN_REF_COUNT (prev) += 1;
            }
        }

      for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
        {
          rtx x = XEXP (prev, 0);

          /* A dependence pointing to a note is always obsolete, because
             sched_analyze_insn will have created any necessary new dependences
             which replace it.  Notes can be created when instructions are
             deleted by insn splitting, or by register allocation.  */
          if (GET_CODE (x) == NOTE)
            {
              remove_dependence (insn, x);
              continue;
            }

          /* Clear the link cost adjustment bits.  */
          LINK_COST_FREE (prev) = 0;
#ifdef ADJUST_COST
          LINK_COST_ZERO (prev) = 0;
#endif

          /* This priority calculation was chosen because it results in the
             least instruction movement, and does not hurt the performance
             of the resulting code compared to the old algorithm.
             This makes the sched algorithm more stable, which results
             in better code, because there is less register pressure,
             cross jumping is more likely to work, and debugging is easier.

             When all instructions have a latency of 1, there is no need to
             move any instructions.  Subtracting one here ensures that in such
             cases all instructions will end up with a priority of one, and
             hence no scheduling will be done.

             The original code did not subtract the one, and added the
             insn_cost of the current instruction to its priority (e.g.
             move the insn_cost call down to the end).  */

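          /* As a concrete illustration: for a dependence chain
             i1 -> i2 -> i3 where i1 has an insn_cost of 3 and the others 1,
             the recursion below gives priority (i1) == 1,
             priority (i2) == 1 + 3 - 1 == 3, and
             priority (i3) == 3 + 1 - 1 == 3: consumers of the long-latency
             insn inherit its latency, while unit-latency chains all stay at
             priority 1.  */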
          if (REG_NOTE_KIND (prev) == 0)
            /* Data dependence.  */
            prev_priority = priority (x) + insn_cost (x, prev, insn) - 1;
          else
            /* Anti or output dependence.  Don't add the latency of this
               insn's result, because it isn't being used.  */
            prev_priority = priority (x);

          if (prev_priority > max_priority)
            max_priority = prev_priority;
          INSN_REF_COUNT (x) += 1;
        }

      prepare_unit (insn_unit (insn));
      INSN_PRIORITY (insn) = max_priority;
      return INSN_PRIORITY (insn);
    }
  return 0;
}
\f
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
   them to the unused_*_list variables, so that they can be reused.  */

static void
free_pending_lists ()
{
  register rtx link, prev_link;

  if (pending_read_insns)
    {
      prev_link = pending_read_insns;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_read_insns;
      pending_read_insns = 0;
    }

  if (pending_write_insns)
    {
      prev_link = pending_write_insns;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_write_insns;
      pending_write_insns = 0;
    }

  if (pending_read_mems)
    {
      prev_link = pending_read_mems;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_read_mems;
      pending_read_mems = 0;
    }

  if (pending_write_mems)
    {
      prev_link = pending_write_mems;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_write_mems;
      pending_write_mems = 0;
    }
}

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (insn_list, mem_list, insn, mem)
     rtx *insn_list, *mem_list, insn, mem;
{
  register rtx link;

  if (unused_insn_list)
    {
      link = unused_insn_list;
      unused_insn_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (INSN_LIST);
  XEXP (link, 0) = insn;
  XEXP (link, 1) = *insn_list;
  *insn_list = link;

  if (unused_expr_list)
    {
      link = unused_expr_list;
      unused_expr_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (EXPR_LIST);
  XEXP (link, 0) = mem;
  XEXP (link, 1) = *mem_list;
  *mem_list = link;

  pending_lists_length++;
}
\f
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  */

static void
flush_pending_lists (insn)
     rtx insn;
{
  rtx link;

  while (pending_read_insns)
    {
      add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);

      link = pending_read_insns;
      pending_read_insns = XEXP (pending_read_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_read_mems;
      pending_read_mems = XEXP (pending_read_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  while (pending_write_insns)
    {
      add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);

      link = pending_write_insns;
      pending_write_insns = XEXP (pending_write_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_write_mems;
      pending_write_mems = XEXP (pending_write_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  pending_lists_length = 0;

  if (last_pending_memory_flush)
    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

  last_pending_memory_flush = insn;
}

/* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
   by the write to the destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (x, insn)
     rtx x;
     rtx insn;
{
  register int regno;
  register rtx dest = SET_DEST (x);

  if (dest == 0)
    return;

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (XEXP (dest, 1), insn);
          sched_analyze_2 (XEXP (dest, 2), insn);
        }
      dest = SUBREG_REG (dest);
    }

  if (GET_CODE (dest) == REG)
    {
      register int offset, bit, i;

      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
          while (--i >= 0)
            {
              rtx u;

              for (u = reg_last_uses[regno+i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              reg_last_uses[regno + i] = 0;
              if (reg_last_sets[regno + i])
                add_dependence (insn, reg_last_sets[regno + i],
                                REG_DEP_OUTPUT);
              reg_last_sets[regno + i] = insn;
              if ((call_used_regs[regno + i] || global_regs[regno + i])
                  && last_function_call)
                /* Function calls clobber all call_used regs.  */
                add_dependence (insn, last_function_call, REG_DEP_ANTI);
            }
        }
      else
        {
          rtx u;

          for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
          reg_last_uses[regno] = 0;
          if (reg_last_sets[regno])
            add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
          reg_last_sets[regno] = insn;

          /* Don't let it cross a call after scheduling if it doesn't
             already cross one.  */
          if (reg_n_calls_crossed[regno] == 0 && last_function_call)
            add_dependence (insn, last_function_call, REG_DEP_ANTI);
        }
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */

      if (pending_lists_length > 32)
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  The number 32 was chosen because it
             seems like a reasonable number.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the number 32.  */
          flush_pending_lists (insn);
        }
      else
        {
          rtx pending, pending_mem;

          pending = pending_read_insns;
          pending_mem = pending_read_mems;
          while (pending)
            {
              /* If a dependency already exists, don't create a new one.  */
              if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
                if (anti_dependence (XEXP (pending_mem, 0), dest))
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = pending_write_insns;
          pending_mem = pending_write_mems;
          while (pending)
            {
              /* If a dependency already exists, don't create a new one.  */
              if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
                if (output_dependence (XEXP (pending_mem, 0), dest))
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          if (last_pending_memory_flush)
            add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

          add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
                                   insn, dest);
        }
      sched_analyze_2 (XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (SET_SRC (x), insn);
}
1677
1678/* Analyze the uses of memory and registers in rtx X in INSN. */
1679
1680static void
1681sched_analyze_2 (x, insn)
1682 rtx x;
1683 rtx insn;
1684{
1685 register int i;
1686 register int j;
1687 register enum rtx_code code;
1688 register char *fmt;
1689
1690 if (x == 0)
1691 return;
1692
1693 code = GET_CODE (x);
1694
b85b8af2 1695 switch (code)
bd58420c 1696 {
b85b8af2
JW
1697 case CONST_INT:
1698 case CONST_DOUBLE:
1699 case SYMBOL_REF:
1700 case CONST:
1701 case LABEL_REF:
1702 /* Ignore constants. Note that we must handle CONST_DOUBLE here
1703 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
1704 this does not mean that this insn is using cc0. */
bd58420c 1705 return;
bd58420c 1706
b85b8af2
JW
1707#ifdef HAVE_cc0
1708 case CC0:
1709 {
a3ee5899 1710 rtx link, prev;
bd58420c 1711
a3ee5899 1712 /* There may be a note before this insn now, but all notes will
ea3fa5f7 1713 be removed before we actually try to schedule the insns, so
a3ee5899
JW
1714 it won't cause a problem later. We must avoid it here though. */
1715
d21d2fb8 1716 /* User of CC0 depends on immediately preceding insn. */
b85b8af2 1717 SCHED_GROUP_P (insn) = 1;
bd58420c 1718
a3ee5899
JW
1719 /* Make a copy of all dependencies on the immediately previous insn,
1720 and add to this insn. This is so that all the dependencies will
1721 apply to the group. */
1722
1723 prev = PREV_INSN (insn);
1724 while (GET_CODE (prev) == NOTE)
1725 prev = PREV_INSN (prev);
bd58420c 1726
a3ee5899 1727 for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
b85b8af2 1728 add_dependence (insn, XEXP (link, 0), GET_MODE (link));
bd58420c 1729
b85b8af2
JW
1730 return;
1731 }
1732#endif
1733
1734 case REG:
1735 {
1736 int regno = REGNO (x);
1737 if (regno < FIRST_PSEUDO_REGISTER)
1738 {
1739 int i;
1740
1741 i = HARD_REGNO_NREGS (regno, GET_MODE (x));
1742 while (--i >= 0)
1743 {
1744 reg_last_uses[regno + i]
1745 = gen_rtx (INSN_LIST, VOIDmode,
1746 insn, reg_last_uses[regno + i]);
1747 if (reg_last_sets[regno + i])
1748 add_dependence (insn, reg_last_sets[regno + i], 0);
1749 if ((call_used_regs[regno + i] || global_regs[regno + i])
1750 && last_function_call)
1751 /* Function calls clobber all call_used regs. */
1752 add_dependence (insn, last_function_call, REG_DEP_ANTI);
1753 }
1754 }
1755 else
1756 {
1757 reg_last_uses[regno]
1758 = gen_rtx (INSN_LIST, VOIDmode, insn, reg_last_uses[regno]);
1759 if (reg_last_sets[regno])
1760 add_dependence (insn, reg_last_sets[regno], 0);
1761
1762 /* If the register does not already cross any calls, then add this
1763 insn to the sched_before_next_call list so that it will still
1764 not cross calls after scheduling. */
1765 if (reg_n_calls_crossed[regno] == 0)
1766 add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
1767 }
1768 return;
1769 }
1770
1771 case MEM:
1772 {
1773 /* Reading memory. */
1774
1775 rtx pending, pending_mem;
1776
1777 pending = pending_read_insns;
1778 pending_mem = pending_read_mems;
1779 while (pending)
1780 {
1781 /* If a dependency already exists, don't create a new one. */
1782 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1783 if (read_dependence (XEXP (pending_mem, 0), x))
1784 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
1785
1786 pending = XEXP (pending, 1);
1787 pending_mem = XEXP (pending_mem, 1);
1788 }
1789
1790 pending = pending_write_insns;
1791 pending_mem = pending_write_mems;
1792 while (pending)
1793 {
1794 /* If a dependency already exists, don't create a new one. */
1795 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1796 if (true_dependence (XEXP (pending_mem, 0), x))
1797 add_dependence (insn, XEXP (pending, 0), 0);
1798
1799 pending = XEXP (pending, 1);
1800 pending_mem = XEXP (pending_mem, 1);
1801 }
1802 if (last_pending_memory_flush)
1803 add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
1804
1805 /* Always add these dependencies to pending_reads, since
1806 this insn may be followed by a write. */
1807 add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
1808 insn, x);
1809
1810 /* Take advantage of tail recursion here. */
1811 sched_analyze_2 (XEXP (x, 0), insn);
1812 return;
1813 }
1814
1815 case ASM_OPERANDS:
1816 case ASM_INPUT:
1817 case UNSPEC_VOLATILE:
1818 case TRAP_IF:
1819 {
1820 rtx u;
1821
1822 /* Traditional and volatile asm instructions must be considered to use
1823 and clobber all hard registers and all of memory. So must
1824 TRAP_IF and UNSPEC_VOLATILE operations. */
1825 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
1826 {
1827 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1828 {
1829 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1830 if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
1831 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1832 reg_last_uses[i] = 0;
1833 if (reg_last_sets[i]
1834 && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
1835 add_dependence (insn, reg_last_sets[i], 0);
1836 reg_last_sets[i] = insn;
1837 }
1838
1839 flush_pending_lists (insn);
1840 }
1841
1842 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
1843 We cannot just fall through here since then we would be confused
1844 by the ASM_INPUT rtxs inside ASM_OPERANDS, which do not indicate
1845 traditional asms as they do in their normal usage. */
1846
1847 if (code == ASM_OPERANDS)
1848 {
1849 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
1850 sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
1851 return;
1852 }
1853 break;
1854 }
1855
1856 case PRE_DEC:
1857 case POST_DEC:
1858 case PRE_INC:
1859 case POST_INC:
1860 /* These read and modify the result; just consider them writes. */
1861 sched_analyze_1 (x, insn);
1862 return;
1863 }
1864
1865 /* Other cases: walk the insn. */
1866 fmt = GET_RTX_FORMAT (code);
1867 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1868 {
1869 if (fmt[i] == 'e')
1870 sched_analyze_2 (XEXP (x, i), insn);
1871 else if (fmt[i] == 'E')
1872 for (j = 0; j < XVECLEN (x, i); j++)
1873 sched_analyze_2 (XVECEXP (x, i, j), insn);
1874 }
1875}
1876
1877/* Analyze an INSN with pattern X to find all dependencies. */
1878
1879static void
1880sched_analyze_insn (x, insn)
1881 rtx x, insn;
1882{
1883 register RTX_CODE code = GET_CODE (x);
1884 rtx link;
1885
1886 if (code == SET || code == CLOBBER)
1887 sched_analyze_1 (x, insn);
1888 else if (code == PARALLEL)
1889 {
1890 register int i;
1891 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1892 {
1893 code = GET_CODE (XVECEXP (x, 0, i));
1894 if (code == SET || code == CLOBBER)
1895 sched_analyze_1 (XVECEXP (x, 0, i), insn);
1896 else
1897 sched_analyze_2 (XVECEXP (x, 0, i), insn);
1898 }
1899 }
1900 else
1901 sched_analyze_2 (x, insn);
1902
1903 /* Handle function calls. */
1904 if (GET_CODE (insn) == CALL_INSN)
1905 {
1906 rtx dep_insn;
1907 rtx prev_dep_insn;
1908
1909 /* When scheduling instructions, we make sure calls don't lose their
1910 accompanying USE insns by making them depend on one another in order. */
1911
1912 prev_dep_insn = insn;
1913 dep_insn = PREV_INSN (insn);
1914 while (GET_CODE (dep_insn) == INSN
1915 && GET_CODE (PATTERN (dep_insn)) == USE)
1916 {
1917 SCHED_GROUP_P (prev_dep_insn) = 1;
1918
1919 /* Make a copy of all dependencies on dep_insn, and add to insn.
1920 This is so that all of the dependencies will apply to the
1921 group. */
1922
1923 for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
1924 add_dependence (insn, XEXP (link, 0), GET_MODE (link));
1925
1926 prev_dep_insn = dep_insn;
1927 dep_insn = PREV_INSN (dep_insn);
1928 }
1929 }
1930}
1931
1932/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1933 for every dependency. */
1934
1935static int
1936sched_analyze (head, tail)
1937 rtx head, tail;
1938{
1939 register rtx insn;
1940 register int n_insns = 0;
1941 register rtx u;
1942 register int luid = 0;
1943
1944 for (insn = head; ; insn = NEXT_INSN (insn))
1945 {
1946 INSN_LUID (insn) = luid++;
1947
1948 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
1949 {
1950 sched_analyze_insn (PATTERN (insn), insn);
1951 n_insns += 1;
1952 }
1953 else if (GET_CODE (insn) == CALL_INSN)
1954 {
1955 rtx dest = 0;
1956 rtx x;
1957 register int i;
1958
1959 /* Any instruction using a hard register which may get clobbered
1960 by a call needs to be marked as dependent on this call.
1961 This prevents a use of a hard return reg from being moved
1962 past a void call (i.e. it does not explicitly set the hard
1963 return reg). */
1964
1965 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1966 if (call_used_regs[i] || global_regs[i])
1967 {
1968 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1969 if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
1970 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1971 reg_last_uses[i] = 0;
1972 if (reg_last_sets[i]
1973 && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
1974 add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
1975 reg_last_sets[i] = insn;
1976 /* Insn, being a CALL_INSN, magically depends on
1977 `last_function_call' already. */
1978 }
1979
1980 /* For each insn which shouldn't cross a call, add a dependence
1981 between that insn and this call insn. */
1982 x = LOG_LINKS (sched_before_next_call);
1983 while (x)
1984 {
1985 add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
1986 x = XEXP (x, 1);
1987 }
1988 LOG_LINKS (sched_before_next_call) = 0;
1989
1990 sched_analyze_insn (PATTERN (insn), insn);
1991
1992 /* We don't need to flush memory for a function call which does
1993 not involve memory. */
1994 if (! CONST_CALL_P (insn))
1995 {
1996 /* In the absence of interprocedural alias analysis,
1997 we must flush all pending reads and writes, and
1998 start new dependencies starting from here. */
1999 flush_pending_lists (insn);
2000 }
2001
2002 /* Depend this function call (actually, the user of this
2003 function call) on all hard register clobberage. */
2004 last_function_call = insn;
2005 n_insns += 1;
2006 }
2007
2008 if (insn == tail)
2009 return n_insns;
2010 }
2011}
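/* Editor's note (not in the original sched.c): the INSN_LUID assigned
   above records each insn's position in the original order;
   rank_for_schedule below uses it as the final tie-breaker so the
   sort stays stable. */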
2012\f
2013/* Called when we see a set of a register. If death is true, then we are
2014 scanning backwards. Mark that register as unborn. If nobody says
2015 otherwise, that is how things will remain. If death is false, then we
2016 are scanning forwards. Mark that register as being born. */
2017
2018static void
2019sched_note_set (b, x, death)
2020 int b;
2021 rtx x;
2022 int death;
2023{
2024 register int regno, j;
2025 register rtx reg = SET_DEST (x);
2026 int subreg_p = 0;
2027
2028 if (reg == 0)
2029 return;
2030
2031 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
2032 || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
2033 {
2034 /* Must treat modification of just one hardware register of a multi-reg
2035 value or just a byte field of a register exactly the same way that
2036 mark_set_1 in flow.c does. */
2037 if (GET_CODE (reg) == ZERO_EXTRACT
2038 || GET_CODE (reg) == SIGN_EXTRACT
2039 || (GET_CODE (reg) == SUBREG
2040 && REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg)))
2041 subreg_p = 1;
2042
2043 reg = SUBREG_REG (reg);
2044 }
2045
2046 if (GET_CODE (reg) != REG)
2047 return;
2048
2049 /* Global registers are always live, so the code below does not apply
2050 to them. */
2051
2052 regno = REGNO (reg);
2053 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
2054 {
2055 register int offset = regno / REGSET_ELT_BITS;
2056 register REGSET_ELT_TYPE bit
2057 = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
2058
2059 if (death)
2060 {
2061 /* If we only set part of the register, then this set does not
2062 kill it. */
2063 if (subreg_p)
2064 return;
2065
2066 /* Try killing this register. */
2067 if (regno < FIRST_PSEUDO_REGISTER)
2068 {
2069 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
2070 while (--j >= 0)
2071 {
2072 offset = (regno + j) / REGSET_ELT_BITS;
2073 bit = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);
2074
2075 bb_live_regs[offset] &= ~bit;
2076 bb_dead_regs[offset] |= bit;
2077 }
2078 }
2079 else
2080 {
2081 bb_live_regs[offset] &= ~bit;
2082 bb_dead_regs[offset] |= bit;
2083 }
2084 }
2085 else
2086 {
2087 /* Make the register live again. */
2088 if (regno < FIRST_PSEUDO_REGISTER)
2089 {
2090 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
2091 while (--j >= 0)
2092 {
2093 offset = (regno + j) / REGSET_ELT_BITS;
2094 bit = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);
2095
2096 bb_live_regs[offset] |= bit;
2097 bb_dead_regs[offset] &= ~bit;
2098 }
2099 }
2100 else
2101 {
2102 bb_live_regs[offset] |= bit;
2103 bb_dead_regs[offset] &= ~bit;
2104 }
2105 }
2106 }
2107}
2108\f
2109/* Macros and functions for keeping the priority queue sorted, and
2110 dealing with queueing and unqueueing of instructions. */
2111
2112#define SCHED_SORT(READY, NEW_READY, OLD_READY) \
2113 do { if ((NEW_READY) - (OLD_READY) == 1) \
2114 swap_sort (READY, NEW_READY); \
2115 else if ((NEW_READY) - (OLD_READY) > 1) \
2116 qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); } \
2117 while (0)
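/* Editor's note (not in the original sched.c): SCHED_SORT assumes the
   first OLD_READY entries of READY are already in rank order.  If
   exactly one insn was appended, a single insertion pass (swap_sort)
   suffices; if several were appended, it falls back to a full qsort
   with rank_for_schedule as the comparator. */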
2118
2119/* Returns a positive value if y is preferred; returns a negative value if
2120 x is preferred. Should never return 0, since that will make the sort
2121 unstable. */
2122
2123static int
2124rank_for_schedule (x, y)
2125 rtx *x, *y;
2126{
2127 rtx tmp = *y;
2128 rtx tmp2 = *x;
2129 rtx link;
2130 int tmp_class, tmp2_class;
2131 int value;
2132
2133 /* Choose the instruction with the highest priority, if different. */
2134 if (value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2))
2135 return value;
2136
2137 if (last_scheduled_insn)
2138 {
2139 /* Classify the instructions into three classes:
2140 1) Data dependent on last scheduled insn.
2141 2) Anti/Output dependent on last scheduled insn.
2142 3) Independent of last scheduled insn, or has latency of one.
2143 Choose the insn from the highest numbered class if different. */
2144 link = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
2145 if (link == 0 || insn_cost (tmp, link, last_scheduled_insn) == 1)
2146 tmp_class = 3;
2147 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
2148 tmp_class = 1;
2149 else
2150 tmp_class = 2;
2151
2152 link = find_insn_list (tmp2, LOG_LINKS (last_scheduled_insn));
2153 if (link == 0 || insn_cost (tmp2, link, last_scheduled_insn) == 1)
2154 tmp2_class = 3;
2155 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
2156 tmp2_class = 1;
2157 else
2158 tmp2_class = 2;
2159
2160 if (value = tmp_class - tmp2_class)
2161 return value;
2162 }
2163
2164 /* If insns are equally good, sort by INSN_LUID (original insn order),
2165 so that we make the sort stable. This minimizes instruction movement,
2166 thus minimizing sched's effect on debugging and cross-jumping. */
2167 return INSN_LUID (tmp) - INSN_LUID (tmp2);
2168}
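/* Editor's sketch (not in the original sched.c): rank_for_schedule is
   a qsort-style comparator over rtx elements, invoked as

       qsort (ready, n_ready, sizeof (rtx), rank_for_schedule);

   qsort hands it pointers into the READY array, which is why both
   parameters are rtx * and are dereferenced before comparison; a
   negative result sorts *x (the preferred insn) toward ready[0]. */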
2169
2170/* Resort the array A in which only element at index N may be out of order. */
2171
2172__inline static void
2173swap_sort (a, n)
2174 rtx *a;
2175 int n;
2176{
2177 rtx insn = a[n-1];
2178 int i = n-2;
2179
2180 while (i >= 0 && rank_for_schedule (a+i, &insn) >= 0)
2181 {
2182 a[i+1] = a[i];
2183 i -= 1;
2184 }
2185 a[i+1] = insn;
2186}
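/* Editor's note (not in the original sched.c): swap_sort is one pass
   of insertion sort -- only a[n-1] can be out of place, so it is
   sifted down into the already-sorted prefix a[0..n-2]. */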
2187
2188static int max_priority;
2189
2190/* Add INSN to the insn queue so that it fires at least N_CYCLES
2191 before the currently executing insn. */
2192
2193__inline static void
2194queue_insn (insn, n_cycles)
2195 rtx insn;
2196 int n_cycles;
2197{
2198 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
2199 NEXT_INSN (insn) = insn_queue[next_q];
2200 insn_queue[next_q] = insn;
2201 q_size += 1;
2202}
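/* Editor's note (not in the original sched.c): insn_queue is a
   circular buffer indexed by NEXT_Q_AFTER (q_ptr, n_cycles); an insn
   queued with n_cycles == 2 is dumped back onto the ready list two
   iterations of the main scheduling loop from now.  Each slot chains
   its insns through NEXT_INSN, which is safe here because the insn
   chain is re-spliced after scheduling. */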
2203
2204/* Return nonzero if PAT is the pattern of an insn which makes a
2205 register live. */
2206
2207__inline static int
2208birthing_insn_p (pat)
2209 rtx pat;
2210{
2211 int j;
2212
2213 if (reload_completed == 1)
2214 return 0;
2215
2216 if (GET_CODE (pat) == SET
2217 && GET_CODE (SET_DEST (pat)) == REG)
2218 {
2219 rtx dest = SET_DEST (pat);
2220 int i = REGNO (dest);
2221 int offset = i / REGSET_ELT_BITS;
2222 REGSET_ELT_TYPE bit = (REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS);
2223
2224 /* It would be more accurate to use refers_to_regno_p or
2225 reg_mentioned_p to determine when the dest is not live before this
2226 insn. */
2227
2228 if (bb_live_regs[offset] & bit)
2229 return (reg_n_sets[i] == 1);
2230
2231 return 0;
2232 }
2233 if (GET_CODE (pat) == PARALLEL)
2234 {
2235 for (j = 0; j < XVECLEN (pat, 0); j++)
2236 if (birthing_insn_p (XVECEXP (pat, 0, j)))
2237 return 1;
2238 }
2239 return 0;
2240}
2241
2242/* PREV is an insn that is ready to execute. Adjust its priority if that
2243 will help shorten register lifetimes. */
2244
2245 __inline static void
2246 adjust_priority (prev)
2247 rtx prev;
2248{
2249 /* Trying to shorten register lives after reload has completed
2250 is useless and wrong. It gives inaccurate schedules. */
2251 if (reload_completed == 0)
2252 {
2253 rtx note;
2254 int n_deaths = 0;
2255
2256 for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
2257 if (REG_NOTE_KIND (note) == REG_DEAD)
2258 n_deaths += 1;
2259
2260 /* Defer scheduling insns which kill registers, since that
2261 shortens register lives. Prefer scheduling insns which
2262 make registers live for the same reason. */
2263 switch (n_deaths)
2264 {
2265 default:
2266 INSN_PRIORITY (prev) >>= 3;
2267 break;
2268 case 3:
2269 INSN_PRIORITY (prev) >>= 2;
2270 break;
2271 case 2:
2272 case 1:
2273 INSN_PRIORITY (prev) >>= 1;
2274 break;
2275 case 0:
2276 if (birthing_insn_p (PATTERN (prev)))
2277 {
2278 int max = max_priority;
2279
2280 if (max > INSN_PRIORITY (prev))
2281 INSN_PRIORITY (prev) = max;
2282 }
2283 break;
2284 }
2285 }
2286}
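/* Editor's worked example (not in the original sched.c): an insn
   whose REG_NOTES carry one or two REG_DEAD notes has its priority
   halved (>>= 1), three deaths divide it by four (>>= 2), four or
   more by eight (>>= 3), while a reg-birthing insn with no deaths is
   promoted to the highest priority currently on the ready list. */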
2287
2288/* INSN is the "currently executing insn". Launch each insn which was
2289 waiting on INSN (in the backwards dataflow sense). READY is a
2290 vector of insns which are ready to fire. N_READY is the number of
2291 elements in READY. CLOCK is the current virtual cycle. */
2292
2293static int
de19227a 2294schedule_insn (insn, ready, n_ready, clock)
bd58420c
RK
2295 rtx insn;
2296 rtx *ready;
2297 int n_ready;
2298 int clock;
2299{
2300 rtx link;
2301 int new_ready = n_ready;
2302
2303 if (MAX_BLOCKAGE > 1)
2304 schedule_unit (insn_unit (insn), insn, clock);
2305
2306 if (LOG_LINKS (insn) == 0)
2307 return n_ready;
2308
2309 /* This is used by the function adjust_priority above. */
2310 if (n_ready > 0)
2311 max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
2312 else
2313 max_priority = INSN_PRIORITY (insn);
2314
2315 for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
2316 {
2317 rtx prev = XEXP (link, 0);
2318 int cost = insn_cost (prev, link, insn);
2319
2320 if ((INSN_REF_COUNT (prev) -= 1) != 0)
2321 {
2322 /* We satisfied one requirement to fire PREV. Record the earliest
2323 time when PREV can fire. No need to do this if the cost is 1,
2324 because PREV can fire no sooner than the next cycle. */
2325 if (cost > 1)
2326 INSN_TICK (prev) = MAX (INSN_TICK (prev), clock + cost);
2327 }
2328 else
2329 {
2330 /* We satisfied the last requirement to fire PREV. Ensure that all
2331 timing requirements are satisfied. */
2332 if (INSN_TICK (prev) - clock > cost)
2333 cost = INSN_TICK (prev) - clock;
2334
2335 /* Adjust the priority of PREV and either put it on the ready
2336 list or queue it. */
2337 adjust_priority (prev);
2338 if (cost <= 1)
2339 ready[new_ready++] = prev;
2340 else
2341 queue_insn (prev, cost);
2342 }
2343 }
2344
2345 return new_ready;
2346}
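/* Editor's note (not in the original sched.c): INSN_REF_COUNT holds
   the number of still-unscheduled insns that depend on a given insn.
   Each time a successor is scheduled the count drops by one; only
   when it reaches zero is the insn made ready (cost <= 1) or queued
   for CLOCK + cost, INSN_TICK having accumulated the earliest cycle
   at which every successor's timing requirement is met. */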
2347
2348/* Given N_READY insns in the ready list READY at time CLOCK, queue
2349 those that are blocked due to function unit hazards and rearrange
2350 the remaining ones to minimize subsequent function unit hazards. */
2351
2352static int
2353schedule_select (ready, n_ready, clock, file)
2354 rtx *ready;
2355 int n_ready, clock;
2356 FILE *file;
2357{
2358 int pri = INSN_PRIORITY (ready[0]);
2359 int i, j, k, q, cost, best_cost, best_insn = 0, new_ready = n_ready;
2360 rtx insn;
2361
2362 /* Work down the ready list in groups of instructions with the same
2363 priority value. Queue insns in the group that are blocked and
2364 select among those that remain for the one with the largest
2365 potential hazard. */
2366 for (i = 0; i < n_ready; i = j)
2367 {
2368 int opri = pri;
2369 for (j = i + 1; j < n_ready; j++)
2370 if ((pri = INSN_PRIORITY (ready[j])) != opri)
2371 break;
2372
2373 /* Queue insns in the group that are blocked. */
2374 for (k = i, q = 0; k < j; k++)
2375 {
2376 insn = ready[k];
2377 if ((cost = actual_hazard (insn_unit (insn), insn, clock, 0)) != 0)
2378 {
2379 q++;
2380 ready[k] = 0;
2381 queue_insn (insn, cost);
2382 if (file)
2383 fprintf (file, "\n;; blocking insn %d for %d cycles",
2384 INSN_UID (insn), cost);
2385 }
2386 }
2387 new_ready -= q;
2388
2389 /* Check the next group if all insns were queued. */
2390 if (j - i - q == 0)
2391 continue;
2392
2393 /* If more than one remains, select the first one with the largest
2394 potential hazard. */
2395 else if (j - i - q > 1)
2396 {
2397 best_cost = -1;
2398 for (k = i; k < j; k++)
2399 {
2400 if ((insn = ready[k]) == 0)
2401 continue;
2402 if ((cost = potential_hazard (insn_unit (insn), insn, 0))
2403 > best_cost)
2404 {
2405 best_cost = cost;
2406 best_insn = k;
2407 }
2408 }
2409 }
2410 /* We have found a suitable insn to schedule. */
2411 break;
2412 }
2413
2414 /* Move the best insn to the front of the ready list. */
2415 if (best_insn != 0)
2416 {
2417 if (file)
2418 {
2419 fprintf (file, ", now");
2420 for (i = 0; i < n_ready; i++)
2421 if (ready[i])
2422 fprintf (file, " %d", INSN_UID (ready[i]));
2423 fprintf (file, "\n;; insn %d has a greater potential hazard",
2424 INSN_UID (ready[best_insn]));
2425 }
2426 for (i = best_insn; i > 0; i--)
2427 {
2428 insn = ready[i-1];
2429 ready[i-1] = ready[i];
2430 ready[i] = insn;
2431 }
2432 }
2433
2434 /* Compact the ready list. */
2435 if (new_ready < n_ready)
2436 for (i = j = 0; i < n_ready; i++)
2437 if (ready[i])
2438 ready[j++] = ready[i];
2439
2440 return new_ready;
2441}
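/* Editor's note (not in the original sched.c): if best_insn is still
   0 after the loop, either only one insn of the top priority group
   survived or the front insn already had the largest potential
   hazard, so no rotation of the ready list is needed. */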
2442
2443/* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
2444 dead_notes list. */
2445
2446static void
2447create_reg_dead_note (reg, insn)
2448 rtx reg, insn;
2449{
2450 rtx link = dead_notes;
2451
2452 if (link == 0)
2453 /* In theory, we should not end up with more REG_DEAD reg notes than we
2454 started with. In practice, this can occur as the result of bugs in
2455 flow, combine and/or sched. */
2456 {
2457#if 1
2458 abort ();
2459#else
2460 link = rtx_alloc (EXPR_LIST);
2461 PUT_REG_NOTE_KIND (link, REG_DEAD);
2462#endif
2463 }
2464 else
2465 dead_notes = XEXP (dead_notes, 1);
2466
2467 XEXP (link, 0) = reg;
2468 XEXP (link, 1) = REG_NOTES (insn);
2469 REG_NOTES (insn) = link;
2470}
2471
2472 /* Subroutine of attach_deaths_insn--handles the recursive search
2473 through INSN. If SET_P is true, then x is being modified by the insn. */
2474
2475static void
2476attach_deaths (x, insn, set_p)
2477 rtx x;
2478 rtx insn;
2479 int set_p;
2480{
2481 register int i;
2482 register int j;
2483 register enum rtx_code code;
2484 register char *fmt;
2485
2486 if (x == 0)
2487 return;
2488
2489 code = GET_CODE (x);
2490
2491 switch (code)
2492 {
2493 case CONST_INT:
2494 case CONST_DOUBLE:
2495 case LABEL_REF:
2496 case SYMBOL_REF:
2497 case CONST:
2498 case CODE_LABEL:
2499 case PC:
2500 case CC0:
2501 /* Get rid of the easy cases first. */
2502 return;
2503
2504 case REG:
2505 {
2506 /* If the register dies in this insn, queue that note, and mark
2507 this register as needing to die. */
2508 /* This code is very similar to mark_used_1 (if set_p is false)
2509 and mark_set_1 (if set_p is true) in flow.c. */
2510
2511 register int regno = REGNO (x);
2512 register int offset = regno / REGSET_ELT_BITS;
2513 register REGSET_ELT_TYPE bit
2514 = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
2515 REGSET_ELT_TYPE all_needed = (old_live_regs[offset] & bit);
2516 REGSET_ELT_TYPE some_needed = (old_live_regs[offset] & bit);
2517
2518 if (set_p)
2519 return;
2520
2521 if (regno < FIRST_PSEUDO_REGISTER)
2522 {
2523 int n;
2524
2525 n = HARD_REGNO_NREGS (regno, GET_MODE (x));
2526 while (--n > 0)
2527 {
2528 some_needed |= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
2529 & ((REGSET_ELT_TYPE) 1
2530 << ((regno + n) % REGSET_ELT_BITS)));
2531 all_needed &= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
2532 & ((REGSET_ELT_TYPE) 1
2533 << ((regno + n) % REGSET_ELT_BITS)));
2534 }
2535 }
2536
2537 /* If it wasn't live before we started, then add a REG_DEAD note.
2538 We must check the previous lifetime info not the current info,
2539 because we may have to execute this code several times, e.g.
2540 once for a clobber (which doesn't add a note) and later
2541 for a use (which does add a note).
2542
2543 Always make the register live. We must do this even if it was
2544 live before, because this may be an insn which sets and uses
2545 the same register, in which case the register has already been
2546 killed, so we must make it live again.
2547
2548 Global registers are always live, and should never have a REG_DEAD
2549 note added for them, so none of the code below applies to them. */
2550
2551 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
2552 {
2553 /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2554 STACK_POINTER_REGNUM, since these are always considered to be
2555 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2556 if (regno != FRAME_POINTER_REGNUM
2557#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2558 && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
2559#endif
2560 && regno != STACK_POINTER_REGNUM)
2561 {
2562 if (! all_needed && ! dead_or_set_p (insn, x))
2563 {
2564 /* If none of the words in X is needed, make a REG_DEAD
2565 note. Otherwise, we must make partial REG_DEAD
2566 notes. */
2567 if (! some_needed)
2568 create_reg_dead_note (x, insn);
2569 else
2570 {
2571 int i;
2572
2573 /* Don't make a REG_DEAD note for a part of a
2574 register that is set in the insn. */
2575 for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
2576 i >= 0; i--)
2577 if ((old_live_regs[(regno + i) / REGSET_ELT_BITS]
2578 & ((REGSET_ELT_TYPE) 1
2579 << ((regno + i) % REGSET_ELT_BITS))) == 0
2580 && ! dead_or_set_regno_p (insn, regno + i))
2581 create_reg_dead_note (gen_rtx (REG, word_mode,
2582 regno + i),
2583 insn);
2584 }
2585 }
2586 }
2587
2588 if (regno < FIRST_PSEUDO_REGISTER)
2589 {
2590 int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
2591 while (--j >= 0)
2592 {
2593 offset = (regno + j) / REGSET_ELT_BITS;
2594 bit
2595 = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);
2596
2597 bb_dead_regs[offset] &= ~bit;
2598 bb_live_regs[offset] |= bit;
2599 }
2600 }
2601 else
2602 {
2603 bb_dead_regs[offset] &= ~bit;
2604 bb_live_regs[offset] |= bit;
2605 }
2606 }
2607 return;
2608 }
2609
2610 case MEM:
2611 /* Handle tail-recursive case. */
2612 attach_deaths (XEXP (x, 0), insn, 0);
2613 return;
2614
2615 case SUBREG:
2616 case STRICT_LOW_PART:
2617 /* These two cases preserve the value of SET_P, so handle them
2618 separately. */
2619 attach_deaths (XEXP (x, 0), insn, set_p);
2620 return;
2621
2622 case ZERO_EXTRACT:
2623 case SIGN_EXTRACT:
2624 /* This case preserves the value of SET_P for the first operand, but
2625 clears it for the other two. */
2626 attach_deaths (XEXP (x, 0), insn, set_p);
2627 attach_deaths (XEXP (x, 1), insn, 0);
2628 attach_deaths (XEXP (x, 2), insn, 0);
2629 return;
2630
2631 default:
2632 /* Other cases: walk the insn. */
2633 fmt = GET_RTX_FORMAT (code);
2634 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2635 {
2636 if (fmt[i] == 'e')
2637 attach_deaths (XEXP (x, i), insn, 0);
2638 else if (fmt[i] == 'E')
2639 for (j = 0; j < XVECLEN (x, i); j++)
2640 attach_deaths (XVECEXP (x, i, j), insn, 0);
2641 }
2642 }
2643}
2644
2645/* After INSN has executed, add register death notes for each register
2646 that is dead after INSN. */
2647
2648static void
2649attach_deaths_insn (insn)
2650 rtx insn;
2651{
2652 rtx x = PATTERN (insn);
2653 register RTX_CODE code = GET_CODE (x);
2654
2655 if (code == SET)
2656 {
2657 attach_deaths (SET_SRC (x), insn, 0);
2658
2659 /* A register might die here even if it is the destination, e.g.
2660 it is the target of a volatile read and is otherwise unused.
2661 Hence we must always call attach_deaths for the SET_DEST. */
2662 attach_deaths (SET_DEST (x), insn, 1);
2663 }
2664 else if (code == PARALLEL)
2665 {
2666 register int i;
2667 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2668 {
2669 code = GET_CODE (XVECEXP (x, 0, i));
2670 if (code == SET)
2671 {
2672 attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
2673
2674 attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
2675 }
2676 /* Flow does not add REG_DEAD notes to registers that die in
2677 clobbers, so we can't either. */
2678 else if (code != CLOBBER)
2679 attach_deaths (XVECEXP (x, 0, i), insn, 0);
2680 }
2681 }
2682 /* Flow does not add REG_DEAD notes to registers that die in
2683 clobbers, so we can't either. */
2684 else if (code != CLOBBER)
2685 attach_deaths (x, insn, 0);
2686}
2687
2688/* Delete notes beginning with INSN and maybe put them in the chain
2689 of notes ended by NOTE_LIST.
2690 Returns the insn following the notes. */
2691
2692static rtx
2693unlink_notes (insn, tail)
2694 rtx insn, tail;
2695{
2696 rtx prev = PREV_INSN (insn);
2697
2698 while (insn != tail && GET_CODE (insn) == NOTE)
2699 {
2700 rtx next = NEXT_INSN (insn);
2701 /* Delete the note from its current position. */
2702 if (prev)
2703 NEXT_INSN (prev) = next;
2704 if (next)
2705 PREV_INSN (next) = prev;
2706
2707 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
2708 /* Record line-number notes so they can be reused. */
2709 LINE_NOTE (insn) = insn;
2710 else
2711 {
2712 /* Insert the note at the end of the notes list. */
2713 PREV_INSN (insn) = note_list;
2714 if (note_list)
2715 NEXT_INSN (note_list) = insn;
2716 note_list = insn;
2717 }
2718
2719 insn = next;
2720 }
2721 return insn;
2722}
2723
2724/* Data structure for keeping track of register information
2725 during that register's life. */
2726
2727struct sometimes
2728{
2729 short offset; short bit;
2730 short live_length; short calls_crossed;
2731};
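/* Editor's note (not in the original sched.c): a register number is
   reconstructed from a `sometimes' entry as

       regno = p->offset * REGSET_ELT_BITS + p->bit;

   i.e. OFFSET selects the regset word and BIT the position within
   it, matching the bb_live_regs/bb_dead_regs bitmap layout above. */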
2732
2733/* Constructor for `sometimes' data structure. */
2734
2735static int
2736new_sometimes_live (regs_sometimes_live, offset, bit, sometimes_max)
2737 struct sometimes *regs_sometimes_live;
2738 int offset, bit;
2739 int sometimes_max;
2740{
2741 register struct sometimes *p;
2742 register int regno = offset * REGSET_ELT_BITS + bit;
2743 int i;
2744
2745 /* There should never be a register greater than max_regno here. If there
2746 is, it means that a define_split has created a new pseudo reg. This
2747 is not allowed, since there will not be flow info available for any
2748 new register, so catch the error here. */
2749 if (regno >= max_regno)
2750 abort ();
2751
2752 p = &regs_sometimes_live[sometimes_max];
2753 p->offset = offset;
2754 p->bit = bit;
2755 p->live_length = 0;
2756 p->calls_crossed = 0;
2757 sometimes_max++;
2758 return sometimes_max;
2759}
2760
2761/* Count lengths of all regs we are currently tracking,
2762 and find new registers no longer live. */
2763
2764static void
2765finish_sometimes_live (regs_sometimes_live, sometimes_max)
2766 struct sometimes *regs_sometimes_live;
2767 int sometimes_max;
2768{
2769 int i;
2770
2771 for (i = 0; i < sometimes_max; i++)
2772 {
2773 register struct sometimes *p = &regs_sometimes_live[i];
2774 int regno;
2775
2776 regno = p->offset * REGSET_ELT_BITS + p->bit;
2777
2778 sched_reg_live_length[regno] += p->live_length;
2779 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2780 }
2781}
2782
2783/* Use modified list scheduling to rearrange insns in basic block
2784 B. FILE, if nonzero, is where we dump interesting output about
2785 this pass. */
2786
2787static void
2788schedule_block (b, file)
2789 int b;
2790 FILE *file;
2791{
2792 rtx insn, last;
2793 rtx last_note = 0;
2794 rtx *ready, link;
2795 int i, j, n_ready = 0, new_ready, n_insns = 0;
2796 int sched_n_insns = 0;
2797 int clock;
2798#define NEED_NOTHING 0
2799#define NEED_HEAD 1
2800#define NEED_TAIL 2
2801 int new_needs;
2802
2803 /* HEAD and TAIL delimit the region being scheduled. */
2804 rtx head = basic_block_head[b];
2805 rtx tail = basic_block_end[b];
2806 /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
2807 being scheduled. When the insns have been ordered,
2808 these insns delimit where the new insns are to be
2809 spliced back into the insn chain. */
2810 rtx next_tail;
2811 rtx prev_head;
2812
2813 /* Keep life information accurate. */
2814 register struct sometimes *regs_sometimes_live;
2815 int sometimes_max;
2816
2817 if (file)
2818 fprintf (file, ";;\t -- basic block number %d from %d to %d --\n",
2819 b, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
2820
2821 i = max_reg_num ();
2822 reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
2823 bzero (reg_last_uses, i * sizeof (rtx));
2824 reg_last_sets = (rtx *) alloca (i * sizeof (rtx));
2825 bzero (reg_last_sets, i * sizeof (rtx));
2826 clear_units ();
2827
2828 /* Remove certain insns at the beginning from scheduling,
2829 by advancing HEAD. */
2830
2831 /* At the start of a function, before reload has run, don't delay getting
2832 parameters from hard registers into pseudo registers. */
2833 if (reload_completed == 0 && b == 0)
2834 {
2835 while (head != tail
2836 && GET_CODE (head) == NOTE
2837 && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
2838 head = NEXT_INSN (head);
2839 while (head != tail
2840 && GET_CODE (head) == INSN
2841 && GET_CODE (PATTERN (head)) == SET)
2842 {
2843 rtx src = SET_SRC (PATTERN (head));
2844 while (GET_CODE (src) == SUBREG
2845 || GET_CODE (src) == SIGN_EXTEND
2846 || GET_CODE (src) == ZERO_EXTEND
2847 || GET_CODE (src) == SIGN_EXTRACT
2848 || GET_CODE (src) == ZERO_EXTRACT)
2849 src = XEXP (src, 0);
2850 if (GET_CODE (src) != REG
2851 || REGNO (src) >= FIRST_PSEUDO_REGISTER)
2852 break;
2853 /* Keep this insn from ever being scheduled. */
2854 INSN_REF_COUNT (head) = 1;
2855 head = NEXT_INSN (head);
2856 }
2857 }
2858
2859 /* Don't include any notes or labels at the beginning of the
2860 basic block, or notes at the ends of basic blocks. */
2861 while (head != tail)
2862 {
2863 if (GET_CODE (head) == NOTE)
2864 head = NEXT_INSN (head);
2865 else if (GET_CODE (tail) == NOTE)
2866 tail = PREV_INSN (tail);
2867 else if (GET_CODE (head) == CODE_LABEL)
2868 head = NEXT_INSN (head);
2869 else break;
2870 }
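/* Editor's note (not in the original sched.c): the loop above trims
   NOTEs from both ends of the region and CODE_LABELs from the front,
   so HEAD..TAIL is left holding only real insns; the degenerate
   one-element case is handled just below. */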
2871 /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
2872 to schedule this block. */
2873 if (head == tail
2874 && (GET_CODE (head) == NOTE || GET_CODE (head) == CODE_LABEL))
2875 return;
2876
2877#if 0
2878 /* This short-cut doesn't work. It does not count call insns crossed by
2879 registers in reg_sometimes_live. It does not mark these registers as
2880 dead if they die in this block. It does not mark these registers live
2881 (or create new reg_sometimes_live entries if necessary) if they are born
2882 in this block.
2883
2884 The easy solution is to just always schedule a block. This block only
2885 has one insn, so this won't slow down this pass by much. */
2886
2887 if (head == tail)
2888 return;
2889#endif
2890
2891 /* Now HEAD through TAIL are the insns actually to be rearranged;
2892 Let PREV_HEAD and NEXT_TAIL enclose them. */
2893 prev_head = PREV_INSN (head);
2894 next_tail = NEXT_INSN (tail);
2895
2896 /* Initialize basic block data structures. */
2897 dead_notes = 0;
2898 pending_read_insns = 0;
2899 pending_read_mems = 0;
2900 pending_write_insns = 0;
2901 pending_write_mems = 0;
2902 pending_lists_length = 0;
2903 last_pending_memory_flush = 0;
2904 last_function_call = 0;
2905 last_scheduled_insn = 0;
2906
2907 LOG_LINKS (sched_before_next_call) = 0;
2908
2909 n_insns += sched_analyze (head, tail);
2910 if (n_insns == 0)
2911 {
2912 free_pending_lists ();
2913 return;
2914 }
2915
2916 /* Allocate vector to hold insns to be rearranged (except those
2917 insns which are controlled by an insn with SCHED_GROUP_P set).
2918 All these insns are included between ORIG_HEAD and ORIG_TAIL,
2919 as those variables ultimately are set up. */
2920 ready = (rtx *) alloca ((n_insns+1) * sizeof (rtx));
2921
2922 /* TAIL is now the last of the insns to be rearranged.
2923 Put those insns into the READY vector. */
2924 insn = tail;
2925
2926 /* For all branches, calls, uses, and cc0 setters, force them to remain
2927 in order at the end of the block by adding dependencies and giving
2928 the last a high priority. There may be notes present, and prev_head
2929 may also be a note.
2930
2931 Branches must obviously remain at the end. Calls should remain at the
2932 end since moving them results in worse register allocation. Uses remain
2933 at the end to ensure proper register allocation. cc0 setters remain
2934 at the end because they can't be moved away from their cc0 user. */
2935 last = 0;
2936 while (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
2937 || (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE)
2938#ifdef HAVE_cc0
2939 || sets_cc0_p (PATTERN (insn))
2940#endif
2941 || GET_CODE (insn) == NOTE)
bd58420c 2942 {
2943 if (GET_CODE (insn) != NOTE)
2944 {
2945 priority (insn);
2946 if (last == 0)
2947 {
2948 ready[n_ready++] = insn;
2949 INSN_PRIORITY (insn) = TAIL_PRIORITY - i;
2950 INSN_REF_COUNT (insn) = 0;
2951 }
2952 else if (! find_insn_list (insn, LOG_LINKS (last)))
2953 {
2954 add_dependence (last, insn, REG_DEP_ANTI);
2955 INSN_REF_COUNT (insn)++;
2956 }
2957 last = insn;
2958
2959 /* Skip over insns that are part of a group. */
2960 while (SCHED_GROUP_P (insn))
2961 {
2962 insn = prev_nonnote_insn (insn);
2963 priority (insn);
2964 }
2965 }
2966
2967 insn = PREV_INSN (insn);
2968 /* Don't overrun the bounds of the basic block. */
2969 if (insn == prev_head)
2970 break;
2971 }
2972
2973 /* Assign priorities to instructions. Also check whether they
2974 are in priority order already. If so then I will be nonnegative.
2975 We use this shortcut only before reloading. */
2976#if 0
2977 i = reload_completed ? DONE_PRIORITY : MAX_PRIORITY;
2978#endif
2979
2980 for (; insn != prev_head; insn = PREV_INSN (insn))
2981 {
2982 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2983 {
2984 priority (insn);
2985 if (INSN_REF_COUNT (insn) == 0)
2986 {
2987 if (last == 0)
2988 ready[n_ready++] = insn;
2989 else
2990 {
2991 /* Make this dependent on the last of the instructions
2992 that must remain in order at the end of the block. */
2993 add_dependence (last, insn, REG_DEP_ANTI);
2994 INSN_REF_COUNT (insn) = 1;
2995 }
2996 }
2997 if (SCHED_GROUP_P (insn))
2998 {
2999 while (SCHED_GROUP_P (insn))
3000 {
3001 insn = PREV_INSN (insn);
3002 while (GET_CODE (insn) == NOTE)
3003 insn = PREV_INSN (insn);
3004 priority (insn);
3005 }
3006 continue;
3007 }
3008#if 0
3009 if (i < 0)
3010 continue;
3011 if (INSN_PRIORITY (insn) < i)
3012 i = INSN_PRIORITY (insn);
3013 else if (INSN_PRIORITY (insn) > i)
3014 i = DONE_PRIORITY;
3015#endif
3016 }
3017 }
3018
3019#if 0
3020 /* This short-cut doesn't work. It does not count call insns crossed by
3021 registers in reg_sometimes_live. It does not mark these registers as
3022 dead if they die in this block. It does not mark these registers live
3023 (or create new reg_sometimes_live entries if necessary) if they are born
3024 in this block.
3025
3026 The easy solution is to just always schedule a block. These blocks tend
3027 to be very short, so this doesn't slow down this pass by much. */
3028
3029 /* If existing order is good, don't bother to reorder. */
3030 if (i != DONE_PRIORITY)
3031 {
3032 if (file)
3033 fprintf (file, ";; already scheduled\n");
3034
3035 if (reload_completed == 0)
3036 {
3037 for (i = 0; i < sometimes_max; i++)
3038 regs_sometimes_live[i].live_length += n_insns;
3039
3040 finish_sometimes_live (regs_sometimes_live, sometimes_max);
3041 }
3042 free_pending_lists ();
3043 return;
3044 }
3045#endif
3046
3047 /* Scan all the insns to be scheduled, removing NOTE insns
3048 and register death notes.
3049 Line number NOTE insns end up in NOTE_LIST.
3050 Register death notes end up in DEAD_NOTES.
3051
3052 Recreate the register life information for the end of this basic
3053 block. */
3054
3055 if (reload_completed == 0)
3056 {
3057 bcopy (basic_block_live_at_start[b], bb_live_regs, regset_bytes);
3058 bzero (bb_dead_regs, regset_bytes);
3059
3060 if (b == 0)
3061 {
3062 /* This is the first block in the function. There may be insns
3063 before head that we can't schedule. We still need to examine
3064 them though for accurate register lifetime analysis. */
3065
3066 /* We don't want to remove any REG_DEAD notes as the code below
3067 does. */
3068
3069 for (insn = basic_block_head[b]; insn != head;
3070 insn = NEXT_INSN (insn))
3071 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3072 {
3073 /* See if the register gets born here. */
3074 /* We must check for registers being born before we check for
3075 registers dying. It is possible for a register to be born
3076 and die in the same insn, e.g. reading from a volatile
3077 memory location into an otherwise unused register. Such
3078 a register must be marked as dead after this insn. */
3079 if (GET_CODE (PATTERN (insn)) == SET
3080 || GET_CODE (PATTERN (insn)) == CLOBBER)
3081 sched_note_set (b, PATTERN (insn), 0);
3082 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3083 {
3084 int j;
3085 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3086 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3087 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3088 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3089
3090 /* ??? This code is obsolete and should be deleted. It
3091 is harmless though, so we will leave it in for now. */
3092 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3093 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
3094 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3095 }
3096
3097 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
3098 {
3099 if ((REG_NOTE_KIND (link) == REG_DEAD
3100 || REG_NOTE_KIND (link) == REG_UNUSED)
3101 /* Verify that the REG_NOTE has a legal value. */
3102 && GET_CODE (XEXP (link, 0)) == REG)
3103 {
3104 register int regno = REGNO (XEXP (link, 0));
3105 register int offset = regno / REGSET_ELT_BITS;
3106 register REGSET_ELT_TYPE bit
3107 = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
3108
3109 if (regno < FIRST_PSEUDO_REGISTER)
3110 {
3111 int j = HARD_REGNO_NREGS (regno,
3112 GET_MODE (XEXP (link, 0)));
3113 while (--j >= 0)
3114 {
3115 offset = (regno + j) / REGSET_ELT_BITS;
3116 bit = ((REGSET_ELT_TYPE) 1
3117 << ((regno + j) % REGSET_ELT_BITS));
3118
3119 bb_live_regs[offset] &= ~bit;
3120 bb_dead_regs[offset] |= bit;
3121 }
3122 }
3123 else
3124 {
3125 bb_live_regs[offset] &= ~bit;
3126 bb_dead_regs[offset] |= bit;
3127 }
3128 }
3129 }
3130 }
3131 }
3132 }
3133
3134 /* If debugging information is being produced, keep track of the line
3135 number notes for each insn. */
3136 if (write_symbols != NO_DEBUG)
3137 {
3138 /* We must use the true line number for the first insn in the block
3139 that was computed and saved at the start of this pass. We can't
3140 use the current line number, because scheduling of the previous
3141 block may have changed the current line number. */
3142 rtx line = line_note_head[b];
3143
3144 for (insn = basic_block_head[b];
3145 insn != next_tail;
3146 insn = NEXT_INSN (insn))
3147 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3148 line = insn;
3149 else
3150 LINE_NOTE (insn) = line;
3151 }
3152
3153 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3154 {
3155 rtx prev, next, link;
3156
3157 /* Farm out notes. This is needed to keep the debugger from
3158 getting completely deranged. */
3159 if (GET_CODE (insn) == NOTE)
3160 {
3161 prev = insn;
3162 insn = unlink_notes (insn, next_tail);
3163 if (prev == tail)
3164 abort ();
3165 if (prev == head)
3166 abort ();
3167 if (insn == next_tail)
3168 abort ();
3169 }
3170
3171 if (reload_completed == 0
3172 && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3173 {
3174 /* See if the register gets born here. */
3175 /* We must check for registers being born before we check for
3176 registers dying. It is possible for a register to be born and
3177 die in the same insn, e.g. reading from a volatile memory
3178 location into an otherwise unused register. Such a register
3179 must be marked as dead after this insn. */
3180 if (GET_CODE (PATTERN (insn)) == SET
3181 || GET_CODE (PATTERN (insn)) == CLOBBER)
3182 sched_note_set (b, PATTERN (insn), 0);
3183 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3184 {
3185 int j;
3186 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3187 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3188 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3189 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3190
3191 /* ??? This code is obsolete and should be deleted. It
3192 is harmless though, so we will leave it in for now. */
3193 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3194 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
3195 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3196 }
3197
3198 /* Need to know what registers this insn kills. */
3199 for (prev = 0, link = REG_NOTES (insn); link; link = next)
3200 {
3201 int regno;
3202
3203 next = XEXP (link, 1);
3204 if ((REG_NOTE_KIND (link) == REG_DEAD
3205 || REG_NOTE_KIND (link) == REG_UNUSED)
3206 /* Verify that the REG_NOTE has a legal value. */
3207 && GET_CODE (XEXP (link, 0)) == REG)
3208 {
3209 register int regno = REGNO (XEXP (link, 0));
3210 register int offset = regno / REGSET_ELT_BITS;
3211 register REGSET_ELT_TYPE bit
3212 = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
3213
3214 /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
3215 alone. */
3216 if (REG_NOTE_KIND (link) == REG_DEAD)
3217 {
3218 if (prev)
3219 XEXP (prev, 1) = next;
3220 else
3221 REG_NOTES (insn) = next;
3222 XEXP (link, 1) = dead_notes;
3223 dead_notes = link;
3224 }
3225 else
3226 prev = link;
3227
3228 if (regno < FIRST_PSEUDO_REGISTER)
3229 {
3230 int j = HARD_REGNO_NREGS (regno,
3231 GET_MODE (XEXP (link, 0)));
3232 while (--j >= 0)
3233 {
3234 offset = (regno + j) / REGSET_ELT_BITS;
3235 bit = ((REGSET_ELT_TYPE) 1
3236 << ((regno + j) % REGSET_ELT_BITS));
3237
3238 bb_live_regs[offset] &= ~bit;
3239 bb_dead_regs[offset] |= bit;
3240 }
3241 }
3242 else
3243 {
3244 bb_live_regs[offset] &= ~bit;
3245 bb_dead_regs[offset] |= bit;
3246 }
3247 }
3248 else
3249 prev = link;
3250 }
3251 }
3252 }
3253
3254 if (reload_completed == 0)
3255 {
3256 /* Keep track of register lives. */
3257 old_live_regs = (regset) alloca (regset_bytes);
3258 regs_sometimes_live
3259 = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
3260 sometimes_max = 0;
3261
3262 /* Start with registers live at end. */
3263 for (j = 0; j < regset_size; j++)
3264 {
3265 REGSET_ELT_TYPE live = bb_live_regs[j];
3266 old_live_regs[j] = live;
3267 if (live)
3268 {
3269 register REGSET_ELT_TYPE bit;
bd58420c 3270 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
3271 if (live & ((REGSET_ELT_TYPE) 1 << bit))
3272 sometimes_max = new_sometimes_live (regs_sometimes_live, j,
3273 bit, sometimes_max);
3274 }
3275 }
3276 }
3277
3278 SCHED_SORT (ready, n_ready, 1);
3279
3280 if (file)
3281 {
3282 fprintf (file, ";; ready list initially:\n;; ");
3283 for (i = 0; i < n_ready; i++)
3284 fprintf (file, "%d ", INSN_UID (ready[i]));
3285 fprintf (file, "\n\n");
3286
3287 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3288 if (INSN_PRIORITY (insn) > 0)
3289 fprintf (file, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
3290 INSN_UID (insn), INSN_PRIORITY (insn),
3291 INSN_REF_COUNT (insn));
3292 }
3293
3294 /* Now HEAD and TAIL are going to become disconnected
3295 entirely from the insn chain. */
3296 tail = 0;
3297
3298 /* Q_SIZE will always be zero here. */
3299 q_ptr = 0; clock = 0;
3300 bzero (insn_queue, sizeof (insn_queue));
3301
3302 /* Now, perform list scheduling. */
3303
3304 /* Where we start inserting insns is after TAIL. */
3305 last = next_tail;
3306
3307 new_needs = (NEXT_INSN (prev_head) == basic_block_head[b]
3308 ? NEED_HEAD : NEED_NOTHING);
3309 if (PREV_INSN (next_tail) == basic_block_end[b])
3310 new_needs |= NEED_TAIL;
3311
3312 new_ready = n_ready;
3313 while (sched_n_insns < n_insns)
3314 {
3315 q_ptr = NEXT_Q (q_ptr); clock++;
3316
3317 /* Add all pending insns that can be scheduled without stalls to the
3318 ready list. */
3319 for (insn = insn_queue[q_ptr]; insn; insn = NEXT_INSN (insn))
3320 {
3321 if (file)
3322 fprintf (file, ";; launching %d before %d with no stalls at T-%d\n",
3323 INSN_UID (insn), INSN_UID (last), clock);
3324 ready[new_ready++] = insn;
3325 q_size -= 1;
3326 }
3327 insn_queue[q_ptr] = 0;
3328
3329 /* If there are no ready insns, stall until one is ready and add all
3330 of the pending insns at that point to the ready list. */
3331 if (new_ready == 0)
3332 {
3333 register int stalls;
3334
3335 for (stalls = 1; stalls < INSN_QUEUE_SIZE; stalls++)
3336 if (insn = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)])
3337 {
3338 for (; insn; insn = NEXT_INSN (insn))
3339 {
3340 if (file)
3341 fprintf (file, ";; launching %d before %d with %d stalls at T-%d\n",
3342 INSN_UID (insn), INSN_UID (last), stalls, clock);
3343 ready[new_ready++] = insn;
3344 q_size -= 1;
3345 }
3346 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
3347 break;
3348 }
3349
3350 q_ptr = NEXT_Q_AFTER (q_ptr, stalls); clock += stalls;
3351 }
3352
3353 /* There should be some instructions waiting to fire. */
3354 if (new_ready == 0)
3355 abort ();
3356
3357 if (file)
3358 {
3359 fprintf (file, ";; ready list at T-%d:", clock);
3360 for (i = 0; i < new_ready; i++)
3361 fprintf (file, " %d (%x)",
3362 INSN_UID (ready[i]), INSN_PRIORITY (ready[i]));
3363 }
3364
3365 /* Sort the ready list and choose the best insn to schedule. Select
3366 which insn should issue in this cycle and queue those that are
3367 blocked by function unit hazards.
3368
3369 N_READY holds the number of items that were scheduled the last time,
3370 minus the one instruction scheduled on the last loop iteration; it
3371 is not modified for any other reason in this loop. */
3372
3373 SCHED_SORT (ready, new_ready, n_ready);
3374 if (MAX_BLOCKAGE > 1)
3375 {
3376 new_ready = schedule_select (ready, new_ready, clock, file);
3377 if (new_ready == 0)
3378 {
3379 if (file)
3380 fprintf (file, "\n");
3381 continue;
3382 }
3383 }
3384 n_ready = new_ready;
3385 last_scheduled_insn = insn = ready[0];
3386
3387 /* The first insn scheduled becomes the new tail. */
3388 if (tail == 0)
3389 tail = insn;
3390
3391 if (file)
3392 {
3393 fprintf (file, ", now");
3394 for (i = 0; i < n_ready; i++)
3395 fprintf (file, " %d", INSN_UID (ready[i]));
3396 fprintf (file, "\n");
3397 }
3398
3399 if (DONE_PRIORITY_P (insn))
3400 abort ();
3401
3402 if (reload_completed == 0)
3403 {
3404 /* Process this insn, and each insn linked to this one which must
3405 be immediately output after this insn. */
3406 do
3407 {
3408 /* First we kill registers set by this insn, and then we
3409 make registers used by this insn live. This is the opposite
3410 order used above because we are traversing the instructions
3411 backwards. */
3412
3413 /* Strictly speaking, we should scan REG_UNUSED notes and make
3414 every register mentioned there live, however, we will just
3415 kill them again immediately below, so there doesn't seem to
3416 be any reason why we bother to do this. */
3417
3418 /* See if this is the last notice we must take of a register. */
3419 if (GET_CODE (PATTERN (insn)) == SET
3420 || GET_CODE (PATTERN (insn)) == CLOBBER)
3421 sched_note_set (b, PATTERN (insn), 1);
3422 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3423 {
3424 int j;
3425 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3426 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3427 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3428 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 1);
3429 }
3430
3431 /* This code keeps life analysis information up to date. */
3432 if (GET_CODE (insn) == CALL_INSN)
3433 {
3434 register struct sometimes *p;
3435
3436 /* A call kills all call used and global registers, except
3437 for those mentioned in the call pattern which will be
3438 made live again later. */
3439 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3440 if (call_used_regs[i] || global_regs[i])
3441 {
3442 register int offset = i / REGSET_ELT_BITS;
3443 register REGSET_ELT_TYPE bit
3444 = (REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS);
3445
3446 bb_live_regs[offset] &= ~bit;
3447 bb_dead_regs[offset] |= bit;
3448 }
3449
3450 /* Regs live at the time of a call instruction must not
3451 go in a register clobbered by calls. Record this for
3452 all regs now live. Note that insns which are born or
3453 die in a call do not cross a call, so this must be done
3454 after the killings (above) and before the births
3455 (below). */
3456 p = regs_sometimes_live;
3457 for (i = 0; i < sometimes_max; i++, p++)
3458 if (bb_live_regs[p->offset]
3459 & ((REGSET_ELT_TYPE) 1 << p->bit))
3460 p->calls_crossed += 1;
3461 }
3462
3463 /* Make every register used live, and add REG_DEAD notes for
3464 registers which were not live before we started. */
3465 attach_deaths_insn (insn);
3466
3467 /* Find registers now made live by that instruction. */
3468 for (i = 0; i < regset_size; i++)
3469 {
3470 REGSET_ELT_TYPE diff = bb_live_regs[i] & ~old_live_regs[i];
3471 if (diff)
3472 {
3473 register int bit;
3474 old_live_regs[i] |= diff;
3475 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
3476 if (diff & ((REGSET_ELT_TYPE) 1 << bit))
3477 sometimes_max
3478 = new_sometimes_live (regs_sometimes_live, i, bit,
3479 sometimes_max);
3480 }
3481 }
3482
3483 /* Count lengths of all regs we are worrying about now,
3484 and handle registers no longer live. */
3485
3486 for (i = 0; i < sometimes_max; i++)
3487 {
3488 register struct sometimes *p = &regs_sometimes_live[i];
3489 int regno = p->offset*REGSET_ELT_BITS + p->bit;
3490
3491 p->live_length += 1;
3492
3493 if ((bb_live_regs[p->offset]
3494 & ((REGSET_ELT_TYPE) 1 << p->bit)) == 0)
3495 {
3496 /* This is the end of one of this register's lifetime
3497 segments. Save the lifetime info collected so far,
3498 and clear its bit in the old_live_regs entry. */
3499 sched_reg_live_length[regno] += p->live_length;
3500 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
3501 old_live_regs[p->offset]
3502 &= ~((REGSET_ELT_TYPE) 1 << p->bit);
3503
3504 /* Delete the reg_sometimes_live entry for this reg by
3505 copying the last entry over top of it. */
3506 *p = regs_sometimes_live[--sometimes_max];
3507 /* ...and decrement i so that this newly copied entry
3508 will be processed. */
3509 i--;
3510 }
3511 }
3512
3513 link = insn;
3514 insn = PREV_INSN (insn);
3515 }
3516 while (SCHED_GROUP_P (link));
3517
3518 /* Set INSN back to the insn we are scheduling now. */
3519 insn = ready[0];
3520 }
3521
3522 /* Schedule INSN. Remove it from the ready list. */
3523 ready += 1;
3524 n_ready -= 1;
3525
3526 sched_n_insns += 1;
3527 NEXT_INSN (insn) = last;
3528 PREV_INSN (last) = insn;
3529 last = insn;
3530
3531 /* Everything that precedes INSN now either becomes "ready", if
3532 it can execute immediately before INSN, or "pending", if
3533 there must be a delay. Give INSN high enough priority that
3534 at least one (maybe more) reg-killing insns can be launched
3535 ahead of all others. Mark INSN as scheduled by changing its
3536 priority to -1. */
3537 INSN_PRIORITY (insn) = LAUNCH_PRIORITY;
3538 new_ready = schedule_insn (insn, ready, n_ready, clock);
3539 INSN_PRIORITY (insn) = DONE_PRIORITY;
3540
3541 /* Schedule all prior insns that must not be moved. */
3542 if (SCHED_GROUP_P (insn))
3543 {
3544 /* Disable these insns from being launched. */
3545 link = insn;
3546 while (SCHED_GROUP_P (link))
3547 {
3548 /* Disable these insns from being launched by anybody. */
3549 link = PREV_INSN (link);
3550 INSN_REF_COUNT (link) = 0;
3551 }
3552
3553 /* None of these insns can move forward into delay slots. */
3554 while (SCHED_GROUP_P (insn))
3555 {
3556 insn = PREV_INSN (insn);
3557 new_ready = schedule_insn (insn, ready, new_ready, clock);
3558 INSN_PRIORITY (insn) = DONE_PRIORITY;
3559
3560 sched_n_insns += 1;
3561 NEXT_INSN (insn) = last;
3562 PREV_INSN (last) = insn;
3563 last = insn;
3564 }
3565 }
3566 }
3567 if (q_size != 0)
3568 abort ();
3569
3570 if (reload_completed == 0)
3571 finish_sometimes_live (regs_sometimes_live, sometimes_max);
3572
3573 /* HEAD is now the first insn in the chain of insns that have
3574 been scheduled by the loop above.
3575 TAIL is the last of those insns. */
3576 head = insn;
3577
3578 /* NOTE_LIST is the end of a chain of notes previously found
3579 among the insns. Insert them at the beginning of the insns. */
3580 if (note_list != 0)
3581 {
3582 rtx note_head = note_list;
3583 while (PREV_INSN (note_head))
3584 note_head = PREV_INSN (note_head);
3585
3586 PREV_INSN (head) = note_list;
3587 NEXT_INSN (note_list) = head;
3588 head = note_head;
3589 }
3590
3591 /* In theory, there should be no REG_DEAD notes leftover at the end.
3592 In practice, this can occur as the result of bugs in flow, combine.c,
3593 and/or sched.c. The values of the REG_DEAD notes remaining are
3594 meaningless, because dead_notes is just used as a free list. */
3595#if 1
3596 if (dead_notes != 0)
3597 abort ();
3598#endif
3599
3600 if (new_needs & NEED_HEAD)
3601 basic_block_head[b] = head;
3602 PREV_INSN (head) = prev_head;
3603 NEXT_INSN (prev_head) = head;
3604
3605 if (new_needs & NEED_TAIL)
3606 basic_block_end[b] = tail;
3607 NEXT_INSN (tail) = next_tail;
3608 PREV_INSN (next_tail) = tail;
3609
3610 /* Restore the line-number notes of each insn. */
3611 if (write_symbols != NO_DEBUG)
3612 {
3613 rtx line, note, prev, new;
3614 int notes = 0;
3615
3616 head = basic_block_head[b];
3617 next_tail = NEXT_INSN (basic_block_end[b]);
3618
3619 /* Determine the current line-number. We want to know the current
3620 line number of the first insn of the block here, in case it is
3621 different from the true line number that was saved earlier. If
3622 different, then we need a line number note before the first insn
3623 of this block. If it happens to be the same, then we don't want to
3624 emit another line number note here. */
3625 for (line = head; line; line = PREV_INSN (line))
3626 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3627 break;
3628
3629 /* Walk the insns keeping track of the current line-number and inserting
3630 the line-number notes as needed. */
3631 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3632 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3633 line = insn;
3634 else if (! (GET_CODE (insn) == NOTE
3635 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
3636 && (note = LINE_NOTE (insn)) != 0
3637 && note != line
3638 && (line == 0
3639 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
3640 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
3641 {
3642 line = note;
3643 prev = PREV_INSN (insn);
3644 if (LINE_NOTE (note))
3645 {
3646		  /* Re-use the original line-number note. */
3647 LINE_NOTE (note) = 0;
3648 PREV_INSN (note) = prev;
3649 NEXT_INSN (prev) = note;
3650 PREV_INSN (insn) = note;
3651 NEXT_INSN (note) = insn;
3652 }
3653 else
3654 {
3655 notes++;
3656 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
3657 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
3658 }
3659 }
3660 if (file && notes)
3661 fprintf (file, ";; added %d line-number notes\n", notes);
3662 }
3663
3664 if (file)
3665 {
3666      fprintf (file, ";; total time = %d\n;; new basic block head = %d\n;; new basic block end = %d\n\n",
3667	       clock, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
3668 }
3669
3670 /* Yow! We're done! */
3671 free_pending_lists ();
3672
3673 return;
3674}
3675\f
3676/* Subroutine of split_hard_reg_notes. Searches X for any reference to
3677 REGNO, returning the rtx of the reference found if any. Otherwise,
3678 returns 0. */
3679
3680rtx
3681regno_use_in (regno, x)
3682 int regno;
3683 rtx x;
3684{
3685 register char *fmt;
3686 int i, j;
3687 rtx tem;
3688
3689 if (GET_CODE (x) == REG && REGNO (x) == regno)
3690 return x;
3691
3692 fmt = GET_RTX_FORMAT (GET_CODE (x));
3693 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3694 {
3695 if (fmt[i] == 'e')
3696 {
3697 if (tem = regno_use_in (regno, XEXP (x, i)))
3698 return tem;
3699 }
3700 else if (fmt[i] == 'E')
3701 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3702	    if (tem = regno_use_in (regno, XVECEXP (x, i, j)))
3703 return tem;
3704 }
3705
3706 return 0;
3707}
3708
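/* A sketch for exposition (kept out of compilation, hence the #if 0) of
   the standard rtx-walking idiom used by regno_use_in above:
   GET_RTX_FORMAT describes each operand of a code, where 'e' is a
   sub-expression and 'E' is a vector of sub-expressions.  The
   hypothetical count_reg_refs below applies the same shape to count REG
   references in an expression.  */
#if 0
static int
count_reg_refs (x)
     rtx x;
{
  register char *fmt;
  int i, j, count = 0;

  if (GET_CODE (x) == REG)
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	count += count_reg_refs (XEXP (x, i));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  count += count_reg_refs (XVECEXP (x, i, j));
    }

  return count;
}
#endif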
3709/* Subroutine of update_flow_info. Determines whether any new REG_NOTEs are
3710 needed for the hard register mentioned in the note. This can happen
3711 if the reference to the hard register in the original insn was split into
3712 several smaller hard register references in the split insns. */
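/* For example (the register numbers are hypothetical): on a target with
   32-bit words, a DImode value in hard reg 2 also occupies hard reg 3,
   since HARD_REGNO_NREGS returns 2.  If

       (set (mem:DI ...) (reg:DI 2))   carrying (REG_DEAD (reg:DI 2))

   is split into two SImode stores, the surviving REG_DEAD note names
   only reg 2, so the loop below manufactures a REG_DEAD note for reg 3
   on the last split insn that references it.  */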
3713
3714static void
3715split_hard_reg_notes (note, first, last, orig_insn)
3716 rtx note, first, last, orig_insn;
3717{
3718 rtx reg, temp, link;
3719 int n_regs, i, new_reg;
3720 rtx insn;
3721
3722 /* Assume that this is a REG_DEAD note. */
3723 if (REG_NOTE_KIND (note) != REG_DEAD)
3724 abort ();
3725
3726 reg = XEXP (note, 0);
3727
3728 n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
3729
3730  /* ??? Could add check here to see whether the hard register is referenced
3731 in the same mode as in the original insn. If so, then it has not been
3732 split, and the rest of the code below is unnecessary. */
3733
3734 for (i = 1; i < n_regs; i++)
3735 {
3736 new_reg = REGNO (reg) + i;
3737
3738 /* Check for references to new_reg in the split insns. */
3739 for (insn = last; ; insn = PREV_INSN (insn))
3740 {
3741 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3742 && (temp = regno_use_in (new_reg, PATTERN (insn))))
3743 {
3744 /* Create a new reg dead note here. */
3745 link = rtx_alloc (EXPR_LIST);
3746 PUT_REG_NOTE_KIND (link, REG_DEAD);
3747 XEXP (link, 0) = temp;
3748 XEXP (link, 1) = REG_NOTES (insn);
3749 REG_NOTES (insn) = link;
3750 break;
3751 }
3752 /* It isn't mentioned anywhere, so no new reg note is needed for
3753 this register. */
3754 if (insn == first)
3755 break;
3756 }
3757 }
3758}
3759
3760/* Subroutine of update_flow_info. Determines whether a SET or CLOBBER in an
3761 insn created by splitting needs a REG_DEAD or REG_UNUSED note added. */
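/* A sketch of the two cases (pseudo regs 100 and 101 are hypothetical):
   if splitting produced

       insn A: (set (reg:SI 100) (plus ...))
       insn B: (set (reg:SI 101) (mult (reg:SI 100) ...))

   then reg 100, formerly born and dead within one insn, now dies in B,
   so B needs (REG_DEAD (reg:SI 100)).  A CLOBBER whose register is never
   used by a later split insn instead gets a REG_UNUSED note on its own
   insn.  */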
3762
3763static void
3764new_insn_dead_notes (pat, insn, last, orig_insn)
3765 rtx pat, insn, last, orig_insn;
3766{
3767 rtx dest, tem, set;
3768
3769 /* PAT is either a CLOBBER or a SET here. */
3770 dest = XEXP (pat, 0);
3771
3772 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3773 || GET_CODE (dest) == STRICT_LOW_PART
3774 || GET_CODE (dest) == SIGN_EXTRACT)
3775 dest = XEXP (dest, 0);
3776
3777 if (GET_CODE (dest) == REG)
3778 {
3779 for (tem = last; tem != insn; tem = PREV_INSN (tem))
3780 {
3781 if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
3782 && reg_overlap_mentioned_p (dest, PATTERN (tem))
3783 && (set = single_set (tem)))
3784 {
3785 rtx tem_dest = SET_DEST (set);
3786
3787 while (GET_CODE (tem_dest) == ZERO_EXTRACT
3788 || GET_CODE (tem_dest) == SUBREG
3789 || GET_CODE (tem_dest) == STRICT_LOW_PART
3790 || GET_CODE (tem_dest) == SIGN_EXTRACT)
3791 tem_dest = XEXP (tem_dest, 0);
3792
3793 if (tem_dest != dest)
3794 {
3795		      /* Use the same scheme as combine.c: don't put both REG_DEAD
3796 and REG_UNUSED notes on the same insn. */
3797 if (! find_regno_note (tem, REG_UNUSED, REGNO (dest))
3798 && ! find_regno_note (tem, REG_DEAD, REGNO (dest)))
3799 {
3800 rtx note = rtx_alloc (EXPR_LIST);
3801 PUT_REG_NOTE_KIND (note, REG_DEAD);
3802 XEXP (note, 0) = dest;
3803 XEXP (note, 1) = REG_NOTES (tem);
3804 REG_NOTES (tem) = note;
3805 }
3806 /* The reg only dies in one insn, the last one that uses
3807 it. */
3808 break;
3809 }
3810 else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
3811 /* We found an instruction that both uses the register,
3812 and sets it, so no new REG_NOTE is needed for this set. */
3813 break;
3814 }
3815 }
3816 /* If this is a set, it must die somewhere, unless it is the dest of
3817 the original insn, and hence is live after the original insn. Abort
3818 if it isn't supposed to be live after the original insn.
3819
3820 If this is a clobber, then just add a REG_UNUSED note. */
3821 if (tem == insn)
3822 {
3823 int live_after_orig_insn = 0;
3824 rtx pattern = PATTERN (orig_insn);
3825 int i;
3826
3827 if (GET_CODE (pat) == CLOBBER)
3828 {
3829 rtx note = rtx_alloc (EXPR_LIST);
3830 PUT_REG_NOTE_KIND (note, REG_UNUSED);
3831 XEXP (note, 0) = dest;
3832 XEXP (note, 1) = REG_NOTES (insn);
3833 REG_NOTES (insn) = note;
3834 return;
3835 }
3836
3837 /* The original insn could have multiple sets, so search the
3838 insn for all sets. */
3839 if (GET_CODE (pattern) == SET)
3840 {
3841 if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
3842 live_after_orig_insn = 1;
3843 }
3844 else if (GET_CODE (pattern) == PARALLEL)
3845 {
3846 for (i = 0; i < XVECLEN (pattern, 0); i++)
3847 if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
3848 && reg_overlap_mentioned_p (dest,
3849 SET_DEST (XVECEXP (pattern,
3850 0, i))))
3851 live_after_orig_insn = 1;
3852 }
3853
3854 if (! live_after_orig_insn)
3855 abort ();
3856 }
3857 }
3858}
3859
3860/* Subroutine of update_flow_info. Update the value of reg_n_sets for all
3861 registers modified by X. INC is -1 if the containing insn is being deleted,
3862 and is 1 if the containing insn is a newly generated insn. */
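/* Usage sketch (pseudo reg 70 is hypothetical): when an ORIG_INSN whose
   pattern is (set (reg:SI 70) ...) is replaced by split insns,
   update_n_sets (PATTERN (orig_insn), -1) removes its contribution to
   reg_n_sets[70], and each SET or CLOBBER in the new insns adds one
   back via update_n_sets (..., 1).  */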
3863
3864static void
3865update_n_sets (x, inc)
3866 rtx x;
3867 int inc;
3868{
3869 rtx dest = SET_DEST (x);
3870
3871 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
3872 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3873 dest = SUBREG_REG (dest);
3874
3875 if (GET_CODE (dest) == REG)
3876 {
3877 int regno = REGNO (dest);
3878
3879 if (regno < FIRST_PSEUDO_REGISTER)
3880 {
3881 register int i;
3882 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
3883
3884 for (i = regno; i < endregno; i++)
3885 reg_n_sets[i] += inc;
3886 }
3887 else
3888 reg_n_sets[regno] += inc;
3889 }
3890}
3891
3892/* Updates all flow-analysis related quantities (including REG_NOTES) for
3893 the insns from FIRST to LAST inclusive that were created by splitting
3894 ORIG_INSN. NOTES are the original REG_NOTES. */
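/* Throughout this function a note is moved onto an insn I with the
   cons-style push used everywhere in this file:

       XEXP (note, 1) = REG_NOTES (i);
       REG_NOTES (i) = note;

   which makes NOTE the new head of I's EXPR_LIST of register notes.  */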
3895
3896static void
3897update_flow_info (notes, first, last, orig_insn)
3898 rtx notes;
3899 rtx first, last;
3900 rtx orig_insn;
3901{
3902 rtx insn, note;
3903 rtx next;
3904 rtx orig_dest, temp;
3905 rtx set;
3906
3907 /* Get and save the destination set by the original insn. */
3908
3909 orig_dest = single_set (orig_insn);
3910 if (orig_dest)
3911 orig_dest = SET_DEST (orig_dest);
3912
3913 /* Move REG_NOTES from the original insn to where they now belong. */
3914
3915 for (note = notes; note; note = next)
3916 {
3917 next = XEXP (note, 1);
3918 switch (REG_NOTE_KIND (note))
3919 {
3920 case REG_DEAD:
3921 case REG_UNUSED:
3922 /* Move these notes from the original insn to the last new insn where
3923 the register is now set. */
3924
3925 for (insn = last; ; insn = PREV_INSN (insn))
3926 {
3927 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3928 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3929 {
3930 XEXP (note, 1) = REG_NOTES (insn);
3931 REG_NOTES (insn) = note;
3932
3933 /* Sometimes need to convert REG_UNUSED notes to REG_DEAD
3934 notes. */
3935		  /* ??? This won't handle multiple word registers correctly,
3936		     but should be good enough for now.  */
3937 if (REG_NOTE_KIND (note) == REG_UNUSED
3938 && ! dead_or_set_p (insn, XEXP (note, 0)))
3939 PUT_REG_NOTE_KIND (note, REG_DEAD);
3940
3941 /* The reg only dies in one insn, the last one that uses
3942 it. */
3943 break;
3944 }
3945	      /* It must die somewhere; fail if we couldn't find where it died.
3946
3947 If this is a REG_UNUSED note, then it must be a temporary
3948 register that was not needed by this instantiation of the
3949 pattern, so we can safely ignore it. */
3950 if (insn == first)
3951 {
3952 if (REG_NOTE_KIND (note) != REG_UNUSED)
3953 abort ();
3954
3955 break;
3956 }
3957 }
3958
3959 /* If this note refers to a multiple word hard register, it may
3960 have been split into several smaller hard register references.
3961 Check to see if there are any new register references that
3962 need REG_NOTES added for them. */
3963 temp = XEXP (note, 0);
3964 if (REG_NOTE_KIND (note) == REG_DEAD
3965 && GET_CODE (temp) == REG
3966 && REGNO (temp) < FIRST_PSEUDO_REGISTER
3967 && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)))
3968 split_hard_reg_notes (note, first, last, orig_insn);
3969 break;
3970
3971 case REG_WAS_0:
3972 /* This note applies to the dest of the original insn. Find the
3973 first new insn that now has the same dest, and move the note
3974 there. */
3975
3976 if (! orig_dest)
3977 abort ();
3978
3979 for (insn = first; ; insn = NEXT_INSN (insn))
3980 {
3981 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3982 && (temp = single_set (insn))
3983 && rtx_equal_p (SET_DEST (temp), orig_dest))
3984 {
3985 XEXP (note, 1) = REG_NOTES (insn);
3986 REG_NOTES (insn) = note;
3987 /* The reg is only zero before one insn, the first that
3988 uses it. */
3989 break;
3990 }
3991 /* It must be set somewhere, fail if we couldn't find where it
3992 was set. */
3993 if (insn == last)
3994 abort ();
3995 }
3996 break;
3997
3998 case REG_EQUAL:
3999 case REG_EQUIV:
4000 /* A REG_EQUIV or REG_EQUAL note on an insn with more than one
4001 set is meaningless. Just drop the note. */
4002 if (! orig_dest)
4003 break;
4004
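	  /* Otherwise fall through: like REG_NO_CONFLICT below, the note
	     belongs on the last new insn that sets the original dest.  */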
4005 case REG_NO_CONFLICT:
4006 /* These notes apply to the dest of the original insn. Find the last
4007 new insn that now has the same dest, and move the note there. */
4008
4009 if (! orig_dest)
4010 abort ();
4011
4012 for (insn = last; ; insn = PREV_INSN (insn))
4013 {
4014 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4015 && (temp = single_set (insn))
4016 && rtx_equal_p (SET_DEST (temp), orig_dest))
4017 {
4018 XEXP (note, 1) = REG_NOTES (insn);
4019 REG_NOTES (insn) = note;
4020 /* Only put this note on one of the new insns. */
4021 break;
4022 }
4023
4024 /* The original dest must still be set someplace. Abort if we
4025 couldn't find it. */
4026 if (insn == first)
4027 abort ();
4028 }
4029 break;
4030
4031 case REG_LIBCALL:
4032 /* Move a REG_LIBCALL note to the first insn created, and update
4033 the corresponding REG_RETVAL note. */
4034 XEXP (note, 1) = REG_NOTES (first);
4035 REG_NOTES (first) = note;
4036
4037 insn = XEXP (note, 0);
4038	      note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
4039 if (note)
4040 XEXP (note, 0) = first;
4041 break;
4042
4043 case REG_RETVAL:
4044 /* Move a REG_RETVAL note to the last insn created, and update
4045 the corresponding REG_LIBCALL note. */
4046 XEXP (note, 1) = REG_NOTES (last);
4047 REG_NOTES (last) = note;
4048
4049 insn = XEXP (note, 0);
4050	      note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
4051 if (note)
4052 XEXP (note, 0) = last;
4053 break;
4054
4055 case REG_NONNEG:
4056 /* This should be moved to whichever instruction is a JUMP_INSN. */
4057
4058 for (insn = last; ; insn = PREV_INSN (insn))
4059 {
4060 if (GET_CODE (insn) == JUMP_INSN)
4061 {
4062 XEXP (note, 1) = REG_NOTES (insn);
4063 REG_NOTES (insn) = note;
4064 /* Only put this note on one of the new insns. */
4065 break;
4066 }
4067 /* Fail if we couldn't find a JUMP_INSN. */
4068 if (insn == first)
4069 abort ();
4070 }
4071 break;
4072
4073 case REG_INC:
4074 /* This should be moved to whichever instruction now has the
4075 increment operation. */
4076 abort ();
4077
4078 case REG_LABEL:
4079 /* Should be moved to the new insn(s) which use the label. */
4080 for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
4081 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4082 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
4083 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL,
4084 XEXP (note, 0), REG_NOTES (insn));
4085 break;
4086
4087 case REG_CC_SETTER:
4088 case REG_CC_USER:
4089 /* These two notes will never appear until after reorg, so we don't
4090 have to handle them here. */
4091 default:
4092 abort ();
4093 }
4094 }
4095
4096 /* Each new insn created, except the last, has a new set. If the destination
4097 is a register, then this reg is now live across several insns, whereas
4098 previously the dest reg was born and died within the same insn. To
4099 reflect this, we now need a REG_DEAD note on the insn where this
4100 dest reg dies.
4101
4102 Similarly, the new insns may have clobbers that need REG_UNUSED notes. */
4103
4104 for (insn = first; insn != last; insn = NEXT_INSN (insn))
4105 {
4106 rtx pat;
4107 int i;
4108
4109 pat = PATTERN (insn);
4110 if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
4111 new_insn_dead_notes (pat, insn, last, orig_insn);
4112 else if (GET_CODE (pat) == PARALLEL)
4113 {
4114 for (i = 0; i < XVECLEN (pat, 0); i++)
4115 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
4116 || GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
4117 new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
4118 }
4119 }
4120
4121 /* If any insn, except the last, uses the register set by the last insn,
4122 then we need a new REG_DEAD note on that insn. In this case, there
4123 would not have been a REG_DEAD note for this register in the original
4124 insn because it was used and set within one insn.
4125
4126 There is no new REG_DEAD note needed if the last insn uses the register
4127 that it is setting. */
4128
4129 set = single_set (last);
4130 if (set)
4131 {
4132 rtx dest = SET_DEST (set);
4133
4134 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
4135 || GET_CODE (dest) == STRICT_LOW_PART
4136 || GET_CODE (dest) == SIGN_EXTRACT)
4137 dest = XEXP (dest, 0);
4138
4139 if (GET_CODE (dest) == REG
4140 && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
4141 {
4142 for (insn = PREV_INSN (last); ; insn = PREV_INSN (insn))
4143 {
4144 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4145 && reg_mentioned_p (dest, PATTERN (insn))
4146 && (set = single_set (insn)))
4147 {
4148 rtx insn_dest = SET_DEST (set);
4149
4150 while (GET_CODE (insn_dest) == ZERO_EXTRACT
4151 || GET_CODE (insn_dest) == SUBREG
4152 || GET_CODE (insn_dest) == STRICT_LOW_PART
4153 || GET_CODE (insn_dest) == SIGN_EXTRACT)
4154 insn_dest = XEXP (insn_dest, 0);
4155
4156 if (insn_dest != dest)
4157 {
4158 note = rtx_alloc (EXPR_LIST);
4159 PUT_REG_NOTE_KIND (note, REG_DEAD);
4160 XEXP (note, 0) = dest;
4161 XEXP (note, 1) = REG_NOTES (insn);
4162 REG_NOTES (insn) = note;
4163 /* The reg only dies in one insn, the last one
4164 that uses it. */
4165 break;
4166 }
4167 }
4168 if (insn == first)
4169 break;
4170 }
4171 }
4172 }
4173
4174 /* If the original dest is modifying a multiple register target, and the
4175 original instruction was split such that the original dest is now set
4176 by two or more SUBREG sets, then the split insns no longer kill the
4177 destination of the original insn.
4178
4179 In this case, if there exists an instruction in the same basic block,
4180 before the split insn, which uses the original dest, and this use is
4181 killed by the original insn, then we must remove the REG_DEAD note on
4182 this insn, because it is now superfluous.
4183
4184 This does not apply when a hard register gets split, because the code
4185 knows how to handle overlapping hard registers properly. */
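/* For example (hypothetical pseudo 90): if ORIG_INSN set (reg:DF 90)
   and the splitter rewrote it as two sets of (subreg:SF (reg:DF 90) 0)
   and (subreg:SF (reg:DF 90) 1), the split insns no longer kill reg 90
   as a whole, so a REG_DEAD note for it on an earlier insn must be
   removed.  */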
4186 if (orig_dest && GET_CODE (orig_dest) == REG)
4187 {
4188 int found_orig_dest = 0;
4189 int found_split_dest = 0;
4190
4191 for (insn = first; ; insn = NEXT_INSN (insn))
4192 {
4193 set = single_set (insn);
4194 if (set)
4195 {
4196 if (GET_CODE (SET_DEST (set)) == REG
4197 && REGNO (SET_DEST (set)) == REGNO (orig_dest))
4198 {
4199 found_orig_dest = 1;
4200 break;
4201 }
4202 else if (GET_CODE (SET_DEST (set)) == SUBREG
4203 && SUBREG_REG (SET_DEST (set)) == orig_dest)
4204 {
4205 found_split_dest = 1;
4206 break;
4207 }
4208 }
4209
4210 if (insn == last)
4211 break;
4212 }
4213
4214 if (found_split_dest)
4215 {
4216 /* Search backwards from FIRST, looking for the first insn that uses
4217 the original dest. Stop if we pass a CODE_LABEL or a JUMP_INSN.
4218 If we find an insn, and it has a REG_DEAD note, then delete the
4219 note. */
4220
4221 for (insn = first; insn; insn = PREV_INSN (insn))
4222 {
4223 if (GET_CODE (insn) == CODE_LABEL
4224 || GET_CODE (insn) == JUMP_INSN)
4225 break;
4226 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4227 && reg_mentioned_p (orig_dest, insn))
4228 {
4229 note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
4230 if (note)
4231 remove_note (insn, note);
4232 }
4233 }
4234 }
4235 else if (! found_orig_dest)
4236 {
4237 /* This should never happen. */
4238 abort ();
4239 }
4240 }
4241
4242 /* Update reg_n_sets. This is necessary to prevent local alloc from
4243 converting REG_EQUAL notes to REG_EQUIV when splitting has modified
4244 a reg from set once to set multiple times. */
4245
4246 {
4247 rtx x = PATTERN (orig_insn);
4248 RTX_CODE code = GET_CODE (x);
4249
4250 if (code == SET || code == CLOBBER)
4251 update_n_sets (x, -1);
4252 else if (code == PARALLEL)
4253 {
4254 int i;
4255 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4256 {
4257 code = GET_CODE (XVECEXP (x, 0, i));
4258 if (code == SET || code == CLOBBER)
4259 update_n_sets (XVECEXP (x, 0, i), -1);
4260 }
4261 }
4262
4263 for (insn = first; ; insn = NEXT_INSN (insn))
4264 {
4265 x = PATTERN (insn);
4266 code = GET_CODE (x);
4267
4268 if (code == SET || code == CLOBBER)
4269 update_n_sets (x, 1);
4270 else if (code == PARALLEL)
4271 {
4272 int i;
4273 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4274 {
4275 code = GET_CODE (XVECEXP (x, 0, i));
4276 if (code == SET || code == CLOBBER)
4277 update_n_sets (XVECEXP (x, 0, i), 1);
4278 }
4279 }
4280
4281 if (insn == last)
4282 break;
4283 }
4284 }
4285}
4286
4287/* The one entry point in this file. DUMP_FILE is the dump file for
4288 this pass. */
4289
4290void
4291schedule_insns (dump_file)
4292 FILE *dump_file;
4293{
4294 int max_uid = MAX_INSNS_PER_SPLIT * (get_max_uid () + 1);
4295 int i, b;
4296 rtx insn;
4297
4298 /* Taking care of this degenerate case makes the rest of
4299 this code simpler. */
4300 if (n_basic_blocks == 0)
4301 return;
4302
4303 /* Create an insn here so that we can hang dependencies off of it later. */
4304  sched_before_next_call
4305    = gen_rtx (INSN, VOIDmode, 0, NULL_RTX, NULL_RTX,
4306	       NULL_RTX, 0, NULL_RTX, 0);
4307
4308 /* Initialize the unused_*_lists. We can't use the ones left over from
4309 the previous function, because gcc has freed that memory. We can use
4310 the ones left over from the first sched pass in the second pass however,
4311 so only clear them on the first sched pass. The first pass is before
4312 reload if flag_schedule_insns is set, otherwise it is afterwards. */
4313
4314 if (reload_completed == 0 || ! flag_schedule_insns)
4315 {
4316 unused_insn_list = 0;
4317 unused_expr_list = 0;
4318 }
4319
4320 /* We create no insns here, only reorder them, so we
4321 remember how far we can cut back the stack on exit. */
4322
4323 /* Allocate data for this pass. See comments, above,
4324 for what these vectors do. */
4325 insn_luid = (int *) alloca (max_uid * sizeof (int));
4326 insn_priority = (int *) alloca (max_uid * sizeof (int));
4327  insn_tick = (int *) alloca (max_uid * sizeof (int));
4328  insn_costs = (short *) alloca (max_uid * sizeof (short));
4329  insn_units = (short *) alloca (max_uid * sizeof (short));
4330  insn_blockage = (unsigned int *) alloca (max_uid * sizeof (unsigned int));
4331 insn_ref_count = (int *) alloca (max_uid * sizeof (int));
4332
4333 if (reload_completed == 0)
4334 {
4335 sched_reg_n_deaths = (short *) alloca (max_regno * sizeof (short));
4336 sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
4337 sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
4338 bb_dead_regs = (regset) alloca (regset_bytes);
4339 bb_live_regs = (regset) alloca (regset_bytes);
4340 bzero (sched_reg_n_calls_crossed, max_regno * sizeof (int));
4341 bzero (sched_reg_live_length, max_regno * sizeof (int));
4342 bcopy (reg_n_deaths, sched_reg_n_deaths, max_regno * sizeof (short));
4343 init_alias_analysis ();
4344 }
4345 else
4346 {
4347 sched_reg_n_deaths = 0;
4348 sched_reg_n_calls_crossed = 0;
4349 sched_reg_live_length = 0;
4350 bb_dead_regs = 0;
4351 bb_live_regs = 0;
4352 if (! flag_schedule_insns)
4353 init_alias_analysis ();
4354 }
4355
4356 if (write_symbols != NO_DEBUG)
4357 {
4358 rtx line;
4359
4360 line_note = (rtx *) alloca (max_uid * sizeof (rtx));
4361 bzero (line_note, max_uid * sizeof (rtx));
4362 line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
4363 bzero (line_note_head, n_basic_blocks * sizeof (rtx));
4364
4365 /* Determine the line-number at the start of each basic block.
4366 This must be computed and saved now, because after a basic block's
4367 predecessor has been scheduled, it is impossible to accurately
4368 determine the correct line number for the first insn of the block. */
4369
4370 for (b = 0; b < n_basic_blocks; b++)
4371 for (line = basic_block_head[b]; line; line = PREV_INSN (line))
4372 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
4373 {
4374 line_note_head[b] = line;
4375 break;
4376 }
4377 }
4378
4379 bzero (insn_luid, max_uid * sizeof (int));
4380 bzero (insn_priority, max_uid * sizeof (int));
4381  bzero (insn_tick, max_uid * sizeof (int));
4382  bzero (insn_costs, max_uid * sizeof (short));
4383  bzero (insn_units, max_uid * sizeof (short));
4384  bzero (insn_blockage, max_uid * sizeof (unsigned int));
4385 bzero (insn_ref_count, max_uid * sizeof (int));
4386
4387 /* Schedule each basic block, block by block. */
4388
4389 if (NEXT_INSN (basic_block_end[n_basic_blocks-1]) == 0
4390 || (GET_CODE (basic_block_end[n_basic_blocks-1]) != NOTE
4391 && GET_CODE (basic_block_end[n_basic_blocks-1]) != CODE_LABEL))
4392 emit_note_after (NOTE_INSN_DELETED, basic_block_end[n_basic_blocks-1]);
4393
4394 for (b = 0; b < n_basic_blocks; b++)
4395 {
4396 rtx insn, next;
4397 rtx insns;
4398
4399 note_list = 0;
4400
4401 for (insn = basic_block_head[b]; ; insn = next)
4402 {
4403 rtx prev;
4404 rtx set;
4405
4406 /* Can't use `next_real_insn' because that
4407 might go across CODE_LABELS and short-out basic blocks. */
4408 next = NEXT_INSN (insn);
4409 if (GET_CODE (insn) != INSN)
4410 {
4411 if (insn == basic_block_end[b])
4412 break;
4413
4414 continue;
4415 }
4416
4417 /* Don't split no-op move insns. These should silently disappear
4418 later in final. Splitting such insns would break the code
4419 that handles REG_NO_CONFLICT blocks. */
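	  /* (A no-op move is e.g. (set (reg:SI 5) (reg:SI 5)), which the
	     single_set/rtx_equal_p test below recognizes; reg 5 here is
	     hypothetical.)  */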
4420 set = single_set (insn);
4421 if (set && rtx_equal_p (SET_SRC (set), SET_DEST (set)))
4422 {
4423 if (insn == basic_block_end[b])
4424 break;
4425
4426 /* Nops get in the way while scheduling, so delete them now if
4427 register allocation has already been done. It is too risky
4428 to try to do this before register allocation, and there are
4429 unlikely to be very many nops then anyways. */
4430 if (reload_completed)
4431 {
4432 PUT_CODE (insn, NOTE);
4433 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4434 NOTE_SOURCE_FILE (insn) = 0;
4435 }
4436
4437 continue;
4438 }
4439
4440 /* Split insns here to get max fine-grain parallelism. */
4441 prev = PREV_INSN (insn);
4442 if (reload_completed == 0)
4443 {
4444 rtx last, first = PREV_INSN (insn);
4445 rtx notes = REG_NOTES (insn);
4446
4447 last = try_split (PATTERN (insn), insn, 1);
4448 if (last != insn)
4449 {
4450 /* try_split returns the NOTE that INSN became. */
4451 first = NEXT_INSN (first);
4452 update_flow_info (notes, first, last, insn);
4453
4454 PUT_CODE (insn, NOTE);
4455 NOTE_SOURCE_FILE (insn) = 0;
4456 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4457 if (insn == basic_block_head[b])
4458 basic_block_head[b] = first;
4459 if (insn == basic_block_end[b])
4460 {
4461 basic_block_end[b] = last;
4462 break;
4463 }
4464 }
4465 }
4466
4467 if (insn == basic_block_end[b])
4468 break;
4469 }
4470
4471 schedule_block (b, dump_file);
4472
4473#ifdef USE_C_ALLOCA
4474 alloca (0);
4475#endif
4476 }
4477
4478 /* Reposition the prologue and epilogue notes in case we moved the
4479 prologue/epilogue insns. */
4480 if (reload_completed)
4481 reposition_prologue_and_epilogue_notes (get_insns ());
4482
4483 if (write_symbols != NO_DEBUG)
4484 {
4485 rtx line = 0;
4486 rtx insn = get_insns ();
4487 int active_insn = 0;
4488 int notes = 0;
4489
4490 /* Walk the insns deleting redundant line-number notes. Many of these
4491 are already present. The remainder tend to occur at basic
4492 block boundaries. */
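      /* Walking backward, LINE is the nearest line-number note later in
	 the stream, and ACTIVE_INSN counts real insns seen since it.  A
	 note followed by no active insns is deleted; and when a note
	 carries the same file and line as the next note after it, that
	 later note is the redundant one and is deleted.  */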
4493 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
4494 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
4495 {
4496 /* If there are no active insns following, INSN is redundant. */
4497 if (active_insn == 0)
4498 {
4499 notes++;
4500 NOTE_SOURCE_FILE (insn) = 0;
4501 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4502 }
4503 /* If the line number is unchanged, LINE is redundant. */
4504 else if (line
4505 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
4506 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
4507 {
4508 notes++;
4509 NOTE_SOURCE_FILE (line) = 0;
4510 NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
4511 line = insn;
4512 }
4513 else
4514 line = insn;
4515 active_insn = 0;
4516 }
4517 else if (! ((GET_CODE (insn) == NOTE
4518 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
4519 || (GET_CODE (insn) == INSN
4520 && (GET_CODE (PATTERN (insn)) == USE
4521 || GET_CODE (PATTERN (insn)) == CLOBBER))))
4522 active_insn++;
4523
4524 if (dump_file && notes)
4525 fprintf (dump_file, ";; deleted %d line-number notes\n", notes);
4526 }
4527
4528 if (reload_completed == 0)
4529 {
4530 int regno;
4531 for (regno = 0; regno < max_regno; regno++)
4532 if (sched_reg_live_length[regno])
4533 {
4534 if (dump_file)
4535 {
4536 if (reg_live_length[regno] > sched_reg_live_length[regno])
4537 fprintf (dump_file,
4538 ";; register %d life shortened from %d to %d\n",
4539 regno, reg_live_length[regno],
4540 sched_reg_live_length[regno]);
4541 /* Negative values are special; don't overwrite the current
4542 reg_live_length value if it is negative. */
4543 else if (reg_live_length[regno] < sched_reg_live_length[regno]
4544 && reg_live_length[regno] >= 0)
4545 fprintf (dump_file,
4546 ";; register %d life extended from %d to %d\n",
4547 regno, reg_live_length[regno],
4548 sched_reg_live_length[regno]);
4549
4550 if (reg_n_calls_crossed[regno]
4551 && ! sched_reg_n_calls_crossed[regno])
4552 fprintf (dump_file,
4553 ";; register %d no longer crosses calls\n", regno);
4554 else if (! reg_n_calls_crossed[regno]
4555 && sched_reg_n_calls_crossed[regno])
4556 fprintf (dump_file,
4557 ";; register %d now crosses calls\n", regno);
4558 }
4559 /* Negative values are special; don't overwrite the current
4560 reg_live_length value if it is negative. */
4561 if (reg_live_length[regno] >= 0)
4562 reg_live_length[regno] = sched_reg_live_length[regno];
4563 reg_n_calls_crossed[regno] = sched_reg_n_calls_crossed[regno];
4564 }
4565 }
4566}
4567#endif /* INSN_SCHEDULING */