/* Instruction scheduling pass.
   Copyright (C) 1992 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

/* Instruction scheduling pass.

   This pass implements list scheduling within basic blocks.  It is
   run after flow analysis, but before register allocation.  The
   scheduler works as follows:

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes.  All of these are dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning
   values to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we also add it to the ready list.  When all insns down
   to the lowest priority have been scheduled, the critical path of the
   basic block has been made as short as possible.  The remaining insns
   are then scheduled in remaining slots.  (A toy sketch of this
   priority-driven loop follows this comment.)

   The following list shows the order in which we want to break ties:

   1.  choose insn with lowest conflict cost, ties broken by
   2.  choose insn with the longest path to end of bb, ties broken by
   3.  choose insn that kills the most registers, ties broken by
   4.  choose insn that conflicts with the most ready insns, or finally
   5.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To a first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches never appear inside basic blocks, but only
   terminate them, this pass will not do any branch scheduling.  But
   that is ok, since we can use GNU's delayed branch scheduling
   pass to take care of this case.

   Also note that no further optimizations based on algebraic identities
   are performed, so this pass would be a good one to perform instruction
   splitting, such as breaking up a multiply instruction into shifts
   and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   This pass must update information that subsequent passes expect to be
   correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, basic_block_head,
   basic_block_end.

   The information in the line number notes is carefully retained by this
   pass.  All other NOTE insns are grouped in their same relative order at
   the beginning of basic blocks that have been scheduled.  */
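
/* Illustrative sketch, not part of the pass: the priority-driven list
   scheduling described above, reduced to a toy dependence DAG over plain
   integers.  All names here (dep, cost, toy_priority) are hypothetical;
   the real pass works backwards over rtx insns and LOG_LINKS, while this
   toy issues forward, which is enough to show the priority idea.
   Guarded by #if 0 so it is never compiled.  */
#if 0
#include <stdio.h>

#define N 4

/* dep[i][j] nonzero means insn I must complete before insn J.  */
static int dep[N][N] = {
  {0, 1, 1, 0},   /* insn 0 feeds insns 1 and 2 */
  {0, 0, 0, 1},   /* insn 1 feeds insn 3 */
  {0, 0, 0, 1},   /* insn 2 feeds insn 3 */
  {0, 0, 0, 0},
};
static int cost[N] = {2, 1, 3, 1};  /* latency of each insn */

/* Longest-path distance from insn I to the end of the block, the
   analogue of insn_priority.  */
static int
toy_priority (int i)
{
  int j, p, best = cost[i];

  for (j = 0; j < N; j++)
    if (dep[i][j] && (p = cost[i] + toy_priority (j)) > best)
      best = p;
  return best;
}

int
main ()
{
  int scheduled[N] = {0};
  int n_done;

  /* Greedy list scheduling: repeatedly issue the ready insn (all
     predecessors already issued) with the highest priority.  */
  for (n_done = 0; n_done < N; n_done++)
    {
      int i, k, best = -1;

      for (i = 0; i < N; i++)
        {
          int ready = ! scheduled[i];

          for (k = 0; k < N; k++)
            if (dep[k][i] && ! scheduled[k])
              ready = 0;
          if (ready && (best < 0 || toy_priority (i) > toy_priority (best)))
            best = i;
        }
      printf ("issue insn %d (priority %d)\n", best, toy_priority (best));
      scheduled[best] = 1;
    }
  return 0;
}
#endif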
\f
#include <stdio.h>
#include "config.h"
#include "rtl.h"
#include "basic-block.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"

/* Arrays set up by scheduling for the same respective purposes as
   similar-named arrays set up by flow analysis.  We work with these
   arrays during the scheduling pass so we can compare values against
   unscheduled code.

   Values of these arrays are copied at the end of this pass into the
   arrays set up by flow analysis.  */
static short *sched_reg_n_deaths;
static int *sched_reg_n_calls_crossed;
static int *sched_reg_live_length;

/* Element N of reg_last_uses is a list of the insns that last used (hard
   or pseudo) register N within the current basic block; element N of
   reg_last_sets is the insn that last set it; zero if there is no such
   insn.  Needed for new registers which may be introduced by splitting
   insns.  */
static rtx *reg_last_uses;
static rtx *reg_last_sets;

/* Vector indexed by INSN_UID giving the original ordering of the insns.  */
static int *insn_luid;
#define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving each instruction a priority.  */
static int *insn_priority;
#define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving the memoized cost of each insn
   (see insn_cost below).  */
static short *insn_costs;
#define INSN_COST(INSN) insn_costs[INSN_UID (INSN)]

#define DONE_PRIORITY -1
#define MAX_PRIORITY 0x7fffffff
#define TAIL_PRIORITY 0x7ffffffe
#define LAUNCH_PRIORITY 0x7f000001
#define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
#define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)

/* Vector indexed by INSN_UID giving number of insns referring to this insn.  */
static int *insn_ref_count;
#define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving the line-number note in effect for each
   insn.  For line-number notes, this indicates whether the note may be
   reused.  */
static rtx *line_note;
#define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

/* Regsets telling whether a given register is live or dead before the last
   scheduled insn.  Must scan the instructions once before scheduling to
   determine what registers are live or dead at the end of the block.  */
static regset bb_dead_regs;
static regset bb_live_regs;

/* Regset telling whether a given register is live after the insn currently
   being scheduled.  Before processing an insn, this is equal to bb_live_regs
   above.  This is used so that we can find registers that are newly born/dead
   after processing an insn.  */
static regset old_live_regs;

/* The chain of REG_DEAD notes.  REG_DEAD notes are removed from all insns
   during the initial scan and reused later.  If there are not exactly as
   many REG_DEAD notes in the post-scheduled code as there were in the
   pre-scheduled code, we trigger an abort, because this indicates a bug.  */
static rtx dead_notes;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns following it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  We maintain three lists (conceptually):

   (1) a "Ready" list of unscheduled, uncommitted insns
   (2) a "Scheduled" list of scheduled insns
   (3) a "Pending" list of insns which can be scheduled, but would
       stall the pipeline if issued immediately

   Insns move from the "Ready" list to the "Pending" list when
   all insns following them have been scheduled.

   Insns move from the "Pending" list to the "Scheduled" list
   when there is sufficient space in the pipeline to prevent
   stalls between the insn and scheduled insns which use it.

   The "Pending" list acts as a buffer to prevent insns
   from avalanching.

   The "Ready" list is implemented by the variable `ready'.
   The "Pending" list are the insns in the LOG_LINKS of ready insns.
   The "Scheduled" list is the new insn chain built by this pass.  */

/* Implement a circular buffer from which instructions are issued.
   Q_SIZE must be a power of two so the NEXT_Q macros can wrap indices
   with a mask instead of a modulo (see the sketch below).  */
#define Q_SIZE 128
static rtx insn_queue[Q_SIZE];
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & (Q_SIZE-1))
#define NEXT_Q_AFTER(X,C) (((X)+C) & (Q_SIZE-1))
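
/* Illustrative sketch, hypothetical and never compiled: why the
   power-of-two circular queue above can advance with a mask.  Because
   Q_SIZE is a power of two, (X + C) & (Q_SIZE - 1) equals
   (X + C) % Q_SIZE for nonnegative X and C.  */
#if 0
#include <assert.h>

int
main ()
{
  int x, c;

  for (x = 0; x < 128; x++)
    for (c = 0; c < 128; c++)
      assert (((x + c) & (128 - 1)) == (x + c) % 128);
  return 0;
}
#endif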

/* Forward declarations.  */
static void sched_analyze_2 ();
static void schedule_block ();

/* Main entry point of this file.  */
void schedule_insns ();
\f
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Vector indexed by N giving the initial (unchanging) value known
   for pseudo-register N.  */
static rtx *reg_known_value;

/* Indicates number of valid entries in reg_known_value.  */
static int reg_known_value_size;

static rtx
canon_rtx (x)
     rtx x;
{
  if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER
      && REGNO (x) < reg_known_value_size)
    return reg_known_value[REGNO (x)];
  else if (GET_CODE (x) == PLUS)
    {
      rtx x0 = canon_rtx (XEXP (x, 0));
      rtx x1 = canon_rtx (XEXP (x, 1));

      if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
        {
          /* We can tolerate LO_SUMs being offset here; these
             rtl are used for nothing other than comparisons.  */
          if (GET_CODE (x0) == CONST_INT)
            return plus_constant_for_output (x1, INTVAL (x0));
          else if (GET_CODE (x1) == CONST_INT)
            return plus_constant_for_output (x0, INTVAL (x1));
          return gen_rtx (PLUS, GET_MODE (x), x0, x1);
        }
    }
  return x;
}

/* Set up all info needed to perform alias analysis on memory references.  */

void
init_alias_analysis ()
{
  int maxreg = max_reg_num ();
  rtx insn;
  rtx note;
  rtx set;

  reg_known_value_size = maxreg;

  /* Offset the base pointer so the array can be indexed directly by
     pseudo-register number, without wasting entries for hard regs
     (see the sketch after this function).  */
  reg_known_value
    = (rtx *) oballoc ((maxreg - FIRST_PSEUDO_REGISTER) * sizeof (rtx))
      - FIRST_PSEUDO_REGISTER;
  bzero (reg_known_value + FIRST_PSEUDO_REGISTER,
         (maxreg - FIRST_PSEUDO_REGISTER) * sizeof (rtx));

  /* Fill in the entries with known constant values.  */
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if ((set = single_set (insn)) != 0
        && GET_CODE (SET_DEST (set)) == REG
        && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
        && (((note = find_reg_note (insn, REG_EQUAL, NULL_RTX)) != 0
             && reg_n_sets[REGNO (SET_DEST (set))] == 1)
            || (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != 0)
        && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
      reg_known_value[REGNO (SET_DEST (set))] = XEXP (note, 0);

  /* Fill in the remaining entries.  */
  while (--maxreg >= FIRST_PSEUDO_REGISTER)
    if (reg_known_value[maxreg] == 0)
      reg_known_value[maxreg] = regno_reg_rtx[maxreg];
}
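
/* Illustrative sketch, hypothetical and never compiled: the offset-base
   trick used for reg_known_value above.  Biasing the pointer by the
   first valid index lets the table be indexed with the register number
   itself, while only (max - first) entries are allocated.  (The biased
   pointer is formally outside the object, which ISO C leaves undefined,
   but it is exactly the idiom the code above relies on.)  */
#if 0
#include <stdio.h>
#include <stdlib.h>

int
main ()
{
  int first = 64, max = 70;   /* stand-ins for FIRST_PSEUDO_REGISTER, max_reg_num () */
  int *base = (int *) calloc (max - first, sizeof (int));
  int *table = base - first;  /* table[first .. max-1] is now valid */
  int i;

  for (i = first; i < max; i++)
    table[i] = i * 2;
  printf ("table[%d] = %d\n", first, table[first]);
  free (base);
  return 0;
}
#endif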

/* Return 1 if X and Y are identical-looking rtx's.

   We use the data in reg_known_value above to see if two registers with
   different numbers are, in fact, equivalent.  */

static int
rtx_equal_for_memref_p (x, y)
     rtx x, y;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0 && y == 0)
    return 1;
  if (x == 0 || y == 0)
    return 0;
  x = canon_rtx (x);
  y = canon_rtx (y);

  if (x == y)
    return 1;

  code = GET_CODE (x);
  /* Rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively.  */

  if (code == REG)
    return REGNO (x) == REGNO (y);
  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'w':
          if (XWINT (x, i) != XWINT (y, i))
            return 0;
          break;

        case 'n':
        case 'i':
          if (XINT (x, i) != XINT (y, i))
            return 0;
          break;

        case 'V':
        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return 0;

          /* And the corresponding elements must match.  */
          for (j = 0; j < XVECLEN (x, i); j++)
            if (rtx_equal_for_memref_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0)
              return 0;
          break;

        case 'e':
          if (rtx_equal_for_memref_p (XEXP (x, i), XEXP (y, i)) == 0)
            return 0;
          break;

        case 'S':
        case 's':
          if (strcmp (XSTR (x, i), XSTR (y, i)))
            return 0;
          break;

        case 'u':
          /* These are just backpointers, so they don't matter.  */
          break;

        case '0':
          break;

          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's,
             except for within LABEL_REFs and SYMBOL_REFs.  */
        default:
          abort ();
        }
    }
  return 1;
}

/* Given an rtx X, find a SYMBOL_REF or LABEL_REF within
   X and return it, or return 0 if none found.  */

static rtx
find_symbolic_term (x)
     rtx x;
{
  register int i;
  register enum rtx_code code;
  register char *fmt;

  code = GET_CODE (x);
  if (code == SYMBOL_REF || code == LABEL_REF)
    return x;
  if (GET_RTX_CLASS (code) == 'o')
    return 0;

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      rtx t;

      if (fmt[i] == 'e')
        {
          t = find_symbolic_term (XEXP (x, i));
          if (t != 0)
            return t;
        }
      else if (fmt[i] == 'E')
        break;
    }
  return 0;
}

/* Return nonzero if X and Y (memory addresses) could reference the
   same location in memory.  C is an offset accumulator.  When
   C is nonzero, we are testing aliases between X and Y + C.
   XSIZE is the size in bytes of the X reference,
   similarly YSIZE is the size in bytes for Y.

   If XSIZE or YSIZE is zero, we do not know the amount of memory being
   referenced (the reference was BLKmode), so make the most pessimistic
   assumptions.

   We recognize the following cases of non-conflicting memory:

   (1) addresses involving the frame pointer cannot conflict
       with addresses involving static variables.
   (2) static variables with different addresses cannot conflict.

   It would be nice to notice that varying addresses cannot conflict
   with the frame pointer if no local variables had their addresses
   taken, but that's too hard for now.  */

static int
memrefs_conflict_p (xsize, x, ysize, y, c)
     rtx x, y;
     int xsize, ysize;
     HOST_WIDE_INT c;
{
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);
  else if (GET_CODE (x) == LO_SUM)
    x = XEXP (x, 1);
  else
    x = canon_rtx (x);
  if (GET_CODE (y) == HIGH)
    y = XEXP (y, 0);
  else if (GET_CODE (y) == LO_SUM)
    y = XEXP (y, 1);
  else
    y = canon_rtx (y);

  /* Identical bases conflict unless the size and offset arithmetic
     shows the references do not overlap (see the sketch after this
     function).  */
  if (rtx_equal_for_memref_p (x, y))
    return (xsize == 0 || ysize == 0
            || (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0));

  if (y == frame_pointer_rtx || y == stack_pointer_rtx)
    {
      rtx t = y;
      int tsize = ysize;
      y = x; ysize = xsize;
      x = t; xsize = tsize;
    }

  if (x == frame_pointer_rtx || x == stack_pointer_rtx)
    {
      rtx y1;

      if (CONSTANT_P (y))
        return 0;

      if (GET_CODE (y) == PLUS
          && canon_rtx (XEXP (y, 0)) == x
          && (y1 = canon_rtx (XEXP (y, 1)))
          && GET_CODE (y1) == CONST_INT)
        {
          c += INTVAL (y1);
          return (xsize == 0 || ysize == 0
                  || (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0));
        }

      if (GET_CODE (y) == PLUS
          && (y1 = canon_rtx (XEXP (y, 0)))
          && CONSTANT_P (y1))
        return 0;

      return 1;
    }

  if (GET_CODE (x) == PLUS)
    {
      /* The fact that X is canonicalized means that this
         PLUS rtx is canonicalized.  */
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (GET_CODE (y) == PLUS)
        {
          /* The fact that Y is canonicalized means that this
             PLUS rtx is canonicalized.  */
          rtx y0 = XEXP (y, 0);
          rtx y1 = XEXP (y, 1);

          if (rtx_equal_for_memref_p (x1, y1))
            return memrefs_conflict_p (xsize, x0, ysize, y0, c);
          if (rtx_equal_for_memref_p (x0, y0))
            return memrefs_conflict_p (xsize, x1, ysize, y1, c);
          if (GET_CODE (x1) == CONST_INT)
            {
              if (GET_CODE (y1) == CONST_INT)
                return memrefs_conflict_p (xsize, x0, ysize, y0,
                                           c - INTVAL (x1) + INTVAL (y1));
              else
                return memrefs_conflict_p (xsize, x0, ysize, y,
                                           c - INTVAL (x1));
            }
          else if (GET_CODE (y1) == CONST_INT)
            return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));

          /* Handle the case where we cannot understand iteration operators,
             but we notice that the base addresses are distinct objects.  */
          x = find_symbolic_term (x);
          if (x == 0)
            return 1;
          y = find_symbolic_term (y);
          if (y == 0)
            return 1;
          return rtx_equal_for_memref_p (x, y);
        }
      else if (GET_CODE (x1) == CONST_INT)
        return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
    }
  else if (GET_CODE (y) == PLUS)
    {
      /* The fact that Y is canonicalized means that this
         PLUS rtx is canonicalized.  */
      rtx y0 = XEXP (y, 0);
      rtx y1 = XEXP (y, 1);

      if (GET_CODE (y1) == CONST_INT)
        return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
      else
        return 1;
    }

  if (GET_CODE (x) == GET_CODE (y))
    switch (GET_CODE (x))
      {
      case MULT:
        {
          /* Handle cases where we expect the second operands to be the
             same, and check only whether the first operand would conflict
             or not.  */
          rtx x0, y0;
          rtx x1 = canon_rtx (XEXP (x, 1));
          rtx y1 = canon_rtx (XEXP (y, 1));

          if (! rtx_equal_for_memref_p (x1, y1))
            return 1;
          x0 = canon_rtx (XEXP (x, 0));
          y0 = canon_rtx (XEXP (y, 0));
          if (rtx_equal_for_memref_p (x0, y0))
            return (xsize == 0 || ysize == 0
                    || (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0));

          /* Can't properly adjust our sizes.  */
          if (GET_CODE (x1) != CONST_INT)
            return 1;
          xsize /= INTVAL (x1);
          ysize /= INTVAL (x1);
          c /= INTVAL (x1);
          return memrefs_conflict_p (xsize, x0, ysize, y0, c);
        }
      }

  if (CONSTANT_P (x))
    {
      if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
        {
          c += (INTVAL (y) - INTVAL (x));
          return (xsize == 0 || ysize == 0
                  || (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0));
        }

      if (GET_CODE (x) == CONST)
        {
          if (GET_CODE (y) == CONST)
            return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
                                       ysize, canon_rtx (XEXP (y, 0)), c);
          else
            return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
                                       ysize, y, c);
        }
      if (GET_CODE (y) == CONST)
        return memrefs_conflict_p (xsize, x, ysize,
                                   canon_rtx (XEXP (y, 0)), c);

      if (CONSTANT_P (y))
        return (rtx_equal_for_memref_p (x, y)
                && (xsize == 0 || ysize == 0
                    || (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0)));

      return 1;
    }
  return 1;
}
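
/* Illustrative sketch, hypothetical and never compiled: the overlap test
   used throughout memrefs_conflict_p, on plain integers.  With equal
   bases, X covers bytes [0, xsize) and Y + C covers [c, c + ysize), so
   the references are disjoint exactly when the test below is false.  */
#if 0
#include <stdio.h>

static int
overlap_p (int xsize, int ysize, int c)
{
  /* Unknown (zero) sizes are treated pessimistically as conflicts.  */
  return (xsize == 0 || ysize == 0
          || (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0));
}

int
main ()
{
  printf ("%d\n", overlap_p (4, 4, 4));   /* 0: [0,4) and [4,8) are disjoint */
  printf ("%d\n", overlap_p (4, 4, 3));   /* 1: [0,4) and [3,7) overlap */
  printf ("%d\n", overlap_p (4, 4, -4));  /* 0: [0,4) and [-4,0) are disjoint */
  return 0;
}
#endif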

/* Functions to compute memory dependencies.

   Since we process the insns in execution order, we can build tables
   to keep track of what registers are fixed (and not aliased), what registers
   are varying in known ways, and what registers are varying in unknown
   ways.

   If both memory references are volatile, then there must always be a
   dependence between the two references, since their order cannot be
   changed.  A volatile and non-volatile reference can be interchanged
   though.

   A MEM_IN_STRUCT reference at a varying address can never conflict with a
   non-MEM_IN_STRUCT reference at a fixed address.  */

/* Read dependence: X is read after read in MEM takes place.  There can
   only be a dependence here if both reads are volatile.  */

int
read_dependence (mem, x)
     rtx mem;
     rtx x;
{
  return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
}

/* True dependence: X is read after store in MEM takes place.  */

int
true_dependence (mem, x)
     rtx mem;
     rtx x;
{
  /* If X is an unchanging read, then it can't possibly conflict with any
     non-unchanging store.  It may conflict with an unchanging write though,
     because there may be a single store to this address to initialize it.
     Just fall through to the code below to resolve the case where we have
     both an unchanging read and an unchanging write.  This won't handle all
     cases optimally, but the possible performance loss should be
     negligible.  */
  if (RTX_UNCHANGING_P (x) && ! RTX_UNCHANGING_P (mem))
    return 0;

  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
          || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
                                  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
              && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
                    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
              && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
                    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}

/* Anti dependence: X is written after read in MEM takes place.  */

int
anti_dependence (mem, x)
     rtx mem;
     rtx x;
{
  /* If MEM is an unchanging read, then it can't possibly conflict with
     the store to X, because there is at most one store to MEM, and it must
     have occurred somewhere before MEM.  */
  if (RTX_UNCHANGING_P (mem))
    return 0;

  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
          || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
                                  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
              && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
                    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
              && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
                    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}

/* Output dependence: X is written after store in MEM takes place.  */

int
output_dependence (mem, x)
     rtx mem;
     rtx x;
{
  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
          || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
                                  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
              && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
                    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
              && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
                    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}
\f
/* Helper functions for instruction scheduling.  */

/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */

  next = NEXT_INSN (elem);

#ifdef HAVE_cc0
  while (next && GET_CODE (next) == NOTE)
    next = NEXT_INSN (next);
#endif

  if (next && SCHED_GROUP_P (next))
    {
      /* Notes will never intervene here though, so don't bother checking
         for them.  */
      while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next)))
        next = NEXT_INSN (next);

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
        return;

      /* Make the dependence to NEXT, the last insn of the group, instead
         of the original ELEM.  */
      elem = next;
    }

  /* Check that we don't already have this dependence.  */
  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    if (XEXP (link, 0) == elem)
      {
        /* If this is a more restrictive type of dependence than the existing
           one, then change the existing dependence to this type.  */
        if ((int) dep_type < (int) REG_NOTE_KIND (link))
          PUT_REG_NOTE_KIND (link, dep_type);
        return;
      }
  /* Might want to check one level of transitivity to save conses.  */

  link = rtx_alloc (INSN_LIST);
  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);
  XEXP (link, 0) = elem;
  XEXP (link, 1) = LOG_LINKS (insn);
  LOG_LINKS (insn) = link;
}
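
/* Illustrative sketch, hypothetical and never compiled: the cons-list
   discipline add_dependence uses, on a plain C struct.  A link is added
   only if absent, and an existing link is strengthened when a
   numerically smaller (more restrictive) kind comes along.  */
#if 0
#include <stdlib.h>

struct link { int elem; int kind; struct link *next; };

static void
add_dep (struct link **head, int elem, int kind)
{
  struct link *l;

  for (l = *head; l; l = l->next)
    if (l->elem == elem)
      {
        if (kind < l->kind)
          l->kind = kind;       /* upgrade the existing dependence */
        return;
      }
  l = (struct link *) malloc (sizeof *l);
  l->elem = elem;
  l->kind = kind;
  l->next = *head;              /* cons onto the front of the list */
  *head = l;
}
#endif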

/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */
void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link;
       prev = link, link = XEXP (link, 1))
    {
      if (XEXP (link, 0) == elem)
        {
          if (prev)
            XEXP (prev, 1) = XEXP (link, 1);
          else
            LOG_LINKS (insn) = XEXP (link, 1);
          found = 1;
        }
    }

  if (! found)
    abort ();
  return;
}
\f
#ifndef INSN_SCHEDULING
void schedule_insns () {}
#else
#ifndef __GNUC__
#define __inline
#endif

/* Computation of memory dependencies.  */

/* The *_insns and *_mems are paired lists.  Each pending memory operation
   will have a pointer to the MEM rtx on one list and a pointer to the
   containing insn on the other list in the same place in the list.  */

/* We can't use add_dependence like the old code did, because a single insn
   may have multiple memory accesses, and hence needs to be on the list
   once for each memory access.  Add_dependence won't let you add an insn
   to a list more than once.  */

/* An INSN_LIST containing all insns with pending read operations.  */
static rtx pending_read_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
static rtx pending_read_mems;

/* An INSN_LIST containing all insns with pending write operations.  */
static rtx pending_write_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
static rtx pending_write_mems;

/* Indicates the combined length of the two pending lists.  We must prevent
   these lists from ever growing too large, since the number of dependencies
   produced is at least O(N*N), and execution time is at least O(4*N*N), as
   a function of the length of these pending lists.  */

static int pending_lists_length;

/* An INSN_LIST containing all INSN_LISTs allocated but currently unused.  */

static rtx unused_insn_list;

/* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused.  */

static rtx unused_expr_list;

/* The last insn upon which all memory references must depend.
   This is an insn which flushed the pending lists, creating a dependency
   between it and all previously pending memory references.  This creates
   a barrier (or a checkpoint) which no memory reference is allowed to cross.

   This includes all non-constant CALL_INSNs.  When we do interprocedural
   alias analysis, this restriction can be relaxed.
   This may also be an INSN that writes memory if the pending lists grow
   too large.  */

static rtx last_pending_memory_flush;

/* The last function call we have seen.  All hard regs, and, of course,
   the last function call, must depend on this.  */

static rtx last_function_call;

/* The LOG_LINKS field of this is a list of insns which use a pseudo register
   that does not already cross a call.  We create dependencies between each
   of those insns and the next call insn, to ensure that they won't cross a
   call after scheduling is done.  */

static rtx sched_before_next_call;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;

/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
\f
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

__inline static rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
        return list;
      list = XEXP (list, 1);
    }
  return 0;
}

/* Compute the cost of executing INSN.  This is the number of virtual
   cycles taken between instruction issue and instruction results.
   The result is memoized in INSN_COST, so repeated queries are cheap.  */

__inline static int
insn_cost (insn)
     rtx insn;
{
  register int cost;

  if (INSN_COST (insn))
    return INSN_COST (insn);

  recog_memoized (insn);

  /* A USE insn, or something else we don't need to understand.
     We can't pass these directly to result_ready_cost, because it will
     trigger a fatal error for unrecognizable insns.  */
  if (INSN_CODE (insn) < 0)
    {
      INSN_COST (insn) = 1;
      return 1;
    }
  else
    {
      cost = result_ready_cost (insn);

      if (cost < 1)
        cost = 1;

      INSN_COST (insn) = cost;
      return cost;
    }
}
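
/* Illustrative sketch, hypothetical and never compiled: the memoization
   pattern insn_cost uses.  A zero entry means "not yet computed";
   clamping the computed value to at least 1 keeps zero free to mean
   "unset".  */
#if 0
static short cost_cache[1024];          /* analogous to insn_costs[] */

static int
cached_cost (int uid, int (*compute) (int))
{
  int c;

  if (cost_cache[uid])
    return cost_cache[uid];             /* already computed */
  c = compute (uid);
  if (c < 1)
    c = 1;                              /* reserve 0 as the "unset" mark */
  cost_cache[uid] = (short) c;
  return c;
}
#endif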

/* Compute the priority number for INSN.  */

static int
priority (insn)
     rtx insn;
{
  if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
    {
      int prev_priority;
      int max_priority;
      int this_priority = INSN_PRIORITY (insn);
      rtx prev;

      if (this_priority > 0)
        return this_priority;

      max_priority = 1;

      /* Nonzero if these insns must be scheduled together.  */
      if (SCHED_GROUP_P (insn))
        {
          prev = insn;
          while (SCHED_GROUP_P (prev))
            {
              prev = PREV_INSN (prev);
              INSN_REF_COUNT (prev) += 1;
            }
        }

      for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
        {
          rtx x = XEXP (prev, 0);

          /* A dependence pointing to a note is always obsolete, because
             sched_analyze_insn will have created any necessary new dependences
             which replace it.  Notes can be created when instructions are
             deleted by insn splitting, or by register allocation.  */
          if (GET_CODE (x) == NOTE)
            {
              remove_dependence (insn, x);
              continue;
            }

          /* This priority calculation was chosen because it results in the
             least instruction movement, and does not hurt the performance
             of the resulting code compared to the old algorithm.
             This makes the sched algorithm more stable, which results
             in better code, because there is less register pressure,
             cross jumping is more likely to work, and debugging is easier.

             When all instructions have a latency of 1, there is no need to
             move any instructions.  Subtracting one here ensures that in such
             cases all instructions will end up with a priority of one, and
             hence no scheduling will be done.

             The original code did not subtract the one, and added the
             insn_cost of the current instruction to its priority (e.g.
             move the insn_cost call down to the end).  */

          if (REG_NOTE_KIND (prev) == 0)
            /* Data dependence.  */
            prev_priority = priority (x) + insn_cost (x) - 1;
          else
            /* Anti or output dependence.  Don't add the latency of this
               insn's result, because it isn't being used.  */
            prev_priority = priority (x);

          if (prev_priority > max_priority)
            max_priority = prev_priority;
          INSN_REF_COUNT (x) += 1;
        }

      INSN_PRIORITY (insn) = max_priority;
      return INSN_PRIORITY (insn);
    }
  return 0;
}
\f
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
   them to the unused_*_list variables, so that they can be reused.  */

static void
free_pending_lists ()
{
  register rtx link, prev_link;

  if (pending_read_insns)
    {
      prev_link = pending_read_insns;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_read_insns;
      pending_read_insns = 0;
    }

  if (pending_write_insns)
    {
      prev_link = pending_write_insns;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_write_insns;
      pending_write_insns = 0;
    }

  if (pending_read_mems)
    {
      prev_link = pending_read_mems;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_read_mems;
      pending_read_mems = 0;
    }

  if (pending_write_mems)
    {
      prev_link = pending_write_mems;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_write_mems;
      pending_write_mems = 0;
    }
}

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (insn_list, mem_list, insn, mem)
     rtx *insn_list, *mem_list, insn, mem;
{
  register rtx link;

  /* Take a cons cell from the free list if one is available; otherwise
     allocate a fresh one.  */
  if (unused_insn_list)
    {
      link = unused_insn_list;
      unused_insn_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (INSN_LIST);
  XEXP (link, 0) = insn;
  XEXP (link, 1) = *insn_list;
  *insn_list = link;

  if (unused_expr_list)
    {
      link = unused_expr_list;
      unused_expr_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (EXPR_LIST);
  XEXP (link, 0) = mem;
  XEXP (link, 1) = *mem_list;
  *mem_list = link;

  pending_lists_length++;
}
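
/* Illustrative sketch, hypothetical and never compiled: the free-list
   recycling used by add_insn_mem_dependence and free_pending_lists.
   Spent list cells are threaded onto a free list instead of being
   released, so later allocations can pop them off in O(1).  */
#if 0
#include <stdlib.h>

struct cell { void *car; struct cell *cdr; };
static struct cell *free_cells;

static struct cell *
cons (void *car, struct cell *cdr)
{
  struct cell *c;

  if (free_cells)
    {
      c = free_cells;           /* reuse a spent cell */
      free_cells = c->cdr;
    }
  else
    c = (struct cell *) malloc (sizeof *c);
  c->car = car;
  c->cdr = cdr;
  return c;
}

/* Return an entire list to the free list at once by splicing its tail
   onto the existing free list, as free_pending_lists does.  */
static void
free_list (struct cell *list)
{
  struct cell *last = list;

  if (! list)
    return;
  while (last->cdr)
    last = last->cdr;
  last->cdr = free_cells;
  free_cells = list;
}
#endif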
\f
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  */

static void
flush_pending_lists (insn)
     rtx insn;
{
  rtx link;

  while (pending_read_insns)
    {
      add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);

      link = pending_read_insns;
      pending_read_insns = XEXP (pending_read_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_read_mems;
      pending_read_mems = XEXP (pending_read_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  while (pending_write_insns)
    {
      add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);

      link = pending_write_insns;
      pending_write_insns = XEXP (pending_write_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_write_mems;
      pending_write_mems = XEXP (pending_write_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  pending_lists_length = 0;

  if (last_pending_memory_flush)
    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

  last_pending_memory_flush = insn;
}

/* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
   by the write to the destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (x, insn)
     rtx x;
     rtx insn;
{
  register int regno;
  register rtx dest = SET_DEST (x);

  if (dest == 0)
    return;

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (XEXP (dest, 1), insn);
          sched_analyze_2 (XEXP (dest, 2), insn);
        }
      dest = SUBREG_REG (dest);
    }

  if (GET_CODE (dest) == REG)
    {
      register int offset, bit, i;

      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
          while (--i >= 0)
            {
              rtx u;

              for (u = reg_last_uses[regno + i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              reg_last_uses[regno + i] = 0;
              if (reg_last_sets[regno + i])
                add_dependence (insn, reg_last_sets[regno + i],
                                REG_DEP_OUTPUT);
              reg_last_sets[regno + i] = insn;
              if ((call_used_regs[regno + i] || global_regs[regno + i])
                  && last_function_call)
                /* Function calls clobber all call_used regs.  */
                add_dependence (insn, last_function_call, REG_DEP_ANTI);
            }
        }
      else
        {
          rtx u;

          for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
          reg_last_uses[regno] = 0;
          if (reg_last_sets[regno])
            add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
          reg_last_sets[regno] = insn;

          /* Don't let it cross a call after scheduling if it doesn't
             already cross one.  */
          if (reg_n_calls_crossed[regno] == 0 && last_function_call)
            add_dependence (insn, last_function_call, REG_DEP_ANTI);
        }
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */

      if (pending_lists_length > 32)
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  The number 32 was chosen because it
             seems like a reasonable number.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the number 32.  */
          flush_pending_lists (insn);
        }
      else
        {
          rtx pending, pending_mem;

          pending = pending_read_insns;
          pending_mem = pending_read_mems;
          while (pending)
            {
              /* If a dependency already exists, don't create a new one.  */
              if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
                if (anti_dependence (XEXP (pending_mem, 0), dest))
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = pending_write_insns;
          pending_mem = pending_write_mems;
          while (pending)
            {
              /* If a dependency already exists, don't create a new one.  */
              if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
                if (output_dependence (XEXP (pending_mem, 0), dest))
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          if (last_pending_memory_flush)
            add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

          add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
                                   insn, dest);
        }
      sched_analyze_2 (XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (SET_SRC (x), insn);
}

/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (x, insn)
     rtx x;
     rtx insn;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      {
        rtx link, prev;

        /* There may be a note before this insn now, but all notes will
           be removed before we actually try to schedule the insns, so
           it won't cause a problem later.  We must avoid it here though.  */

        /* User of CC0 depends on immediately preceding insn.  */
        SCHED_GROUP_P (insn) = 1;

        /* Make a copy of all dependencies on the immediately previous insn,
           and add to this insn.  This is so that all the dependencies will
           apply to the group.  */

        prev = PREV_INSN (insn);
        while (GET_CODE (prev) == NOTE)
          prev = PREV_INSN (prev);

        for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
          add_dependence (insn, XEXP (link, 0), GET_MODE (link));

        return;
      }
#endif

    case REG:
      {
        int regno = REGNO (x);
        if (regno < FIRST_PSEUDO_REGISTER)
          {
            int i;

            i = HARD_REGNO_NREGS (regno, GET_MODE (x));
            while (--i >= 0)
              {
                reg_last_uses[regno + i]
                  = gen_rtx (INSN_LIST, VOIDmode,
                             insn, reg_last_uses[regno + i]);
                if (reg_last_sets[regno + i])
                  add_dependence (insn, reg_last_sets[regno + i], 0);
                if ((call_used_regs[regno + i] || global_regs[regno + i])
                    && last_function_call)
                  /* Function calls clobber all call_used regs.  */
                  add_dependence (insn, last_function_call, REG_DEP_ANTI);
              }
          }
        else
          {
            reg_last_uses[regno]
              = gen_rtx (INSN_LIST, VOIDmode, insn, reg_last_uses[regno]);
            if (reg_last_sets[regno])
              add_dependence (insn, reg_last_sets[regno], 0);

            /* If the register does not already cross any calls, then add this
               insn to the sched_before_next_call list so that it will still
               not cross calls after scheduling.  */
            if (reg_n_calls_crossed[regno] == 0)
              add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
          }
        return;
      }

    case MEM:
      {
        /* Reading memory.  */

        rtx pending, pending_mem;

        pending = pending_read_insns;
        pending_mem = pending_read_mems;
        while (pending)
          {
            /* If a dependency already exists, don't create a new one.  */
            if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
              if (read_dependence (XEXP (pending_mem, 0), x))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        pending = pending_write_insns;
        pending_mem = pending_write_mems;
        while (pending)
          {
            /* If a dependency already exists, don't create a new one.  */
            if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
              if (true_dependence (XEXP (pending_mem, 0), x))
                add_dependence (insn, XEXP (pending, 0), 0);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }
        if (last_pending_memory_flush)
          add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
                                 insn, x);

        /* Take advantage of tail recursion here.  */
        sched_analyze_2 (XEXP (x, 0), insn);
        return;
      }

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
    case TRAP_IF:
      {
        rtx u;

        /* Traditional and volatile asm instructions must be considered to use
           and clobber all hard registers and all of memory.  So must
           TRAP_IF and UNSPEC_VOLATILE operations.  */
        if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
          {
            for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
              {
                for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
                  if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
                    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                reg_last_uses[i] = 0;
                if (reg_last_sets[i]
                    && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
                  add_dependence (insn, reg_last_sets[i], 0);
                reg_last_sets[i] = insn;
              }

            flush_pending_lists (insn);
          }

        /* For all ASM_OPERANDS, we must traverse the vector of input operands.
           We cannot just fall through here, since then we would be confused
           by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
           traditional asms unlike their normal usage.  */

        if (code == ASM_OPERANDS)
          {
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
              sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
            return;
          }
        break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These read and modify the result; just consider them writes.  */
      sched_analyze_1 (x, insn);
      return;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        sched_analyze_2 (XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (XVECEXP (x, i, j), insn);
    }
}

/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (x, insn)
     rtx x, insn;
{
  register RTX_CODE code = GET_CODE (x);
  rtx link;

  if (code == SET || code == CLOBBER)
    sched_analyze_1 (x, insn);
  else if (code == PARALLEL)
    {
      register int i;
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        {
          code = GET_CODE (XVECEXP (x, 0, i));
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (XVECEXP (x, 0, i), insn);
          else
            sched_analyze_2 (XVECEXP (x, 0, i), insn);
        }
    }
  else
    sched_analyze_2 (x, insn);

  /* Handle function calls.  */
  if (GET_CODE (insn) == CALL_INSN)
    {
      rtx dep_insn;
      rtx prev_dep_insn;

      /* When scheduling instructions, we make sure calls don't lose their
         accompanying USE insns by depending them one on another in order.  */

      prev_dep_insn = insn;
      dep_insn = PREV_INSN (insn);
      while (GET_CODE (dep_insn) == INSN
             && GET_CODE (PATTERN (dep_insn)) == USE)
        {
          SCHED_GROUP_P (prev_dep_insn) = 1;

          /* Make a copy of all dependencies on dep_insn, and add to insn.
             This is so that all of the dependencies will apply to the
             group.  */

          for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
            add_dependence (insn, XEXP (link, 0), GET_MODE (link));

          prev_dep_insn = dep_insn;
          dep_insn = PREV_INSN (dep_insn);
        }
    }
}

/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

static int
sched_analyze (head, tail)
     rtx head, tail;
{
  register rtx insn;
  register int n_insns = 0;
  register rtx u;
  register int luid = 0;

  for (insn = head; ; insn = NEXT_INSN (insn))
    {
      INSN_LUID (insn) = luid++;

      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
        {
          sched_analyze_insn (PATTERN (insn), insn);
          n_insns += 1;
        }
      else if (GET_CODE (insn) == CALL_INSN)
        {
          rtx dest = 0;
          rtx x;
          register int i;

          /* Any instruction using a hard register which may get clobbered
             by a call needs to be marked as dependent on this call.
             This prevents a use of a hard return reg from being moved
             past a void call (i.e. it does not explicitly set the hard
             return reg).  */

          for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
            if (call_used_regs[i] || global_regs[i])
              {
                for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
                  if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
                    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                reg_last_uses[i] = 0;
                if (reg_last_sets[i]
                    && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
                  add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
                reg_last_sets[i] = insn;
                /* Insn, being a CALL_INSN, magically depends on
                   `last_function_call' already.  */
              }

          /* For each insn which shouldn't cross a call, add a dependence
             between that insn and this call insn.  */
          x = LOG_LINKS (sched_before_next_call);
          while (x)
            {
              add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
              x = XEXP (x, 1);
            }
          LOG_LINKS (sched_before_next_call) = 0;

          sched_analyze_insn (PATTERN (insn), insn);

          /* We don't need to flush memory for a function call which does
             not involve memory.  */
          if (! CONST_CALL_P (insn))
            {
              /* In the absence of interprocedural alias analysis,
                 we must flush all pending reads and writes, and
                 start new dependencies starting from here.  */
              flush_pending_lists (insn);
            }

          /* Depend this function call (actually, the user of this
             function call) on all hard register clobberage.  */
          last_function_call = insn;
          n_insns += 1;
        }

      if (insn == tail)
        return n_insns;
    }
}
\f
/* Called when we see a set of a register.  If death is true, then we are
   scanning backwards.  Mark that register as unborn.  If nobody says
   otherwise, that is how things will remain.  If death is false, then we
   are scanning forwards.  Mark that register as being born.  */

static void
sched_note_set (b, x, death)
     int b;
     rtx x;
     int death;
{
  register int regno, j;
  register rtx reg = SET_DEST (x);
  int subreg_p = 0;

  if (reg == 0)
    return;

  while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
         || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
    {
      /* Must treat modification of just one hardware register of a multi-reg
         value or just a byte field of a register exactly the same way that
         mark_set_1 in flow.c does.  */
      if (GET_CODE (reg) == ZERO_EXTRACT
          || GET_CODE (reg) == SIGN_EXTRACT
          || (GET_CODE (reg) == SUBREG
              && REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg)))
        subreg_p = 1;

      reg = SUBREG_REG (reg);
    }

  if (GET_CODE (reg) != REG)
    return;

  /* Global registers are always live, so the code below does not apply
     to them.  */

  regno = REGNO (reg);
  if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
    {
      register int offset = regno / REGSET_ELT_BITS;
      register REGSET_ELT_TYPE bit
        = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);

      if (death)
        {
          /* If we only set part of the register, then this set does not
             kill it.  */
          if (subreg_p)
            return;

          /* Try killing this register.  */
          if (regno < FIRST_PSEUDO_REGISTER)
            {
              int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
              while (--j >= 0)
                {
                  offset = (regno + j) / REGSET_ELT_BITS;
                  bit = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);

                  bb_live_regs[offset] &= ~bit;
                  bb_dead_regs[offset] |= bit;
                }
            }
          else
            {
              bb_live_regs[offset] &= ~bit;
              bb_dead_regs[offset] |= bit;
            }
        }
      else
        {
          /* Make the register live again.  */
          if (regno < FIRST_PSEUDO_REGISTER)
            {
              int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
              while (--j >= 0)
                {
                  offset = (regno + j) / REGSET_ELT_BITS;
                  bit = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);

                  bb_live_regs[offset] |= bit;
                  bb_dead_regs[offset] &= ~bit;
                }
            }
          else
            {
              bb_live_regs[offset] |= bit;
              bb_dead_regs[offset] &= ~bit;
            }
        }
    }
}
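
/* Illustrative sketch, hypothetical and never compiled: the regset bit
   arithmetic used above.  Register N lives at word N / BITS, bit
   N % BITS; killing a register clears it in the live set and sets it
   in the dead set.  */
#if 0
#include <stdio.h>

#define BITS 32

static unsigned live[4], dead[4];

static void
kill_reg (int n)
{
  int offset = n / BITS;
  unsigned bit = (unsigned) 1 << (n % BITS);

  live[offset] &= ~bit;
  dead[offset] |= bit;
}

int
main ()
{
  live[1] = 1 << 2;   /* mark register 34 (word 1, bit 2) live */
  kill_reg (34);
  printf ("live word 1: %#x, dead word 1: %#x\n", live[1], dead[1]);
  return 0;
}
#endif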
1735\f
1736/* Macros and functions for keeping the priority queue sorted, and
1737 dealing with queueing and unqueueing of instructions. */
1738
1739#define SCHED_SORT(READY, NEW_READY, OLD_READY) \
1740 do { if ((NEW_READY) - (OLD_READY) == 1) \
1741 swap_sort (READY, NEW_READY); \
1742 else if ((NEW_READY) - (OLD_READY) > 1) \
1743 qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); } \
1744 while (0)
1745
1746/* Returns a positive value if y is preferred; returns a negative value if
1747 x is preferred. Should never return 0, since that will make the sort
1748 unstable. */
1749
1750static int
1751rank_for_schedule (x, y)
1752 rtx *x, *y;
1753{
1754 rtx tmp = *y;
1755 rtx tmp2 = *x;
1756 rtx tmp_dep, tmp2_dep;
1757 int tmp_class, tmp2_class;
1758 int value;
1759
1760 /* Choose the instruction with the highest priority, if different. */
1761 if (value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2))
1762 return value;
1763
1764 if (last_scheduled_insn)
1765 {
1766 /* Classify the instructions into three classes:
1767 1) Data dependent on last schedule insn.
1768 2) Anti/Output dependent on last scheduled insn.
1769 3) Independent of last scheduled insn, or has latency of one.
1770 Choose the insn from the highest numbered class if different. */
1771 tmp_dep = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
1772 if (tmp_dep == 0 || insn_cost (tmp) == 1)
1773 tmp_class = 3;
1774 else if (REG_NOTE_KIND (tmp_dep) == 0)
1775 tmp_class = 1;
1776 else
1777 tmp_class = 2;
1778
1779 tmp2_dep = find_insn_list (tmp2, LOG_LINKS (last_scheduled_insn));
1780 if (tmp2_dep == 0 || insn_cost (tmp2) == 1)
1781 tmp2_class = 3;
1782 else if (REG_NOTE_KIND (tmp2_dep) == 0)
1783 tmp2_class = 1;
1784 else
1785 tmp2_class = 2;
1786
1787 if (value = tmp_class - tmp2_class)
1788 return value;
1789 }
1790
1791 /* If insns are equally good, sort by INSN_LUID (original insn order),
1792 so that we make the sort stable. This minimizes instruction movement,
1793 thus minimizing sched's effect on debugging and cross-jumping. */
1794 return INSN_LUID (tmp) - INSN_LUID (tmp2);
1795}
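
As a standalone check of the comparator contract just described (prefer higher priority, never return 0, break final ties on original order), here is a reduced version with a toy record in place of rtx. Only the priority and LUID comparisons are modeled; the dependence-class step, which needs last_scheduled_insn, is omitted, and the real comparator receives pointers into a vector of rtx rather than the records themselves.

#include <stdio.h>
#include <stdlib.h>

struct toy_insn { int priority; int luid; };

/* Same contract as rank_for_schedule: positive means the second
   argument is preferred; never 0, so ties cannot be reordered.  */
static int
toy_rank (const void *xp, const void *yp)
{
  const struct toy_insn *x = xp, *y = yp;
  int value;

  if ((value = y->priority - x->priority))
    return value;
  return y->luid - x->luid;	/* final tie-break on original order */
}

int
main (void)
{
  struct toy_insn ready[] = { {2, 10}, {5, 11}, {5, 12}, {1, 13} };
  int i, n = sizeof ready / sizeof *ready;

  qsort (ready, n, sizeof *ready, toy_rank);
  for (i = 0; i < n; i++)
    printf ("priority %d, luid %d\n", ready[i].priority, ready[i].luid);
  return 0;
}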
1796
1797/* Resort the array A in which only the last element (at index N-1) may be out of order. */
1798
1799__inline static void
1800swap_sort (a, n)
1801 rtx *a;
1802 int n;
1803{
1804 rtx insn = a[n-1];
1805 int i = n-2;
1806
1807 while (i >= 0 && rank_for_schedule (a+i, &insn) >= 0)
1808 {
1809 a[i+1] = a[i];
1810 i -= 1;
1811 }
1812 a[i+1] = insn;
1813}
1814
1815static int max_priority;
1816
1817/* Add INSN to the insn queue so that it fires at least N_CYCLES
1818 before the currently executing insn. */
1819
1820__inline static void
1821queue_insn (insn, n_cycles)
1822 rtx insn;
1823 int n_cycles;
1824{
1825 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1826 NEXT_INSN (insn) = insn_queue[next_q];
1827 insn_queue[next_q] = insn;
1828 q_size += 1;
1829}
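
The queue just used is a circular array of buckets indexed by future cycles: NEXT_Q_AFTER wraps the index, and insns that mature in the same cycle are chained through their NEXT_INSN field. Below is a self-contained sketch of the same structure; Q_SIZE, struct node, enqueue, and the drain loop are stand-ins for this example (the real queue is sized by the longest latency, and the drain corresponds to the per-cycle step in schedule_block).

#include <stdio.h>
#include <stddef.h>

#define Q_SIZE 8	/* stand-in; a power of two larger than any latency */
#define NEXT_Q(p)          (((p) + 1) & (Q_SIZE - 1))
#define NEXT_Q_AFTER(p, n) (((p) + (n)) & (Q_SIZE - 1))

struct node { int uid; struct node *next; };

static struct node *queue[Q_SIZE];
static int q_ptr, q_size;

/* File INSN so that it surfaces N_CYCLES from now.  */
static void
enqueue (struct node *insn, int n_cycles)
{
  int slot = NEXT_Q_AFTER (q_ptr, n_cycles);

  insn->next = queue[slot];
  queue[slot] = insn;
  q_size += 1;
}

int
main (void)
{
  struct node a = { 1, NULL }, b = { 2, NULL };
  int cycle;

  enqueue (&a, 2);
  enqueue (&b, 1);

  /* Advance one cycle at a time, emptying the bucket that matures.  */
  for (cycle = 1; q_size > 0; cycle++)
    {
      struct node *p;

      q_ptr = NEXT_Q (q_ptr);
      for (p = queue[q_ptr]; p; p = p->next)
	{
	  printf ("cycle %d: insn %d becomes ready\n", cycle, p->uid);
	  q_size -= 1;
	}
      queue[q_ptr] = NULL;
    }
  return 0;
}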
1830
1831/* Return nonzero if PAT is the pattern of an insn which makes a
1832 register live. */
1833
1834__inline static int
1835birthing_insn_p (pat)
1836 rtx pat;
1837{
1838 int j;
1839
1840 if (reload_completed == 1)
1841 return 0;
1842
1843 if (GET_CODE (pat) == SET
1844 && GET_CODE (SET_DEST (pat)) == REG)
1845 {
1846 rtx dest = SET_DEST (pat);
1847 int i = REGNO (dest);
1848 int offset = i / REGSET_ELT_BITS;
1849 REGSET_ELT_TYPE bit = (REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS);
1850
1851 /* It would be more accurate to use refers_to_regno_p or
1852 reg_mentioned_p to determine when the dest is not live before this
1853 insn. */
1854
1855 if (bb_live_regs[offset] & bit)
1856 return (reg_n_sets[i] == 1);
1857
1858 return 0;
1859 }
1860 if (GET_CODE (pat) == PARALLEL)
1861 {
1862 for (j = 0; j < XVECLEN (pat, 0); j++)
1863 if (birthing_insn_p (XVECEXP (pat, 0, j)))
1864 return 1;
1865 }
1866 return 0;
1867}
1868
1869/* If PREV is an insn which is immediately ready to execute, return 1,
1870 otherwise return 0. We may adjust its priority if that will help shorten
1871 register lifetimes. */
1872
1873static int
1874launch_link (prev)
1875 rtx prev;
1876{
1877 rtx pat = PATTERN (prev);
1878 rtx note;
1879 /* MAX of (a) number of cycles needed by prev
1880 (b) number of cycles before needed resources are free. */
1881 int n_cycles = insn_cost (prev);
1882 int n_deaths = 0;
1883
1884 /* Trying to shorten register lives after reload has completed
1885 is useless and wrong. It gives inaccurate schedules. */
1886 if (reload_completed == 0)
1887 {
1888 for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
1889 if (REG_NOTE_KIND (note) == REG_DEAD)
1890 n_deaths += 1;
1891
1892 /* Defer scheduling insns which kill registers, since that
1893 shortens register lives. Prefer scheduling insns which
1894 make registers live for the same reason. */
1895 switch (n_deaths)
1896 {
1897 default:
1898 INSN_PRIORITY (prev) >>= 3;
1899 break;
1900 case 3:
1901 INSN_PRIORITY (prev) >>= 2;
1902 break;
1903 case 2:
1904 case 1:
1905 INSN_PRIORITY (prev) >>= 1;
1906 break;
1907 case 0:
1908 if (birthing_insn_p (pat))
1909 {
1910 int max = max_priority;
1911
1912 if (max > INSN_PRIORITY (prev))
1913 INSN_PRIORITY (prev) = max;
1914 }
1915 break;
1916 }
1917 }
1918
1919 if (n_cycles <= 1)
1920 return 1;
1921 queue_insn (prev, n_cycles);
1922 return 0;
1923}
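
The switch above scales a ready insn's priority down by a power of two per killed register, and leaves birth-only insns eligible for a raise to max_priority. A minimal rendering of just that mapping; adjust_priority is a hypothetical name, and the shift amounts are copied from launch_link.

#include <stdio.h>

static int
adjust_priority (int priority, int n_deaths)
{
  switch (n_deaths)
    {
    default: return priority >> 3;	/* four or more deaths */
    case 3:  return priority >> 2;
    case 2:
    case 1:  return priority >> 1;
    case 0:  return priority;		/* births may raise it instead */
    }
}

int
main (void)
{
  int d;

  for (d = 0; d <= 4; d++)
    printf ("priority 32, %d deaths -> %d\n", d, adjust_priority (32, d));
  return 0;
}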
1924
1925/* INSN is the "currently executing insn". Launch each insn which was
1926 waiting on INSN (in the backwards dataflow sense). READY is a
1927 vector of insns which are ready to fire. N_READY is the number of
1928 elements in READY. */
1929
1930static int
1931launch_links (insn, ready, n_ready)
1932 rtx insn;
1933 rtx *ready;
1934 int n_ready;
1935{
1936 rtx link;
1937 int new_ready = n_ready;
1938
1939 if (LOG_LINKS (insn) == 0)
1940 return n_ready;
1941
1942 /* This is used by the function launch_link above. */
1943 if (n_ready > 0)
1944 max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
1945 else
1946 max_priority = INSN_PRIORITY (insn);
1947
1948 for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
1949 {
1950 rtx prev = XEXP (link, 0);
1951
1952 if ((INSN_REF_COUNT (prev) -= 1) == 0 && launch_link (prev))
1953 ready[new_ready++] = prev;
1954 }
1955
1956 return new_ready;
1957}
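
This is the reference-count half of list scheduling: each insn carries a count of not-yet-scheduled insns that depend on it, and only on reaching zero does it become a candidate for READY. A toy version of the INSN_REF_COUNT bookkeeping follows, with plain ints standing in for insns; launch_link's latency test and priority adjustment are omitted.

#include <stdio.h>

#define N_INSNS 4

static int ref_count[N_INSNS];	/* unscheduled consumers per insn */

/* A predecessor joins the ready list only when its last consumer
   has been scheduled.  */
static int
release (int insn, int *ready, int n_ready)
{
  if (--ref_count[insn] == 0)
    ready[n_ready++] = insn;
  return n_ready;
}

int
main (void)
{
  int ready[N_INSNS], n_ready = 0, i;

  ref_count[0] = 2;		/* insn 0 feeds two later insns */
  ref_count[1] = 1;

  n_ready = release (0, ready, n_ready);   /* first consumer retires */
  n_ready = release (1, ready, n_ready);
  n_ready = release (0, ready, n_ready);   /* second consumer retires */

  for (i = 0; i < n_ready; i++)
    printf ("now ready: insn %d\n", ready[i]);
  return 0;
}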
1958
1959/* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
1960 dead_notes list. */
1961
1962static void
1963create_reg_dead_note (reg, insn)
1964 rtx reg, insn;
1965{
1966 rtx link = dead_notes;
1967
1968 if (link == 0)
1969 /* In theory, we should not end up with more REG_DEAD reg notes than we
1970 started with. In practice, this can occur as the result of bugs in
1971 flow, combine and/or sched. */
1972 {
1973#if 1
1974 abort ();
1975#else
1976 link = rtx_alloc (EXPR_LIST);
1977 PUT_REG_NOTE_KIND (link, REG_DEAD);
1978#endif
1979 }
1980 else
1981 dead_notes = XEXP (dead_notes, 1);
1982
1983 XEXP (link, 0) = reg;
1984 XEXP (link, 1) = REG_NOTES (insn);
1985 REG_NOTES (insn) = link;
1986}
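
The function above recycles EXPR_LIST nodes from the dead_notes free list instead of allocating. A generic sketch of the pattern; struct note, free_notes, and get_dead_note are stand-ins, and where this sketch falls back to malloc, sched.c deliberately aborts, since exhausting the list indicates a bug in an earlier pass.

#include <stdio.h>
#include <stdlib.h>

struct note { int regno; struct note *next; };

static struct note *free_notes;		/* analogue of dead_notes */

/* Pop a recycled note if one exists; otherwise allocate a fresh one.  */
static struct note *
get_dead_note (int regno, struct note *chain)
{
  struct note *link = free_notes;

  if (link == NULL)
    {
      link = malloc (sizeof *link);
      if (link == NULL)
	abort ();
    }
  else
    free_notes = free_notes->next;

  link->regno = regno;
  link->next = chain;			/* splice onto the insn's notes */
  return link;
}

int
main (void)
{
  struct note spare = { 0, NULL };
  struct note *notes = NULL;

  free_notes = &spare;			/* one recycled note available */
  notes = get_dead_note (7, notes);	/* reuses SPARE */
  notes = get_dead_note (8, notes);	/* falls back to malloc */
  printf ("newest note is for reg %d\n", notes->regno);
  return 0;
}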
1987
1988/* Subroutine of attach_deaths_insn--handles the recursive search
1989 through X. If SET_P is true, then X is being modified by the insn. */
1990
1991static void
1992attach_deaths (x, insn, set_p)
1993 rtx x;
1994 rtx insn;
1995 int set_p;
1996{
1997 register int i;
1998 register int j;
1999 register enum rtx_code code;
2000 register char *fmt;
2001
2002 if (x == 0)
2003 return;
2004
2005 code = GET_CODE (x);
2006
2007 switch (code)
2008 {
2009 case CONST_INT:
2010 case CONST_DOUBLE:
2011 case LABEL_REF:
2012 case SYMBOL_REF:
2013 case CONST:
2014 case CODE_LABEL:
2015 case PC:
2016 case CC0:
2017 /* Get rid of the easy cases first. */
2018 return;
2019
2020 case REG:
2021 {
2022 /* If the register dies in this insn, queue that note, and mark
2023 this register as needing to die. */
2024 /* This code is very similar to mark_used_1 (if set_p is false)
2025 and mark_set_1 (if set_p is true) in flow.c. */
2026
2027 register int regno = REGNO (x);
2028 register int offset = regno / REGSET_ELT_BITS;
2029 register REGSET_ELT_TYPE bit
2030 = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
2031 REGSET_ELT_TYPE all_needed = (old_live_regs[offset] & bit);
2032 REGSET_ELT_TYPE some_needed = (old_live_regs[offset] & bit);
2033
2034 if (set_p)
2035 return;
2036
2037 if (regno < FIRST_PSEUDO_REGISTER)
2038 {
2039 int n;
2040
2041 n = HARD_REGNO_NREGS (regno, GET_MODE (x));
2042 while (--n > 0)
2043 {
2044 some_needed |= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
2045 & ((REGSET_ELT_TYPE) 1
2046 << ((regno + n) % REGSET_ELT_BITS)));
2047 all_needed &= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
2048 & ((REGSET_ELT_TYPE) 1
2049 << ((regno + n) % REGSET_ELT_BITS)));
2050 }
2051 }
2052
2053 /* If it wasn't live before we started, then add a REG_DEAD note.
2054 We must check the previous lifetime info not the current info,
2055 because we may have to execute this code several times, e.g.
2056 once for a clobber (which doesn't add a note) and later
2057 for a use (which does add a note).
2058
2059 Always make the register live. We must do this even if it was
2060 live before, because this may be an insn which sets and uses
2061 the same register, in which case the register has already been
2062 killed, so we must make it live again.
2063
2064 Global registers are always live, and should never have a REG_DEAD
2065 note added for them, so none of the code below applies to them. */
2066
2067 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
2068 {
2069 /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2070 STACK_POINTER_REGNUM, since these are always considered to be
2071 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2072 if (regno != FRAME_POINTER_REGNUM
2073#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2074 && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
2075#endif
2076 && regno != STACK_POINTER_REGNUM)
2077 {
2078 if (! all_needed && ! dead_or_set_p (insn, x))
2079 {
2080 /* If none of the words in X is needed, make a REG_DEAD
2081 note. Otherwise, we must make partial REG_DEAD
2082 notes. */
2083 if (! some_needed)
2084 create_reg_dead_note (x, insn);
2085 else
2086 {
2087 int i;
2088
2089 /* Don't make a REG_DEAD note for a part of a
2090 register that is set in the insn. */
2091 for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
2092 i >= 0; i--)
2093 if ((old_live_regs[(regno + i) / REGSET_ELT_BITS]
2094 & ((REGSET_ELT_TYPE) 1
2095 << ((regno + i) % REGSET_ELT_BITS))) == 0
2096 && ! dead_or_set_regno_p (insn, regno + i))
2097 create_reg_dead_note (gen_rtx (REG, word_mode,
2098 regno + i),
2099 insn);
2100 }
2101 }
2102 }
2103
2104 if (regno < FIRST_PSEUDO_REGISTER)
2105 {
2106 int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
2107 while (--j >= 0)
2108 {
2109 offset = (regno + j) / REGSET_ELT_BITS;
2110 bit
2111 = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);
2112
2113 bb_dead_regs[offset] &= ~bit;
2114 bb_live_regs[offset] |= bit;
2115 }
2116 }
2117 else
2118 {
2119 bb_dead_regs[offset] &= ~bit;
2120 bb_live_regs[offset] |= bit;
2121 }
2122 }
2123 return;
2124 }
2125
2126 case MEM:
2127 /* Handle tail-recursive case. */
2128 attach_deaths (XEXP (x, 0), insn, 0);
2129 return;
2130
2131 case SUBREG:
2132 case STRICT_LOW_PART:
2133 /* These two cases preserve the value of SET_P, so handle them
2134 separately. */
2135 attach_deaths (XEXP (x, 0), insn, set_p);
2136 return;
2137
2138 case ZERO_EXTRACT:
2139 case SIGN_EXTRACT:
2140 /* This case preserves the value of SET_P for the first operand, but
2141 clears it for the other two. */
2142 attach_deaths (XEXP (x, 0), insn, set_p);
2143 attach_deaths (XEXP (x, 1), insn, 0);
2144 attach_deaths (XEXP (x, 2), insn, 0);
2145 return;
2146
2147 default:
2148 /* Other cases: walk the insn. */
2149 fmt = GET_RTX_FORMAT (code);
2150 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2151 {
2152 if (fmt[i] == 'e')
2153 attach_deaths (XEXP (x, i), insn, 0);
2154 else if (fmt[i] == 'E')
2155 for (j = 0; j < XVECLEN (x, i); j++)
2156 attach_deaths (XVECEXP (x, i, j), insn, 0);
2157 }
2158 }
2159}
2160
2161/* After INSN has executed, add register death notes for each register
2162 that is dead after INSN. */
2163
2164static void
2165attach_deaths_insn (insn)
2166 rtx insn;
2167{
2168 rtx x = PATTERN (insn);
2169 register RTX_CODE code = GET_CODE (x);
2170
2171 if (code == SET)
2172 {
2173 attach_deaths (SET_SRC (x), insn, 0);
2174
2175 /* A register might die here even if it is the destination, e.g.
2176 it is the target of a volatile read and is otherwise unused.
2177 Hence we must always call attach_deaths for the SET_DEST. */
2178 attach_deaths (SET_DEST (x), insn, 1);
2179 }
2180 else if (code == PARALLEL)
2181 {
2182 register int i;
2183 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2184 {
2185 code = GET_CODE (XVECEXP (x, 0, i));
2186 if (code == SET)
2187 {
2188 attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
2189
2190 attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
2191 }
2192 else if (code == CLOBBER)
2193 attach_deaths (XEXP (XVECEXP (x, 0, i), 0), insn, 1);
2194 else
2195 attach_deaths (XVECEXP (x, 0, i), insn, 0);
2196 }
2197 }
2198 else if (code == CLOBBER)
2199 attach_deaths (XEXP (x, 0), insn, 1);
2200 else
2201 attach_deaths (x, insn, 0);
2202}
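
attach_deaths_insn's job is purely structural: find every SET and CLOBBER inside the pattern, scanning sources as uses and destinations as sets, recursing through PARALLEL. Here is a toy dispatch of the same shape; enum code, struct pat, and walk are stand-ins for the rtl codes and the attach_deaths calls.

#include <stdio.h>

enum code { SET, CLOBBER, PARALLEL };

struct pat
{
  enum code code;
  int n;			/* sub-patterns, when PARALLEL */
  struct pat *vec[4];
};

static void
walk (struct pat *x)
{
  int i;

  switch (x->code)
    {
    case SET:
      printf ("SET: scan source as a use, dest as a set\n");
      break;
    case CLOBBER:
      printf ("CLOBBER: scan operand as a set\n");
      break;
    case PARALLEL:
      for (i = x->n - 1; i >= 0; i--)
	walk (x->vec[i]);
      break;
    }
}

int
main (void)
{
  struct pat set = { SET, 0, { 0 } };
  struct pat clob = { CLOBBER, 0, { 0 } };
  struct pat par = { PARALLEL, 2, { &set, &clob } };

  walk (&par);
  return 0;
}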
2203
2204/* Delete notes beginning with INSN and maybe put them in the chain
2205 of notes ended by NOTE_LIST.
2206 Returns the insn following the notes. */
2207
2208static rtx
2209unlink_notes (insn, tail)
2210 rtx insn, tail;
2211{
2212 rtx prev = PREV_INSN (insn);
2213
2214 while (insn != tail && GET_CODE (insn) == NOTE)
2215 {
2216 rtx next = NEXT_INSN (insn);
2217 /* Delete the note from its current position. */
2218 if (prev)
2219 NEXT_INSN (prev) = next;
2220 if (next)
2221 PREV_INSN (next) = prev;
2222
2223 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
2224 /* Record line-number notes so they can be reused. */
2225 LINE_NOTE (insn) = insn;
2226 else
2227 {
2228 /* Insert the note at the end of the notes list. */
2229 PREV_INSN (insn) = note_list;
2230 if (note_list)
2231 NEXT_INSN (note_list) = insn;
2232 note_list = insn;
2233 }
2234
2235 insn = next;
2236 }
2237 return insn;
2238}
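
The splice performed above is ordinary doubly linked list removal over the NEXT_INSN/PREV_INSN pointers, followed by re-threading the note onto another chain. The removal step in isolation, with toy types standing in for rtx:

#include <stdio.h>
#include <stddef.h>

struct node { int uid; struct node *prev, *next; };

static void
unlink_node (struct node *n)
{
  if (n->prev)
    n->prev->next = n->next;
  if (n->next)
    n->next->prev = n->prev;
  n->prev = n->next = NULL;	/* ready to be re-threaded elsewhere */
}

int
main (void)
{
  struct node a = { 1, NULL, NULL }, b = { 2, NULL, NULL },
	      c = { 3, NULL, NULL };
  struct node *p;

  a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;
  unlink_node (&b);		/* the chain is now a <-> c */

  for (p = &a; p; p = p->next)
    printf ("uid %d\n", p->uid);
  return 0;
}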
2239
2240/* Data structure for keeping track of register information
2241 during that register's life. */
2242
2243struct sometimes
2244{
2245 short offset; short bit;
2246 short live_length; short calls_crossed;
2247};
2248
2249/* Constructor for `sometimes' data structure. */
2250
2251static int
2252new_sometimes_live (regs_sometimes_live, offset, bit, sometimes_max)
2253 struct sometimes *regs_sometimes_live;
2254 int offset, bit;
2255 int sometimes_max;
2256{
2257 register struct sometimes *p;
2258 register int regno = offset * REGSET_ELT_BITS + bit;
2259 int i;
2260
2261 /* There should never be a register greater than max_regno here. If there
2262 is, it means that a define_split has created a new pseudo reg. This
2263 is not allowed, since there will not be flow info available for any
2264 new register, so catch the error here. */
2265 if (regno >= max_regno)
2266 abort ();
2267
2268 p = &regs_sometimes_live[sometimes_max];
2269 p->offset = offset;
2270 p->bit = bit;
2271 p->live_length = 0;
2272 p->calls_crossed = 0;
2273 sometimes_max++;
2274 return sometimes_max;
2275}
2276
2277/* Count lengths of all regs we are currently tracking,
2278 and find new registers no longer live. */
2279
2280static void
2281finish_sometimes_live (regs_sometimes_live, sometimes_max)
2282 struct sometimes *regs_sometimes_live;
2283 int sometimes_max;
2284{
2285 int i;
2286
2287 for (i = 0; i < sometimes_max; i++)
2288 {
2289 register struct sometimes *p = &regs_sometimes_live[i];
2290 int regno;
2291
2292 regno = p->offset * REGSET_ELT_BITS + p->bit;
2293
2294 sched_reg_live_length[regno] += p->live_length;
2295 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2296 }
2297}
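
A struct sometimes names a register by its regset coordinates instead of its number, and the two encodings round-trip as regno = offset * REGSET_ELT_BITS + bit. A short demonstration, with REGSET_ELT_BITS fixed at 32 for the example only:

#include <stdio.h>

#define REGSET_ELT_BITS 32	/* stand-in; the real value is configured */

int
main (void)
{
  int regno = 70;
  int offset = regno / REGSET_ELT_BITS;	/* word index: 2 */
  int bit = regno % REGSET_ELT_BITS;	/* bit within word: 6 */

  printf ("regno %d -> offset %d, bit %d -> regno %d\n",
	  regno, offset, bit, offset * REGSET_ELT_BITS + bit);
  return 0;
}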
2298
2299/* Use modified list scheduling to rearrange insns in basic block
2300 B. FILE, if nonzero, is where we dump interesting output about
2301 this pass. */
2302
2303static void
2304schedule_block (b, file)
2305 int b;
2306 FILE *file;
2307{
2308 rtx insn, last;
2309 rtx last_note = 0;
2310 rtx *ready, link;
2311 int i, j, n_ready = 0, new_ready, n_insns = 0;
2312 int sched_n_insns = 0;
2313#define NEED_NOTHING 0
2314#define NEED_HEAD 1
2315#define NEED_TAIL 2
2316 int new_needs;
2317
2318 /* HEAD and TAIL delimit the region being scheduled. */
2319 rtx head = basic_block_head[b];
2320 rtx tail = basic_block_end[b];
2321 /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
2322 being scheduled. When the insns have been ordered,
2323 these insns delimit where the new insns are to be
2324 spliced back into the insn chain. */
2325 rtx next_tail;
2326 rtx prev_head;
2327
2328 /* Keep life information accurate. */
2329 register struct sometimes *regs_sometimes_live;
2330 int sometimes_max;
2331
2332 if (file)
2333 fprintf (file, ";;\t -- basic block number %d from %d to %d --\n",
2334 b, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
2335
2336 i = max_reg_num ();
2337 reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
2338 bzero (reg_last_uses, i * sizeof (rtx));
2339 reg_last_sets = (rtx *) alloca (i * sizeof (rtx));
2340 bzero (reg_last_sets, i * sizeof (rtx));
2341
2342 /* Remove certain insns at the beginning from scheduling,
2343 by advancing HEAD. */
2344
2345 /* At the start of a function, before reload has run, don't delay getting
2346 parameters from hard registers into pseudo registers. */
2347 if (reload_completed == 0 && b == 0)
2348 {
2349 while (head != tail
2350 && GET_CODE (head) == NOTE
2351 && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
2352 head = NEXT_INSN (head);
2353 while (head != tail
2354 && GET_CODE (head) == INSN
2355 && GET_CODE (PATTERN (head)) == SET)
2356 {
2357 rtx src = SET_SRC (PATTERN (head));
2358 while (GET_CODE (src) == SUBREG
2359 || GET_CODE (src) == SIGN_EXTEND
2360 || GET_CODE (src) == ZERO_EXTEND
2361 || GET_CODE (src) == SIGN_EXTRACT
2362 || GET_CODE (src) == ZERO_EXTRACT)
2363 src = XEXP (src, 0);
2364 if (GET_CODE (src) != REG
2365 || REGNO (src) >= FIRST_PSEUDO_REGISTER)
2366 break;
2367 /* Keep this insn from ever being scheduled. */
2368 INSN_REF_COUNT (head) = 1;
2369 head = NEXT_INSN (head);
2370 }
2371 }
2372
2373 /* Don't include any notes or labels at the beginning of the
2374 basic block, or notes at the ends of basic blocks. */
2375 while (head != tail)
2376 {
2377 if (GET_CODE (head) == NOTE)
2378 head = NEXT_INSN (head);
2379 else if (GET_CODE (tail) == NOTE)
2380 tail = PREV_INSN (tail);
2381 else if (GET_CODE (head) == CODE_LABEL)
2382 head = NEXT_INSN (head);
2383 else break;
2384 }
2385 /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
2386 to schedule this block. */
2387 if (head == tail
2388 && (GET_CODE (head) == NOTE || GET_CODE (head) == CODE_LABEL))
2389 return;
2390
2391#if 0
2392 /* This short-cut doesn't work. It does not count call insns crossed by
2393 registers in reg_sometimes_live. It does not mark these registers as
2394 dead if they die in this block. It does not mark these registers live
2395 (or create new reg_sometimes_live entries if necessary) if they are born
2396 in this block.
2397
2398 The easy solution is to just always schedule a block. This block only
2399 has one insn, so this won't slow down this pass by much. */
2400
2401 if (head == tail)
2402 return;
2403#endif
2404
2405 /* Now HEAD through TAIL are the insns actually to be rearranged;
2406 Let PREV_HEAD and NEXT_TAIL enclose them. */
2407 prev_head = PREV_INSN (head);
2408 next_tail = NEXT_INSN (tail);
2409
2410 /* Initialize basic block data structures. */
2411 dead_notes = 0;
2412 pending_read_insns = 0;
2413 pending_read_mems = 0;
2414 pending_write_insns = 0;
2415 pending_write_mems = 0;
2416 pending_lists_length = 0;
2417 last_pending_memory_flush = 0;
2418 last_function_call = 0;
2419 last_scheduled_insn = 0;
2420
2421 LOG_LINKS (sched_before_next_call) = 0;
2422
2423 n_insns += sched_analyze (head, tail);
2424 if (n_insns == 0)
2425 {
2426 free_pending_lists ();
2427 return;
2428 }
2429
2430 /* Allocate vector to hold insns to be rearranged (except those
2431 insns which are controlled by an insn with SCHED_GROUP_P set).
2432 All these insns are included between ORIG_HEAD and ORIG_TAIL,
2433 as those variables ultimately are set up. */
2434 ready = (rtx *) alloca ((n_insns+1) * sizeof (rtx));
2435
2436 /* TAIL is now the last of the insns to be rearranged.
2437 Put those insns into the READY vector. */
2438 insn = tail;
2439
2440 /* For all branches, calls, uses, and cc0 setters, force them to remain
2441 in order at the end of the block by giving them high priorities.
2442 There may be notes present, and prev_head may also be a note.
2443
2444 Branches must obviously remain at the end. Calls should remain at the
2445 end since moving them results in worse register allocation. Uses remain
2446 at the end to ensure proper register allocation. cc0 setters remain
2447 at the end because they can't be moved away from their cc0 user. */
2448 i = 0;
2449 while (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
2450 || (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE)
2451#ifdef HAVE_cc0
2452 || sets_cc0_p (PATTERN (insn))
2453#endif
2454 || GET_CODE (insn) == NOTE)
2455 {
2456 if (GET_CODE (insn) != NOTE)
2457 {
2458 priority (insn);
2459 ready[n_ready++] = insn;
2460 INSN_PRIORITY (insn) = TAIL_PRIORITY - i;
2461 i++;
2462 INSN_REF_COUNT (insn) = 0;
2463
2464 /* Skip over insns that are part of a group. */
2465 while (SCHED_GROUP_P (insn))
2466 {
2467 insn = prev_nonnote_insn (insn);
2468 priority (insn);
2469 }
2470 }
2471
2472 insn = PREV_INSN (insn);
2473 /* Don't overrun the bounds of the basic block. */
2474 if (insn == prev_head)
2475 break;
2476 }
2477
2478 /* Assign priorities to instructions. Also check whether they
2479 are in priority order already. If so then I will be nonnegative.
2480 We use this shortcut only before reloading. */
2481#if 0
2482 i = reload_completed ? DONE_PRIORITY : MAX_PRIORITY;
2483#endif
2484
2485 for (; insn != prev_head; insn = PREV_INSN (insn))
2486 {
2487 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2488 {
2489 priority (insn);
2490 if (INSN_REF_COUNT (insn) == 0)
2491 ready[n_ready++] = insn;
2492 if (SCHED_GROUP_P (insn))
2493 {
2494 while (SCHED_GROUP_P (insn))
2495 {
2496 insn = PREV_INSN (insn);
2497 while (GET_CODE (insn) == NOTE)
2498 insn = PREV_INSN (insn);
2499 priority (insn);
2500 }
2501 continue;
2502 }
2503#if 0
2504 if (i < 0)
2505 continue;
2506 if (INSN_PRIORITY (insn) < i)
2507 i = INSN_PRIORITY (insn);
2508 else if (INSN_PRIORITY (insn) > i)
2509 i = DONE_PRIORITY;
2510#endif
2511 }
2512 }
2513
2514#if 0
2515 /* This short-cut doesn't work. It does not count call insns crossed by
2516 registers in reg_sometimes_live. It does not mark these registers as
2517 dead if they die in this block. It does not mark these registers live
2518 (or create new reg_sometimes_live entries if necessary) if they are born
2519 in this block.
2520
2521 The easy solution is to just always schedule a block. These blocks tend
2522 to be very short, so this doesn't slow down this pass by much. */
2523
2524 /* If existing order is good, don't bother to reorder. */
2525 if (i != DONE_PRIORITY)
2526 {
2527 if (file)
2528 fprintf (file, ";; already scheduled\n");
2529
2530 if (reload_completed == 0)
2531 {
2532 for (i = 0; i < sometimes_max; i++)
2533 regs_sometimes_live[i].live_length += n_insns;
2534
2535 finish_sometimes_live (regs_sometimes_live, sometimes_max);
2536 }
2537 free_pending_lists ();
2538 return;
2539 }
2540#endif
2541
2542 /* Scan all the insns to be scheduled, removing NOTE insns
2543 and register death notes.
2544 Line number NOTE insns end up in NOTE_LIST.
2545 Register death notes end up in DEAD_NOTES.
2546
2547 Recreate the register life information for the end of this basic
2548 block. */
2549
2550 if (reload_completed == 0)
2551 {
2552 bcopy (basic_block_live_at_start[b], bb_live_regs, regset_bytes);
2553 bzero (bb_dead_regs, regset_bytes);
2554
2555 if (b == 0)
2556 {
2557 /* This is the first block in the function. There may be insns
2558 before head that we can't schedule. We still need to examine
2559 them though for accurate register lifetime analysis. */
2560
2561 /* We don't want to remove any REG_DEAD notes as the code below
2562 does. */
2563
2564 for (insn = basic_block_head[b]; insn != head;
2565 insn = NEXT_INSN (insn))
2566 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2567 {
2568 /* See if the register gets born here. */
2569 /* We must check for registers being born before we check for
2570 registers dying. It is possible for a register to be born
2571 and die in the same insn, e.g. reading from a volatile
2572 memory location into an otherwise unused register. Such
2573 a register must be marked as dead after this insn. */
2574 if (GET_CODE (PATTERN (insn)) == SET
2575 || GET_CODE (PATTERN (insn)) == CLOBBER)
2576 sched_note_set (b, PATTERN (insn), 0);
2577 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2578 {
2579 int j;
2580 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2581 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2582 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2583 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2584
2585 /* ??? This code is obsolete and should be deleted. It
2586 is harmless though, so we will leave it in for now. */
2587 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2588 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
2589 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2590 }
2591
2592 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2593 {
2594 if ((REG_NOTE_KIND (link) == REG_DEAD
2595 || REG_NOTE_KIND (link) == REG_UNUSED)
2596 /* Verify that the REG_NOTE has a legal value. */
2597 && GET_CODE (XEXP (link, 0)) == REG)
2598 {
2599 register int regno = REGNO (XEXP (link, 0));
2600 register int offset = regno / REGSET_ELT_BITS;
2601 register REGSET_ELT_TYPE bit
2602 = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
2603
2604 if (regno < FIRST_PSEUDO_REGISTER)
2605 {
2606 int j = HARD_REGNO_NREGS (regno,
2607 GET_MODE (XEXP (link, 0)));
2608 while (--j >= 0)
2609 {
2610 offset = (regno + j) / REGSET_ELT_BITS;
2611 bit = ((REGSET_ELT_TYPE) 1
2612 << ((regno + j) % REGSET_ELT_BITS));
2613
2614 bb_live_regs[offset] &= ~bit;
2615 bb_dead_regs[offset] |= bit;
2616 }
2617 }
2618 else
2619 {
2620 bb_live_regs[offset] &= ~bit;
2621 bb_dead_regs[offset] |= bit;
2622 }
2623 }
2624 }
2625 }
2626 }
2627 }
2628
2629 /* If debugging information is being produced, keep track of the line
2630 number notes for each insn. */
2631 if (write_symbols != NO_DEBUG)
2632 {
2633 /* We must use the true line number for the first insn in the block
2634 that was computed and saved at the start of this pass. We can't
2635 use the current line number, because scheduling of the previous
2636 block may have changed the current line number. */
2637 rtx line = line_note_head[b];
2638
2639 for (insn = basic_block_head[b];
2640 insn != next_tail;
2641 insn = NEXT_INSN (insn))
2642 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
2643 line = insn;
2644 else
2645 LINE_NOTE (insn) = line;
2646 }
2647
2648 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2649 {
2650 rtx prev, next, link;
2651
2652 /* Farm out notes. This is needed to keep the debugger from
2653 getting completely deranged. */
2654 if (GET_CODE (insn) == NOTE)
2655 {
2656 prev = insn;
2657 insn = unlink_notes (insn, next_tail);
2658 if (prev == tail)
2659 abort ();
2660 if (prev == head)
2661 abort ();
2662 if (insn == next_tail)
2663 abort ();
2664 }
2665
2666 if (reload_completed == 0
2667 && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2668 {
2669 /* See if the register gets born here. */
2670 /* We must check for registers being born before we check for
2671 registers dying. It is possible for a register to be born and
2672 die in the same insn, e.g. reading from a volatile memory
2673 location into an otherwise unused register. Such a register
2674 must be marked as dead after this insn. */
2675 if (GET_CODE (PATTERN (insn)) == SET
2676 || GET_CODE (PATTERN (insn)) == CLOBBER)
2677 sched_note_set (b, PATTERN (insn), 0);
2678 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2679 {
2680 int j;
2681 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2682 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2683 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2684 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2685
2686 /* ??? This code is obsolete and should be deleted. It
2687 is harmless though, so we will leave it in for now. */
2688 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2689 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
2690 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2691 }
2692
2693 /* Need to know what registers this insn kills. */
2694 for (prev = 0, link = REG_NOTES (insn); link; link = next)
2695 {
2696 int regno;
2697
2698 next = XEXP (link, 1);
2699 if ((REG_NOTE_KIND (link) == REG_DEAD
2700 || REG_NOTE_KIND (link) == REG_UNUSED)
2701 /* Verify that the REG_NOTE has a legal value. */
2702 && GET_CODE (XEXP (link, 0)) == REG)
2703 {
2704 register int regno = REGNO (XEXP (link, 0));
2705 register int offset = regno / REGSET_ELT_BITS;
2706 register REGSET_ELT_TYPE bit
2707 = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
2708
2709 /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
2710 alone. */
2711 if (REG_NOTE_KIND (link) == REG_DEAD)
2712 {
2713 if (prev)
2714 XEXP (prev, 1) = next;
2715 else
2716 REG_NOTES (insn) = next;
2717 XEXP (link, 1) = dead_notes;
2718 dead_notes = link;
2719 }
2720 else
2721 prev = link;
2722
2723 if (regno < FIRST_PSEUDO_REGISTER)
2724 {
2725 int j = HARD_REGNO_NREGS (regno,
2726 GET_MODE (XEXP (link, 0)));
2727 while (--j >= 0)
2728 {
2729 offset = (regno + j) / REGSET_ELT_BITS;
2730 bit = ((REGSET_ELT_TYPE) 1
2731 << ((regno + j) % REGSET_ELT_BITS));
2732
2733 bb_live_regs[offset] &= ~bit;
2734 bb_dead_regs[offset] |= bit;
2735 }
2736 }
2737 else
2738 {
2739 bb_live_regs[offset] &= ~bit;
2740 bb_dead_regs[offset] |= bit;
2741 }
2742 }
2743 else
2744 prev = link;
2745 }
2746 }
2747 }
2748
2749 if (reload_completed == 0)
2750 {
2751 /* Keep track of register lives. */
2752 old_live_regs = (regset) alloca (regset_bytes);
2753 regs_sometimes_live
2754 = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
2755 sometimes_max = 0;
2756
2757 /* Start with registers live at end. */
2758 for (j = 0; j < regset_size; j++)
2759 {
2760 REGSET_ELT_TYPE live = bb_live_regs[j];
2761 old_live_regs[j] = live;
2762 if (live)
2763 {
2764 register REGSET_ELT_TYPE bit;
2765 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
2766 if (live & ((REGSET_ELT_TYPE) 1 << bit))
2767 sometimes_max = new_sometimes_live (regs_sometimes_live, j,
2768 bit, sometimes_max);
2769 }
2770 }
2771 }
2772
2773 SCHED_SORT (ready, n_ready, 1);
2774
2775 if (file)
2776 {
2777 fprintf (file, ";; ready list initially:\n;; ");
2778 for (i = 0; i < n_ready; i++)
2779 fprintf (file, "%d ", INSN_UID (ready[i]));
2780 fprintf (file, "\n\n");
2781
2782 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2783 if (INSN_PRIORITY (insn) > 0)
2784 fprintf (file, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
2785 INSN_UID (insn), INSN_PRIORITY (insn),
2786 INSN_REF_COUNT (insn));
2787 }
2788
2789 /* Now HEAD and TAIL are going to become disconnected
2790 entirely from the insn chain. */
2791 tail = ready[0];
2792
2793 /* Q_SIZE will always be zero here. */
2794 q_ptr = 0;
2795 bzero (insn_queue, sizeof (insn_queue));
2796
2797 /* Now, perform list scheduling. */
2798
2799 /* Where we start inserting insns is after TAIL. */
2800 last = next_tail;
2801
2802 new_needs = (NEXT_INSN (prev_head) == basic_block_head[b]
2803 ? NEED_HEAD : NEED_NOTHING);
2804 if (PREV_INSN (next_tail) == basic_block_end[b])
2805 new_needs |= NEED_TAIL;
2806
2807 new_ready = n_ready;
2808 while (sched_n_insns < n_insns)
2809 {
2810 q_ptr = NEXT_Q (q_ptr);
2811
2812 /* Add all pending insns that can be scheduled without stalls to the
2813 ready list. */
2814 for (insn = insn_queue[q_ptr]; insn; insn = NEXT_INSN (insn))
2815 {
2816 if (file)
2817 fprintf (file, ";; launching %d before %d with no stalls\n",
2818 INSN_UID (insn), INSN_UID (last));
2819 ready[new_ready++] = insn;
2820 q_size -= 1;
2821 }
2822 insn_queue[q_ptr] = 0;
2823
2824 /* If there are no ready insns, stall until one is ready and add all
2825 of the pending insns at that point to the ready list. */
2826 if (new_ready == 0)
2827 {
2828 register int stalls;
2829
2830 for (stalls = 1; stalls < Q_SIZE; stalls++)
2831 if (insn = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)])
2832 {
2833 for (; insn; insn = NEXT_INSN (insn))
2834 {
2835 if (file)
2836 fprintf (file, ";; issue insn %d before %d with %d stalls\n",
2837 INSN_UID (insn), INSN_UID (last), stalls);
2838 ready[new_ready++] = insn;
2839 q_size -= 1;
2840 }
2841 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
2842 break;
2843 }
2844
2845#if 0
2846 /* This looks logically correct, but on the SPEC benchmark set on
2847 the SPARC, I get better code without it. */
2848 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
2849#endif
2850 }
2851
2852 /* There should be some instructions waiting to fire. */
2853 if (new_ready == 0)
2854 abort ();
2855
2856 /* Sort the ready list and choose the best insn to schedule.
2857 N_READY holds the number of items that were scheduled the last time,
2858 minus the one instruction scheduled on the last loop iteration; it
2859 is not modified for any other reason in this loop. */
2860 SCHED_SORT (ready, new_ready, n_ready);
2861 n_ready = new_ready;
2862 last_scheduled_insn = insn = ready[0];
2863
2864 if (DONE_PRIORITY_P (insn))
2865 abort ();
2866
2867 if (reload_completed == 0)
2868 {
2869 /* Process this insn, and each insn linked to this one which must
2870 be immediately output after this insn. */
2871 do
2872 {
2873 /* First we kill registers set by this insn, and then we
2874 make registers used by this insn live. This is the opposite
2875 order used above because we are traversing the instructions
2876 backwards. */
2877
2878 /* Strictly speaking, we should scan REG_UNUSED notes and make
2879 every register mentioned there live, however, we will just
2880 kill them again immediately below, so there doesn't seem to
2881 be any reason why we bother to do this. */
2882
2883 /* See if this is the last notice we must take of a register. */
2884 if (GET_CODE (PATTERN (insn)) == SET
2885 || GET_CODE (PATTERN (insn)) == CLOBBER)
2886 sched_note_set (b, PATTERN (insn), 1);
2887 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2888 {
2889 int j;
2890 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2891 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2892 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2893 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 1);
2894 }
2895
2896 /* This code keeps life analysis information up to date. */
2897 if (GET_CODE (insn) == CALL_INSN)
2898 {
2899 register struct sometimes *p;
2900
2901 /* A call kills all call used and global registers, except
2902 for those mentioned in the call pattern which will be
2903 made live again later. */
2904 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2905 if (call_used_regs[i] || global_regs[i])
2906 {
2907 register int offset = i / REGSET_ELT_BITS;
2908 register REGSET_ELT_TYPE bit
2909 = (REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS);
2910
2911 bb_live_regs[offset] &= ~bit;
2912 bb_dead_regs[offset] |= bit;
2913 }
2914
2915 /* Regs live at the time of a call instruction must not
2916 go in a register clobbered by calls. Record this for
2917 all regs now live. Note that insns which are born or
2918 die in a call do not cross a call, so this must be done
2919 after the killings (above) and before the births
2920 (below). */
2921 p = regs_sometimes_live;
2922 for (i = 0; i < sometimes_max; i++, p++)
2923 if (bb_live_regs[p->offset]
2924 & ((REGSET_ELT_TYPE) 1 << p->bit))
2925 p->calls_crossed += 1;
2926 }
2927
2928 /* Make every register used live, and add REG_DEAD notes for
2929 registers which were not live before we started. */
2930 attach_deaths_insn (insn);
2931
2932 /* Find registers now made live by that instruction. */
2933 for (i = 0; i < regset_size; i++)
2934 {
2935 REGSET_ELT_TYPE diff = bb_live_regs[i] & ~old_live_regs[i];
2936 if (diff)
2937 {
2938 register int bit;
2939 old_live_regs[i] |= diff;
2940 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
2941 if (diff & ((REGSET_ELT_TYPE) 1 << bit))
2942 sometimes_max
2943 = new_sometimes_live (regs_sometimes_live, i, bit,
2944 sometimes_max);
2945 }
2946 }
2947
2948 /* Count lengths of all regs we are worrying about now,
2949 and handle registers no longer live. */
2950
2951 for (i = 0; i < sometimes_max; i++)
2952 {
2953 register struct sometimes *p = &regs_sometimes_live[i];
2954 int regno = p->offset*REGSET_ELT_BITS + p->bit;
2955
2956 p->live_length += 1;
2957
2958 if ((bb_live_regs[p->offset]
2959 & ((REGSET_ELT_TYPE) 1 << p->bit)) == 0)
2960 {
2961 /* This is the end of one of this register's lifetime
2962 segments. Save the lifetime info collected so far,
2963 and clear its bit in the old_live_regs entry. */
2964 sched_reg_live_length[regno] += p->live_length;
2965 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2966 old_live_regs[p->offset]
2967 &= ~((REGSET_ELT_TYPE) 1 << p->bit);
2968
2969 /* Delete the reg_sometimes_live entry for this reg by
2970 copying the last entry over top of it. */
2971 *p = regs_sometimes_live[--sometimes_max];
2972 /* ...and decrement i so that this newly copied entry
2973 will be processed. */
2974 i--;
2975 }
2976 }
2977
2978 link = insn;
2979 insn = PREV_INSN (insn);
2980 }
2981 while (SCHED_GROUP_P (link));
2982
2983 /* Set INSN back to the insn we are scheduling now. */
2984 insn = ready[0];
2985 }
2986
2987 /* Schedule INSN. Remove it from the ready list. */
2988 ready += 1;
2989 n_ready -= 1;
2990
2991 sched_n_insns += 1;
2992 NEXT_INSN (insn) = last;
2993 PREV_INSN (last) = insn;
2994 last = insn;
2995
2996 /* Everything that precedes INSN now either becomes "ready", if
2997 it can execute immediately before INSN, or "pending", if
2998 there must be a delay. Give INSN high enough priority that
2999 at least one (maybe more) reg-killing insns can be launched
3000 ahead of all others. Mark INSN as scheduled by changing its
3001 priority to -1. */
3002 INSN_PRIORITY (insn) = LAUNCH_PRIORITY;
3003 new_ready = launch_links (insn, ready, n_ready);
3004 INSN_PRIORITY (insn) = DONE_PRIORITY;
3005
3006 /* Schedule all prior insns that must not be moved. */
3007 if (SCHED_GROUP_P (insn))
3008 {
3009 /* Disable these insns from being launched. */
3010 link = insn;
3011 while (SCHED_GROUP_P (link))
3012 {
3013 /* Disable these insns from being launched by anybody. */
3014 link = PREV_INSN (link);
3015 INSN_REF_COUNT (link) = 0;
3016 }
3017
3018 /* None of these insns can move forward into delay slots. */
3019 while (SCHED_GROUP_P (insn))
3020 {
3021 insn = PREV_INSN (insn);
3022 new_ready = launch_links (insn, ready, new_ready);
3023 INSN_PRIORITY (insn) = DONE_PRIORITY;
3024
3025 sched_n_insns += 1;
3026 NEXT_INSN (insn) = last;
3027 PREV_INSN (last) = insn;
3028 last = insn;
3029 }
3030 }
3031 }
3032 if (q_size != 0)
3033 abort ();
3034
3035 if (reload_completed == 0)
3036 finish_sometimes_live (regs_sometimes_live, sometimes_max);
3037
3038 /* HEAD is now the first insn in the chain of insns that have
3039 been scheduled by the loop above.
3040 TAIL is the last of those insns. */
3041 head = insn;
3042
3043 /* NOTE_LIST is the end of a chain of notes previously found
3044 among the insns. Insert them at the beginning of the insns. */
3045 if (note_list != 0)
3046 {
3047 rtx note_head = note_list;
3048 while (PREV_INSN (note_head))
3049 note_head = PREV_INSN (note_head);
3050
3051 PREV_INSN (head) = note_list;
3052 NEXT_INSN (note_list) = head;
3053 head = note_head;
3054 }
3055
3056 /* In theory, there should be no REG_DEAD notes leftover at the end.
3057 In practice, this can occur as the result of bugs in flow, combine.c,
3058 and/or sched.c. The values of the REG_DEAD notes remaining are
3059 meaningless, because dead_notes is just used as a free list. */
3060#if 1
3061 if (dead_notes != 0)
3062 abort ();
3063#endif
3064
3065 if (new_needs & NEED_HEAD)
3066 basic_block_head[b] = head;
3067 PREV_INSN (head) = prev_head;
3068 NEXT_INSN (prev_head) = head;
3069
3070 if (new_needs & NEED_TAIL)
3071 basic_block_end[b] = tail;
3072 NEXT_INSN (tail) = next_tail;
3073 PREV_INSN (next_tail) = tail;
3074
3075 /* Restore the line-number notes of each insn. */
3076 if (write_symbols != NO_DEBUG)
3077 {
3078 rtx line, note, prev, new;
3079 int notes = 0;
3080
3081 head = basic_block_head[b];
3082 next_tail = NEXT_INSN (basic_block_end[b]);
3083
3084 /* Determine the current line-number. We want to know the current
3085 line number of the first insn of the block here, in case it is
3086 different from the true line number that was saved earlier. If
3087 different, then we need a line number note before the first insn
3088 of this block. If it happens to be the same, then we don't want to
3089 emit another line number note here. */
3090 for (line = head; line; line = PREV_INSN (line))
3091 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3092 break;
3093
3094 /* Walk the insns keeping track of the current line-number and inserting
3095 the line-number notes as needed. */
3096 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3097 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3098 line = insn;
3099 else if (! (GET_CODE (insn) == NOTE
3100 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
3101 && (note = LINE_NOTE (insn)) != 0
3102 && note != line
3103 && (line == 0
3104 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
3105 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
3106 {
3107 line = note;
3108 prev = PREV_INSN (insn);
3109 if (LINE_NOTE (note))
3110 {
3111 /* Re-use the original line-number note. */
3112 LINE_NOTE (note) = 0;
3113 PREV_INSN (note) = prev;
3114 NEXT_INSN (prev) = note;
3115 PREV_INSN (insn) = note;
3116 NEXT_INSN (note) = insn;
3117 }
3118 else
3119 {
3120 notes++;
3121 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
3122 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
3123 }
3124 }
3125 if (file && notes)
3126 fprintf (file, ";; added %d line-number notes\n", notes);
3127 }
3128
3129 if (file)
3130 {
3131 fprintf (file, ";; new basic block head = %d\n;; new basic block end = %d\n\n",
3132 INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
3133 }
3134
3135 /* Yow! We're done! */
3136 free_pending_lists ();
3137
3138 return;
3139}
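
Stripped of the lifetime bookkeeping, note shuffling, and the stall queue, the loop above is classic list scheduling: keep a ready set of insns whose dependences are satisfied, repeatedly emit the best one, and release its dependents. Below is a forward-direction toy of that core; the dependence matrix and priorities are made up, and the real loop runs backwards over LOG_LINKS with SCHED_SORT doing the selection.

#include <stdio.h>

#define N 4

static int dep[N][N] = {	/* dep[i][j]: insn j waits on insn i */
  { 0, 1, 1, 0 },
  { 0, 0, 0, 1 },
  { 0, 0, 0, 1 },
  { 0, 0, 0, 0 },
};
static int n_deps_left[N] = { 0, 1, 1, 2 };
static int priority[N]    = { 4, 2, 3, 1 };

int
main (void)
{
  int done[N] = { 0 }, scheduled = 0;

  while (scheduled < N)
    {
      int best = -1, i, j;

      /* Ready = every unscheduled insn with no unsatisfied deps;
	 take the one with the highest priority.  (The dependence
	 graph is acyclic, so BEST is always found.)  */
      for (i = 0; i < N; i++)
	if (! done[i] && n_deps_left[i] == 0
	    && (best < 0 || priority[i] > priority[best]))
	  best = i;

      printf ("schedule insn %d\n", best);
      done[best] = 1;
      scheduled += 1;

      /* Release everything that was waiting on BEST.  */
      for (j = 0; j < N; j++)
	if (dep[best][j])
	  n_deps_left[j] -= 1;
    }
  return 0;
}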
3140\f
3141/* Subroutine of split_hard_reg_notes. Searches X for any reference to
3142 REGNO, returning the rtx of the reference found if any. Otherwise,
3143 returns 0. */
3144
3145rtx
3146regno_use_in (regno, x)
3147 int regno;
3148 rtx x;
3149{
3150 register char *fmt;
3151 int i, j;
3152 rtx tem;
3153
3154 if (GET_CODE (x) == REG && REGNO (x) == regno)
3155 return x;
3156
3157 fmt = GET_RTX_FORMAT (GET_CODE (x));
3158 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3159 {
3160 if (fmt[i] == 'e')
3161 {
3162 if (tem = regno_use_in (regno, XEXP (x, i)))
3163 return tem;
3164 }
3165 else if (fmt[i] == 'E')
3166 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3167 if (tem = regno_use_in (regno , XVECEXP (x, i, j)))
3168 return tem;
3169 }
3170
3171 return 0;
3172}
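
regno_use_in is the standard recursive rtl walk: test the node itself, then recurse through 'e' (expression) and 'E' (vector) operands per GET_RTX_FORMAT. The same shape on a toy expression type; struct expr and use_in are stand-ins for rtx and the format-string loop.

#include <stdio.h>
#include <stddef.h>

struct expr
{
  int regno;			/* valid when n_ops == 0 (a leaf) */
  int n_ops;
  struct expr *ops[2];
};

/* Depth-first search for a leaf matching REGNO; returns it, or NULL.  */
static struct expr *
use_in (int regno, struct expr *x)
{
  int i;
  struct expr *tem;

  if (x->n_ops == 0)
    return x->regno == regno ? x : NULL;

  for (i = x->n_ops - 1; i >= 0; i--)
    if ((tem = use_in (regno, x->ops[i])))
      return tem;
  return NULL;
}

int
main (void)
{
  struct expr r3 = { 3, 0, { NULL, NULL } };
  struct expr r5 = { 5, 0, { NULL, NULL } };
  struct expr plus = { 0, 2, { &r3, &r5 } };

  printf ("found reg %d\n", use_in (5, &plus)->regno);
  return 0;
}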
3173
3174/* Subroutine of update_flow_info. Determines whether any new REG_NOTEs are
3175 needed for the hard register mentioned in the note. This can happen
3176 if the reference to the hard register in the original insn was split into
3177 several smaller hard register references in the split insns. */
3178
3179static void
3180split_hard_reg_notes (note, first, last, orig_insn)
3181 rtx note, first, last, orig_insn;
3182{
3183 rtx reg, temp, link;
3184 int n_regs, i, new_reg;
3185 rtx insn;
3186
3187 /* Assume that this is a REG_DEAD note. */
3188 if (REG_NOTE_KIND (note) != REG_DEAD)
3189 abort ();
3190
3191 reg = XEXP (note, 0);
3192
3193 n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
3194
3195 /* ??? Could add check here to see whether, the hard register is referenced
3196 in the same mode as in the original insn. If so, then it has not been
3197 split, and the rest of the code below is unnecessary. */
3198
3199 for (i = 1; i < n_regs; i++)
3200 {
3201 new_reg = REGNO (reg) + i;
3202
3203 /* Check for references to new_reg in the split insns. */
3204 for (insn = last; ; insn = PREV_INSN (insn))
3205 {
3206 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3207 && (temp = regno_use_in (new_reg, PATTERN (insn))))
3208 {
3209 /* Create a new reg dead note here. */
3210 link = rtx_alloc (EXPR_LIST);
3211 PUT_REG_NOTE_KIND (link, REG_DEAD);
3212 XEXP (link, 0) = temp;
3213 XEXP (link, 1) = REG_NOTES (insn);
3214 REG_NOTES (insn) = link;
3215 break;
3216 }
3217 /* It isn't mentioned anywhere, so no new reg note is needed for
3218 this register. */
3219 if (insn == first)
3220 break;
3221 }
3222 }
3223}
3224
3225/* Subroutine of update_flow_info. Determines whether a SET or CLOBBER in an
3226 insn created by splitting needs a REG_DEAD or REG_UNUSED note added. */
3227
3228static void
3229new_insn_dead_notes (pat, insn, last, orig_insn)
3230 rtx pat, insn, last, orig_insn;
3231{
3232 rtx dest, tem, set;
3233
3234 /* PAT is either a CLOBBER or a SET here. */
3235 dest = XEXP (pat, 0);
3236
3237 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3238 || GET_CODE (dest) == STRICT_LOW_PART
3239 || GET_CODE (dest) == SIGN_EXTRACT)
3240 dest = XEXP (dest, 0);
3241
3242 if (GET_CODE (dest) == REG)
3243 {
3244 for (tem = last; tem != insn; tem = PREV_INSN (tem))
3245 {
3246 if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
3247 && reg_overlap_mentioned_p (dest, PATTERN (tem))
3248 && (set = single_set (tem)))
3249 {
3250 rtx tem_dest = SET_DEST (set);
3251
3252 while (GET_CODE (tem_dest) == ZERO_EXTRACT
3253 || GET_CODE (tem_dest) == SUBREG
3254 || GET_CODE (tem_dest) == STRICT_LOW_PART
3255 || GET_CODE (tem_dest) == SIGN_EXTRACT)
3256 tem_dest = XEXP (tem_dest, 0);
3257
3258 if (tem_dest != dest)
3259 {
3260 /* Use the same scheme as combine.c, don't put both REG_DEAD
3261 and REG_UNUSED notes on the same insn. */
3262 if (! find_regno_note (tem, REG_UNUSED, REGNO (dest))
3263 && ! find_regno_note (tem, REG_DEAD, REGNO (dest)))
3264 {
3265 rtx note = rtx_alloc (EXPR_LIST);
3266 PUT_REG_NOTE_KIND (note, REG_DEAD);
3267 XEXP (note, 0) = dest;
3268 XEXP (note, 1) = REG_NOTES (tem);
3269 REG_NOTES (tem) = note;
3270 }
3271 /* The reg only dies in one insn, the last one that uses
3272 it. */
3273 break;
3274 }
3275 else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
3276 /* We found an instruction that both uses the register,
3277 and sets it, so no new REG_NOTE is needed for this set. */
3278 break;
3279 }
3280 }
3281 /* If this is a set, it must die somewhere, unless it is the dest of
3282 the original insn, and hence is live after the original insn. Abort
3283 if it isn't supposed to be live after the original insn.
3284
3285 If this is a clobber, then just add a REG_UNUSED note. */
3286 if (tem == insn)
3287 {
3288 int live_after_orig_insn = 0;
3289 rtx pattern = PATTERN (orig_insn);
3290 int i;
3291
3292 if (GET_CODE (pat) == CLOBBER)
3293 {
3294 rtx note = rtx_alloc (EXPR_LIST);
3295 PUT_REG_NOTE_KIND (note, REG_UNUSED);
3296 XEXP (note, 0) = dest;
3297 XEXP (note, 1) = REG_NOTES (insn);
3298 REG_NOTES (insn) = note;
3299 return;
3300 }
3301
3302 /* The original insn could have multiple sets, so search the
3303 insn for all sets. */
3304 if (GET_CODE (pattern) == SET)
3305 {
3306 if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
3307 live_after_orig_insn = 1;
3308 }
3309 else if (GET_CODE (pattern) == PARALLEL)
3310 {
3311 for (i = 0; i < XVECLEN (pattern, 0); i++)
3312 if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
3313 && reg_overlap_mentioned_p (dest,
3314 SET_DEST (XVECEXP (pattern,
3315 0, i))))
3316 live_after_orig_insn = 1;
3317 }
3318
3319 if (! live_after_orig_insn)
3320 abort ();
3321 }
3322 }
3323}
3324
3325/* Subroutine of update_flow_info. Update the value of reg_n_sets for all
3326 registers modified by X. INC is -1 if the containing insn is being deleted,
3327 and is 1 if the containing insn is a newly generated insn. */
3328
3329static void
3330update_n_sets (x, inc)
3331 rtx x;
3332 int inc;
3333{
3334 rtx dest = SET_DEST (x);
3335
3336 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
3337 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3338 dest = SUBREG_REG (dest);
3339
3340 if (GET_CODE (dest) == REG)
3341 {
3342 int regno = REGNO (dest);
3343
3344 if (regno < FIRST_PSEUDO_REGISTER)
3345 {
3346 register int i;
3347 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
3348
3349 for (i = regno; i < endregno; i++)
3350 reg_n_sets[i] += inc;
3351 }
3352 else
3353 reg_n_sets[regno] += inc;
3354 }
3355}
3356
3357/* Updates all flow-analysis related quantities (including REG_NOTES) for
3358 the insns from FIRST to LAST inclusive that were created by splitting
3359 ORIG_INSN. NOTES are the original REG_NOTES. */
3360
3361static void
3362update_flow_info (notes, first, last, orig_insn)
3363 rtx notes;
3364 rtx first, last;
3365 rtx orig_insn;
3366{
3367 rtx insn, note;
3368 rtx next;
3369 rtx orig_dest, temp;
3370 rtx set;
3371
3372 /* Get and save the destination set by the original insn. */
3373
3374 orig_dest = single_set (orig_insn);
3375 if (orig_dest)
3376 orig_dest = SET_DEST (orig_dest);
3377
3378 /* Move REG_NOTES from the original insn to where they now belong. */
3379
3380 for (note = notes; note; note = next)
3381 {
3382 next = XEXP (note, 1);
3383 switch (REG_NOTE_KIND (note))
3384 {
3385 case REG_DEAD:
3386 case REG_UNUSED:
3387 /* Move these notes from the original insn to the last new insn where
3388 the register is now set. */
3389
3390 for (insn = last; ; insn = PREV_INSN (insn))
3391 {
3392 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3393 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3394 {
3395 XEXP (note, 1) = REG_NOTES (insn);
3396 REG_NOTES (insn) = note;
3397
3398 /* Sometimes need to convert REG_UNUSED notes to REG_DEAD
3399 notes. */
3400 /* ??? This won't handle multiple word registers correctly,
3401 but should be good enough for now. */
3402 if (REG_NOTE_KIND (note) == REG_UNUSED
3403 && ! dead_or_set_p (insn, XEXP (note, 0)))
3404 PUT_REG_NOTE_KIND (note, REG_DEAD);
3405
3406 /* The reg only dies in one insn, the last one that uses
3407 it. */
3408 break;
3409 }
3410 /* It must die somewhere, fail if we couldn't find where it died.
3411
3412 If this is a REG_UNUSED note, then it must be a temporary
3413 register that was not needed by this instantiation of the
3414 pattern, so we can safely ignore it. */
3415 if (insn == first)
3416 {
3417 if (REG_NOTE_KIND (note) != REG_UNUSED)
3418 abort ();
3419
3420 break;
3421 }
3422 }
3423
3424 /* If this note refers to a multiple word hard register, it may
3425 have been split into several smaller hard register references.
3426 Check to see if there are any new register references that
3427 need REG_NOTES added for them. */
3428 temp = XEXP (note, 0);
3429 if (REG_NOTE_KIND (note) == REG_DEAD
3430 && GET_CODE (temp) == REG
3431 && REGNO (temp) < FIRST_PSEUDO_REGISTER
3432 && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) > 1)
3433 split_hard_reg_notes (note, first, last, orig_insn);
3434 break;
3435
3436 case REG_WAS_0:
3437 /* This note applies to the dest of the original insn. Find the
3438 first new insn that now has the same dest, and move the note
3439 there. */
3440
3441 if (! orig_dest)
3442 abort ();
3443
3444 for (insn = first; ; insn = NEXT_INSN (insn))
3445 {
3446 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3447 && (temp = single_set (insn))
3448 && rtx_equal_p (SET_DEST (temp), orig_dest))
3449 {
3450 XEXP (note, 1) = REG_NOTES (insn);
3451 REG_NOTES (insn) = note;
3452 /* The reg is only zero before one insn, the first that
3453 uses it. */
3454 break;
3455 }
3456 /* It must be set somewhere, fail if we couldn't find where it
3457 was set. */
3458 if (insn == last)
3459 abort ();
3460 }
3461 break;
3462
3463 case REG_EQUAL:
3464 case REG_EQUIV:
3465 /* A REG_EQUIV or REG_EQUAL note on an insn with more than one
3466 set is meaningless. Just drop the note. */
3467 if (! orig_dest)
3468 break;
3469
3470 case REG_NO_CONFLICT:
3471 /* These notes apply to the dest of the original insn. Find the last
3472 new insn that now has the same dest, and move the note there. */
3473
3474 if (! orig_dest)
3475 abort ();
3476
3477 for (insn = last; ; insn = PREV_INSN (insn))
3478 {
3479 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3480 && (temp = single_set (insn))
3481 && rtx_equal_p (SET_DEST (temp), orig_dest))
3482 {
3483 XEXP (note, 1) = REG_NOTES (insn);
3484 REG_NOTES (insn) = note;
3485 /* Only put this note on one of the new insns. */
3486 break;
3487 }
3488
3489 /* The original dest must still be set someplace. Abort if we
3490 couldn't find it. */
3491 if (insn == first)
3492 abort ();
3493 }
3494 break;
3495
3496 case REG_LIBCALL:
3497 /* Move a REG_LIBCALL note to the first insn created, and update
3498 the corresponding REG_RETVAL note. */
3499 XEXP (note, 1) = REG_NOTES (first);
3500 REG_NOTES (first) = note;
3501
3502 insn = XEXP (note, 0);
3503 note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
3504 if (note)
3505 XEXP (note, 0) = first;
3506 break;
3507
3508 case REG_RETVAL:
3509 /* Move a REG_RETVAL note to the last insn created, and update
3510 the corresponding REG_LIBCALL note. */
3511 XEXP (note, 1) = REG_NOTES (last);
3512 REG_NOTES (last) = note;
3513
3514 insn = XEXP (note, 0);
3515 note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
3516 if (note)
3517 XEXP (note, 0) = last;
3518 break;
3519
3520 case REG_NONNEG:
3521 /* This should be moved to whichever instruction is a JUMP_INSN. */
3522
3523 for (insn = last; ; insn = PREV_INSN (insn))
3524 {
3525 if (GET_CODE (insn) == JUMP_INSN)
3526 {
3527 XEXP (note, 1) = REG_NOTES (insn);
3528 REG_NOTES (insn) = note;
3529 /* Only put this note on one of the new insns. */
3530 break;
3531 }
3532 /* Fail if we couldn't find a JUMP_INSN. */
3533 if (insn == first)
3534 abort ();
3535 }
3536 break;
3537
3538 case REG_INC:
3539 /* This should be moved to whichever instruction now has the
3540 increment operation. */
3541 abort ();
3542
3543 case REG_LABEL:
3544 /* Should be moved to the new insn(s) which use the label. */
3545 for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
3546 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3547 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3548 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL,
3549 XEXP (note, 0), REG_NOTES (insn));
3550 break;
3551
3552 case REG_CC_SETTER:
3553 case REG_CC_USER:
3554 /* These two notes will never appear until after reorg, so we don't
3555 have to handle them here. */
3556 default:
3557 abort ();
3558 }
3559 }
3560
3561 /* Each new insn created, except the last, has a new set. If the destination
3562 is a register, then this reg is now live across several insns, whereas
3563 previously the dest reg was born and died within the same insn. To
3564 reflect this, we now need a REG_DEAD note on the insn where this
3565 dest reg dies.
3566
3567 Similarly, the new insns may have clobbers that need REG_UNUSED notes. */
3568
3569 for (insn = first; insn != last; insn = NEXT_INSN (insn))
3570 {
3571 rtx pat;
3572 int i;
3573
3574 pat = PATTERN (insn);
3575 if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
3576 new_insn_dead_notes (pat, insn, last, orig_insn);
3577 else if (GET_CODE (pat) == PARALLEL)
3578 {
3579 for (i = 0; i < XVECLEN (pat, 0); i++)
3580 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3581 || GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
3582 new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
3583 }
3584 }
3585
3586 /* If any insn, except the last, uses the register set by the last insn,
3587 then we need a new REG_DEAD note on that insn. In this case, there
3588 would not have been a REG_DEAD note for this register in the original
3589 insn because it was used and set within one insn.
3590
3591 There is no new REG_DEAD note needed if the last insn uses the register
3592 that it is setting. */
3593
3594 set = single_set (last);
3595 if (set)
3596 {
3597 rtx dest = SET_DEST (set);
3598
3599 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3600 || GET_CODE (dest) == STRICT_LOW_PART
3601 || GET_CODE (dest) == SIGN_EXTRACT)
3602 dest = XEXP (dest, 0);
3603
3604 if (GET_CODE (dest) == REG
3605 && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
3606 {
3607 for (insn = PREV_INSN (last); ; insn = PREV_INSN (insn))
3608 {
3609 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3610 && reg_mentioned_p (dest, PATTERN (insn))
3611 && (set = single_set (insn)))
3612 {
3613 rtx insn_dest = SET_DEST (set);
3614
3615 while (GET_CODE (insn_dest) == ZERO_EXTRACT
3616 || GET_CODE (insn_dest) == SUBREG
3617 || GET_CODE (insn_dest) == STRICT_LOW_PART
3618 || GET_CODE (insn_dest) == SIGN_EXTRACT)
3619 insn_dest = XEXP (insn_dest, 0);
3620
3621 if (insn_dest != dest)
3622 {
3623 note = rtx_alloc (EXPR_LIST);
3624 PUT_REG_NOTE_KIND (note, REG_DEAD);
3625 XEXP (note, 0) = dest;
3626 XEXP (note, 1) = REG_NOTES (insn);
3627 REG_NOTES (insn) = note;
3628 /* The reg only dies in one insn, the last one
3629 that uses it. */
3630 break;
3631 }
3632 }
3633 if (insn == first)
3634 break;
3635 }
3636 }
3637 }
3638
3639 /* If the original dest is modifying a multiple register target, and the
3640 original instruction was split such that the original dest is now set
3641 by two or more SUBREG sets, then the split insns no longer kill the
3642 destination of the original insn.
3643
3644 In this case, if there exists an instruction in the same basic block,
3645 before the split insn, which uses the original dest, and this use is
3646 killed by the original insn, then we must remove the REG_DEAD note on
3647 this insn, because it is now superfluous.
3648
3649 This does not apply when a hard register gets split, because the code
3650 knows how to handle overlapping hard registers properly. */
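  /* An illustrative (hypothetical) case: if

	(set (reg:DI 100) (mem:DI (reg:SI 101)))

     is split into

	(set (subreg:SI (reg:DI 100) 0) (mem:SI (reg:SI 101)))
	(set (subreg:SI (reg:DI 100) 1)
	     (mem:SI (plus:SI (reg:SI 101) (const_int 4))))

     then neither new insn sets all of reg 100, so reg 100's old value
     survives into the partial sets; a REG_DEAD note for reg 100 on
     some earlier insn no longer marks a last use and must go.  */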
3651 if (orig_dest && GET_CODE (orig_dest) == REG)
3652 {
3653 int found_orig_dest = 0;
3654 int found_split_dest = 0;
3655
3656 for (insn = first; ; insn = NEXT_INSN (insn))
3657 {
3658 set = single_set (insn);
3659 if (set)
3660 {
3661 if (GET_CODE (SET_DEST (set)) == REG
3662 && REGNO (SET_DEST (set)) == REGNO (orig_dest))
3663 {
3664 found_orig_dest = 1;
3665 break;
3666 }
3667 else if (GET_CODE (SET_DEST (set)) == SUBREG
3668 && SUBREG_REG (SET_DEST (set)) == orig_dest)
3669 {
3670 found_split_dest = 1;
3671 break;
3672 }
3673 }
3674
3675 if (insn == last)
3676 break;
3677 }
3678
3679 if (found_split_dest)
3680 {
3681 /* Search backwards from FIRST, looking for the first insn that uses
3682 the original dest. Stop if we pass a CODE_LABEL or a JUMP_INSN.
3683 If we find an insn, and it has a REG_DEAD note, then delete the
3684 note. */
3685
3686 for (insn = first; insn; insn = PREV_INSN (insn))
3687 {
3688 if (GET_CODE (insn) == CODE_LABEL
3689 || GET_CODE (insn) == JUMP_INSN)
3690 break;
3691 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3692 && reg_mentioned_p (orig_dest, insn))
3693 {
3694 note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
3695 if (note)
3696 remove_note (insn, note);
3697 }
3698 }
3699 }
3700 else if (! found_orig_dest)
3701 {
3702 /* This should never happen. */
3703 abort ();
3704 }
3705 }
3706
3707 /* Update reg_n_sets. This is necessary to prevent local alloc from
3708 converting REG_EQUAL notes to REG_EQUIV when splitting has changed
3709 a reg from being set once to being set multiple times. */
3710
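  /* Concretely (a hypothetical constant-load split): local-alloc
     promotes a REG_EQUAL note to REG_EQUIV only when the register is
     set exactly once.  If

	(set (reg:SI 100) (const_int 305419896))

     becomes

	(set (reg:SI 100) (high:SI (const_int 305419896)))
	(set (reg:SI 100) (lo_sum:SI (reg:SI 100) (const_int 305419896)))

     then reg 100 is now set twice, and the adjustments below keep
     reg_n_sets accurate so that the promotion is not done by
     mistake.  */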
3711 {
3712 rtx x = PATTERN (orig_insn);
3713 RTX_CODE code = GET_CODE (x);
3714
3715 if (code == SET || code == CLOBBER)
3716 update_n_sets (x, -1);
3717 else if (code == PARALLEL)
3718 {
3719 int i;
3720 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3721 {
3722 code = GET_CODE (XVECEXP (x, 0, i));
3723 if (code == SET || code == CLOBBER)
3724 update_n_sets (XVECEXP (x, 0, i), -1);
3725 }
3726 }
3727
3728 for (insn = first; ; insn = NEXT_INSN (insn))
3729 {
3730 x = PATTERN (insn);
3731 code = GET_CODE (x);
3732
3733 if (code == SET || code == CLOBBER)
3734 update_n_sets (x, 1);
3735 else if (code == PARALLEL)
3736 {
3737 int i;
3738 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3739 {
3740 code = GET_CODE (XVECEXP (x, 0, i));
3741 if (code == SET || code == CLOBBER)
3742 update_n_sets (XVECEXP (x, 0, i), 1);
3743 }
3744 }
3745
3746 if (insn == last)
3747 break;
3748 }
3749 }
3750}
3751
3752/* The one entry point in this file. DUMP_FILE is the dump file for
3753 this pass. */
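/* A sketch of the expected call site, for orientation only; the actual
   invocation lives in rest_of_compilation, and the flag and dump-file
   names here are from memory rather than from this tree:

	#ifdef INSN_SCHEDULING
	if (optimize && flag_schedule_insns)
	  schedule_insns (sched_dump_file);
	#endif

   A second call is made after register allocation when the
   second-pass scheduling flag is set.  */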
3754
3755void
3756schedule_insns (dump_file)
3757 FILE *dump_file;
3758{
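  /* Splitting below may turn one insn into as many as
     MAX_INSNS_PER_SPLIT new insns, so all arrays indexed by insn UID
     are sized for that worst case up front.  */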
3759 int max_uid = MAX_INSNS_PER_SPLIT * (get_max_uid () + 1);
3760 int i, b;
3761 rtx insn;
3762
3763 /* Taking care of this degenerate case makes the rest of
3764 this code simpler. */
3765 if (n_basic_blocks == 0)
3766 return;
3767
3768 /* Create an insn here so that we can hang dependencies off of it later. */
3769 sched_before_next_call
3770 = gen_rtx (INSN, VOIDmode, 0, NULL_RTX, NULL_RTX,
3771 NULL_RTX, 0, NULL_RTX, 0);
3772
3773 /* Initialize the unused_*_lists. We can't use the ones left over from
3774 the previous function, because gcc has freed that memory. We can use
3775 the ones left over from the first sched pass in the second pass however,
3776 so only clear them on the first sched pass. The first pass is before
3777 reload if flag_schedule_insns is set, otherwise it is afterwards. */
3778
3779 if (reload_completed == 0 || ! flag_schedule_insns)
3780 {
3781 unused_insn_list = 0;
3782 unused_expr_list = 0;
3783 }
3784
3785 /* We create no insns here, only reorder them, so we
3786 remember how far we can cut back the stack on exit. */
3787
3788 /* Allocate data for this pass. See comments, above,
3789 for what these vectors do. */
3790 insn_luid = (int *) alloca (max_uid * sizeof (int));
3791 insn_priority = (int *) alloca (max_uid * sizeof (int));
3792 insn_costs = (short *) alloca (max_uid * sizeof (short));
3793 insn_ref_count = (int *) alloca (max_uid * sizeof (int));
3794
3795 if (reload_completed == 0)
3796 {
3797 sched_reg_n_deaths = (short *) alloca (max_regno * sizeof (short));
3798 sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
3799 sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
3800 bb_dead_regs = (regset) alloca (regset_bytes);
3801 bb_live_regs = (regset) alloca (regset_bytes);
3802 bzero (sched_reg_n_calls_crossed, max_regno * sizeof (int));
3803 bzero (sched_reg_live_length, max_regno * sizeof (int));
3804 bcopy (reg_n_deaths, sched_reg_n_deaths, max_regno * sizeof (short));
3805 init_alias_analysis ();
3806 }
3807 else
3808 {
3809 sched_reg_n_deaths = 0;
3810 sched_reg_n_calls_crossed = 0;
3811 sched_reg_live_length = 0;
3812 bb_dead_regs = 0;
3813 bb_live_regs = 0;
3814 if (! flag_schedule_insns)
3815 init_alias_analysis ();
3816 }
3817
3818 if (write_symbols != NO_DEBUG)
3819 {
3820 rtx line;
3821
3822 line_note = (rtx *) alloca (max_uid * sizeof (rtx));
3823 bzero (line_note, max_uid * sizeof (rtx));
3824 line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
3825 bzero (line_note_head, n_basic_blocks * sizeof (rtx));
3826
3827 /* Determine the line-number at the start of each basic block.
3828 This must be computed and saved now, because after a basic block's
3829 predecessor has been scheduled, it is impossible to accurately
3830 determine the correct line number for the first insn of the block. */
3831
3832 for (b = 0; b < n_basic_blocks; b++)
3833 for (line = basic_block_head[b]; line; line = PREV_INSN (line))
3834 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3835 {
3836 line_note_head[b] = line;
3837 break;
3838 }
3839 }
3840
3841 bzero (insn_luid, max_uid * sizeof (int));
3842 bzero (insn_priority, max_uid * sizeof (int));
3843 bzero (insn_costs, max_uid * sizeof (short));
3844 bzero (insn_ref_count, max_uid * sizeof (int));
3845
3846 /* Schedule each basic block, block by block. */
3847
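  /* First make sure something follows the last basic block's end insn,
     presumably so that NEXT_INSN of a block end is always meaningful
     while insns are being rearranged around it.  */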
3848 if (NEXT_INSN (basic_block_end[n_basic_blocks-1]) == 0
3849 || (GET_CODE (basic_block_end[n_basic_blocks-1]) != NOTE
3850 && GET_CODE (basic_block_end[n_basic_blocks-1]) != CODE_LABEL))
3851 emit_note_after (NOTE_INSN_DELETED, basic_block_end[n_basic_blocks-1]);
3852
3853 for (b = 0; b < n_basic_blocks; b++)
3854 {
3855 rtx insn, next;
3856 rtx insns;
3857
3858 note_list = 0;
3859
3860 for (insn = basic_block_head[b]; ; insn = next)
3861 {
3862 rtx prev;
3863 rtx set;
3864
3865 /* Can't use `next_real_insn' because that
3866 might go across CODE_LABELs and short-circuit basic blocks. */
3867 next = NEXT_INSN (insn);
3868 if (GET_CODE (insn) != INSN)
3869 {
3870 if (insn == basic_block_end[b])
3871 break;
3872
3873 continue;
3874 }
3875
3876 /* Don't split no-op move insns. These should silently disappear
3877 later in final. Splitting such insns would break the code
3878 that handles REG_NO_CONFLICT blocks. */
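	  /* E.g. (set (reg:SI 58) (reg:SI 58)); final will quietly
	     delete it.  */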
3879 set = single_set (insn);
3880 if (set && rtx_equal_p (SET_SRC (set), SET_DEST (set)))
3881 {
3882 if (insn == basic_block_end[b])
3883 break;
3884
3885 /* Nops get in the way while scheduling, so delete them now if
3886 register allocation has already been done. It is too risky
3887 to try to do this before register allocation, and there are
3888 unlikely to be very many nops then anyway. */
3889 if (reload_completed)
3890 {
3891 PUT_CODE (insn, NOTE);
3892 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
3893 NOTE_SOURCE_FILE (insn) = 0;
3894 }
3895
3896 continue;
3897 }
3898
3899 /* Split insns here to get max fine-grain parallelism. */
3900 prev = PREV_INSN (insn);
3901 if (reload_completed == 0)
3902 {
3903 rtx last, first = PREV_INSN (insn);
3904 rtx notes = REG_NOTES (insn);
3905
3906 last = try_split (PATTERN (insn), insn, 1);
3907 if (last != insn)
3908 {
3909 /* On success, try_split returns the last of the new insns. */
3910 first = NEXT_INSN (first);
3911 update_flow_info (notes, first, last, insn);
3912
3913 PUT_CODE (insn, NOTE);
3914 NOTE_SOURCE_FILE (insn) = 0;
3915 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
3916 if (insn == basic_block_head[b])
3917 basic_block_head[b] = first;
3918 if (insn == basic_block_end[b])
3919 {
3920 basic_block_end[b] = last;
3921 break;
3922 }
3923 }
3924 }
3925
3926 if (insn == basic_block_end[b])
3927 break;
3928 }
3929
3930 schedule_block (b, dump_file);
3931
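      /* With the C emulation of alloca, an alloca (0) call reclaims
	 the storage of frames that have already exited, bounding stack
	 growth from one block to the next.  */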
3932#ifdef USE_C_ALLOCA
3933 alloca (0);
3934#endif
3935 }
3936
3937 /* Reposition the prologue and epilogue notes in case we moved the
3938 prologue/epilogue insns. */
3939 if (reload_completed)
3940 reposition_prologue_and_epilogue_notes (get_insns ());
3941
3942 if (write_symbols != NO_DEBUG)
3943 {
3944 rtx line = 0;
3945 rtx insn = get_insns ();
3946 int active_insn = 0;
3947 int notes = 0;
3948
3949 /* Walk the insns deleting redundant line-number notes. Many of these
3950 were already present before scheduling; the remainder tend to occur
3951 at basic block boundaries. */
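    /* For instance, when two notes in a row name the same file and
       line, the later note announces nothing new and is turned into
       NOTE_INSN_DELETED; a note with no active insn after it at all is
       deleted the same way.  */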
3952 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
3953 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3954 {
3955 /* If there are no active insns following, INSN is redundant. */
3956 if (active_insn == 0)
3957 {
3958 notes++;
3959 NOTE_SOURCE_FILE (insn) = 0;
3960 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
3961 }
3962 /* If the line number is unchanged, LINE is redundant. */
3963 else if (line
3964 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
3965 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
3966 {
3967 notes++;
3968 NOTE_SOURCE_FILE (line) = 0;
3969 NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
3970 line = insn;
3971 }
3972 else
3973 line = insn;
3974 active_insn = 0;
3975 }
3976 else if (! ((GET_CODE (insn) == NOTE
3977 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
3978 || (GET_CODE (insn) == INSN
3979 && (GET_CODE (PATTERN (insn)) == USE
3980 || GET_CODE (PATTERN (insn)) == CLOBBER))))
3981 active_insn++;
3982
3983 if (dump_file && notes)
3984 fprintf (dump_file, ";; deleted %d line-number notes\n", notes);
3985 }
3986
3987 if (reload_completed == 0)
3988 {
3989 int regno;
3990 for (regno = 0; regno < max_regno; regno++)
3991 if (sched_reg_live_length[regno])
3992 {
3993 if (dump_file)
3994 {
3995 if (reg_live_length[regno] > sched_reg_live_length[regno])
3996 fprintf (dump_file,
3997 ";; register %d life shortened from %d to %d\n",
3998 regno, reg_live_length[regno],
3999 sched_reg_live_length[regno]);
4000 /* Negative values are special; don't overwrite the current
4001 reg_live_length value if it is negative. */
4002 else if (reg_live_length[regno] < sched_reg_live_length[regno]
4003 && reg_live_length[regno] >= 0)
4004 fprintf (dump_file,
4005 ";; register %d life extended from %d to %d\n",
4006 regno, reg_live_length[regno],
4007 sched_reg_live_length[regno]);
4008
4009 if (reg_n_calls_crossed[regno]
4010 && ! sched_reg_n_calls_crossed[regno])
4011 fprintf (dump_file,
4012 ";; register %d no longer crosses calls\n", regno);
4013 else if (! reg_n_calls_crossed[regno]
4014 && sched_reg_n_calls_crossed[regno])
4015 fprintf (dump_file,
4016 ";; register %d now crosses calls\n", regno);
4017 }
4018 /* Negative values are special; don't overwrite the current
4019 reg_live_length value if it is negative. */
4020 if (reg_live_length[regno] >= 0)
4021 reg_live_length[regno] = sched_reg_live_length[regno];
4022 reg_n_calls_crossed[regno] = sched_reg_n_calls_crossed[regno];
4023 }
4024 }
4025}
4026#endif /* INSN_SCHEDULING */