/* Instruction scheduling pass.
   Copyright (C) 1992, 93-97, 1998 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* Instruction scheduling pass.

   This pass implements list scheduling within basic blocks.  It is
   run after flow analysis, but before register allocation.  The
   scheduler works as follows:

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning
   values to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   Function unit conflicts are resolved during reverse list scheduling
   by tracking the time when each insn is committed to the schedule
   and from that, the time the function units it uses must be free.
   As insns on the ready list are considered for scheduling, those
   that would result in a blockage of the already committed insns are
   queued until no blockage will result.  Among the remaining insns on
   the ready list to be considered, the first one with the largest
   potential for causing a subsequent blockage is chosen.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

	1. choose insn with lowest conflict cost, ties broken by
	2. choose insn with the longest path to end of bb, ties broken by
	3. choose insn that kills the most registers, ties broken by
	4. choose insn that conflicts with the most ready insns, or finally
	5. choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not do any branch scheduling.  But
   that is ok, since we can use GNU's delayed branch scheduling
   pass to take care of this case.

   Also note that no further optimizations based on algebraic identities
   are performed, so this pass would be a good one to perform instruction
   splitting, such as breaking up a multiply instruction into shifts
   and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   This pass must update information that subsequent passes expect to be
   correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, basic_block_head,
   basic_block_end.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks that have been scheduled.  */
\f
#include "config.h"
#include <stdio.h>
#if HAVE_STDLIB_H
#include <stdlib.h>
#endif
#include "rtl.h"
#include "basic-block.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"

extern char *reg_known_equiv_p;
extern rtx *reg_known_value;

#ifdef INSN_SCHEDULING
/* Arrays set up by scheduling for the same respective purposes as
   similar-named arrays set up by flow analysis.  We work with these
   arrays during the scheduling pass so we can compare values against
   unscheduled code.

   Values of these arrays are copied at the end of this pass into the
   arrays set up by flow analysis.  */
static int *sched_reg_n_calls_crossed;
static int *sched_reg_live_length;
/* Element N of reg_last_uses is an INSN_LIST of the insns that last
   used (hard or pseudo) register N within the current basic block;
   element N of reg_last_sets is the last insn that set register N, or
   zero if there is no such insn.  Needed for new registers which may
   be introduced by splitting insns.  */
static rtx *reg_last_uses;
static rtx *reg_last_sets;
static regset reg_pending_sets;
static int reg_pending_sets_all;

/* Vector indexed by INSN_UID giving the original ordering of the insns.  */
static int *insn_luid;
#define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving each instruction a priority.  */
static int *insn_priority;
#define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])

static short *insn_costs;
#define INSN_COST(INSN) insn_costs[INSN_UID (INSN)]

/* Vector indexed by INSN_UID giving an encoding of the function units
   used.  */
static short *insn_units;
#define INSN_UNIT(INSN) insn_units[INSN_UID (INSN)]

/* Vector indexed by INSN_UID giving an encoding of the blockage range
   function.  The unit and the range are encoded.  */
static unsigned int *insn_blockage;
#define INSN_BLOCKAGE(INSN) insn_blockage[INSN_UID (INSN)]
#define UNIT_BITS 5
#define BLOCKAGE_MASK ((1 << BLOCKAGE_BITS) - 1)
#define ENCODE_BLOCKAGE(U,R)				\
  ((((U) << UNIT_BITS) << BLOCKAGE_BITS			\
    | MIN_BLOCKAGE_COST (R)) << BLOCKAGE_BITS		\
   | MAX_BLOCKAGE_COST (R))
#define UNIT_BLOCKED(B) ((B) >> (2 * BLOCKAGE_BITS))
#define BLOCKAGE_RANGE(B)                                                \
  (((((B) >> BLOCKAGE_BITS) & BLOCKAGE_MASK) << (HOST_BITS_PER_INT / 2)) \
   | ((B) & BLOCKAGE_MASK))

/* Encodings of the `<name>_unit_blockage_range' function.  */
#define MIN_BLOCKAGE_COST(R) ((R) >> (HOST_BITS_PER_INT / 2))
#define MAX_BLOCKAGE_COST(R) ((R) & ((1 << (HOST_BITS_PER_INT / 2)) - 1))
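
/* An illustrative sketch (not part of the pass): a blockage range R packs
   its minimum cost in the upper half of an int and its maximum cost in
   the lower half.  Assuming HOST_BITS_PER_INT is 32, a unit whose
   `<name>_unit_blockage_range' function reports a minimum of 2 and a
   maximum of 5 returns R = (2 << 16) | 5, so that

	MIN_BLOCKAGE_COST (R) == 2
	MAX_BLOCKAGE_COST (R) == 5

   BLOCKAGE_RANGE reconstructs the same packed form from the two
   BLOCKAGE_BITS-wide fields cached in INSN_BLOCKAGE (BLOCKAGE_BITS, like
   INSN_QUEUE_SIZE below, comes from the generated insn-attr.h).  */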

#define DONE_PRIORITY -1
#define MAX_PRIORITY 0x7fffffff
#define TAIL_PRIORITY 0x7ffffffe
#define LAUNCH_PRIORITY 0x7f000001
#define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
#define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)

/* Vector indexed by INSN_UID giving number of insns referring to this insn.  */
static int *insn_ref_count;
#define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving line-number note in effect for each
   insn.  For line-number notes, this indicates whether the note may be
   reused.  */
static rtx *line_note;
#define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

/* Regsets telling whether a given register is live or dead before the last
   scheduled insn.  Must scan the instructions once before scheduling to
   determine what registers are live or dead at the end of the block.  */
static regset bb_dead_regs;
static regset bb_live_regs;

/* Regset telling whether a given register is live after the insn currently
   being scheduled.  Before processing an insn, this is equal to bb_live_regs
   above.  This is used so that we can find registers that are newly born/dead
   after processing an insn.  */
static regset old_live_regs;

/* The chain of REG_DEAD notes.  REG_DEAD notes are removed from all insns
   during the initial scan and reused later.  If there are not exactly as
   many REG_DEAD notes in the post scheduled code as there were in the
   prescheduled code then we trigger an abort because this indicates a bug.  */
static rtx dead_notes;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns following it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
       their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
       time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.  Insns may
   move from the "Ready" list to the "Queued" set if they are blocked
   due to a function unit conflict.
   The "Pending" list (P) consists of the insns in the LOG_LINKS of the
   unscheduled insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transition (R->Q) is implemented in `schedule_select' when an
   insn is found to have a function unit conflict with the already
   committed insns.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented at the top of the scheduling
   loop in `schedule_block' as time passes or stalls are introduced.  */

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  INSN_QUEUE_SIZE is a power of two larger than
   MAX_BLOCKAGE and MAX_READY_COST computed by genattr.c.  This is the
   longest time an insn may be queued.  */
static rtx insn_queue[INSN_QUEUE_SIZE];
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & (INSN_QUEUE_SIZE-1))
#define NEXT_Q_AFTER(X,C) (((X)+C) & (INSN_QUEUE_SIZE-1))
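
/* Illustrative sketch only: because INSN_QUEUE_SIZE is a power of two,
   the AND above wraps a queue index without a division.  Assuming
   INSN_QUEUE_SIZE is 8 (the real value comes from genattr.c):

	NEXT_Q (7) == 0
	NEXT_Q_AFTER (6, 3) == 1

   so an insn that must stall for C cycles is placed C slots ahead of
   q_ptr, modulo the buffer size.  */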

/* Vector indexed by INSN_UID giving the minimum clock tick at which
   the insn becomes ready.  This is used to note timing constraints for
   insns in the pending list.  */
static int *insn_tick;
#define INSN_TICK(INSN) (insn_tick[INSN_UID (INSN)])

/* Data structure for keeping track of register information
   during that register's life.  */

struct sometimes
{
  int regno;
  int live_length;
  int calls_crossed;
};

/* Forward declarations.  */
static void add_dependence PROTO((rtx, rtx, enum reg_note));
static void remove_dependence PROTO((rtx, rtx));
static rtx find_insn_list PROTO((rtx, rtx));
static int insn_unit PROTO((rtx));
static unsigned int blockage_range PROTO((int, rtx));
static void clear_units PROTO((void));
static void prepare_unit PROTO((int));
static int actual_hazard_this_instance PROTO((int, int, rtx, int, int));
static void schedule_unit PROTO((int, rtx, int));
static int actual_hazard PROTO((int, rtx, int, int));
static int potential_hazard PROTO((int, rtx, int));
static int insn_cost PROTO((rtx, rtx, rtx));
static int priority PROTO((rtx));
static void free_pending_lists PROTO((void));
static void add_insn_mem_dependence PROTO((rtx *, rtx *, rtx, rtx));
static void flush_pending_lists PROTO((rtx, int));
static void sched_analyze_1 PROTO((rtx, rtx));
static void sched_analyze_2 PROTO((rtx, rtx));
static void sched_analyze_insn PROTO((rtx, rtx, rtx));
static int sched_analyze PROTO((rtx, rtx));
static void sched_note_set PROTO((int, rtx, int));
static int rank_for_schedule PROTO((rtx *, rtx *));
static void swap_sort PROTO((rtx *, int));
static void queue_insn PROTO((rtx, int));
static int birthing_insn_p PROTO((rtx));
static void adjust_priority PROTO((rtx));
static int schedule_insn PROTO((rtx, rtx *, int, int));
static int schedule_select PROTO((rtx *, int, int, FILE *));
static void create_reg_dead_note PROTO((rtx, rtx));
static void attach_deaths PROTO((rtx, rtx, int));
static void attach_deaths_insn PROTO((rtx));
static rtx unlink_notes PROTO((rtx, rtx));
static int new_sometimes_live PROTO((struct sometimes *, int, int));
static void finish_sometimes_live PROTO((struct sometimes *, int));
static rtx reemit_notes PROTO((rtx, rtx));
static void schedule_block PROTO((int, FILE *));
static rtx regno_use_in PROTO((int, rtx));
static void split_hard_reg_notes PROTO((rtx, rtx, rtx, rtx));
static void new_insn_dead_notes PROTO((rtx, rtx, rtx, rtx));
static void update_n_sets PROTO((rtx, int));
static void update_flow_info PROTO((rtx, rtx, rtx, rtx));

/* Main entry point of this file.  */
void schedule_insns PROTO((FILE *));

#endif /* INSN_SCHEDULING */
\f
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Helper functions for instruction scheduling.  */

/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

static void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */

  next = NEXT_INSN (elem);

#ifdef HAVE_cc0
  while (next && GET_CODE (next) == NOTE)
    next = NEXT_INSN (next);
#endif

  if (next && SCHED_GROUP_P (next)
      && GET_CODE (next) != CODE_LABEL)
    {
      /* Notes will never intervene here though, so don't bother checking
         for them.  */
      /* We must reject CODE_LABELs, so that we don't get confused by one
         that has LABEL_PRESERVE_P set, which is represented by the same
         bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
         SCHED_GROUP_P.  */
      while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next))
             && GET_CODE (NEXT_INSN (next)) != CODE_LABEL)
        next = NEXT_INSN (next);

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
        return;

      /* Make the dependence to NEXT, the last insn of the group, instead
         of the original ELEM.  */
      elem = next;
    }

  /* Check that we don't already have this dependence.  */
  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    if (XEXP (link, 0) == elem)
      {
        /* If this is a more restrictive type of dependence than the existing
           one, then change the existing dependence to this type.  */
        if ((int) dep_type < (int) REG_NOTE_KIND (link))
          PUT_REG_NOTE_KIND (link, dep_type);
        return;
      }
  /* Might want to check one level of transitivity to save conses.  */

  link = rtx_alloc (INSN_LIST);
  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);
  XEXP (link, 0) = elem;
  XEXP (link, 1) = LOG_LINKS (insn);
  LOG_LINKS (insn) = link;
}
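
/* Usage sketch (illustrative only): to record that INSN must stay after
   ELEM because INSN overwrites a register that ELEM still reads, the
   analysis code calls

	add_dependence (insn, elem, REG_DEP_ANTI);

   If the same link is later added with a more restrictive kind (a smaller
   enum reg_note value, e.g. 0 for a true dependence), the loop above
   merely tightens the existing link with PUT_REG_NOTE_KIND instead of
   allocating a second INSN_LIST node.  */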

/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */

static void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    {
      if (XEXP (link, 0) == elem)
        {
          RTX_INTEGRATED_P (link) = 1;
          if (prev)
            XEXP (prev, 1) = XEXP (link, 1);
          else
            LOG_LINKS (insn) = XEXP (link, 1);
          found = 1;
        }
      else
        prev = link;
    }

  if (! found)
    abort ();
  return;
}
\f
#ifndef INSN_SCHEDULING
void
schedule_insns (dump_file)
     FILE *dump_file;
{
}
#else
#ifndef __GNUC__
#define __inline
#endif

/* Computation of memory dependencies.  */

/* The *_insns and *_mems are paired lists.  Each pending memory operation
   will have a pointer to the MEM rtx on one list and a pointer to the
   containing insn on the other list in the same place in the list.  */

/* We can't use add_dependence like the old code did, because a single insn
   may have multiple memory accesses, and hence needs to be on the list
   once for each memory access.  Add_dependence won't let you add an insn
   to a list more than once.  */

/* An INSN_LIST containing all insns with pending read operations.  */
static rtx pending_read_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
static rtx pending_read_mems;

/* An INSN_LIST containing all insns with pending write operations.  */
static rtx pending_write_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
static rtx pending_write_mems;

/* Indicates the combined length of the two pending lists.  We must prevent
   these lists from ever growing too large since the number of dependencies
   produced is at least O(N*N), and execution time is at least O(4*N*N), as
   a function of the length of these pending lists.  */

static int pending_lists_length;

/* An INSN_LIST containing all INSN_LISTs allocated but currently unused.  */

static rtx unused_insn_list;

/* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused.  */

static rtx unused_expr_list;

/* The last insn upon which all memory references must depend.
   This is an insn which flushed the pending lists, creating a dependency
   between it and all previously pending memory references.  This creates
   a barrier (or a checkpoint) which no memory reference is allowed to cross.

   This includes all non-constant CALL_INSNs.  When we do interprocedural
   alias analysis, this restriction can be relaxed.
   This may also be an INSN that writes memory if the pending lists grow
   too large.  */

static rtx last_pending_memory_flush;

/* The last function call we have seen.  All hard regs, and, of course,
   the last function call, must depend on this.  */

static rtx last_function_call;

/* The LOG_LINKS field of this is a list of insns which use a pseudo register
   that does not already cross a call.  We create dependencies between each
   of those insns and the next call insn, to ensure that they won't cross a
   call after scheduling is done.  */

static rtx sched_before_next_call;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;

/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
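
/* Illustrative sketch only (not real target code): for the sequence

	store M, r1	;; write memory location M
	load  r2, M	;; (1) true dependence on the store
	store M, r3	;; (2) anti dependence on the load and
			;; (3) output dependence on the first store

   (1), (2), and (3) all constrain the schedule; (0), a read following a
   read, matters only in the special cases tested by read_dependence in
   sched_analyze_2 below.  */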
\f
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

__inline static rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
        return list;
      list = XEXP (list, 1);
    }
  return 0;
}

/* Compute the function units used by INSN.  This caches the value
   returned by function_units_used.  A function unit is encoded as the
   unit number if the value is non-negative and the complement of a
   mask if the value is negative.  A function unit index is the
   non-negative encoding.  */

__inline static int
insn_unit (insn)
     rtx insn;
{
  register int unit = INSN_UNIT (insn);

  if (unit == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.
         We can't pass these directly to function_units_used because it will
         trigger a fatal error for unrecognizable insns.  */
      if (INSN_CODE (insn) < 0)
        unit = -1;
      else
        {
          unit = function_units_used (insn);
          /* Increment non-negative values so we can cache zero.  */
          if (unit >= 0)
            unit++;
        }
      /* We only cache 16 bits of the result, so if the value is out of
         range, don't cache it.  */
      if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT
          || unit >= 0
          || (~unit & ((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0)
        INSN_UNIT (insn) = unit;
    }
  return (unit > 0 ? unit - 1 : unit);
}
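
/* Illustrative sketch of this encoding: an insn that runs only on unit 3
   is encoded as 3, while an insn that can run on unit 0 or unit 2 is
   encoded as the complement of the mask (1 << 0) | (1 << 2), i.e.
   ~5 == -6.  Consumers such as prepare_unit below recover the mask with
   another complement and walk its set bits.  */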

/* Compute the blockage range for executing INSN on UNIT.  This caches
   the value returned by the blockage_range_function for the unit.
   These values are encoded in an int where the upper half gives the
   minimum value and the lower half gives the maximum value.  */

__inline static unsigned int
blockage_range (unit, insn)
     int unit;
     rtx insn;
{
  unsigned int blockage = INSN_BLOCKAGE (insn);
  unsigned int range;

  if (UNIT_BLOCKED (blockage) != unit + 1)
    {
      range = function_units[unit].blockage_range_function (insn);
      /* We only cache the blockage range for one unit and then only if
         the values fit.  */
      if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS)
        INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range);
    }
  else
    range = BLOCKAGE_RANGE (blockage);

  return range;
}

/* A vector indexed by function unit instance giving the last insn to use
   the unit.  The value of the function unit instance index for unit U
   instance I is (U + I * FUNCTION_UNITS_SIZE).  */
static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit instance giving the minimum time when
   the unit will unblock based on the maximum blockage cost.  */
static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit number giving the number of insns
   that remain to use the unit.  */
static int unit_n_insns[FUNCTION_UNITS_SIZE];

/* Reset the function unit state to the null state.  */

static void
clear_units ()
{
  bzero ((char *) unit_last_insn, sizeof (unit_last_insn));
  bzero ((char *) unit_tick, sizeof (unit_tick));
  bzero ((char *) unit_n_insns, sizeof (unit_n_insns));
}

/* Record an insn as one that will use the units encoded by UNIT.  */

__inline static void
prepare_unit (unit)
     int unit;
{
  int i;

  if (unit >= 0)
    unit_n_insns[unit]++;
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        prepare_unit (i);
}

/* Return the actual hazard cost of executing INSN on the unit UNIT,
   instance INSTANCE at time CLOCK if the previous actual hazard cost
   was COST.  */

__inline static int
actual_hazard_this_instance (unit, instance, insn, clock, cost)
     int unit, instance, clock, cost;
     rtx insn;
{
  int tick = unit_tick[instance];

  if (tick - clock > cost)
    {
      /* The scheduler is operating in reverse, so INSN is the executing
         insn and the unit's last insn is the candidate insn.  We want a
         more exact measure of the blockage if we execute INSN at CLOCK
         given when we committed the execution of the unit's last insn.

         The blockage value is given by either the unit's max blockage
         constant, blockage range function, or blockage function.  Use
         the most exact form for the given unit.  */

      if (function_units[unit].blockage_range_function)
        {
          if (function_units[unit].blockage_function)
            tick += (function_units[unit].blockage_function
                     (insn, unit_last_insn[instance])
                     - function_units[unit].max_blockage);
          else
            tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn))
                     - function_units[unit].max_blockage);
        }
      if (tick - clock > cost)
        cost = tick - clock;
    }
  return cost;
}

/* Record INSN as having begun execution on the units encoded by UNIT at
   time CLOCK.  */

__inline static void
schedule_unit (unit, insn, clock)
     int unit, clock;
     rtx insn;
{
  int i;

  if (unit >= 0)
    {
      int instance = unit;
#if MAX_MULTIPLICITY > 1
      /* Find the first free instance of the function unit and use that
         one.  We assume that one is free.  */
      for (i = function_units[unit].multiplicity - 1; i > 0; i--)
        {
          if (! actual_hazard_this_instance (unit, instance, insn, clock, 0))
            break;
          instance += FUNCTION_UNITS_SIZE;
        }
#endif
      unit_last_insn[instance] = insn;
      unit_tick[instance] = (clock + function_units[unit].max_blockage);
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        schedule_unit (i, insn, clock);
}

/* Return the actual hazard cost of executing INSN on the units encoded by
   UNIT at time CLOCK if the previous actual hazard cost was COST.  */

__inline static int
actual_hazard (unit, insn, clock, cost)
     int unit, clock, cost;
     rtx insn;
{
  int i;

  if (unit >= 0)
    {
      /* Find the instance of the function unit with the minimum hazard.  */
      int instance = unit;
      int best_cost = actual_hazard_this_instance (unit, instance, insn,
                                                   clock, cost);
#if MAX_MULTIPLICITY > 1
      int this_cost;

      if (best_cost > cost)
        {
          for (i = function_units[unit].multiplicity - 1; i > 0; i--)
            {
              instance += FUNCTION_UNITS_SIZE;
              this_cost = actual_hazard_this_instance (unit, instance, insn,
                                                       clock, cost);
              if (this_cost < best_cost)
                {
                  best_cost = this_cost;
                  if (this_cost <= cost)
                    break;
                }
            }
        }
#endif
      cost = MAX (cost, best_cost);
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        cost = actual_hazard (i, insn, clock, cost);

  return cost;
}

/* Return the potential hazard cost of executing an instruction on the
   units encoded by UNIT if the previous potential hazard cost was COST.
   An insn with a large blockage time is chosen in preference to one
   with a smaller time; an insn that uses a unit that is more likely
   to be used is chosen in preference to one with a unit that is less
   used.  We are trying to minimize a subsequent actual hazard.  */

__inline static int
potential_hazard (unit, insn, cost)
     int unit, cost;
     rtx insn;
{
  int i, ncost;
  unsigned int minb, maxb;

  if (unit >= 0)
    {
      minb = maxb = function_units[unit].max_blockage;
      if (maxb > 1)
        {
          if (function_units[unit].blockage_range_function)
            {
              maxb = minb = blockage_range (unit, insn);
              maxb = MAX_BLOCKAGE_COST (maxb);
              minb = MIN_BLOCKAGE_COST (minb);
            }

          if (maxb > 1)
            {
              /* Make the number of instructions left dominate.  Make the
                 minimum delay dominate the maximum delay.  If all these
                 are the same, use the unit number to add an arbitrary
                 ordering.  Other terms can be added.  */
              ncost = minb * 0x40 + maxb;
              ncost *= (unit_n_insns[unit] - 1) * 0x1000 + unit;
              if (ncost > cost)
                cost = ncost;
            }
        }
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        cost = potential_hazard (i, insn, cost);

  return cost;
}

/* Compute cost of executing INSN given the dependence LINK on the insn USED.
   This is the number of virtual cycles taken between instruction issue and
   instruction results.  */

__inline static int
insn_cost (insn, link, used)
     rtx insn, link, used;
{
  register int cost = INSN_COST (insn);

  if (cost == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.
         We can't pass these directly to result_ready_cost because it will
         trigger a fatal error for unrecognizable insns.  */
      if (INSN_CODE (insn) < 0)
        {
          INSN_COST (insn) = 1;
          return 1;
        }
      else
        {
          cost = result_ready_cost (insn);

          if (cost < 1)
            cost = 1;

          INSN_COST (insn) = cost;
        }
    }

  /* A USE insn should never require the value used to be computed.  This
     allows the computation of a function's result and parameter values to
     overlap the return and call.  */
  recog_memoized (used);
  if (INSN_CODE (used) < 0)
    LINK_COST_FREE (link) = 1;

  /* If some dependencies vary the cost, compute the adjustment.  Most
     commonly, the adjustment is complete: either the cost is ignored
     (in the case of an output- or anti-dependence), or the cost is
     unchanged.  These values are cached in the link as LINK_COST_FREE
     and LINK_COST_ZERO.  */

  if (LINK_COST_FREE (link))
    cost = 1;
#ifdef ADJUST_COST
  else if (! LINK_COST_ZERO (link))
    {
      int ncost = cost;

      ADJUST_COST (used, link, insn, ncost);
      if (ncost <= 1)
        LINK_COST_FREE (link) = ncost = 1;
      if (cost == ncost)
        LINK_COST_ZERO (link) = 1;
      cost = ncost;
    }
#endif
  return cost;
}

/* Compute the priority number for INSN.  */

static int
priority (insn)
     rtx insn;
{
  if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
    {
      int prev_priority;
      int max_priority;
      int this_priority = INSN_PRIORITY (insn);
      rtx prev;

      if (this_priority > 0)
        return this_priority;

      max_priority = 1;

      /* Nonzero if these insns must be scheduled together.  */
      if (SCHED_GROUP_P (insn))
        {
          prev = insn;
          while (SCHED_GROUP_P (prev))
            {
              prev = PREV_INSN (prev);
              INSN_REF_COUNT (prev) += 1;
            }
        }

      for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
        {
          rtx x = XEXP (prev, 0);

          /* If this was a duplicate of a dependence we already deleted,
             ignore it.  */
          if (RTX_INTEGRATED_P (prev))
            continue;

          /* A dependence pointing to a note or deleted insn is always
             obsolete, because sched_analyze_insn will have created any
             necessary new dependences which replace it.  Notes and deleted
             insns can be created when instructions are deleted by insn
             splitting, or by register allocation.  */
          if (GET_CODE (x) == NOTE || INSN_DELETED_P (x))
            {
              remove_dependence (insn, x);
              continue;
            }

          /* Clear the link cost adjustment bits.  */
          LINK_COST_FREE (prev) = 0;
#ifdef ADJUST_COST
          LINK_COST_ZERO (prev) = 0;
#endif

          /* This priority calculation was chosen because it results in the
             least instruction movement, and does not hurt the performance
             of the resulting code compared to the old algorithm.
             This makes the sched algorithm more stable, which results
             in better code, because there is less register pressure,
             cross jumping is more likely to work, and debugging is easier.

             When all instructions have a latency of 1, there is no need to
             move any instructions.  Subtracting one here ensures that in such
             cases all instructions will end up with a priority of one, and
             hence no scheduling will be done.

             The original code did not subtract the one, and added the
             insn_cost of the current instruction to its priority (e.g.
             move the insn_cost call down to the end).  */

          prev_priority = priority (x) + insn_cost (x, prev, insn) - 1;

          if (prev_priority > max_priority)
            max_priority = prev_priority;
          INSN_REF_COUNT (x) += 1;
        }

      prepare_unit (insn_unit (insn));
      INSN_PRIORITY (insn) = max_priority;
      return INSN_PRIORITY (insn);
    }
  return 0;
}
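
/* Worked example (illustrative only): for a dependence chain in which C
   depends on B and B depends on A, with each insn's insn_cost equal to 2,
   the recursion above yields priority (A) == 1, priority (B) == 1 + 2 - 1
   == 2, and priority (C) == 2 + 2 - 1 == 3.  With every cost equal to 1,
   all three priorities collapse to 1, which is exactly the "no scheduling
   needed" case described in the comment above.  */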
\f
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
   them to the unused_*_list variables, so that they can be reused.  */

static void
free_pending_lists ()
{
  register rtx link, prev_link;

  if (pending_read_insns)
    {
      prev_link = pending_read_insns;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_read_insns;
      pending_read_insns = 0;
    }

  if (pending_write_insns)
    {
      prev_link = pending_write_insns;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_write_insns;
      pending_write_insns = 0;
    }

  if (pending_read_mems)
    {
      prev_link = pending_read_mems;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_read_mems;
      pending_read_mems = 0;
    }

  if (pending_write_mems)
    {
      prev_link = pending_write_mems;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_write_mems;
      pending_write_mems = 0;
    }
}

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (insn_list, mem_list, insn, mem)
     rtx *insn_list, *mem_list, insn, mem;
{
  register rtx link;

  if (unused_insn_list)
    {
      link = unused_insn_list;
      unused_insn_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (INSN_LIST);
  XEXP (link, 0) = insn;
  XEXP (link, 1) = *insn_list;
  *insn_list = link;

  if (unused_expr_list)
    {
      link = unused_expr_list;
      unused_expr_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (EXPR_LIST);
  XEXP (link, 0) = mem;
  XEXP (link, 1) = *mem_list;
  *mem_list = link;

  pending_lists_length++;
}
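
/* Illustrative sketch: after analyzing two stores `st [a]' and then
   `st [b]', the paired lists hold, position for position,

	pending_write_insns: insn(st [b]) -> insn(st [a])
	pending_write_mems:  mem[b]       -> mem[a]

   so walking both lists in step, as the loops in sched_analyze_1 and
   sched_analyze_2 do, yields each pending MEM together with its
   containing insn.  */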
\f
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  If ONLY_WRITE, don't flush
   the read list.  */

static void
flush_pending_lists (insn, only_write)
     rtx insn;
     int only_write;
{
  rtx link;

  while (pending_read_insns && ! only_write)
    {
      add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);

      link = pending_read_insns;
      pending_read_insns = XEXP (pending_read_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_read_mems;
      pending_read_mems = XEXP (pending_read_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  while (pending_write_insns)
    {
      add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);

      link = pending_write_insns;
      pending_write_insns = XEXP (pending_write_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_write_mems;
      pending_write_mems = XEXP (pending_write_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  pending_lists_length = 0;

  if (last_pending_memory_flush)
    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

  last_pending_memory_flush = insn;
}

/* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
   by the write to the destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (x, insn)
     rtx x;
     rtx insn;
{
  register int regno;
  register rtx dest = SET_DEST (x);

  if (dest == 0)
    return;

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (XEXP (dest, 1), insn);
          sched_analyze_2 (XEXP (dest, 2), insn);
        }
      dest = SUBREG_REG (dest);
    }

  if (GET_CODE (dest) == REG)
    {
      register int i;

      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
          while (--i >= 0)
            {
              rtx u;

              for (u = reg_last_uses[regno + i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              reg_last_uses[regno + i] = 0;
              if (reg_last_sets[regno + i])
                add_dependence (insn, reg_last_sets[regno + i],
                                REG_DEP_OUTPUT);
              SET_REGNO_REG_SET (reg_pending_sets, regno + i);
              if ((call_used_regs[regno + i] || global_regs[regno + i])
                  && last_function_call)
                /* Function calls clobber all call_used regs.  */
                add_dependence (insn, last_function_call, REG_DEP_ANTI);
            }
        }
      else
        {
          rtx u;

          for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
          reg_last_uses[regno] = 0;
          if (reg_last_sets[regno])
            add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
          SET_REGNO_REG_SET (reg_pending_sets, regno);

          /* Pseudos that are REG_EQUIV to something may be replaced
             by that during reloading.  We need only add dependencies for
             the address in the REG_EQUIV note.  */
          if (! reload_completed
              && reg_known_equiv_p[regno]
              && GET_CODE (reg_known_value[regno]) == MEM)
            sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);

          /* Don't let it cross a call after scheduling if it doesn't
             already cross one.  */
          if (REG_N_CALLS_CROSSED (regno) == 0 && last_function_call)
            add_dependence (insn, last_function_call, REG_DEP_ANTI);
        }
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */

      if (pending_lists_length > 32)
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  The number 32 was chosen because it
             seems like a reasonable number.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the number 32.  */
          flush_pending_lists (insn, 0);
        }
      else
        {
          rtx pending, pending_mem;

          pending = pending_read_insns;
          pending_mem = pending_read_mems;
          while (pending)
            {
              /* If a dependency already exists, don't create a new one.  */
              if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
                if (anti_dependence (XEXP (pending_mem, 0), dest))
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = pending_write_insns;
          pending_mem = pending_write_mems;
          while (pending)
            {
              /* If a dependency already exists, don't create a new one.  */
              if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
                if (output_dependence (XEXP (pending_mem, 0), dest))
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          if (last_pending_memory_flush)
            add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

          add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
                                   insn, dest);
        }
      sched_analyze_2 (XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (SET_SRC (x), insn);
}

/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (x, insn)
     rtx x;
     rtx insn;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      {
        rtx link, prev;

        /* User of CC0 depends on immediately preceding insn.  */
        SCHED_GROUP_P (insn) = 1;

        /* There may be a note before this insn now, but all notes will
           be removed before we actually try to schedule the insns, so
           it won't cause a problem later.  We must avoid it here though.  */
        prev = prev_nonnote_insn (insn);

        /* Make a copy of all dependencies on the immediately previous insn,
           and add to this insn.  This is so that all the dependencies will
           apply to the group.  Remove an explicit dependence on this insn
           as SCHED_GROUP_P now represents it.  */

        if (find_insn_list (prev, LOG_LINKS (insn)))
          remove_dependence (insn, prev);

        for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
          add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));

        return;
      }
#endif

    case REG:
      {
        int regno = REGNO (x);
        if (regno < FIRST_PSEUDO_REGISTER)
          {
            int i;

            i = HARD_REGNO_NREGS (regno, GET_MODE (x));
            while (--i >= 0)
              {
                reg_last_uses[regno + i]
                  = gen_rtx_INSN_LIST (VOIDmode,
                                       insn, reg_last_uses[regno + i]);
                if (reg_last_sets[regno + i])
                  add_dependence (insn, reg_last_sets[regno + i], 0);
                if ((call_used_regs[regno + i] || global_regs[regno + i])
                    && last_function_call)
                  /* Function calls clobber all call_used regs.  */
                  add_dependence (insn, last_function_call, REG_DEP_ANTI);
              }
          }
        else
          {
            reg_last_uses[regno]
              = gen_rtx_INSN_LIST (VOIDmode, insn, reg_last_uses[regno]);
            if (reg_last_sets[regno])
              add_dependence (insn, reg_last_sets[regno], 0);

            /* Pseudos that are REG_EQUIV to something may be replaced
               by that during reloading.  We need only add dependencies for
               the address in the REG_EQUIV note.  */
            if (! reload_completed
                && reg_known_equiv_p[regno]
                && GET_CODE (reg_known_value[regno]) == MEM)
              sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);

            /* If the register does not already cross any calls, then add this
               insn to the sched_before_next_call list so that it will still
               not cross calls after scheduling.  */
            if (REG_N_CALLS_CROSSED (regno) == 0)
              add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
          }
        return;
      }

    case MEM:
      {
        /* Reading memory.  */

        rtx pending, pending_mem;

        pending = pending_read_insns;
        pending_mem = pending_read_mems;
        while (pending)
          {
            /* If a dependency already exists, don't create a new one.  */
            if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
              if (read_dependence (XEXP (pending_mem, 0), x))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        pending = pending_write_insns;
        pending_mem = pending_write_mems;
        while (pending)
          {
            /* If a dependency already exists, don't create a new one.  */
            if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
              if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
                                   x, rtx_varies_p))
                add_dependence (insn, XEXP (pending, 0), 0);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }
        if (last_pending_memory_flush)
          add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
                                 insn, x);

        /* Take advantage of tail recursion here.  */
        sched_analyze_2 (XEXP (x, 0), insn);
        return;
      }

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
    case TRAP_IF:
      {
        rtx u;

        /* Traditional and volatile asm instructions must be considered to use
           and clobber all hard registers, all pseudo-registers and all of
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

           Consider for instance a volatile asm that changes the fpu rounding
           mode.  An insn should not be moved across this even if it only uses
           pseudo-regs because it might give an incorrectly rounded result.  */
        if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
          {
            int max_reg = max_reg_num ();
            for (i = 0; i < max_reg; i++)
              {
                for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                reg_last_uses[i] = 0;
                if (reg_last_sets[i])
                  add_dependence (insn, reg_last_sets[i], 0);
              }
            reg_pending_sets_all = 1;

            flush_pending_lists (insn, 0);
          }

        /* For all ASM_OPERANDS, we must traverse the vector of input
           operands.  We cannot just fall through here since then we would
           be confused by the ASM_INPUT rtx inside ASM_OPERANDS, which does
           not indicate a traditional asm unlike its normal usage.  */

        if (code == ASM_OPERANDS)
          {
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
              sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
            return;
          }
        break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
      sched_analyze_2 (XEXP (x, 0), insn);
      sched_analyze_1 (x, insn);
      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        sched_analyze_2 (XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (XVECEXP (x, i, j), insn);
    }
}

/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (x, insn, loop_notes)
     rtx x, insn;
     rtx loop_notes;
{
  register RTX_CODE code = GET_CODE (x);
  rtx link;
  int maxreg = max_reg_num ();
  int i;

  if (code == SET || code == CLOBBER)
    sched_analyze_1 (x, insn);
  else if (code == PARALLEL)
    {
      register int i;
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        {
          code = GET_CODE (XVECEXP (x, 0, i));
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (XVECEXP (x, 0, i), insn);
          else
            sched_analyze_2 (XVECEXP (x, 0, i), insn);
        }
    }
  else
    sched_analyze_2 (x, insn);

  /* Mark registers CLOBBERED or used by called function.  */
  if (GET_CODE (insn) == CALL_INSN)
    for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
      {
        if (GET_CODE (XEXP (link, 0)) == CLOBBER)
          sched_analyze_1 (XEXP (link, 0), insn);
        else
          sched_analyze_2 (XEXP (link, 0), insn);
      }

  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across
     it.  Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */

  if (loop_notes)
    {
      int max_reg = max_reg_num ();
      rtx link;

      for (i = 0; i < max_reg; i++)
        {
          rtx u;
          for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
          reg_last_uses[i] = 0;
          if (reg_last_sets[i])
            add_dependence (insn, reg_last_sets[i], 0);
        }
      reg_pending_sets_all = 1;

      flush_pending_lists (insn, 0);

      link = loop_notes;
      while (XEXP (link, 1))
        link = XEXP (link, 1);
      XEXP (link, 1) = REG_NOTES (insn);
      REG_NOTES (insn) = loop_notes;
    }

  /* After reload, it is possible for an instruction to have a REG_DEAD note
     for a register that actually dies a few instructions earlier.  For
     example, this can happen with SECONDARY_MEMORY_NEEDED reloads.
     In this case, we must consider the insn to use the register mentioned
     in the REG_DEAD note.  Otherwise, we may accidentally move this insn
     after another insn that sets the register, thus getting obviously invalid
     rtl.  This confuses reorg which believes that REG_DEAD notes are still
     meaningful.

     ??? We would get better code if we fixed reload to put the REG_DEAD
     notes in the right places, but that may not be worth the effort.  */

  if (reload_completed)
    {
      rtx note;

      for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD)
          sched_analyze_2 (XEXP (note, 0), insn);
    }

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
                             {
                               reg_last_sets[i] = insn;
                             });
  CLEAR_REG_SET (reg_pending_sets);

  if (reg_pending_sets_all)
    {
      for (i = 0; i < maxreg; i++)
        reg_last_sets[i] = insn;
      reg_pending_sets_all = 0;
    }

  /* Handle function calls and function returns created by the epilogue
     threading code.  */
  if (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN)
    {
      rtx dep_insn;
      rtx prev_dep_insn;

      /* When scheduling instructions, we make sure calls don't lose their
         accompanying USE insns by depending them one on another in order.

         Also, we must do the same thing for returns created by the epilogue
         threading code.  Note this code works only in this special case,
         because other passes make no guarantee that they will never emit
         an instruction between a USE and a RETURN.  There is such a guarantee
         for USE instructions immediately before a call.  */

      prev_dep_insn = insn;
      dep_insn = PREV_INSN (insn);
      while (GET_CODE (dep_insn) == INSN
             && GET_CODE (PATTERN (dep_insn)) == USE
             && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == REG)
        {
          SCHED_GROUP_P (prev_dep_insn) = 1;

          /* Make a copy of all dependencies on dep_insn, and add to insn.
             This is so that all of the dependencies will apply to the
             group.  */

          for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
            add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));

          prev_dep_insn = dep_insn;
          dep_insn = PREV_INSN (dep_insn);
        }
    }
}

/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

static int
sched_analyze (head, tail)
     rtx head, tail;
{
  register rtx insn;
  register int n_insns = 0;
  register rtx u;
  register int luid = 0;
  rtx loop_notes = 0;

  for (insn = head; ; insn = NEXT_INSN (insn))
    {
      INSN_LUID (insn) = luid++;

      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
        {
          sched_analyze_insn (PATTERN (insn), insn, loop_notes);
          loop_notes = 0;
          n_insns += 1;
        }
      else if (GET_CODE (insn) == CALL_INSN)
        {
          rtx x;
          register int i;

          /* Any instruction using a hard register which may get clobbered
             by a call needs to be marked as dependent on this call.
             This prevents a use of a hard return reg from being moved
             past a void call (i.e. it does not explicitly set the hard
             return reg).  */

          /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
             all registers, not just hard registers, may be clobbered by this
             call.  */

          /* Insn, being a CALL_INSN, magically depends on
             `last_function_call' already.  */

          if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
              && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
            {
              int max_reg = max_reg_num ();
              for (i = 0; i < max_reg; i++)
                {
                  for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                  reg_last_uses[i] = 0;
                  if (reg_last_sets[i])
                    add_dependence (insn, reg_last_sets[i], 0);
                }
              reg_pending_sets_all = 1;

1687 /* Add a pair of fake REG_NOTEs which we will later
1688 convert back into a NOTE_INSN_SETJMP note. See
1689 reemit_notes for why we use a pair of NOTEs. */
1690
1691 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD,
1692 GEN_INT (0),
1693 REG_NOTES (insn));
1694 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD,
1695 GEN_INT (NOTE_INSN_SETJMP),
1696 REG_NOTES (insn));
1697 }
1698 else
1699 {
1700 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1701 if (call_used_regs[i] || global_regs[i])
1702 {
1703 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1704 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1705 reg_last_uses[i] = 0;
1706 if (reg_last_sets[i])
1707 add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
1708 SET_REGNO_REG_SET (reg_pending_sets, i);
1709 }
1710 }
1711
1712 /* For each insn which shouldn't cross a call, add a dependence
1713 between that insn and this call insn. */
1714 x = LOG_LINKS (sched_before_next_call);
1715 while (x)
1716 {
1717 add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
1718 x = XEXP (x, 1);
1719 }
1720 LOG_LINKS (sched_before_next_call) = 0;
1721
1722 sched_analyze_insn (PATTERN (insn), insn, loop_notes);
1723 loop_notes = 0;
1724
1725 /* In the absence of interprocedural alias analysis, we must flush
1726 all pending reads and writes, and start new dependencies starting
1727 from here. But only flush writes for constant calls (which may
1728 be passed a pointer to something we haven't written yet). */
1729 flush_pending_lists (insn, CONST_CALL_P (insn));
1730
1731 /* Depend this function call (actually, the user of this
1732 function call) on all hard register clobberage. */
1733 last_function_call = insn;
1734 n_insns += 1;
1735 }
1736
1737 /* See comments on reemit_notes as to why we do this. */
1738 else if (GET_CODE (insn) == NOTE
1739 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
1740 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
1741 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1742 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
1743 || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
1744 && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
1745 {
1746 loop_notes = gen_rtx_EXPR_LIST (REG_DEAD,
1747 GEN_INT (NOTE_BLOCK_NUMBER (insn)),
1748 loop_notes);
1749 loop_notes = gen_rtx_EXPR_LIST (REG_DEAD,
1750 GEN_INT (NOTE_LINE_NUMBER (insn)),
1751 loop_notes);
1752 CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
1753 }
1754
1755 if (insn == tail)
1756 return n_insns;
1757 }
1758
1759 abort ();
1760 }
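
/* Dependencies above are threaded through EXPR_LIST chains that are
   always walked with the idiom `for (link = ...; link; link = XEXP (link, 1))'.
   A minimal stand-alone sketch of the same traversal over a toy list
   type (all `toy_' names are illustrative, not part of this pass):  */
#if 0
struct toy_link { int payload; struct toy_link *next; };

static int
toy_count_links (l)
     struct toy_link *l;
{
  int n = 0;
  for (; l; l = l->next)	/* plays the role of XEXP (l, 1) */
    n++;
  return n;
}
#endif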
1761 \f
1762 /* Called when we see a set of a register. If death is true, then we are
1763 scanning backwards. Mark that register as unborn. If nobody says
1764 otherwise, that is how things will remain. If death is false, then we
1765 are scanning forwards. Mark that register as being born. */
1766
1767 static void
1768 sched_note_set (b, x, death)
1769 int b;
1770 rtx x;
1771 int death;
1772 {
1773 register int regno;
1774 register rtx reg = SET_DEST (x);
1775 int subreg_p = 0;
1776
1777 if (reg == 0)
1778 return;
1779
1780 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
1781 || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
1782 {
1783 /* Must treat modification of just one hardware register of a multi-reg
1784 value or just a byte field of a register exactly the same way that
1785 mark_set_1 in flow.c does, i.e. anything except a paradoxical subreg
1786 does not kill the entire register. */
1787 if (GET_CODE (reg) != SUBREG
1788 || REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg))
1789 subreg_p = 1;
1790
1791 reg = SUBREG_REG (reg);
1792 }
1793
1794 if (GET_CODE (reg) != REG)
1795 return;
1796
1797 /* Global registers are always live, so the code below does not apply
1798 to them. */
1799
1800 regno = REGNO (reg);
1801 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
1802 {
1803 if (death)
1804 {
1805 /* If we only set part of the register, then this set does not
1806 kill it. */
1807 if (subreg_p)
1808 return;
1809
1810 /* Try killing this register. */
1811 if (regno < FIRST_PSEUDO_REGISTER)
1812 {
1813 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1814 while (--j >= 0)
1815 {
1816 CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
1817 SET_REGNO_REG_SET (bb_dead_regs, regno + j);
1818 }
1819 }
1820 else
1821 {
1822 CLEAR_REGNO_REG_SET (bb_live_regs, regno);
1823 SET_REGNO_REG_SET (bb_dead_regs, regno);
1824 }
1825 }
1826 else
1827 {
1828 /* Make the register live again. */
1829 if (regno < FIRST_PSEUDO_REGISTER)
1830 {
1831 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1832 while (--j >= 0)
1833 {
1834 SET_REGNO_REG_SET (bb_live_regs, regno + j);
1835 CLEAR_REGNO_REG_SET (bb_dead_regs, regno + j);
1836 }
1837 }
1838 else
1839 {
1840 SET_REGNO_REG_SET (bb_live_regs, regno);
1841 CLEAR_REGNO_REG_SET (bb_dead_regs, regno);
1842 }
1843 }
1844 }
1845 }
1846 \f
1847 /* Macros and functions for keeping the priority queue sorted, and
1848 dealing with queueing and dequeueing of instructions. */
1849
1850 #define SCHED_SORT(READY, NEW_READY, OLD_READY) \
1851 do { if ((NEW_READY) - (OLD_READY) == 1) \
1852 swap_sort (READY, NEW_READY); \
1853 else if ((NEW_READY) - (OLD_READY) > 1) \
1854 qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); } \
1855 while (0)
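
/* A hedged usage sketch of SCHED_SORT: if exactly one insn was
   appended to an already-sorted READY array, a single insertion
   (swap_sort) suffices; appending more than one falls back to qsort.
   The names below are hypothetical:  */
#if 0
  ready[old_n] = new_insn;		/* append one insn...  */
  SCHED_SORT (ready, old_n + 1, old_n);	/* ...so swap_sort is used */
#endif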
1856
1857 /* Returns a positive value if y is preferred; returns a negative value if
1858 x is preferred. Should never return 0, since that will make the sort
1859 unstable. */
1860
1861 static int
1862 rank_for_schedule (x, y)
1863 rtx *x, *y;
1864 {
1865 rtx tmp = *y;
1866 rtx tmp2 = *x;
1867 rtx link;
1868 int tmp_class, tmp2_class;
1869 int value;
1870
1871 /* Choose the instruction with the highest priority, if different. */
1872 if ((value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2)))
1873 return value;
1874
1875 if (last_scheduled_insn)
1876 {
1877 /* Classify the instructions into three classes:
1878 1) Data dependent on last scheduled insn.
1879 2) Anti/Output dependent on last scheduled insn.
1880 3) Independent of last scheduled insn, or has latency of one.
1881 Choose the insn from the highest numbered class if different. */
1882 link = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
1883 if (link == 0 || insn_cost (tmp, link, last_scheduled_insn) == 1)
1884 tmp_class = 3;
1885 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
1886 tmp_class = 1;
1887 else
1888 tmp_class = 2;
1889
1890 link = find_insn_list (tmp2, LOG_LINKS (last_scheduled_insn));
1891 if (link == 0 || insn_cost (tmp2, link, last_scheduled_insn) == 1)
1892 tmp2_class = 3;
1893 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
1894 tmp2_class = 1;
1895 else
1896 tmp2_class = 2;
1897
1898 if ((value = tmp_class - tmp2_class))
1899 return value;
1900 }
1901
1902 /* If insns are equally good, sort by INSN_LUID (original insn order),
1903 so that we make the sort stable. This minimizes instruction movement,
1904 thus minimizing sched's effect on debugging and cross-jumping. */
1905 return INSN_LUID (tmp) - INSN_LUID (tmp2);
1906 }
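
/* A stand-alone analogue of the comparator above, for illustration
   only: sort toy records by descending priority, breaking ties by
   original position so that qsort (which is not stable) behaves
   deterministically.  The `toy_' names are not part of this pass.  */
#if 0
struct toy_insn { int priority; int luid; };

static int
toy_rank (xp, yp)
     const void *xp, *yp;
{
  const struct toy_insn *x = (const struct toy_insn *) xp;
  const struct toy_insn *y = (const struct toy_insn *) yp;
  int value;

  if ((value = y->priority - x->priority))	/* higher priority first */
    return value;
  return y->luid - x->luid;	/* never 0 for distinct insns */
}

/* Usage: qsort (v, n, sizeof (struct toy_insn), toy_rank);  */
#endif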
1907
1908 /* Resort the array A in which only the last element (index N-1) may be out of order. */
1909
1910 __inline static void
1911 swap_sort (a, n)
1912 rtx *a;
1913 int n;
1914 {
1915 rtx insn = a[n-1];
1916 int i = n-2;
1917
1918 while (i >= 0 && rank_for_schedule (a+i, &insn) >= 0)
1919 {
1920 a[i+1] = a[i];
1921 i -= 1;
1922 }
1923 a[i+1] = insn;
1924 }
1925
1926 static int max_priority;
1927
1928 /* Add INSN to the insn queue so that it fires at least N_CYCLES
1929 before the currently executing insn. */
1930
1931 __inline static void
1932 queue_insn (insn, n_cycles)
1933 rtx insn;
1934 int n_cycles;
1935 {
1936 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1937 NEXT_INSN (insn) = insn_queue[next_q];
1938 insn_queue[next_q] = insn;
1939 q_size += 1;
1940 }
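
/* The insn queue is a "scheduling wheel": a circular array indexed by
   (current slot + delay) modulo the queue size.  A stand-alone sketch
   of that indexing, assuming a power-of-two size; TOY_QUEUE_SIZE and
   TOY_NEXT_Q_AFTER are hypothetical stand-ins for the real macros.  */
#if 0
#define TOY_QUEUE_SIZE 16
#define TOY_NEXT_Q_AFTER(q, n) (((q) + (n)) & (TOY_QUEUE_SIZE - 1))
/* e.g. TOY_NEXT_Q_AFTER (14, 3) == 1: the slot wraps around.  */
#endif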
1941
1942 /* Return nonzero if PAT is the pattern of an insn which makes a
1943 register live. */
1944
1945 __inline static int
1946 birthing_insn_p (pat)
1947 rtx pat;
1948 {
1949 int j;
1950
1951 if (reload_completed == 1)
1952 return 0;
1953
1954 if (GET_CODE (pat) == SET
1955 && GET_CODE (SET_DEST (pat)) == REG)
1956 {
1957 rtx dest = SET_DEST (pat);
1958 int i = REGNO (dest);
1959
1960 /* It would be more accurate to use refers_to_regno_p or
1961 reg_mentioned_p to determine when the dest is not live before this
1962 insn. */
1963
1964 if (REGNO_REG_SET_P (bb_live_regs, i))
1965 return (REG_N_SETS (i) == 1);
1966
1967 return 0;
1968 }
1969 if (GET_CODE (pat) == PARALLEL)
1970 {
1971 for (j = 0; j < XVECLEN (pat, 0); j++)
1972 if (birthing_insn_p (XVECEXP (pat, 0, j)))
1973 return 1;
1974 }
1975 return 0;
1976 }
1977
1978 /* PREV is an insn that is ready to execute. Adjust its priority if that
1979 will help shorten register lifetimes. */
1980
1981 __inline static void
1982 adjust_priority (prev)
1983 rtx prev;
1984 {
1985 /* Trying to shorten register lives after reload has completed
1986 is useless and wrong. It gives inaccurate schedules. */
1987 if (reload_completed == 0)
1988 {
1989 rtx note;
1990 int n_deaths = 0;
1991
1992 /* ??? This code has no effect, because REG_DEAD notes are removed
1993 before we ever get here. */
1994 for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
1995 if (REG_NOTE_KIND (note) == REG_DEAD)
1996 n_deaths += 1;
1997
1998 /* Defer scheduling insns which kill registers, since that
1999 shortens register lives. Prefer scheduling insns which
2000 make registers live for the same reason. */
2001 switch (n_deaths)
2002 {
2003 default:
2004 INSN_PRIORITY (prev) >>= 3;
2005 break;
2006 case 3:
2007 INSN_PRIORITY (prev) >>= 2;
2008 break;
2009 case 2:
2010 case 1:
2011 INSN_PRIORITY (prev) >>= 1;
2012 break;
2013 case 0:
2014 if (birthing_insn_p (PATTERN (prev)))
2015 {
2016 int max = max_priority;
2017
2018 if (max > INSN_PRIORITY (prev))
2019 INSN_PRIORITY (prev) = max;
2020 }
2021 break;
2022 }
2023 #ifdef ADJUST_PRIORITY
2024 ADJUST_PRIORITY (prev);
2025 #endif
2026 }
2027 }
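
/* Worked example of the deferral above, with illustrative numbers:
   an insn of priority 16 that kills four or more registers drops to
   16 >> 3 == 2; killing exactly three gives 16 >> 2 == 4; killing
   one or two gives 16 >> 1 == 8; and an insn that makes a register
   live is instead raised to the current max_priority.  */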
2028
2029 /* INSN is the "currently executing insn". Launch each insn which was
2030 waiting on INSN (in the backwards dataflow sense). READY is a
2031 vector of insns which are ready to fire. N_READY is the number of
2032 elements in READY. CLOCK is the current virtual cycle. */
2033
2034 static int
2035 schedule_insn (insn, ready, n_ready, clock)
2036 rtx insn;
2037 rtx *ready;
2038 int n_ready;
2039 int clock;
2040 {
2041 rtx link;
2042 int new_ready = n_ready;
2043
2044 if (MAX_BLOCKAGE > 1)
2045 schedule_unit (insn_unit (insn), insn, clock);
2046
2047 if (LOG_LINKS (insn) == 0)
2048 return n_ready;
2049
2050 /* This is used by the function adjust_priority above. */
2051 if (n_ready > 0)
2052 max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
2053 else
2054 max_priority = INSN_PRIORITY (insn);
2055
2056 for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
2057 {
2058 rtx prev = XEXP (link, 0);
2059 int cost = insn_cost (prev, link, insn);
2060
2061 if ((INSN_REF_COUNT (prev) -= 1) != 0)
2062 {
2063 /* We satisfied one requirement to fire PREV. Record the earliest
2064 time when PREV can fire. No need to do this if the cost is 1,
2065 because PREV can fire no sooner than the next cycle. */
2066 if (cost > 1)
2067 INSN_TICK (prev) = MAX (INSN_TICK (prev), clock + cost);
2068 }
2069 else
2070 {
2071 /* We satisfied the last requirement to fire PREV. Ensure that all
2072 timing requirements are satisfied. */
2073 if (INSN_TICK (prev) - clock > cost)
2074 cost = INSN_TICK (prev) - clock;
2075
2076 /* Adjust the priority of PREV and either put it on the ready
2077 list or queue it. */
2078 adjust_priority (prev);
2079 if (cost <= 1)
2080 ready[new_ready++] = prev;
2081 else
2082 queue_insn (prev, cost);
2083 }
2084 }
2085
2086 return new_ready;
2087 }
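
/* The loop above is the classic reference-count release step of list
   scheduling: each time a successor of PREV is scheduled, PREV's
   count drops; at zero, PREV becomes ready or is queued for later.
   A stand-alone sketch with toy types (illustrative only):  */
#if 0
struct toy_dep { int ref_count; int tick; };

static int
toy_release (d, clock, cost)
     struct toy_dep *d;
     int clock, cost;
{
  if (--d->ref_count != 0)
    {
      /* Still waiting on other successors; just record the earliest
	 cycle at which it could fire.  */
      if (cost > 1 && d->tick < clock + cost)
	d->tick = clock + cost;
      return 0;
    }
  return 1;	/* last successor scheduled: ready (or queueable) */
}
#endif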
2088
2089 /* Given N_READY insns in the ready list READY at time CLOCK, queue
2090 those that are blocked due to function unit hazards and rearrange
2091 the remaining ones to minimize subsequent function unit hazards. */
2092
2093 static int
2094 schedule_select (ready, n_ready, clock, file)
2095 rtx *ready;
2096 int n_ready, clock;
2097 FILE *file;
2098 {
2099 int pri = INSN_PRIORITY (ready[0]);
2100 int i, j, k, q, cost, best_cost, best_insn = 0, new_ready = n_ready;
2101 rtx insn;
2102
2103 /* Work down the ready list in groups of instructions with the same
2104 priority value. Queue insns in the group that are blocked and
2105 select among those that remain for the one with the largest
2106 potential hazard. */
2107 for (i = 0; i < n_ready; i = j)
2108 {
2109 int opri = pri;
2110 for (j = i + 1; j < n_ready; j++)
2111 if ((pri = INSN_PRIORITY (ready[j])) != opri)
2112 break;
2113
2114 /* Queue insns in the group that are blocked. */
2115 for (k = i, q = 0; k < j; k++)
2116 {
2117 insn = ready[k];
2118 if ((cost = actual_hazard (insn_unit (insn), insn, clock, 0)) != 0)
2119 {
2120 q++;
2121 ready[k] = 0;
2122 queue_insn (insn, cost);
2123 if (file)
2124 fprintf (file, "\n;; blocking insn %d for %d cycles",
2125 INSN_UID (insn), cost);
2126 }
2127 }
2128 new_ready -= q;
2129
2130 /* Check the next group if all insns were queued. */
2131 if (j - i - q == 0)
2132 continue;
2133
2134 /* If more than one remains, select the first one with the largest
2135 potential hazard. */
2136 else if (j - i - q > 1)
2137 {
2138 best_cost = -1;
2139 for (k = i; k < j; k++)
2140 {
2141 if ((insn = ready[k]) == 0)
2142 continue;
2143 if ((cost = potential_hazard (insn_unit (insn), insn, 0))
2144 > best_cost)
2145 {
2146 best_cost = cost;
2147 best_insn = k;
2148 }
2149 }
2150 }
2151 /* We have found a suitable insn to schedule. */
2152 break;
2153 }
2154
2155 /* Move the best insn to the front of the ready list. */
2156 if (best_insn != 0)
2157 {
2158 if (file)
2159 {
2160 fprintf (file, ", now");
2161 for (i = 0; i < n_ready; i++)
2162 if (ready[i])
2163 fprintf (file, " %d", INSN_UID (ready[i]));
2164 fprintf (file, "\n;; insn %d has a greater potential hazard",
2165 INSN_UID (ready[best_insn]));
2166 }
2167 for (i = best_insn; i > 0; i--)
2168 {
2169 insn = ready[i-1];
2170 ready[i-1] = ready[i];
2171 ready[i] = insn;
2172 }
2173 }
2174
2175 /* Compact the ready list. */
2176 if (new_ready < n_ready)
2177 for (i = j = 0; i < n_ready; i++)
2178 if (ready[i])
2179 ready[j++] = ready[i];
2180
2181 return new_ready;
2182 }
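
/* Stand-alone sketch of the selection rule above: within one priority
   group, queue the blocked candidates and keep the unblocked one with
   the largest potential hazard.  Toy representation: COST[k] < 0
   means "blocked"; the function returns the index to schedule, or -1
   if every candidate was blocked.  Illustrative only.  */
#if 0
static int
toy_pick (cost, n)
     int *cost;
     int n;
{
  int k, best = -1, best_cost = -1;

  for (k = 0; k < n; k++)
    if (cost[k] >= 0 && cost[k] > best_cost)
      {
	best_cost = cost[k];
	best = k;
      }
  return best;
}
#endif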
2183
2184 /* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
2185 dead_notes list. */
2186
2187 static void
2188 create_reg_dead_note (reg, insn)
2189 rtx reg, insn;
2190 {
2191 rtx link;
2192
2193 /* The number of registers killed after scheduling must be the same as the
2194 number of registers killed before scheduling. The number of REG_DEAD
2195 notes may not be conserved, i.e. two SImode hard register REG_DEAD notes
2196 might become one DImode hard register REG_DEAD note, but the number of
2197 registers killed will be conserved.
2198
2199 We carefully remove REG_DEAD notes from the dead_notes list, so that
2200 there will be none left at the end. If we run out early, then there
2201 is a bug somewhere in flow, combine and/or sched. */
2202
2203 if (dead_notes == 0)
2204 {
2205 #if 1
2206 abort ();
2207 #else
2208 link = rtx_alloc (EXPR_LIST);
2209 PUT_REG_NOTE_KIND (link, REG_DEAD);
2210 #endif
2211 }
2212 else
2213 {
2214 /* Number of regs killed by REG. */
2215 int regs_killed = (REGNO (reg) >= FIRST_PSEUDO_REGISTER ? 1
2216 : HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)));
2217 /* Number of regs killed by REG_DEAD notes taken off the list. */
2218 int reg_note_regs;
2219
2220 link = dead_notes;
2221 reg_note_regs = (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
2222 : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
2223 GET_MODE (XEXP (link, 0))));
2224 while (reg_note_regs < regs_killed)
2225 {
2226 /* LINK might be zero if we killed more registers after scheduling
2227 than before, and the last hard register we kill is actually
2228 multiple hard regs. */
2229 if (link == NULL_RTX)
2230 abort ();
2231
2232 link = XEXP (link, 1);
2233 reg_note_regs += (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
2234 : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
2235 GET_MODE (XEXP (link, 0))));
2236 }
2237 dead_notes = XEXP (link, 1);
2238
2239 /* If we took off too many register kills, put the extra ones back. */
2240 while (reg_note_regs > regs_killed)
2241 {
2242 rtx temp_reg, temp_link;
2243
2244 temp_reg = gen_rtx_REG (word_mode, 0);
2245 temp_link = rtx_alloc (EXPR_LIST);
2246 PUT_REG_NOTE_KIND (temp_link, REG_DEAD);
2247 XEXP (temp_link, 0) = temp_reg;
2248 XEXP (temp_link, 1) = dead_notes;
2249 dead_notes = temp_link;
2250 reg_note_regs--;
2251 }
2252 }
2253
2254 XEXP (link, 0) = reg;
2255 XEXP (link, 1) = REG_NOTES (insn);
2256 REG_NOTES (insn) = link;
2257 }
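
/* Worked example of the conservation bookkeeping above (illustrative
   register sizes): killing one two-word DImode hard register may
   consume two one-word REG_DEAD notes from dead_notes; conversely,
   taking a two-word note off the list for a one-word kill pushes one
   freshly allocated filler note back, so the total count of register
   kills on the free list is conserved.  */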
2258
2259 /* Subroutine of attach_deaths_insn--handles the recursive search
2260 through X. If SET_P is true, then X is being modified by the insn. */
2261
2262 static void
2263 attach_deaths (x, insn, set_p)
2264 rtx x;
2265 rtx insn;
2266 int set_p;
2267 {
2268 register int i;
2269 register int j;
2270 register enum rtx_code code;
2271 register char *fmt;
2272
2273 if (x == 0)
2274 return;
2275
2276 code = GET_CODE (x);
2277
2278 switch (code)
2279 {
2280 case CONST_INT:
2281 case CONST_DOUBLE:
2282 case LABEL_REF:
2283 case SYMBOL_REF:
2284 case CONST:
2285 case CODE_LABEL:
2286 case PC:
2287 case CC0:
2288 /* Get rid of the easy cases first. */
2289 return;
2290
2291 case REG:
2292 {
2293 /* If the register dies in this insn, queue that note, and mark
2294 this register as needing to die. */
2295 /* This code is very similar to mark_used_1 (if set_p is false)
2296 and mark_set_1 (if set_p is true) in flow.c. */
2297
2298 register int regno;
2299 int some_needed;
2300 int all_needed;
2301
2302 if (set_p)
2303 return;
2304
2305 regno = REGNO (x);
2306 all_needed = some_needed = REGNO_REG_SET_P (old_live_regs, regno);
2307 if (regno < FIRST_PSEUDO_REGISTER)
2308 {
2309 int n;
2310
2311 n = HARD_REGNO_NREGS (regno, GET_MODE (x));
2312 while (--n > 0)
2313 {
2314 int needed = (REGNO_REG_SET_P (old_live_regs, regno + n));
2315 some_needed |= needed;
2316 all_needed &= needed;
2317 }
2318 }
2319
2320 /* If it wasn't live before we started, then add a REG_DEAD note.
2321 We must check the previous lifetime info not the current info,
2322 because we may have to execute this code several times, e.g.
2323 once for a clobber (which doesn't add a note) and later
2324 for a use (which does add a note).
2325
2326 Always make the register live. We must do this even if it was
2327 live before, because this may be an insn which sets and uses
2328 the same register, in which case the register has already been
2329 killed, so we must make it live again.
2330
2331 Global registers are always live, and should never have a REG_DEAD
2332 note added for them, so none of the code below applies to them. */
2333
2334 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
2335 {
2336 /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2337 STACK_POINTER_REGNUM, since these are always considered to be
2338 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2339 if (regno != FRAME_POINTER_REGNUM
2340 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2341 && ! (regno == HARD_FRAME_POINTER_REGNUM)
2342 #endif
2343 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2344 && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
2345 #endif
2346 && regno != STACK_POINTER_REGNUM)
2347 {
2348 /* ??? It is perhaps a dead_or_set_p bug that it does
2349 not check for REG_UNUSED notes itself. This is necessary
2350 for the case where the SET_DEST is a subreg of regno, as
2351 dead_or_set_p handles subregs specially. */
2352 if (! all_needed && ! dead_or_set_p (insn, x)
2353 && ! find_reg_note (insn, REG_UNUSED, x))
2354 {
2355 /* Check for the case where the register dying partially
2356 overlaps the register set by this insn. */
2357 if (regno < FIRST_PSEUDO_REGISTER
2358 && HARD_REGNO_NREGS (regno, GET_MODE (x)) > 1)
2359 {
2360 int n = HARD_REGNO_NREGS (regno, GET_MODE (x));
2361 while (--n >= 0)
2362 some_needed |= dead_or_set_regno_p (insn, regno + n);
2363 }
2364
2365 /* If none of the words in X is needed, make a REG_DEAD
2366 note. Otherwise, we must make partial REG_DEAD
2367 notes. */
2368 if (! some_needed)
2369 create_reg_dead_note (x, insn);
2370 else
2371 {
2372 int i;
2373
2374 /* Don't make a REG_DEAD note for a part of a
2375 register that is set in the insn. */
2376 for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
2377 i >= 0; i--)
2378 if (! REGNO_REG_SET_P (old_live_regs, regno + i)
2379 && ! dead_or_set_regno_p (insn, regno + i))
2380 create_reg_dead_note (gen_rtx_REG (reg_raw_mode[regno + i],
2381 regno + i),
2382 insn);
2383 }
2384 }
2385 }
2386
2387 if (regno < FIRST_PSEUDO_REGISTER)
2388 {
2389 int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
2390 while (--j >= 0)
2391 {
2392 CLEAR_REGNO_REG_SET (bb_dead_regs, regno + j);
2393 SET_REGNO_REG_SET (bb_live_regs, regno + j);
2394 }
2395 }
2396 else
2397 {
2398 CLEAR_REGNO_REG_SET (bb_dead_regs, regno);
2399 SET_REGNO_REG_SET (bb_live_regs, regno);
2400 }
2401 }
2402 return;
2403 }
2404
2405 case MEM:
2406 /* Handle tail-recursive case. */
2407 attach_deaths (XEXP (x, 0), insn, 0);
2408 return;
2409
2410 case SUBREG:
2411 case STRICT_LOW_PART:
2412 /* These two cases preserve the value of SET_P, so handle them
2413 separately. */
2414 attach_deaths (XEXP (x, 0), insn, set_p);
2415 return;
2416
2417 case ZERO_EXTRACT:
2418 case SIGN_EXTRACT:
2419 /* This case preserves the value of SET_P for the first operand, but
2420 clears it for the other two. */
2421 attach_deaths (XEXP (x, 0), insn, set_p);
2422 attach_deaths (XEXP (x, 1), insn, 0);
2423 attach_deaths (XEXP (x, 2), insn, 0);
2424 return;
2425
2426 default:
2427 /* Other cases: walk the insn. */
2428 fmt = GET_RTX_FORMAT (code);
2429 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2430 {
2431 if (fmt[i] == 'e')
2432 attach_deaths (XEXP (x, i), insn, 0);
2433 else if (fmt[i] == 'E')
2434 for (j = 0; j < XVECLEN (x, i); j++)
2435 attach_deaths (XVECEXP (x, i, j), insn, 0);
2436 }
2437 }
2438 }
2439
2440 /* After INSN has executed, add register death notes for each register
2441 that is dead after INSN. */
2442
2443 static void
2444 attach_deaths_insn (insn)
2445 rtx insn;
2446 {
2447 rtx x = PATTERN (insn);
2448 register RTX_CODE code = GET_CODE (x);
2449 rtx link;
2450
2451 if (code == SET)
2452 {
2453 attach_deaths (SET_SRC (x), insn, 0);
2454
2455 /* A register might die here even if it is the destination, e.g.
2456 it is the target of a volatile read and is otherwise unused.
2457 Hence we must always call attach_deaths for the SET_DEST. */
2458 attach_deaths (SET_DEST (x), insn, 1);
2459 }
2460 else if (code == PARALLEL)
2461 {
2462 register int i;
2463 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2464 {
2465 code = GET_CODE (XVECEXP (x, 0, i));
2466 if (code == SET)
2467 {
2468 attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
2469
2470 attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
2471 }
2472 /* Flow does not add REG_DEAD notes to registers that die in
2473 clobbers, so we can't either. */
2474 else if (code != CLOBBER)
2475 attach_deaths (XVECEXP (x, 0, i), insn, 0);
2476 }
2477 }
2478 /* If this is a CLOBBER, only add REG_DEAD notes to registers inside a
2479 MEM being clobbered, just like flow. */
2480 else if (code == CLOBBER && GET_CODE (XEXP (x, 0)) == MEM)
2481 attach_deaths (XEXP (XEXP (x, 0), 0), insn, 0);
2482 /* Otherwise don't add a death note to things being clobbered. */
2483 else if (code != CLOBBER)
2484 attach_deaths (x, insn, 0);
2485
2486 /* Make death notes for things used in the called function. */
2487 if (GET_CODE (insn) == CALL_INSN)
2488 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2489 attach_deaths (XEXP (XEXP (link, 0), 0), insn,
2490 GET_CODE (XEXP (link, 0)) == CLOBBER);
2491 }
2492
2493 /* Delete notes beginning with INSN and maybe put them in the chain
2494 of notes ended by NOTE_LIST.
2495 Returns the insn following the notes. */
2496
2497 static rtx
2498 unlink_notes (insn, tail)
2499 rtx insn, tail;
2500 {
2501 rtx prev = PREV_INSN (insn);
2502
2503 while (insn != tail && GET_CODE (insn) == NOTE)
2504 {
2505 rtx next = NEXT_INSN (insn);
2506 /* Delete the note from its current position. */
2507 if (prev)
2508 NEXT_INSN (prev) = next;
2509 if (next)
2510 PREV_INSN (next) = prev;
2511
2512 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
2513 /* Record line-number notes so they can be reused. */
2514 LINE_NOTE (insn) = insn;
2515
2516 /* Don't save away NOTE_INSN_SETJMPs, because they must remain
2517 immediately after the call they follow. We use a fake
2518 (REG_DEAD (const_int -1)) note to remember them.
2519 Likewise with NOTE_INSN_{LOOP,EHREGION}_{BEG,END}. */
2520 else if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_SETJMP
2521 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG
2522 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_END
2523 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
2524 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
2525 {
2526 /* Insert the note at the end of the notes list. */
2527 PREV_INSN (insn) = note_list;
2528 if (note_list)
2529 NEXT_INSN (note_list) = insn;
2530 note_list = insn;
2531 }
2532
2533 insn = next;
2534 }
2535 return insn;
2536 }
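
/* The splice at the top of unlink_notes is ordinary doubly-linked
   list removal.  A stand-alone sketch over a toy node type
   (illustrative only, not part of the pass):  */
#if 0
struct toy_node { struct toy_node *prev, *next; };

static void
toy_unlink (node)
     struct toy_node *node;
{
  if (node->prev)
    node->prev->next = node->next;
  if (node->next)
    node->next->prev = node->prev;
}
#endif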
2537
2538 /* Constructor for `sometimes' data structure. */
2539
2540 static int
2541 new_sometimes_live (regs_sometimes_live, regno, sometimes_max)
2542 struct sometimes *regs_sometimes_live;
2543 int regno;
2544 int sometimes_max;
2545 {
2546 register struct sometimes *p;
2547
2548 /* There should never be a register greater than max_regno here. If there
2549 is, it means that a define_split has created a new pseudo reg. This
2550 is not allowed, since there will not be flow info available for any
2551 new register, so catch the error here. */
2552 if (regno >= max_regno)
2553 abort ();
2554
2555 p = &regs_sometimes_live[sometimes_max];
2556 p->regno = regno;
2557 p->live_length = 0;
2558 p->calls_crossed = 0;
2559 sometimes_max++;
2560 return sometimes_max;
2561 }
2562
2563 /* Record the accumulated lifetime and call-crossing counts for all
2564 regs we are still tracking at the end of the block. */
2565
2566 static void
2567 finish_sometimes_live (regs_sometimes_live, sometimes_max)
2568 struct sometimes *regs_sometimes_live;
2569 int sometimes_max;
2570 {
2571 int i;
2572
2573 for (i = 0; i < sometimes_max; i++)
2574 {
2575 register struct sometimes *p = &regs_sometimes_live[i];
2576 int regno = p->regno;
2577
2578 sched_reg_live_length[regno] += p->live_length;
2579 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2580 }
2581 }
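
/* Worked example (illustrative numbers): a pseudo that is live for
   five insns of this block and crosses two calls while live
   accumulates sched_reg_live_length[regno] += 5 and
   sched_reg_n_calls_crossed[regno] += 2, summed over all of its
   disjoint lifetime segments.  */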
2582
2583 /* Search INSN for fake REG_DEAD note pairs for NOTE_INSN_SETJMP,
2584 NOTE_INSN_{LOOP,EHREGION}_{BEG,END}; and convert them back into
2585 NOTEs. The REG_DEAD note following the first one contains the saved
2586 value of NOTE_BLOCK_NUMBER, which is needed for
2587 NOTE_INSN_EH_REGION_{BEG,END} NOTEs. LAST is the last instruction
2588 output by the instruction scheduler. Return the new value of LAST. */
2589
2590 static rtx
2591 reemit_notes (insn, last)
2592 rtx insn;
2593 rtx last;
2594 {
2595 rtx note;
2596
2597 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2598 {
2599 if (REG_NOTE_KIND (note) == REG_DEAD
2600 && GET_CODE (XEXP (note, 0)) == CONST_INT)
2601 {
2602 if (INTVAL (XEXP (note, 0)) == NOTE_INSN_SETJMP)
2603 {
2604 CONST_CALL_P (emit_note_after (INTVAL (XEXP (note, 0)), insn))
2605 = CONST_CALL_P (note);
2606 remove_note (insn, note);
2607 note = XEXP (note, 1);
2608 }
2609 else
2610 {
2611 last = emit_note_before (INTVAL (XEXP (note, 0)), last);
2612 remove_note (insn, note);
2613 note = XEXP (note, 1);
2614 NOTE_BLOCK_NUMBER (last) = INTVAL (XEXP (note, 0));
2615 }
2616 remove_note (insn, note);
2617 }
2618 }
2619 return last;
2620 }
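
/* Schematic shape of the fake note pair consumed above, as built in
   sched_analyze (layout shown informally, not as real rtl syntax):

     (expr_list:REG_DEAD (const_int <NOTE_INSN_... code>)
       (expr_list:REG_DEAD (const_int <NOTE_BLOCK_NUMBER>)
         ...))

   The first CONST_INT selects which NOTE to re-emit; the second
   restores its NOTE_BLOCK_NUMBER.  */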
2621
2622 /* Use modified list scheduling to rearrange insns in basic block
2623 B. FILE, if nonzero, is where we dump interesting output about
2624 this pass. */
2625
2626 static void
2627 schedule_block (b, file)
2628 int b;
2629 FILE *file;
2630 {
2631 rtx insn, last;
2632 rtx *ready, link;
2633 int i, j, n_ready = 0, new_ready, n_insns;
2634 int sched_n_insns = 0;
2635 int clock;
2636 #define NEED_NOTHING 0
2637 #define NEED_HEAD 1
2638 #define NEED_TAIL 2
2639 int new_needs;
2640
2641 /* HEAD and TAIL delimit the region being scheduled. */
2642 rtx head = basic_block_head[b];
2643 rtx tail = basic_block_end[b];
2644 /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
2645 being scheduled. When the insns have been ordered,
2646 these insns delimit where the new insns are to be
2647 spliced back into the insn chain. */
2648 rtx next_tail;
2649 rtx prev_head;
2650
2651 /* Keep life information accurate. */
2652 register struct sometimes *regs_sometimes_live;
2653 int sometimes_max;
2654
2655 if (file)
2656 fprintf (file, ";;\t -- basic block number %d from %d to %d --\n",
2657 b, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
2658
2659 i = max_reg_num ();
2660 reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
2661 bzero ((char *) reg_last_uses, i * sizeof (rtx));
2662 reg_last_sets = (rtx *) alloca (i * sizeof (rtx));
2663 bzero ((char *) reg_last_sets, i * sizeof (rtx));
2664 reg_pending_sets = ALLOCA_REG_SET ();
2665 CLEAR_REG_SET (reg_pending_sets);
2666 reg_pending_sets_all = 0;
2667 clear_units ();
2668
2669 /* Remove certain insns at the beginning from scheduling,
2670 by advancing HEAD. */
2671
2672 /* At the start of a function, before reload has run, don't delay getting
2673 parameters from hard registers into pseudo registers. */
2674 if (reload_completed == 0 && b == 0)
2675 {
2676 while (head != tail
2677 && GET_CODE (head) == NOTE
2678 && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
2679 head = NEXT_INSN (head);
2680 while (head != tail
2681 && GET_CODE (head) == INSN
2682 && GET_CODE (PATTERN (head)) == SET)
2683 {
2684 rtx src = SET_SRC (PATTERN (head));
2685 while (GET_CODE (src) == SUBREG
2686 || GET_CODE (src) == SIGN_EXTEND
2687 || GET_CODE (src) == ZERO_EXTEND
2688 || GET_CODE (src) == SIGN_EXTRACT
2689 || GET_CODE (src) == ZERO_EXTRACT)
2690 src = XEXP (src, 0);
2691 if (GET_CODE (src) != REG
2692 || REGNO (src) >= FIRST_PSEUDO_REGISTER)
2693 break;
2694 /* Keep this insn from ever being scheduled. */
2695 INSN_REF_COUNT (head) = 1;
2696 head = NEXT_INSN (head);
2697 }
2698 }
2699
2700 /* Don't include any notes or labels at the beginning of the
2701 basic block, or notes at the ends of basic blocks. */
2702 while (head != tail)
2703 {
2704 if (GET_CODE (head) == NOTE)
2705 head = NEXT_INSN (head);
2706 else if (GET_CODE (tail) == NOTE)
2707 tail = PREV_INSN (tail);
2708 else if (GET_CODE (head) == CODE_LABEL)
2709 head = NEXT_INSN (head);
2710 else break;
2711 }
2712 /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
2713 to schedule this block. */
2714 if (head == tail
2715 && (GET_CODE (head) == NOTE || GET_CODE (head) == CODE_LABEL))
2716 goto ret;
2717
2718 #if 0
2719 /* This short-cut doesn't work. It does not count call insns crossed by
2720 registers in reg_sometimes_live. It does not mark these registers as
2721 dead if they die in this block. It does not mark these registers live
2722 (or create new reg_sometimes_live entries if necessary) if they are born
2723 in this block.
2724
2725 The easy solution is to just always schedule a block. This block only
2726 has one insn, so this won't slow down this pass by much. */
2727
2728 if (head == tail)
2729 goto ret;
2730 #endif
2731
2732 /* Now HEAD through TAIL are the insns actually to be rearranged;
2733 Let PREV_HEAD and NEXT_TAIL enclose them. */
2734 prev_head = PREV_INSN (head);
2735 next_tail = NEXT_INSN (tail);
2736
2737 /* Initialize basic block data structures. */
2738 dead_notes = 0;
2739 pending_read_insns = 0;
2740 pending_read_mems = 0;
2741 pending_write_insns = 0;
2742 pending_write_mems = 0;
2743 pending_lists_length = 0;
2744 last_pending_memory_flush = 0;
2745 last_function_call = 0;
2746 last_scheduled_insn = 0;
2747
2748 LOG_LINKS (sched_before_next_call) = 0;
2749
2750 n_insns = sched_analyze (head, tail);
2751 if (n_insns == 0)
2752 {
2753 free_pending_lists ();
2754 goto ret;
2755 }
2756
2757 /* Allocate vector to hold insns to be rearranged (except those
2758 insns which are controlled by an insn with SCHED_GROUP_P set).
2759 All these insns are included between ORIG_HEAD and ORIG_TAIL,
2760 as those variables ultimately are set up. */
2761 ready = (rtx *) alloca ((n_insns+1) * sizeof (rtx));
2762
2763 /* TAIL is now the last of the insns to be rearranged.
2764 Put those insns into the READY vector. */
2765 insn = tail;
2766
2767 /* For all branches, calls, uses, and cc0 setters, force them to remain
2768 in order at the end of the block by adding dependencies and giving
2769 the last a high priority. There may be notes present, and prev_head
2770 may also be a note.
2771
2772 Branches must obviously remain at the end. Calls should remain at the
2773 end since moving them results in worse register allocation. Uses remain
2774 at the end to ensure proper register allocation. cc0 setters remain
2775 at the end because they can't be moved away from their cc0 user. */
2776 last = 0;
2777 while (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
2778 || (GET_CODE (insn) == INSN
2779 && (GET_CODE (PATTERN (insn)) == USE
2780 #ifdef HAVE_cc0
2781 || sets_cc0_p (PATTERN (insn))
2782 #endif
2783 ))
2784 || GET_CODE (insn) == NOTE)
2785 {
2786 if (GET_CODE (insn) != NOTE)
2787 {
2788 priority (insn);
2789 if (last == 0)
2790 {
2791 ready[n_ready++] = insn;
2792 INSN_PRIORITY (insn) = TAIL_PRIORITY - i;
2793 INSN_REF_COUNT (insn) = 0;
2794 }
2795 else if (! find_insn_list (insn, LOG_LINKS (last)))
2796 {
2797 add_dependence (last, insn, REG_DEP_ANTI);
2798 INSN_REF_COUNT (insn)++;
2799 }
2800 last = insn;
2801
2802 /* Skip over insns that are part of a group. */
2803 while (SCHED_GROUP_P (insn))
2804 {
2805 insn = prev_nonnote_insn (insn);
2806 priority (insn);
2807 }
2808 }
2809
2810 insn = PREV_INSN (insn);
2811 /* Don't overrun the bounds of the basic block. */
2812 if (insn == prev_head)
2813 break;
2814 }
2815
2816 /* Assign priorities to instructions. Also check whether they
2817 are in priority order already. If so then I will be nonnegative.
2818 We use this shortcut only before reloading. */
2819 #if 0
2820 i = reload_completed ? DONE_PRIORITY : MAX_PRIORITY;
2821 #endif
2822
2823 for (; insn != prev_head; insn = PREV_INSN (insn))
2824 {
2825 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2826 {
2827 priority (insn);
2828 if (INSN_REF_COUNT (insn) == 0)
2829 {
2830 if (last == 0)
2831 ready[n_ready++] = insn;
2832 else
2833 {
2834 /* Make this dependent on the last of the instructions
2835 that must remain in order at the end of the block. */
2836 add_dependence (last, insn, REG_DEP_ANTI);
2837 INSN_REF_COUNT (insn) = 1;
2838 }
2839 }
2840 if (SCHED_GROUP_P (insn))
2841 {
2842 while (SCHED_GROUP_P (insn))
2843 {
2844 insn = prev_nonnote_insn (insn);
2845 priority (insn);
2846 }
2847 continue;
2848 }
2849 #if 0
2850 if (i < 0)
2851 continue;
2852 if (INSN_PRIORITY (insn) < i)
2853 i = INSN_PRIORITY (insn);
2854 else if (INSN_PRIORITY (insn) > i)
2855 i = DONE_PRIORITY;
2856 #endif
2857 }
2858 }
2859
2860 #if 0
2861 /* This short-cut doesn't work. It does not count call insns crossed by
2862 registers in reg_sometimes_live. It does not mark these registers as
2863 dead if they die in this block. It does not mark these registers live
2864 (or create new reg_sometimes_live entries if necessary) if they are born
2865 in this block.
2866
2867 The easy solution is to just always schedule a block. These blocks tend
2868 to be very short, so this doesn't slow down this pass by much. */
2869
2870 /* If existing order is good, don't bother to reorder. */
2871 if (i != DONE_PRIORITY)
2872 {
2873 if (file)
2874 fprintf (file, ";; already scheduled\n");
2875
2876 if (reload_completed == 0)
2877 {
2878 for (i = 0; i < sometimes_max; i++)
2879 regs_sometimes_live[i].live_length += n_insns;
2880
2881 finish_sometimes_live (regs_sometimes_live, sometimes_max);
2882 }
2883 free_pending_lists ();
2884 goto ret;
2885 }
2886 #endif
2887
2888 /* Scan all the insns to be scheduled, removing NOTE insns
2889 and register death notes.
2890 Line number NOTE insns end up in NOTE_LIST.
2891 Register death notes end up in DEAD_NOTES.
2892
2893 Recreate the register life information for the end of this basic
2894 block. */
2895
2896 if (reload_completed == 0)
2897 {
2898 COPY_REG_SET (bb_live_regs, basic_block_live_at_start[b]);
2899 CLEAR_REG_SET (bb_dead_regs);
2900
2901 if (b == 0)
2902 {
2903 /* This is the first block in the function. There may be insns
2904 before head that we can't schedule. We still need to examine
2905 them, though, for accurate register lifetime analysis. */
2906
2907 /* We don't want to remove any REG_DEAD notes as the code below
2908 does. */
2909
2910 for (insn = basic_block_head[b]; insn != head;
2911 insn = NEXT_INSN (insn))
2912 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2913 {
2914 /* See if the register gets born here. */
2915 /* We must check for registers being born before we check for
2916 registers dying. It is possible for a register to be born
2917 and die in the same insn, e.g. reading from a volatile
2918 memory location into an otherwise unused register. Such
2919 a register must be marked as dead after this insn. */
2920 if (GET_CODE (PATTERN (insn)) == SET
2921 || GET_CODE (PATTERN (insn)) == CLOBBER)
2922 sched_note_set (b, PATTERN (insn), 0);
2923 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2924 {
2925 int j;
2926 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2927 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2928 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2929 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2930
2931 /* ??? This code is obsolete and should be deleted. It
2932 is harmless though, so we will leave it in for now. */
2933 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2934 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
2935 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2936 }
2937
2938 /* Each call clobbers (makes live) all call-clobbered regs
2939 that are not global or fixed. Note that the function-value
2940 reg is a call_clobbered reg. */
2941
2942 if (GET_CODE (insn) == CALL_INSN)
2943 {
2944 int j;
2945 for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
2946 if (call_used_regs[j] && ! global_regs[j]
2947 && ! fixed_regs[j])
2948 {
2949 SET_REGNO_REG_SET (bb_live_regs, j);
2950 CLEAR_REGNO_REG_SET (bb_dead_regs, j);
2951 }
2952 }
2953
2954 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2955 {
2956 if ((REG_NOTE_KIND (link) == REG_DEAD
2957 || REG_NOTE_KIND (link) == REG_UNUSED)
2958 /* Verify that the REG_NOTE has a valid value. */
2959 && GET_CODE (XEXP (link, 0)) == REG)
2960 {
2961 register int regno = REGNO (XEXP (link, 0));
2962
2963 if (regno < FIRST_PSEUDO_REGISTER)
2964 {
2965 int j = HARD_REGNO_NREGS (regno,
2966 GET_MODE (XEXP (link, 0)));
2967 while (--j >= 0)
2968 {
2969 CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
2970 SET_REGNO_REG_SET (bb_dead_regs, regno + j);
2971 }
2972 }
2973 else
2974 {
2975 CLEAR_REGNO_REG_SET (bb_live_regs, regno);
2976 SET_REGNO_REG_SET (bb_dead_regs, regno);
2977 }
2978 }
2979 }
2980 }
2981 }
2982 }
2983
2984 /* If debugging information is being produced, keep track of the line
2985 number notes for each insn. */
2986 if (write_symbols != NO_DEBUG)
2987 {
2988 /* We must use the true line number for the first insn in the block
2989 that was computed and saved at the start of this pass. We can't
2990 use the current line number, because scheduling of the previous
2991 block may have changed the current line number. */
2992 rtx line = line_note_head[b];
2993
2994 for (insn = basic_block_head[b];
2995 insn != next_tail;
2996 insn = NEXT_INSN (insn))
2997 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
2998 line = insn;
2999 else
3000 LINE_NOTE (insn) = line;
3001 }
3002
3003 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3004 {
3005 rtx prev, next, link;
3006
3007 /* Farm out notes. This is needed to keep the debugger from
3008 getting completely deranged. */
3009 if (GET_CODE (insn) == NOTE)
3010 {
3011 prev = insn;
3012 insn = unlink_notes (insn, next_tail);
3013 if (prev == tail)
3014 abort ();
3015 if (prev == head)
3016 abort ();
3017 if (insn == next_tail)
3018 abort ();
3019 }
3020
3021 if (reload_completed == 0
3022 && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3023 {
3024 /* See if the register gets born here. */
3025 /* We must check for registers being born before we check for
3026 registers dying. It is possible for a register to be born and
3027 die in the same insn, e.g. reading from a volatile memory
3028 location into an otherwise unused register. Such a register
3029 must be marked as dead after this insn. */
3030 if (GET_CODE (PATTERN (insn)) == SET
3031 || GET_CODE (PATTERN (insn)) == CLOBBER)
3032 sched_note_set (b, PATTERN (insn), 0);
3033 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3034 {
3035 int j;
3036 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3037 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3038 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3039 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3040
3041 /* ??? This code is obsolete and should be deleted. It
3042 is harmless though, so we will leave it in for now. */
3043 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3044 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
3045 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3046 }
3047
3048 /* Each call clobbers (makes live) all call-clobbered regs that are
3049 not global or fixed. Note that the function-value reg is a
3050 call_clobbered reg. */
3051
3052 if (GET_CODE (insn) == CALL_INSN)
3053 {
3054 int j;
3055 for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
3056 if (call_used_regs[j] && ! global_regs[j]
3057 && ! fixed_regs[j])
3058 {
3059 SET_REGNO_REG_SET (bb_live_regs, j);
3060 CLEAR_REGNO_REG_SET (bb_dead_regs, j);
3061 }
3062 }
3063
3064 /* Need to know what registers this insn kills. */
3065 for (prev = 0, link = REG_NOTES (insn); link; link = next)
3066 {
3067 next = XEXP (link, 1);
3068 if ((REG_NOTE_KIND (link) == REG_DEAD
3069 || REG_NOTE_KIND (link) == REG_UNUSED)
3070 /* Verify that the REG_NOTE has a valid value. */
3071 && GET_CODE (XEXP (link, 0)) == REG)
3072 {
3073 register int regno = REGNO (XEXP (link, 0));
3074
3075 /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
3076 alone. */
3077 if (REG_NOTE_KIND (link) == REG_DEAD)
3078 {
3079 if (prev)
3080 XEXP (prev, 1) = next;
3081 else
3082 REG_NOTES (insn) = next;
3083 XEXP (link, 1) = dead_notes;
3084 dead_notes = link;
3085 }
3086 else
3087 prev = link;
3088
3089 if (regno < FIRST_PSEUDO_REGISTER)
3090 {
3091 int j = HARD_REGNO_NREGS (regno,
3092 GET_MODE (XEXP (link, 0)));
3093 while (--j >= 0)
3094 {
3095 CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
3096 SET_REGNO_REG_SET (bb_dead_regs, regno + j);
3097 }
3098 }
3099 else
3100 {
3101 CLEAR_REGNO_REG_SET (bb_live_regs, regno);
3102 SET_REGNO_REG_SET (bb_dead_regs, regno);
3103 }
3104 }
3105 else
3106 prev = link;
3107 }
3108 }
3109 }
3110
3111 if (reload_completed == 0)
3112 {
3113 /* Keep track of register lives. */
3114 old_live_regs = ALLOCA_REG_SET ();
3115 regs_sometimes_live
3116 = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
3117 sometimes_max = 0;
3118
3119 /* Start with registers live at end. */
3120 COPY_REG_SET (old_live_regs, bb_live_regs);
3121 EXECUTE_IF_SET_IN_REG_SET (bb_live_regs, 0, j,
3122 {
3123 sometimes_max
3124 = new_sometimes_live (regs_sometimes_live,
3125 j, sometimes_max);
3126 });
3127 }
3128
3129 SCHED_SORT (ready, n_ready, 1);
3130
3131 if (file)
3132 {
3133 fprintf (file, ";; ready list initially:\n;; ");
3134 for (i = 0; i < n_ready; i++)
3135 fprintf (file, "%d ", INSN_UID (ready[i]));
3136 fprintf (file, "\n\n");
3137
3138 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3139 if (INSN_PRIORITY (insn) > 0)
3140 fprintf (file, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
3141 INSN_UID (insn), INSN_PRIORITY (insn),
3142 INSN_REF_COUNT (insn));
3143 }
3144
3145 /* Now HEAD and TAIL are going to become disconnected
3146 entirely from the insn chain. */
3147 tail = 0;
3148
3149 /* Q_SIZE will always be zero here. */
3150 q_ptr = 0; clock = 0;
3151 bzero ((char *) insn_queue, sizeof (insn_queue));
3152
3153 /* Now, perform list scheduling. */
3154
3155 /* Where we start inserting insns is after TAIL. */
3156 last = next_tail;
3157
3158 new_needs = (NEXT_INSN (prev_head) == basic_block_head[b]
3159 ? NEED_HEAD : NEED_NOTHING);
3160 if (PREV_INSN (next_tail) == basic_block_end[b])
3161 new_needs |= NEED_TAIL;
3162
3163 new_ready = n_ready;
3164 while (sched_n_insns < n_insns)
3165 {
3166 q_ptr = NEXT_Q (q_ptr); clock++;
3167
3168 /* Add all pending insns that can be scheduled without stalls to the
3169 ready list. */
3170 for (insn = insn_queue[q_ptr]; insn; insn = NEXT_INSN (insn))
3171 {
3172 if (file)
3173 fprintf (file, ";; launching %d before %d with no stalls at T-%d\n",
3174 INSN_UID (insn), INSN_UID (last), clock);
3175 ready[new_ready++] = insn;
3176 q_size -= 1;
3177 }
3178 insn_queue[q_ptr] = 0;
3179
3180 /* If there are no ready insns, stall until one is ready and add all
3181 of the pending insns at that point to the ready list. */
3182 if (new_ready == 0)
3183 {
3184 register int stalls;
3185
3186 for (stalls = 1; stalls < INSN_QUEUE_SIZE; stalls++)
3187 if ((insn = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
3188 {
3189 for (; insn; insn = NEXT_INSN (insn))
3190 {
3191 if (file)
3192 fprintf (file, ";; launching %d before %d with %d stalls at T-%d\n",
3193 INSN_UID (insn), INSN_UID (last), stalls, clock);
3194 ready[new_ready++] = insn;
3195 q_size -= 1;
3196 }
3197 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
3198 break;
3199 }
3200
3201 q_ptr = NEXT_Q_AFTER (q_ptr, stalls); clock += stalls;
3202 }
3203
3204 /* There should be some instructions waiting to fire. */
3205 if (new_ready == 0)
3206 abort ();
3207
3208 if (file)
3209 {
3210 fprintf (file, ";; ready list at T-%d:", clock);
3211 for (i = 0; i < new_ready; i++)
3212 fprintf (file, " %d (%x)",
3213 INSN_UID (ready[i]), INSN_PRIORITY (ready[i]));
3214 }
3215
3216 /* Sort the ready list and choose the best insn to schedule. Select
3217 which insn should issue in this cycle and queue those that are
3218 blocked by function unit hazards.
3219
3220 N_READY holds the number of items that were scheduled the last time,
3221 minus the one instruction scheduled on the last loop iteration; it
3222 is not modified for any other reason in this loop. */
3223
3224 SCHED_SORT (ready, new_ready, n_ready);
3225 if (MAX_BLOCKAGE > 1)
3226 {
3227 new_ready = schedule_select (ready, new_ready, clock, file);
3228 if (new_ready == 0)
3229 {
3230 if (file)
3231 fprintf (file, "\n");
3232 /* We must set n_ready here, to ensure that sorting always
3233 occurs when we come back to the SCHED_SORT line above. */
3234 n_ready = 0;
3235 continue;
3236 }
3237 }
3238 n_ready = new_ready;
3239 last_scheduled_insn = insn = ready[0];
3240
3241 /* The first insn scheduled becomes the new tail. */
3242 if (tail == 0)
3243 tail = insn;
3244
3245 if (file)
3246 {
3247 fprintf (file, ", now");
3248 for (i = 0; i < n_ready; i++)
3249 fprintf (file, " %d", INSN_UID (ready[i]));
3250 fprintf (file, "\n");
3251 }
3252
3253 if (DONE_PRIORITY_P (insn))
3254 abort ();
3255
3256 if (reload_completed == 0)
3257 {
3258 /* Process this insn, and each insn linked to this one which must
3259 be immediately output after this insn. */
3260 do
3261 {
3262 /* First we kill registers set by this insn, and then we
3263 make registers used by this insn live. This is the opposite
3264 order used above because we are traversing the instructions
3265 backwards. */
3266
3267 /* Strictly speaking, we should scan REG_UNUSED notes and make
3268 every register mentioned there live; however, we would just
3269 kill them again immediately below, so there seems to be no
3270 reason to bother. */
3271
3272 /* See if this is the last notice we must take of a register. */
3273 if (GET_CODE (PATTERN (insn)) == SET
3274 || GET_CODE (PATTERN (insn)) == CLOBBER)
3275 sched_note_set (b, PATTERN (insn), 1);
3276 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3277 {
3278 int j;
3279 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3280 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3281 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3282 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 1);
3283 }
3284
3285 /* This code keeps life analysis information up to date. */
3286 if (GET_CODE (insn) == CALL_INSN)
3287 {
3288 register struct sometimes *p;
3289
3290 /* A call kills all call used registers that are not
3291 global or fixed, except for those mentioned in the call
3292 pattern which will be made live again later. */
3293 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3294 if (call_used_regs[i] && ! global_regs[i]
3295 && ! fixed_regs[i])
3296 {
3297 CLEAR_REGNO_REG_SET (bb_live_regs, i);
3298 SET_REGNO_REG_SET (bb_dead_regs, i);
3299 }
3300
3301 /* Regs live at the time of a call instruction must not
3302 go in a register clobbered by calls. Record this for
3303 all regs now live. Note that insns which are born or
3304 die in a call do not cross a call, so this must be done
3305 after the killings (above) and before the births
3306 (below). */
3307 p = regs_sometimes_live;
3308 for (i = 0; i < sometimes_max; i++, p++)
3309 if (REGNO_REG_SET_P (bb_live_regs, p->regno))
3310 p->calls_crossed += 1;
3311 }
3312
3313 /* Make every register used live, and add REG_DEAD notes for
3314 registers which were not live before we started. */
3315 attach_deaths_insn (insn);
3316
3317 /* Find registers now made live by that instruction. */
3318 EXECUTE_IF_AND_COMPL_IN_REG_SET (bb_live_regs, old_live_regs, 0, i,
3319 {
3320 sometimes_max
3321 = new_sometimes_live (regs_sometimes_live,
3322 i, sometimes_max);
3323 });
3324 IOR_REG_SET (old_live_regs, bb_live_regs);
3325
3326 /* Count lengths of all regs we are worrying about now,
3327 and handle registers no longer live. */
3328
3329 for (i = 0; i < sometimes_max; i++)
3330 {
3331 register struct sometimes *p = &regs_sometimes_live[i];
3332 int regno = p->regno;
3333
3334 p->live_length += 1;
3335
3336 if (!REGNO_REG_SET_P (bb_live_regs, p->regno))
3337 {
3338 /* This is the end of one of this register's lifetime
3339 segments. Save the lifetime info collected so far,
3340 and clear its bit in the old_live_regs entry. */
3341 sched_reg_live_length[regno] += p->live_length;
3342 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
3343 CLEAR_REGNO_REG_SET (old_live_regs, p->regno);
3344
3345 /* Delete the reg_sometimes_live entry for this reg by
3346 copying the last entry over top of it. */
3347 *p = regs_sometimes_live[--sometimes_max];
3348 /* ...and decrement i so that this newly copied entry
3349 will be processed. */
3350 i--;
3351 }
3352 }
3353
3354 link = insn;
3355 insn = PREV_INSN (insn);
3356 }
3357 while (SCHED_GROUP_P (link));
3358
3359 /* Set INSN back to the insn we are scheduling now. */
3360 insn = ready[0];
3361 }
3362
3363 /* Schedule INSN. Remove it from the ready list. */
3364 ready += 1;
3365 n_ready -= 1;
3366
3367 sched_n_insns += 1;
3368 NEXT_INSN (insn) = last;
3369 PREV_INSN (last) = insn;
3370
3371 /* Everything that precedes INSN now either becomes "ready", if
3372 it can execute immediately before INSN, or "pending", if
3373 there must be a delay. Give INSN high enough priority that
3374 at least one (maybe more) reg-killing insns can be launched
3375 ahead of all others. Mark INSN as scheduled by changing its
3376 priority to -1. */
3377 INSN_PRIORITY (insn) = LAUNCH_PRIORITY;
3378 new_ready = schedule_insn (insn, ready, n_ready, clock);
3379 INSN_PRIORITY (insn) = DONE_PRIORITY;
3380
3381 /* Schedule all prior insns that must not be moved. */
3382 if (SCHED_GROUP_P (insn))
3383 {
3384 /* Disable these insns from being launched, in case one of the
3385 insns in the group has a dependency on an earlier one. */
3386 link = insn;
3387 while (SCHED_GROUP_P (link))
3388 {
3389 /* Disable these insns from being launched by anybody. */
3390 link = PREV_INSN (link);
3391 INSN_REF_COUNT (link) = 0;
3392 }
3393
3394 /* Now handle each group insn like the main insn was handled
3395 above. */
3396 link = insn;
3397 while (SCHED_GROUP_P (link))
3398 {
3399 link = PREV_INSN (link);
3400
3401 sched_n_insns += 1;
3402
3403 /* ??? Why don't we set LAUNCH_PRIORITY here? */
3404 new_ready = schedule_insn (link, ready, new_ready, clock);
3405 INSN_PRIORITY (link) = DONE_PRIORITY;
3406 }
3407 }
3408
3409 /* Put back NOTE_INSN_SETJMP,
3410 NOTE_INSN_{LOOP,EHREGION}_{BEG,END} notes. */
3411
3412 /* To prime the loop. We need to handle INSN and all the insns in the
3413 sched group. */
3414 last = NEXT_INSN (insn);
3415 do
3416 {
3417 insn = PREV_INSN (last);
3418
3419 /* Maintain a valid chain so emit_note_before works.
3420 This is necessary because PREV_INSN (insn) isn't valid
3421 (if ! SCHED_GROUP_P) and if it points to an insn already
3422 scheduled, a circularity will result. */
3423 if (! SCHED_GROUP_P (insn))
3424 {
3425 NEXT_INSN (prev_head) = insn;
3426 PREV_INSN (insn) = prev_head;
3427 }
3428
3429 last = reemit_notes (insn, insn);
3430 }
3431 while (SCHED_GROUP_P (insn));
3432 }
3433 if (q_size != 0)
3434 abort ();
3435
3436 if (reload_completed == 0)
3437 finish_sometimes_live (regs_sometimes_live, sometimes_max);
3438
3439 /* HEAD is now the first insn in the chain of insns that have
3440 been scheduled by the loop above.
3441 TAIL is the last of those insns. */
3442 head = last;
3443
3444 /* NOTE_LIST is the end of a chain of notes previously found
3445 among the insns. Insert them at the beginning of the insns. */
3446 if (note_list != 0)
3447 {
3448 rtx note_head = note_list;
3449 while (PREV_INSN (note_head))
3450 note_head = PREV_INSN (note_head);
3451
3452 PREV_INSN (head) = note_list;
3453 NEXT_INSN (note_list) = head;
3454 head = note_head;
3455 }
3456
3457 /* There should be no REG_DEAD notes leftover at the end.
3458 In practice, this can occur as the result of bugs in flow, combine.c,
3459 and/or sched.c. The values of the REG_DEAD notes remaining are
3460 meaningless, because dead_notes is just used as a free list. */
3461 #if 1
3462 if (dead_notes != 0)
3463 abort ();
3464 #endif
3465
3466 if (new_needs & NEED_HEAD)
3467 basic_block_head[b] = head;
3468 PREV_INSN (head) = prev_head;
3469 NEXT_INSN (prev_head) = head;
3470
3471 if (new_needs & NEED_TAIL)
3472 basic_block_end[b] = tail;
3473 NEXT_INSN (tail) = next_tail;
3474 PREV_INSN (next_tail) = tail;
3475
3476 /* Restore the line-number notes of each insn. */
3477 if (write_symbols != NO_DEBUG)
3478 {
3479 rtx line, note, prev, new;
3480 int notes = 0;
3481
3482 head = basic_block_head[b];
3483 next_tail = NEXT_INSN (basic_block_end[b]);
3484
3485 /* Determine the current line-number. We want to know the current
3486 line number of the first insn of the block here, in case it is
3487 different from the true line number that was saved earlier. If
3488 different, then we need a line number note before the first insn
3489 of this block. If it happens to be the same, then we don't want to
3490 emit another line number note here. */
3491 for (line = head; line; line = PREV_INSN (line))
3492 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3493 break;
3494
3495 /* Walk the insns keeping track of the current line-number and inserting
3496 the line-number notes as needed. */
3497 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3498 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3499 line = insn;
3500 /* This used to emit line number notes before every non-deleted note.
3501 However, this confuses a debugger, because line notes not separated
3502 by real instructions all end up at the same address. I can find no
3503 use for line number notes before other notes, so none are emitted. */
3504 else if (GET_CODE (insn) != NOTE
3505 && (note = LINE_NOTE (insn)) != 0
3506 && note != line
3507 && (line == 0
3508 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
3509 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
3510 {
3511 line = note;
3512 prev = PREV_INSN (insn);
3513 if (LINE_NOTE (note))
3514 {
3515 /* Re-use the original line-number note. */
3516 LINE_NOTE (note) = 0;
3517 PREV_INSN (note) = prev;
3518 NEXT_INSN (prev) = note;
3519 PREV_INSN (insn) = note;
3520 NEXT_INSN (note) = insn;
3521 }
3522 else
3523 {
3524 notes++;
3525 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
3526 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
3527 RTX_INTEGRATED_P (new) = RTX_INTEGRATED_P (note);
3528 }
3529 }
3530 if (file && notes)
3531 fprintf (file, ";; added %d line-number notes\n", notes);
3532 }
3533
3534 if (file)
3535 {
3536 fprintf (file, ";; total time = %d\n;; new basic block head = %d\n;; new basic block end = %d\n\n",
3537 clock, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
3538 }
3539
3540 /* Yow! We're done! */
3541 free_pending_lists ();
3542
3543 ret:
3544 FREE_REG_SET (reg_pending_sets);
3545 FREE_REG_SET (old_live_regs);
3546
3547 return;
3548 }
3549 \f
3550 /* Subroutine of split_hard_reg_notes. Searches X for any reference to
3551 REGNO, returning the rtx of the reference found if any. Otherwise,
3552 returns 0. */
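
/* A small illustrative example: given
PAT = (set (reg:SI 3) (plus:SI (reg:SI 1) (reg:SI 2))),
regno_use_in (1, PAT) recurses through the rtx format string
('e' marks a subexpression, 'E' a vector of them) and returns
the inner (reg:SI 1). */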
3553
3554 static rtx
3555 regno_use_in (regno, x)
3556 int regno;
3557 rtx x;
3558 {
3559 register char *fmt;
3560 int i, j;
3561 rtx tem;
3562
3563 if (GET_CODE (x) == REG && REGNO (x) == regno)
3564 return x;
3565
3566 fmt = GET_RTX_FORMAT (GET_CODE (x));
3567 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3568 {
3569 if (fmt[i] == 'e')
3570 {
3571 if ((tem = regno_use_in (regno, XEXP (x, i))))
3572 return tem;
3573 }
3574 else if (fmt[i] == 'E')
3575 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3576 if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
3577 return tem;
3578 }
3579
3580 return 0;
3581 }
3582
3583 /* Subroutine of update_flow_info. Determines whether any new REG_NOTEs are
3584 needed for the hard register mentioned in the note. This can happen
3585 if the reference to the hard register in the original insn was split into
3586 several smaller hard register references in the split insns. */
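
/* For instance, on a typical 32-bit target a (reg:DI 2) occupies
hard regs 2 and 3; if the splitter rewrote the insn as two SImode
sets, the original REG_DEAD note for (reg:DI 2) must be replaced by
separate notes on whichever split insns last use each half. */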
3587
3588 static void
3589 split_hard_reg_notes (note, first, last, orig_insn)
3590 rtx note, first, last, orig_insn;
3591 {
3592 rtx reg, temp, link;
3593 int n_regs, i, new_reg;
3594 rtx insn;
3595
3596 /* Assume that this is a REG_DEAD note. */
3597 if (REG_NOTE_KIND (note) != REG_DEAD)
3598 abort ();
3599
3600 reg = XEXP (note, 0);
3601
3602 n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
3603
3604 for (i = 0; i < n_regs; i++)
3605 {
3606 new_reg = REGNO (reg) + i;
3607
3608 /* Check for references to new_reg in the split insns. */
3609 for (insn = last; ; insn = PREV_INSN (insn))
3610 {
3611 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3612 && (temp = regno_use_in (new_reg, PATTERN (insn))))
3613 {
3614 /* Create a new reg dead note here. */
3615 link = rtx_alloc (EXPR_LIST);
3616 PUT_REG_NOTE_KIND (link, REG_DEAD);
3617 XEXP (link, 0) = temp;
3618 XEXP (link, 1) = REG_NOTES (insn);
3619 REG_NOTES (insn) = link;
3620
3621 /* If we killed multiple registers here, then add in the excess. */
3622 i += HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) - 1;
3623
3624 break;
3625 }
3626 /* It isn't mentioned anywhere, so no new reg note is needed for
3627 this register. */
3628 if (insn == first)
3629 break;
3630 }
3631 }
3632 }
3633
3634 /* Subroutine of update_flow_info. Determines whether a SET or CLOBBER in an
3635 insn created by splitting needs a REG_DEAD or REG_UNUSED note added. */
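
/* Roughly: the walk below runs backwards from LAST toward INSN, so
the first insn found that uses the register set by PAT is its last
user and gets the REG_DEAD note; an insn that both uses and sets
the register needs no new note. */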
3636
3637 static void
3638 new_insn_dead_notes (pat, insn, last, orig_insn)
3639 rtx pat, insn, last, orig_insn;
3640 {
3641 rtx dest, tem, set;
3642
3643 /* PAT is either a CLOBBER or a SET here. */
3644 dest = XEXP (pat, 0);
3645
3646 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3647 || GET_CODE (dest) == STRICT_LOW_PART
3648 || GET_CODE (dest) == SIGN_EXTRACT)
3649 dest = XEXP (dest, 0);
3650
3651 if (GET_CODE (dest) == REG)
3652 {
3653 for (tem = last; tem != insn; tem = PREV_INSN (tem))
3654 {
3655 if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
3656 && reg_overlap_mentioned_p (dest, PATTERN (tem))
3657 && (set = single_set (tem)))
3658 {
3659 rtx tem_dest = SET_DEST (set);
3660
3661 while (GET_CODE (tem_dest) == ZERO_EXTRACT
3662 || GET_CODE (tem_dest) == SUBREG
3663 || GET_CODE (tem_dest) == STRICT_LOW_PART
3664 || GET_CODE (tem_dest) == SIGN_EXTRACT)
3665 tem_dest = XEXP (tem_dest, 0);
3666
3667 if (! rtx_equal_p (tem_dest, dest))
3668 {
3669 /* Use the same scheme as combine.c: don't put both REG_DEAD
3670 and REG_UNUSED notes on the same insn. */
3671 if (! find_regno_note (tem, REG_UNUSED, REGNO (dest))
3672 && ! find_regno_note (tem, REG_DEAD, REGNO (dest)))
3673 {
3674 rtx note = rtx_alloc (EXPR_LIST);
3675 PUT_REG_NOTE_KIND (note, REG_DEAD);
3676 XEXP (note, 0) = dest;
3677 XEXP (note, 1) = REG_NOTES (tem);
3678 REG_NOTES (tem) = note;
3679 }
3680 /* The reg only dies in one insn, the last one that uses
3681 it. */
3682 break;
3683 }
3684 else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
3685 /* We found an instruction that both uses the register,
3686 and sets it, so no new REG_NOTE is needed for this set. */
3687 break;
3688 }
3689 }
3690 /* If this is a set, it must die somewhere, unless it is the dest of
3691 the original insn, and hence is live after the original insn. Abort
3692 if it isn't supposed to be live after the original insn.
3693
3694 If this is a clobber, then just add a REG_UNUSED note. */
3695 if (tem == insn)
3696 {
3697 int live_after_orig_insn = 0;
3698 rtx pattern = PATTERN (orig_insn);
3699 int i;
3700
3701 if (GET_CODE (pat) == CLOBBER)
3702 {
3703 rtx note = rtx_alloc (EXPR_LIST);
3704 PUT_REG_NOTE_KIND (note, REG_UNUSED);
3705 XEXP (note, 0) = dest;
3706 XEXP (note, 1) = REG_NOTES (insn);
3707 REG_NOTES (insn) = note;
3708 return;
3709 }
3710
3711 /* The original insn could have multiple sets, so search the
3712 insn for all sets. */
3713 if (GET_CODE (pattern) == SET)
3714 {
3715 if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
3716 live_after_orig_insn = 1;
3717 }
3718 else if (GET_CODE (pattern) == PARALLEL)
3719 {
3720 for (i = 0; i < XVECLEN (pattern, 0); i++)
3721 if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
3722 && reg_overlap_mentioned_p (dest,
3723 SET_DEST (XVECEXP (pattern,
3724 0, i))))
3725 live_after_orig_insn = 1;
3726 }
3727
3728 if (! live_after_orig_insn)
3729 abort ();
3730 }
3731 }
3732 }
3733
3734 /* Subroutine of update_flow_info. Update the value of reg_n_sets for all
3735 registers modified by X. INC is -1 if the containing insn is being deleted,
3736 and is 1 if the containing insn is a newly generated insn. */
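
/* For example, if ORIG_INSN's single set of a pseudo was replaced
by two split insns that each set it, the net effect here is
REG_N_SETS going from 1 to 2 for that pseudo. */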
3737
3738 static void
3739 update_n_sets (x, inc)
3740 rtx x;
3741 int inc;
3742 {
3743 rtx dest = SET_DEST (x);
3744
3745 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
3746 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3747 dest = SUBREG_REG (dest);
3748
3749 if (GET_CODE (dest) == REG)
3750 {
3751 int regno = REGNO (dest);
3752
3753 if (regno < FIRST_PSEUDO_REGISTER)
3754 {
3755 register int i;
3756 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
3757
3758 for (i = regno; i < endregno; i++)
3759 REG_N_SETS (i) += inc;
3760 }
3761 else
3762 REG_N_SETS (regno) += inc;
3763 }
3764 }
3765
3766 /* Updates all flow-analysis related quantities (including REG_NOTES) for
3767 the insns from FIRST to LAST inclusive that were created by splitting
3768 ORIG_INSN. NOTES are the original REG_NOTES. */
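
/* In outline: first move each kind of note from ORIG_INSN to the
appropriate new insn (REG_DEAD/REG_UNUSED to the last insn mentioning
the reg, REG_WAS_0 to the first insn with the same dest, REG_LIBCALL
to the first and REG_RETVAL to the last insn, and so on); then create
any REG_DEAD/REG_UNUSED notes the new insns themselves need; and
finally update REG_N_SETS. */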
3769
3770 static void
3771 update_flow_info (notes, first, last, orig_insn)
3772 rtx notes;
3773 rtx first, last;
3774 rtx orig_insn;
3775 {
3776 rtx insn, note;
3777 rtx next;
3778 rtx orig_dest, temp;
3779 rtx set;
3780
3781 /* Get and save the destination set by the original insn. */
3782
3783 orig_dest = single_set (orig_insn);
3784 if (orig_dest)
3785 orig_dest = SET_DEST (orig_dest);
3786
3787 /* Move REG_NOTES from the original insn to where they now belong. */
3788
3789 for (note = notes; note; note = next)
3790 {
3791 next = XEXP (note, 1);
3792 switch (REG_NOTE_KIND (note))
3793 {
3794 case REG_DEAD:
3795 case REG_UNUSED:
3796 /* Move these notes from the original insn to the last new insn where
3797 the register is now set. */
3798
3799 for (insn = last; ; insn = PREV_INSN (insn))
3800 {
3801 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3802 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3803 {
3804 /* If this note refers to a multiple word hard register, it
3805 may have been split into several smaller hard register
3806 references, so handle it specially. */
3807 temp = XEXP (note, 0);
3808 if (REG_NOTE_KIND (note) == REG_DEAD
3809 && GET_CODE (temp) == REG
3810 && REGNO (temp) < FIRST_PSEUDO_REGISTER
3811 && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) > 1)
3812 split_hard_reg_notes (note, first, last, orig_insn);
3813 else
3814 {
3815 XEXP (note, 1) = REG_NOTES (insn);
3816 REG_NOTES (insn) = note;
3817 }
3818
3819 /* Sometimes need to convert REG_UNUSED notes to REG_DEAD
3820 notes. */
3821 /* ??? This won't handle multiple word registers correctly,
3822 but should be good enough for now. */
3823 if (REG_NOTE_KIND (note) == REG_UNUSED
3824 && GET_CODE (XEXP (note, 0)) != SCRATCH
3825 && ! dead_or_set_p (insn, XEXP (note, 0)))
3826 PUT_REG_NOTE_KIND (note, REG_DEAD);
3827
3828 /* The reg only dies in one insn, the last one that uses
3829 it. */
3830 break;
3831 }
3832 /* It must die somewhere; fail if we couldn't find where it died.
3833
3834 If this is a REG_UNUSED note, then it must be a temporary
3835 register that was not needed by this instantiation of the
3836 pattern, so we can safely ignore it. */
3837 if (insn == first)
3838 {
3839 /* After reload, REG_DEAD notes sometimes come an
3840 instruction after the register actually dies. */
3841 if (reload_completed && REG_NOTE_KIND (note) == REG_DEAD)
3842 {
3843 XEXP (note, 1) = REG_NOTES (insn);
3844 REG_NOTES (insn) = note;
3845 break;
3846 }
3847
3848 if (REG_NOTE_KIND (note) != REG_UNUSED)
3849 abort ();
3850
3851 break;
3852 }
3853 }
3854 break;
3855
3856 case REG_WAS_0:
3857 /* If the insn that set the register to 0 was deleted, this
3858 note cannot be relied on any longer. The destination might
3859 even have been moved to memory.
3860 This was observed for SH4 with execute/920501-6.c compilation,
3861 -O2 -fomit-frame-pointer -finline-functions . */
3862 if (GET_CODE (XEXP (note, 0)) == NOTE
3863 || INSN_DELETED_P (XEXP (note, 0)))
3864 break;
3865 /* This note applies to the dest of the original insn. Find the
3866 first new insn that now has the same dest, and move the note
3867 there. */
3868
3869 if (! orig_dest)
3870 abort ();
3871
3872 for (insn = first; ; insn = NEXT_INSN (insn))
3873 {
3874 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3875 && (temp = single_set (insn))
3876 && rtx_equal_p (SET_DEST (temp), orig_dest))
3877 {
3878 XEXP (note, 1) = REG_NOTES (insn);
3879 REG_NOTES (insn) = note;
3880 /* The reg is only zero before one insn, the first that
3881 uses it. */
3882 break;
3883 }
3884 /* If this note refers to a multiple word hard
3885 register, it may have been split into several smaller
3886 hard register references. We could split the notes,
3887 but simply dropping them is good enough. */
3888 if (GET_CODE (orig_dest) == REG
3889 && REGNO (orig_dest) < FIRST_PSEUDO_REGISTER
3890 && HARD_REGNO_NREGS (REGNO (orig_dest),
3891 GET_MODE (orig_dest)) > 1)
3892 break;
3893 /* It must be set somewhere; fail if we couldn't find where it
3894 was set. */
3895 if (insn == last)
3896 abort ();
3897 }
3898 break;
3899
3900 case REG_EQUAL:
3901 case REG_EQUIV:
3902 /* A REG_EQUIV or REG_EQUAL note on an insn with more than one
3903 set is meaningless. Just drop the note. */
3904 if (! orig_dest)
3905 break;
3906
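/* Otherwise fall through: a note with a valid single dest is
handled exactly like REG_NO_CONFLICT below. */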
3907 case REG_NO_CONFLICT:
3908 /* These notes apply to the dest of the original insn. Find the last
3909 new insn that now has the same dest, and move the note there. */
3910
3911 if (! orig_dest)
3912 abort ();
3913
3914 for (insn = last; ; insn = PREV_INSN (insn))
3915 {
3916 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3917 && (temp = single_set (insn))
3918 && rtx_equal_p (SET_DEST (temp), orig_dest))
3919 {
3920 XEXP (note, 1) = REG_NOTES (insn);
3921 REG_NOTES (insn) = note;
3922 /* Only put this note on one of the new insns. */
3923 break;
3924 }
3925
3926 /* The original dest must still be set someplace. Abort if we
3927 couldn't find it. */
3928 if (insn == first)
3929 {
3930 /* However, if this note refers to a multiple word hard
3931 register, it may have been split into several smaller
3932 hard register references. We could split the notes,
3933 but simply dropping them is good enough. */
3934 if (GET_CODE (orig_dest) == REG
3935 && REGNO (orig_dest) < FIRST_PSEUDO_REGISTER
3936 && HARD_REGNO_NREGS (REGNO (orig_dest),
3937 GET_MODE (orig_dest)) > 1)
3938 break;
3939 /* Likewise for multi-word memory references. */
3940 if (GET_CODE (orig_dest) == MEM
3941 && SIZE_FOR_MODE (orig_dest) > MOVE_MAX)
3942 break;
3943 abort ();
3944 }
3945 }
3946 break;
3947
3948 case REG_LIBCALL:
3949 /* Move a REG_LIBCALL note to the first insn created, and update
3950 the corresponding REG_RETVAL note. */
3951 XEXP (note, 1) = REG_NOTES (first);
3952 REG_NOTES (first) = note;
3953
3954 insn = XEXP (note, 0);
3955 note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
3956 if (note)
3957 XEXP (note, 0) = first;
3958 break;
3959
3960 case REG_EXEC_COUNT:
3961 /* Move a REG_EXEC_COUNT note to the first insn created. */
3962 XEXP (note, 1) = REG_NOTES (first);
3963 REG_NOTES (first) = note;
3964 break;
3965
3966 case REG_RETVAL:
3967 /* Move a REG_RETVAL note to the last insn created, and update
3968 the corresponding REG_LIBCALL note. */
3969 XEXP (note, 1) = REG_NOTES (last);
3970 REG_NOTES (last) = note;
3971
3972 insn = XEXP (note, 0);
3973 note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
3974 if (note)
3975 XEXP (note, 0) = last;
3976 break;
3977
3978 case REG_NONNEG:
3979 case REG_BR_PROB:
3980 /* This should be moved to whichever instruction is a JUMP_INSN. */
3981
3982 for (insn = last; ; insn = PREV_INSN (insn))
3983 {
3984 if (GET_CODE (insn) == JUMP_INSN)
3985 {
3986 XEXP (note, 1) = REG_NOTES (insn);
3987 REG_NOTES (insn) = note;
3988 /* Only put this note on one of the new insns. */
3989 break;
3990 }
3991 /* Fail if we couldn't find a JUMP_INSN. */
3992 if (insn == first)
3993 abort ();
3994 }
3995 break;
3996
3997 case REG_INC:
3998 /* reload sometimes leaves obsolete REG_INC notes around. */
3999 if (reload_completed)
4000 break;
4001 /* This should be moved to whichever instruction now has the
4002 increment operation. */
4003 abort ();
4004
4005 case REG_LABEL:
4006 /* Should be moved to the new insn(s) which use the label. */
4007 for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
4008 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4009 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
4010 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL,
4011 XEXP (note, 0),
4012 REG_NOTES (insn));
4013 break;
4014
4015 case REG_CC_SETTER:
4016 case REG_CC_USER:
4017 /* These two notes will never appear until after reorg, so we don't
4018 have to handle them here. */
4019 default:
4020 abort ();
4021 }
4022 }
4023
4024 /* Each new insn created, except the last, has a new set. If the destination
4025 is a register, then this reg is now live across several insns, whereas
4026 previously the dest reg was born and died within the same insn. To
4027 reflect this, we now need a REG_DEAD note on the insn where this
4028 dest reg dies.
4029
4030 Similarly, the new insns may have clobbers that need REG_UNUSED notes. */
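
/* For example (with illustrative pseudo numbers), if
(set (reg 100) (plus (plus (reg 1) (reg 2)) (reg 3))) was split into
(set (reg 200) (plus (reg 1) (reg 2))) followed by
(set (reg 100) (plus (reg 200) (reg 3))), the new pseudo 200 now
dies in the second insn and needs a REG_DEAD note there. */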
4031
4032 for (insn = first; insn != last; insn = NEXT_INSN (insn))
4033 {
4034 rtx pat;
4035 int i;
4036
4037 pat = PATTERN (insn);
4038 if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
4039 new_insn_dead_notes (pat, insn, last, orig_insn);
4040 else if (GET_CODE (pat) == PARALLEL)
4041 {
4042 for (i = 0; i < XVECLEN (pat, 0); i++)
4043 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
4044 || GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
4045 new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
4046 }
4047 }
4048
4049 /* If any insn, except the last, uses the register set by the last insn,
4050 then we need a new REG_DEAD note on that insn. In this case, there
4051 would not have been a REG_DEAD note for this register in the original
4052 insn because it was used and set within one insn. */
4053
4054 set = single_set (last);
4055 if (set)
4056 {
4057 rtx dest = SET_DEST (set);
4058
4059 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
4060 || GET_CODE (dest) == STRICT_LOW_PART
4061 || GET_CODE (dest) == SIGN_EXTRACT)
4062 dest = XEXP (dest, 0);
4063
4064 if (GET_CODE (dest) == REG
4065 /* Global registers are always live, so the code below does not
4066 apply to them. */
4067 && (REGNO (dest) >= FIRST_PSEUDO_REGISTER
4068 || ! global_regs[REGNO (dest)]))
4069 {
4070 rtx stop_insn = PREV_INSN (first);
4071
4072 /* If the last insn uses the register that it is setting, then
4073 we don't want to put a REG_DEAD note there. Search backwards
4074 to find the first insn that sets but does not use DEST. */
4075
4076 insn = last;
4077 if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
4078 {
4079 for (insn = PREV_INSN (insn); insn != first;
4080 insn = PREV_INSN (insn))
4081 {
4082 if ((set = single_set (insn))
4083 && reg_mentioned_p (dest, SET_DEST (set))
4084 && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
4085 break;
4086 }
4087 }
4088
4089 /* Now find the first insn that uses but does not set DEST. */
4090
4091 for (insn = PREV_INSN (insn); insn != stop_insn;
4092 insn = PREV_INSN (insn))
4093 {
4094 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4095 && reg_mentioned_p (dest, PATTERN (insn))
4096 && (set = single_set (insn)))
4097 {
4098 rtx insn_dest = SET_DEST (set);
4099
4100 while (GET_CODE (insn_dest) == ZERO_EXTRACT
4101 || GET_CODE (insn_dest) == SUBREG
4102 || GET_CODE (insn_dest) == STRICT_LOW_PART
4103 || GET_CODE (insn_dest) == SIGN_EXTRACT)
4104 insn_dest = XEXP (insn_dest, 0);
4105
4106 if (insn_dest != dest)
4107 {
4108 note = rtx_alloc (EXPR_LIST);
4109 PUT_REG_NOTE_KIND (note, REG_DEAD);
4110 XEXP (note, 0) = dest;
4111 XEXP (note, 1) = REG_NOTES (insn);
4112 REG_NOTES (insn) = note;
4113 /* The reg only dies in one insn, the last one
4114 that uses it. */
4115 break;
4116 }
4117 }
4118 }
4119 }
4120 }
4121
4122 /* If the original dest was a multiple register target, and the
4123 original insn was split such that the original dest is now set
4124 by two or more SUBREG sets, then the split insns no longer kill
4125 the destination of the original insn.
4126
4127 In this case, if there exists an instruction in the same basic block,
4128 before the split insn, which uses the original dest, and this use is
4129 killed by the original insn, then we must remove the REG_DEAD note on
4130 this insn, because it is now superfluous.
4131
4132 This does not apply when a hard register gets split, because the code
4133 knows how to handle overlapping hard registers properly. */
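
/* Concretely: if ORIG_INSN set (reg:DI 100) outright, but the split
insns set (subreg:SI (reg:DI 100) 0) and (subreg:SI (reg:DI 100) 1),
the register is no longer killed in one piece, so a REG_DEAD note
for it on an earlier insn in the block has become superfluous and
is removed below. */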
4134 if (orig_dest && GET_CODE (orig_dest) == REG)
4135 {
4136 int found_orig_dest = 0;
4137 int found_split_dest = 0;
4138
4139 for (insn = first; ; insn = NEXT_INSN (insn))
4140 {
4141 rtx pat = PATTERN (insn);
4142 int i = GET_CODE (pat) == PARALLEL ? XVECLEN (pat, 0) : 0;
4143 set = pat;
4144 for (;;)
4145 {
4146 if (GET_CODE (set) == SET)
4147 {
4148 if (GET_CODE (SET_DEST (set)) == REG
4149 && REGNO (SET_DEST (set)) == REGNO (orig_dest))
4150 {
4151 found_orig_dest = 1;
4152 break;
4153 }
4154 else if (GET_CODE (SET_DEST (set)) == SUBREG
4155 && SUBREG_REG (SET_DEST (set)) == orig_dest)
4156 {
4157 found_split_dest = 1;
4158 break;
4159 }
4160 }
4161 if (--i < 0)
4162 break;
4163 set = XVECEXP (pat, 0, i);
4164 }
4165
4166 if (insn == last)
4167 break;
4168 }
4169
4170 if (found_split_dest)
4171 {
4172 /* Search backwards from FIRST, looking for the first insn that uses
4173 the original dest. Stop if we pass a CODE_LABEL or a JUMP_INSN.
4174 If we find an insn, and it has a REG_DEAD note, then delete the
4175 note. */
4176
4177 for (insn = first; insn; insn = PREV_INSN (insn))
4178 {
4179 if (GET_CODE (insn) == CODE_LABEL
4180 || GET_CODE (insn) == JUMP_INSN)
4181 break;
4182 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4183 && reg_mentioned_p (orig_dest, insn))
4184 {
4185 note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
4186 if (note)
4187 remove_note (insn, note);
4188 }
4189 }
4190 }
4191 else if (! found_orig_dest)
4192 {
4193 /* This should never happen. */
4194 abort ();
4195 }
4196 }
4197
4198 /* Update reg_n_sets. This is necessary to prevent local alloc from
4199 converting REG_EQUAL notes to REG_EQUIV when splitting has changed
4200 a reg from being set once to being set multiple times. */
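/* (REG_EQUIV is only valid for a register set exactly once, so
keeping REG_N_SETS accurate here is what stops that promotion for
registers the splitter now sets more than once.) */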
4201
4202 {
4203 rtx x = PATTERN (orig_insn);
4204 RTX_CODE code = GET_CODE (x);
4205
4206 if (code == SET || code == CLOBBER)
4207 update_n_sets (x, -1);
4208 else if (code == PARALLEL)
4209 {
4210 int i;
4211 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4212 {
4213 code = GET_CODE (XVECEXP (x, 0, i));
4214 if (code == SET || code == CLOBBER)
4215 update_n_sets (XVECEXP (x, 0, i), -1);
4216 }
4217 }
4218
4219 for (insn = first; ; insn = NEXT_INSN (insn))
4220 {
4221 x = PATTERN (insn);
4222 code = GET_CODE (x);
4223
4224 if (code == SET || code == CLOBBER)
4225 update_n_sets (x, 1);
4226 else if (code == PARALLEL)
4227 {
4228 int i;
4229 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4230 {
4231 code = GET_CODE (XVECEXP (x, 0, i));
4232 if (code == SET || code == CLOBBER)
4233 update_n_sets (XVECEXP (x, 0, i), 1);
4234 }
4235 }
4236
4237 if (insn == last)
4238 break;
4239 }
4240 }
4241 }
4242
4243 /* The one entry point in this file. DUMP_FILE is the dump file for
4244 this pass. */
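
/* In outline, the driver below: allocates and zeroes the per-insn
and per-register arrays, records the starting line number of each
basic block, splits insns and schedules each block in turn, and
finally cleans up line-number notes and writes the register-life
statistics back. */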
4245
4246 void
4247 schedule_insns (dump_file)
4248 FILE *dump_file;
4249 {
4250 int max_uid = MAX_INSNS_PER_SPLIT * (get_max_uid () + 1);
4251 int b;
4252 rtx insn;
4253
4254 /* Taking care of this degenerate case makes the rest of
4255 this code simpler. */
4256 if (n_basic_blocks == 0)
4257 return;
4258
4259 /* Create an insn here so that we can hang dependencies off of it later. */
4260 sched_before_next_call
4261 = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
4262 NULL_RTX, 0, NULL_RTX, NULL_RTX);
4263
4264 /* Initialize the unused_*_lists. We can't use the ones left over from
4265 the previous function, because gcc has freed that memory. We can use
4266 the ones left over from the first sched pass in the second pass, however,
4267 so only clear them on the first sched pass. The first pass is before
4268 reload if flag_schedule_insns is set, otherwise it is afterwards. */
4269
4270 if (reload_completed == 0 || ! flag_schedule_insns)
4271 {
4272 unused_insn_list = 0;
4273 unused_expr_list = 0;
4274 }
4275
4276 /* We create no insns here, only reorder them, so we
4277 remember how far we can cut back the stack on exit. */
4278
4279 /* Allocate data for this pass. See comments, above,
4280 for what these vectors do. */
4281 insn_luid = (int *) alloca (max_uid * sizeof (int));
4282 insn_priority = (int *) alloca (max_uid * sizeof (int));
4283 insn_tick = (int *) alloca (max_uid * sizeof (int));
4284 insn_costs = (short *) alloca (max_uid * sizeof (short));
4285 insn_units = (short *) alloca (max_uid * sizeof (short));
4286 insn_blockage = (unsigned int *) alloca (max_uid * sizeof (unsigned int));
4287 insn_ref_count = (int *) alloca (max_uid * sizeof (int));
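
/* Note the headroom: MAX_UID was computed above as
MAX_INSNS_PER_SPLIT * (get_max_uid () + 1), presumably so these
per-insn arrays stay large enough for insns created by splitting. */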
4288
4289 if (reload_completed == 0)
4290 {
4291 sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
4292 sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
4293 bb_dead_regs = ALLOCA_REG_SET ();
4294 bb_live_regs = ALLOCA_REG_SET ();
4295 bzero ((char *) sched_reg_n_calls_crossed, max_regno * sizeof (int));
4296 bzero ((char *) sched_reg_live_length, max_regno * sizeof (int));
4297 }
4298 else
4299 {
4300 sched_reg_n_calls_crossed = 0;
4301 sched_reg_live_length = 0;
4302 bb_dead_regs = 0;
4303 bb_live_regs = 0;
4304 }
4305 init_alias_analysis ();
4306
4307 if (write_symbols != NO_DEBUG)
4308 {
4309 rtx line;
4310
4311 line_note = (rtx *) alloca (max_uid * sizeof (rtx));
4312 bzero ((char *) line_note, max_uid * sizeof (rtx));
4313 line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
4314 bzero ((char *) line_note_head, n_basic_blocks * sizeof (rtx));
4315
4316 /* Determine the line-number at the start of each basic block.
4317 This must be computed and saved now, because after a basic block's
4318 predecessor has been scheduled, it is impossible to accurately
4319 determine the correct line number for the first insn of the block. */
4320
4321 for (b = 0; b < n_basic_blocks; b++)
4322 for (line = basic_block_head[b]; line; line = PREV_INSN (line))
4323 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
4324 {
4325 line_note_head[b] = line;
4326 break;
4327 }
4328 }
4329
4330 bzero ((char *) insn_luid, max_uid * sizeof (int));
4331 bzero ((char *) insn_priority, max_uid * sizeof (int));
4332 bzero ((char *) insn_tick, max_uid * sizeof (int));
4333 bzero ((char *) insn_costs, max_uid * sizeof (short));
4334 bzero ((char *) insn_units, max_uid * sizeof (short));
4335 bzero ((char *) insn_blockage, max_uid * sizeof (unsigned int));
4336 bzero ((char *) insn_ref_count, max_uid * sizeof (int));
4337
4338 /* Schedule each basic block, block by block. */
4339
4340 /* ??? Add a NOTE after the last insn of the last basic block. It is not
4341 known why this is done. */
4342 /* ??? Perhaps it's done to ensure NEXT_TAIL in schedule_block is a
4343 valid insn. */
4344
4345 insn = basic_block_end[n_basic_blocks-1];
4346 if (NEXT_INSN (insn) == 0
4347 || (GET_CODE (insn) != NOTE
4348 && GET_CODE (insn) != CODE_LABEL
4349 /* Don't emit a NOTE if it would end up between an unconditional
4350 jump and a BARRIER. */
4351 && ! (GET_CODE (insn) == JUMP_INSN
4352 && GET_CODE (NEXT_INSN (insn)) == BARRIER)))
4353 emit_note_after (NOTE_INSN_DELETED, basic_block_end[n_basic_blocks-1]);
4354
4355 for (b = 0; b < n_basic_blocks; b++)
4356 {
4357 rtx insn, next;
4358
4359 note_list = 0;
4360
4361 for (insn = basic_block_head[b]; ; insn = next)
4362 {
4363 rtx prev;
4364 rtx set;
4365
4366 /* Can't use `next_real_insn' because that
4367 might go across CODE_LABELS and short-out basic blocks. */
4368 next = NEXT_INSN (insn);
4369 if (GET_CODE (insn) != INSN)
4370 {
4371 if (insn == basic_block_end[b])
4372 break;
4373
4374 continue;
4375 }
4376
4377 /* Don't split no-op move insns. These should silently disappear
4378 later in final. Splitting such insns would break the code
4379 that handles REG_NO_CONFLICT blocks. */
4380 set = single_set (insn);
4381 if (set && rtx_equal_p (SET_SRC (set), SET_DEST (set)))
4382 {
4383 if (insn == basic_block_end[b])
4384 break;
4385
4386 /* Nops get in the way while scheduling, so delete them now if
4387 register allocation has already been done. It is too risky
4388 to try to do this before register allocation, and there are
4389 unlikely to be very many nops then anyway. */
4390 if (reload_completed)
4391 {
4392 PUT_CODE (insn, NOTE);
4393 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4394 NOTE_SOURCE_FILE (insn) = 0;
4395 }
4396
4397 continue;
4398 }
4399
4400 /* Split insns here to get max fine-grain parallelism. */
4401 prev = PREV_INSN (insn);
4402 /* It is probably not worthwhile to try to split again in the
4403 second pass. However, if flag_schedule_insns is not set,
4404 the first and only (if any) scheduling pass is after reload. */
4405 if (reload_completed == 0 || ! flag_schedule_insns)
4406 {
4407 rtx last, first = PREV_INSN (insn);
4408 rtx notes = REG_NOTES (insn);
4409
4410 last = try_split (PATTERN (insn), insn, 1);
4411 if (last != insn)
4412 {
4413 /* try_split returns the NOTE that INSN became. */
4414 first = NEXT_INSN (first);
4415 update_flow_info (notes, first, last, insn);
4416
4417 PUT_CODE (insn, NOTE);
4418 NOTE_SOURCE_FILE (insn) = 0;
4419 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4420 if (insn == basic_block_head[b])
4421 basic_block_head[b] = first;
4422 if (insn == basic_block_end[b])
4423 {
4424 basic_block_end[b] = last;
4425 break;
4426 }
4427 }
4428 }
4429
4430 if (insn == basic_block_end[b])
4431 break;
4432 }
4433
4434 schedule_block (b, dump_file);
4435
4436 #ifdef USE_C_ALLOCA
4437 alloca (0);
4438 #endif
4439 }
4440
4441 /* Reposition the prologue and epilogue notes in case we moved the
4442 prologue/epilogue insns. */
4443 if (reload_completed)
4444 reposition_prologue_and_epilogue_notes (get_insns ());
4445
4446 if (write_symbols != NO_DEBUG)
4447 {
4448 rtx line = 0;
4449 rtx insn = get_insns ();
4450 int active_insn = 0;
4451 int notes = 0;
4452
4453 /* Walk the insns deleting redundant line-number notes. Many of these
4454 are already present. The remainder tend to occur at basic
4455 block boundaries. */
4456 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
4457 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
4458 {
4459 /* If there are no active insns following, INSN is redundant. */
4460 if (active_insn == 0)
4461 {
4462 notes++;
4463 NOTE_SOURCE_FILE (insn) = 0;
4464 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4465 }
4466 /* If the line number is unchanged, LINE is redundant. */
4467 else if (line
4468 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
4469 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
4470 {
4471 notes++;
4472 NOTE_SOURCE_FILE (line) = 0;
4473 NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
4474 line = insn;
4475 }
4476 else
4477 line = insn;
4478 active_insn = 0;
4479 }
4480 else if (! ((GET_CODE (insn) == NOTE
4481 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
4482 || (GET_CODE (insn) == INSN
4483 && (GET_CODE (PATTERN (insn)) == USE
4484 || GET_CODE (PATTERN (insn)) == CLOBBER))))
4485 active_insn++;
4486
4487 if (dump_file && notes)
4488 fprintf (dump_file, ";; deleted %d line-number notes\n", notes);
4489 }
4490
4491 if (reload_completed == 0)
4492 {
4493 int regno;
4494 for (regno = 0; regno < max_regno; regno++)
4495 if (sched_reg_live_length[regno])
4496 {
4497 if (dump_file)
4498 {
4499 if (REG_LIVE_LENGTH (regno) > sched_reg_live_length[regno])
4500 fprintf (dump_file,
4501 ";; register %d life shortened from %d to %d\n",
4502 regno, REG_LIVE_LENGTH (regno),
4503 sched_reg_live_length[regno]);
4504 /* Negative values are special; don't overwrite the current
4505 reg_live_length value if it is negative. */
4506 else if (REG_LIVE_LENGTH (regno) < sched_reg_live_length[regno]
4507 && REG_LIVE_LENGTH (regno) >= 0)
4508 fprintf (dump_file,
4509 ";; register %d life extended from %d to %d\n",
4510 regno, REG_LIVE_LENGTH (regno),
4511 sched_reg_live_length[regno]);
4512
4513 if (! REG_N_CALLS_CROSSED (regno)
4514 && sched_reg_n_calls_crossed[regno])
4515 fprintf (dump_file,
4516 ";; register %d now crosses calls\n", regno);
4517 else if (REG_N_CALLS_CROSSED (regno)
4518 && ! sched_reg_n_calls_crossed[regno]
4519 && REG_BASIC_BLOCK (regno) != REG_BLOCK_GLOBAL)
4520 fprintf (dump_file,
4521 ";; register %d no longer crosses calls\n", regno);
4522
4523 }
4524 /* Negative values are special; don't overwrite the current
4525 reg_live_length value if it is negative. */
4526 if (REG_LIVE_LENGTH (regno) >= 0)
4527 REG_LIVE_LENGTH (regno) = sched_reg_live_length[regno];
4528
4529 /* We can't change the value of reg_n_calls_crossed to zero for
4530 pseudos which are live in more than one block.
4531
4532 This is because combine might have made an optimization which
4533 invalidated basic_block_live_at_start and reg_n_calls_crossed,
4534 but it does not update them. If we update reg_n_calls_crossed
4535 here, the two variables are now inconsistent, and this might
4536 confuse the caller-save code into saving a register that doesn't
4537 need to be saved. This is only a problem when we zero calls
4538 crossed for a pseudo live in multiple basic blocks.
4539
4540 Alternatively, we could try to correctly update basic block live
4541 at start here in sched, but that seems complicated. */
4542 if (sched_reg_n_calls_crossed[regno]
4543 || REG_BASIC_BLOCK (regno) != REG_BLOCK_GLOBAL)
4544 REG_N_CALLS_CROSSED (regno) = sched_reg_n_calls_crossed[regno];
4545 }
4546 }
4547
4548 if (reload_completed == 0)
4549 {
4550 FREE_REG_SET (bb_dead_regs);
4551 FREE_REG_SET (bb_live_regs);
4552 }
4553
4554 }
4555 #endif /* INSN_SCHEDULING */