]> gcc.gnu.org Git - gcc.git/blob - gcc/haifa-sched.c
0fab54ba5e2dfa32fd66b267409a5db0d51c41ac
[gcc.git] / gcc / haifa-sched.c
1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1997 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
4 and currently maintained by, Jim Wilson (wilson@cygnus.com)
5
6 This file is part of GNU CC.
7
8 GNU CC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 GNU CC is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GNU CC; see the file COPYING. If not, write to the Free
20 the Free Software Foundation, 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
22
23
24 /* Instruction scheduling pass.
25
26 This pass implements list scheduling within basic blocks. It is
27 run twice: (1) after flow analysis, but before register allocation,
28 and (2) after register allocation.
29
30 The first run performs interblock scheduling, moving insns between
31 different blocks in the same "region", and the second runs only
32 basic block scheduling.
33
34 Interblock motions performed are useful motions and speculative
35 motions, including speculative loads. Motions requiring code
36 duplication are not supported. The identification of motion type
37 and the check for validity of speculative motions requires
38 construction and analysis of the function's control flow graph.
39 The scheduler works as follows:
40
41 We compute insn priorities based on data dependencies. Flow
42 analysis only creates a fraction of the data-dependencies we must
43 observe: namely, only those dependencies which the combiner can be
44 expected to use. For this pass, we must therefore create the
45 remaining dependencies we need to observe: register dependencies,
46 memory dependencies, dependencies to keep function calls in order,
47 and the dependence between a conditional branch and the setting of
48 condition codes are all dealt with here.
49
50 The scheduler first traverses the data flow graph, starting with
51 the last instruction, and proceeding to the first, assigning values
52 to insn_priority as it goes. This sorts the instructions
53 topologically by data dependence.
54
55 Once priorities have been established, we order the insns using
56 list scheduling. This works as follows: starting with a list of
57 all the ready insns, and sorted according to priority number, we
58 schedule the insn from the end of the list by placing its
59 predecessors in the list according to their priority order. We
60 consider this insn scheduled by setting the pointer to the "end" of
61 the list to point to the previous insn. When an insn has no
62 predecessors, we either queue it until sufficient time has elapsed
63 or add it to the ready list. As the instructions are scheduled or
64 when stalls are introduced, the queue advances and dumps insns into
65 the ready list. When all insns down to the lowest priority have
66 been scheduled, the critical path of the basic block has been made
67 as short as possible. The remaining insns are then scheduled in
68 remaining slots.
69
70 Function unit conflicts are resolved during forward list scheduling
71 by tracking the time when each insn is committed to the schedule
72 and from that, the time the function units it uses must be free.
73 As insns on the ready list are considered for scheduling, those
74 that would result in a blockage of the already committed insns are
75 queued until no blockage will result.
76
77 The following list shows the order in which we want to break ties
78 among insns in the ready list:
79
80 1. choose insn with the longest path to end of bb, ties
81 broken by
82 2. choose insn with least contribution to register pressure,
83 ties broken by
84 3. prefer in-block upon interblock motion, ties broken by
85 4. prefer useful upon speculative motion, ties broken by
86 5. choose insn with largest control flow probability, ties
87 broken by
88 6. choose insn with the least dependences upon the previously
89 scheduled insn, or finally
90 7. choose insn with lowest UID.
91
92 Memory references complicate matters. Only if we can be certain
93 that memory references are not part of the data dependency graph
94 (via true, anti, or output dependence), can we move operations past
95 memory references. To first approximation, reads can be done
96 independently, while writes introduce dependencies. Better
97 approximations will yield fewer dependencies.
98
99 Before reload, an extended analysis of interblock data dependences
100 is required for interblock scheduling. This is performed in
101 compute_block_backward_dependences ().
102
103 Dependencies set up by memory references are treated in exactly the
104 same way as other dependencies, by using LOG_LINKS backward
105 dependences. LOG_LINKS are translated into INSN_DEPEND forward
106 dependences for the purpose of forward list scheduling.
107
108 Having optimized the critical path, we may have also unduly
109 extended the lifetimes of some registers. If an operation requires
110 that constants be loaded into registers, it is certainly desirable
111 to load those constants as early as necessary, but no earlier.
112 I.e., it will not do to load up a bunch of registers at the
113 beginning of a basic block only to use them at the end, if they
114 could be loaded later, since this may result in excessive register
115 utilization.
116
117 Note that since branches are never in basic blocks, but only end
118 basic blocks, this pass will not move branches. But that is ok,
119 since we can use GNU's delayed branch scheduling pass to take care
120 of this case.
121
122 Also note that no further optimizations based on algebraic
123 identities are performed, so this pass would be a good one to
124 perform instruction splitting, such as breaking up a multiply
125 instruction into shifts and adds where that is profitable.
126
127 Given the memory aliasing analysis that this pass should perform,
128 it should be possible to remove redundant stores to memory, and to
129 load values from registers instead of hitting memory.
130
131 Before reload, speculative insns are moved only if a 'proof' exists
132 that no exception will be caused by this, and if no live registers
133 exist that inhibit the motion (live registers constraints are not
134 represented by data dependence edges).
135
136 This pass must update information that subsequent passes expect to
137 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
138 reg_n_calls_crossed, and reg_live_length. Also, basic_block_head,
139 basic_block_end.
140
141 The information in the line number notes is carefully retained by
142 this pass. Notes that refer to the starting and ending of
143 exception regions are also carefully retained by this pass. All
144 other NOTE insns are grouped in their same relative order at the
145 beginning of basic blocks and regions that have been scheduled.
146
147 The main entry point for this pass is schedule_insns(), called for
148 each function. The work of the scheduler is organized in three
149 levels: (1) function level: insns are subject to splitting,
150 control-flow-graph is constructed, regions are computed (after
151 reload, each region is of one block), (2) region level: control
152 flow graph attributes required for interblock scheduling are
153 computed (dominators, reachability, etc.), data dependences and
154 priorities are computed, and (3) block level: insns in the block
155 are actually scheduled. */
156 \f
157 #include <stdio.h>
158 #include "config.h"
159 #include "rtl.h"
160 #include "basic-block.h"
161 #include "regs.h"
162 #include "hard-reg-set.h"
163 #include "flags.h"
164 #include "insn-config.h"
165 #include "insn-attr.h"
166 #include "except.h"
167
168 extern char *reg_known_equiv_p;
169 extern rtx *reg_known_value;
170
171 #ifdef INSN_SCHEDULING
172
173 /* enable interblock scheduling code */
174
175 /* define INTERBLOCK_DEBUG for using the -fsched-max debugging facility */
176 /* #define INTERBLOCK_DEBUG */
177
178 /* target_units bitmask has 1 for each unit in the cpu. It should be
179 possible to compute this variable from the machine description.
180 But currently it is computed by examinning the insn list. Since
181 this is only needed for visualization, it seems an acceptable
182 solution. (For understanding the mapping of bits to units, see
183 definition of function_units[] in "insn-attrtab.c") */
184
185 static int target_units = 0;
186
187 /* issue_rate is the number of insns that can be scheduled in the same
188 machine cycle. It can be defined in the config/mach/mach.h file,
189 otherwise we set it to 1. */
190
191 static int issue_rate;
192
193 #ifndef MACHINE_issue_rate
194 #define get_issue_rate() (1)
195 #endif
196
197 /* sched_debug_count is used for debugging the scheduler by limiting
198 the number of scheduled insns. It is controlled by the option
199 -fsched-max-N (N is a number).
200
201 sched-verbose controls the amount of debugging output the
202 scheduler prints. It is controlled by -fsched-verbose-N:
203 N>0 and no -DSR : the output is directed to stderr.
204 N>=10 will direct the printouts to stderr (regardless of -dSR).
205 N=1: same as -dSR.
206 N=2: bb's probabilities, detailed ready list info, unit/insn info.
207 N=3: rtl at abort point, control-flow, regions info.
208 N=5: dependences info.
209
210 max_rgn_blocks and max_region_insns limit region size for
211 interblock scheduling. They are controlled by
212 -fsched-interblock-max-blocks-N, -fsched-interblock-max-insns-N */
213
214 #define MAX_RGN_BLOCKS 10
215 #define MAX_RGN_INSNS 100
216
217 static int sched_debug_count = -1;
218 static int sched_verbose_param = 0;
219 static int sched_verbose = 0;
220 static int max_rgn_blocks = MAX_RGN_BLOCKS;
221 static int max_rgn_insns = MAX_RGN_INSNS;
222
223 /* nr_inter/spec counts interblock/speculative motion for the function */
224 static int nr_inter, nr_spec;
225
226
227 /* debugging file. all printouts are sent to dump, which is always set,
228 either to stderr, or to the dump listing file (-dRS). */
229 static FILE *dump = 0;
230
231 /* fix_sched_param() is called from toplev.c upon detection
232 of the -fsched-***-N options. */
233
234 void
235 fix_sched_param (param, val)
236 char *param, *val;
237 {
238 if (!strcmp (param, "max"))
239 sched_debug_count = ((sched_debug_count == -1) ?
240 atoi (val) : sched_debug_count);
241 else if (!strcmp (param, "verbose"))
242 sched_verbose_param = atoi (val);
243 else if (!strcmp (param, "interblock-max-blocks"))
244 max_rgn_blocks = atoi (val);
245 else if (!strcmp (param, "interblock-max-insns"))
246 max_rgn_insns = atoi (val);
247 else
248 warning ("fix_sched_param: unknown param: %s", param);
249 }
250
251
252 /* Arrays set up by scheduling for the same respective purposes as
253 similar-named arrays set up by flow analysis. We work with these
254 arrays during the scheduling pass so we can compare values against
255 unscheduled code.
256
257 Values of these arrays are copied at the end of this pass into the
258 arrays set up by flow analysis. */
259 static int *sched_reg_n_calls_crossed;
260 static int *sched_reg_live_length;
261 static int *sched_reg_basic_block;
262
263 /* We need to know the current block number during the post scheduling
264 update of live register information so that we can also update
265 REG_BASIC_BLOCK if a register changes blocks. */
266 static int current_block_num;
267
268 /* Element N is the next insn that sets (hard or pseudo) register
269 N within the current basic block; or zero, if there is no
270 such insn. Needed for new registers which may be introduced
271 by splitting insns. */
272 static rtx *reg_last_uses;
273 static rtx *reg_last_sets;
274 static regset reg_pending_sets;
275 static int reg_pending_sets_all;
276
277 /* Vector indexed by INSN_UID giving the original ordering of the insns. */
278 static int *insn_luid;
279 #define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])
280
281 /* Vector indexed by INSN_UID giving each instruction a priority. */
282 static int *insn_priority;
283 #define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])
284
285 static short *insn_costs;
286 #define INSN_COST(INSN) insn_costs[INSN_UID (INSN)]
287
288 /* Vector indexed by INSN_UID giving an encoding of the function units
289 used. */
290 static short *insn_units;
291 #define INSN_UNIT(INSN) insn_units[INSN_UID (INSN)]
292
293 /* Vector indexed by INSN_UID giving each instruction a register-weight.
294 This weight is an estimation of the insn contribution to registers pressure. */
295 static int *insn_reg_weight;
296 #define INSN_REG_WEIGHT(INSN) (insn_reg_weight[INSN_UID (INSN)])
297
298 /* Vector indexed by INSN_UID giving list of insns which
299 depend upon INSN. Unlike LOG_LINKS, it represents forward dependences. */
300 static rtx *insn_depend;
301 #define INSN_DEPEND(INSN) insn_depend[INSN_UID (INSN)]
302
303 /* Vector indexed by INSN_UID. Initialized to the number of incoming
304 edges in forward dependence graph (= number of LOG_LINKS). As
305 scheduling procedes, dependence counts are decreased. An
306 instruction moves to the ready list when its counter is zero. */
307 static int *insn_dep_count;
308 #define INSN_DEP_COUNT(INSN) (insn_dep_count[INSN_UID (INSN)])
309
310 /* Vector indexed by INSN_UID giving an encoding of the blockage range
311 function. The unit and the range are encoded. */
312 static unsigned int *insn_blockage;
313 #define INSN_BLOCKAGE(INSN) insn_blockage[INSN_UID (INSN)]
314 #define UNIT_BITS 5
315 #define BLOCKAGE_MASK ((1 << BLOCKAGE_BITS) - 1)
316 #define ENCODE_BLOCKAGE(U, R) \
317 ((((U) << UNIT_BITS) << BLOCKAGE_BITS \
318 | MIN_BLOCKAGE_COST (R)) << BLOCKAGE_BITS \
319 | MAX_BLOCKAGE_COST (R))
320 #define UNIT_BLOCKED(B) ((B) >> (2 * BLOCKAGE_BITS))
321 #define BLOCKAGE_RANGE(B) \
322 (((((B) >> BLOCKAGE_BITS) & BLOCKAGE_MASK) << (HOST_BITS_PER_INT / 2)) \
323 | (B) & BLOCKAGE_MASK)
324
325 /* Encodings of the `<name>_unit_blockage_range' function. */
326 #define MIN_BLOCKAGE_COST(R) ((R) >> (HOST_BITS_PER_INT / 2))
327 #define MAX_BLOCKAGE_COST(R) ((R) & ((1 << (HOST_BITS_PER_INT / 2)) - 1))
328
329 #define DONE_PRIORITY -1
330 #define MAX_PRIORITY 0x7fffffff
331 #define TAIL_PRIORITY 0x7ffffffe
332 #define LAUNCH_PRIORITY 0x7f000001
333 #define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
334 #define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)
335
336 /* Vector indexed by INSN_UID giving number of insns referring to this insn. */
337 static int *insn_ref_count;
338 #define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])
339
340 /* Vector indexed by INSN_UID giving line-number note in effect for each
341 insn. For line-number notes, this indicates whether the note may be
342 reused. */
343 static rtx *line_note;
344 #define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])
345
346 /* Vector indexed by basic block number giving the starting line-number
347 for each basic block. */
348 static rtx *line_note_head;
349
350 /* List of important notes we must keep around. This is a pointer to the
351 last element in the list. */
352 static rtx note_list;
353
354 /* Regsets telling whether a given register is live or dead before the last
355 scheduled insn. Must scan the instructions once before scheduling to
356 determine what registers are live or dead at the end of the block. */
357 static regset bb_live_regs;
358
359 /* Regset telling whether a given register is live after the insn currently
360 being scheduled. Before processing an insn, this is equal to bb_live_regs
361 above. This is used so that we can find registers that are newly born/dead
362 after processing an insn. */
363 static regset old_live_regs;
364
365 /* The chain of REG_DEAD notes. REG_DEAD notes are removed from all insns
366 during the initial scan and reused later. If there are not exactly as
367 many REG_DEAD notes in the post scheduled code as there were in the
368 prescheduled code then we trigger an abort because this indicates a bug. */
369 static rtx dead_notes;
370
371 /* Queues, etc. */
372
373 /* An instruction is ready to be scheduled when all insns preceding it
374 have already been scheduled. It is important to ensure that all
375 insns which use its result will not be executed until its result
376 has been computed. An insn is maintained in one of four structures:
377
378 (P) the "Pending" set of insns which cannot be scheduled until
379 their dependencies have been satisfied.
380 (Q) the "Queued" set of insns that can be scheduled when sufficient
381 time has passed.
382 (R) the "Ready" list of unscheduled, uncommitted insns.
383 (S) the "Scheduled" list of insns.
384
385 Initially, all insns are either "Pending" or "Ready" depending on
386 whether their dependencies are satisfied.
387
388 Insns move from the "Ready" list to the "Scheduled" list as they
389 are committed to the schedule. As this occurs, the insns in the
390 "Pending" list have their dependencies satisfied and move to either
391 the "Ready" list or the "Queued" set depending on whether
392 sufficient time has passed to make them ready. As time passes,
393 insns move from the "Queued" set to the "Ready" list. Insns may
394 move from the "Ready" list to the "Queued" set if they are blocked
395 due to a function unit conflict.
396
397 The "Pending" list (P) are the insns in the INSN_DEPEND of the unscheduled
398 insns, i.e., those that are ready, queued, and pending.
399 The "Queued" set (Q) is implemented by the variable `insn_queue'.
400 The "Ready" list (R) is implemented by the variables `ready' and
401 `n_ready'.
402 The "Scheduled" list (S) is the new insn chain built by this pass.
403
404 The transition (R->S) is implemented in the scheduling loop in
405 `schedule_block' when the best insn to schedule is chosen.
406 The transition (R->Q) is implemented in `queue_insn' when an
407 insn is found to to have a function unit conflict with the already
408 committed insns.
409 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
410 insns move from the ready list to the scheduled list.
411 The transition (Q->R) is implemented in 'queue_to_insn' as time
412 passes or stalls are introduced. */
413
414 /* Implement a circular buffer to delay instructions until sufficient
415 time has passed. INSN_QUEUE_SIZE is a power of two larger than
416 MAX_BLOCKAGE and MAX_READY_COST computed by genattr.c. This is the
417 longest time an isnsn may be queued. */
418 static rtx insn_queue[INSN_QUEUE_SIZE];
419 static int q_ptr = 0;
420 static int q_size = 0;
421 #define NEXT_Q(X) (((X)+1) & (INSN_QUEUE_SIZE-1))
422 #define NEXT_Q_AFTER(X, C) (((X)+C) & (INSN_QUEUE_SIZE-1))
423
424 /* Vector indexed by INSN_UID giving the minimum clock tick at which
425 the insn becomes ready. This is used to note timing constraints for
426 insns in the pending list. */
427 static int *insn_tick;
428 #define INSN_TICK(INSN) (insn_tick[INSN_UID (INSN)])
429
430 /* Data structure for keeping track of register information
431 during that register's life. */
432
433 struct sometimes
434 {
435 int regno;
436 int live_length;
437 int calls_crossed;
438 };
439
440 /* Forward declarations. */
441 static void add_dependence PROTO ((rtx, rtx, enum reg_note));
442 static void remove_dependence PROTO ((rtx, rtx));
443 static rtx find_insn_list PROTO ((rtx, rtx));
444 static int insn_unit PROTO ((rtx));
445 static unsigned int blockage_range PROTO ((int, rtx));
446 static void clear_units PROTO ((void));
447 static int actual_hazard_this_instance PROTO ((int, int, rtx, int, int));
448 static void schedule_unit PROTO ((int, rtx, int));
449 static int actual_hazard PROTO ((int, rtx, int, int));
450 static int potential_hazard PROTO ((int, rtx, int));
451 static int insn_cost PROTO ((rtx, rtx, rtx));
452 static int priority PROTO ((rtx));
453 static void free_pending_lists PROTO ((void));
454 static void add_insn_mem_dependence PROTO ((rtx *, rtx *, rtx, rtx));
455 static void flush_pending_lists PROTO ((rtx, int));
456 static void sched_analyze_1 PROTO ((rtx, rtx));
457 static void sched_analyze_2 PROTO ((rtx, rtx));
458 static void sched_analyze_insn PROTO ((rtx, rtx, rtx));
459 static void sched_analyze PROTO ((rtx, rtx));
460 static void sched_note_set PROTO ((int, rtx, int));
461 static int rank_for_schedule PROTO ((rtx *, rtx *));
462 static void swap_sort PROTO ((rtx *, int));
463 static void queue_insn PROTO ((rtx, int));
464 static int schedule_insn PROTO ((rtx, rtx *, int, int));
465 static void create_reg_dead_note PROTO ((rtx, rtx));
466 static void attach_deaths PROTO ((rtx, rtx, int));
467 static void attach_deaths_insn PROTO ((rtx));
468 static int new_sometimes_live PROTO ((struct sometimes *, int, int));
469 static void finish_sometimes_live PROTO ((struct sometimes *, int));
470 static int schedule_block PROTO ((int, int, int));
471 static rtx regno_use_in PROTO ((int, rtx));
472 static void split_hard_reg_notes PROTO ((rtx, rtx, rtx, rtx));
473 static void new_insn_dead_notes PROTO ((rtx, rtx, rtx, rtx));
474 static void update_n_sets PROTO ((rtx, int));
475 static void update_flow_info PROTO ((rtx, rtx, rtx, rtx));
476
477 /* Main entry point of this file. */
478 void schedule_insns PROTO ((FILE *));
479
480 /* Mapping of insns to their original block prior to scheduling. */
481 static int *insn_orig_block;
482 #define INSN_BLOCK(insn) (insn_orig_block[INSN_UID (insn)])
483
484 /* Some insns (e.g. call) are not allowed to move across blocks. */
485 static char *cant_move;
486 #define CANT_MOVE(insn) (cant_move[INSN_UID (insn)])
487
488 /* Control flow graph edges are kept in circular lists. */
489 typedef struct
490 {
491 int from_block;
492 int to_block;
493 int next_in;
494 int next_out;
495 }
496 edge;
497 static edge *edge_table;
498
499 #define NEXT_IN(edge) (edge_table[edge].next_in)
500 #define NEXT_OUT(edge) (edge_table[edge].next_out)
501 #define FROM_BLOCK(edge) (edge_table[edge].from_block)
502 #define TO_BLOCK(edge) (edge_table[edge].to_block)
503
504 /* Number of edges in the control flow graph. (in fact larger than
505 that by 1, since edge 0 is unused.) */
506 static int nr_edges;
507
508 /* Circular list of incoming/outgoing edges of a block */
509 static int *in_edges;
510 static int *out_edges;
511
512 #define IN_EDGES(block) (in_edges[block])
513 #define OUT_EDGES(block) (out_edges[block])
514
515 /* List of labels which cannot be deleted, needed for control
516 flow graph construction. */
517 extern rtx forced_labels;
518
519
520 static char is_cfg_nonregular PROTO ((void));
521 static int uses_reg_or_mem PROTO ((rtx));
522 void debug_control_flow PROTO ((void));
523 static void build_control_flow PROTO ((void));
524 static void build_jmp_edges PROTO ((rtx, int));
525 static void new_edge PROTO ((int, int));
526
527
528 /* A region is the main entity for interblock scheduling: insns
529 are allowed to move between blocks in the same region, along
530 control flow graph edges, in the 'up' direction. */
531 typedef struct
532 {
533 int rgn_nr_blocks; /* number of blocks in region */
534 int rgn_blocks; /* blocks in the region (actually index in rgn_bb_table) */
535 }
536 region;
537
538 /* Number of regions in the procedure */
539 static int nr_regions;
540
541 /* Table of region descriptions */
542 static region *rgn_table;
543
544 /* Array of lists of regions' blocks */
545 static int *rgn_bb_table;
546
547 /* Topological order of blocks in the region (if b2 is reachable from
548 b1, block_to_bb[b2] > block_to_bb[b1]).
549 Note: A basic block is always referred to by either block or b,
550 while its topological order name (in the region) is refered to by
551 bb.
552 */
553 static int *block_to_bb;
554
555 /* The number of the region containing a block. */
556 static int *containing_rgn;
557
558 #define RGN_NR_BLOCKS(rgn) (rgn_table[rgn].rgn_nr_blocks)
559 #define RGN_BLOCKS(rgn) (rgn_table[rgn].rgn_blocks)
560 #define BLOCK_TO_BB(block) (block_to_bb[block])
561 #define CONTAINING_RGN(block) (containing_rgn[block])
562
563 void debug_regions PROTO ((void));
564 static void find_single_block_region PROTO ((void));
565 static void find_rgns PROTO ((void));
566 static int too_large PROTO ((int, int *, int *));
567
568 extern void debug_live PROTO ((int, int));
569
570 /* Blocks of the current region being scheduled. */
571 static int current_nr_blocks;
572 static int current_blocks;
573
574 /* The mapping from bb to block */
575 #define BB_TO_BLOCK(bb) (rgn_bb_table[current_blocks + (bb)])
576
577
578 /* Bit vectors and bitset operations are needed for computations on
579 the control flow graph. */
580
581 typedef unsigned HOST_WIDE_INT *bitset;
582 typedef struct
583 {
584 int *first_member; /* pointer to the list start in bitlst_table. */
585 int nr_members; /* the number of members of the bit list. */
586 }
587 bitlst;
588
589 static int bitlst_table_last;
590 static int bitlst_table_size;
591 static int *bitlst_table;
592
593 static char bitset_member PROTO ((bitset, int, int));
594 static void extract_bitlst PROTO ((bitset, int, bitlst *));
595
596 /* target info declarations.
597
598 The block currently being scheduled is referred to as the "target" block,
599 while other blocks in the region from which insns can be moved to the
600 target are called "source" blocks. The candidate structure holds info
601 about such sources: are they valid? Speculative? Etc. */
602 typedef bitlst bblst;
603 typedef struct
604 {
605 char is_valid;
606 char is_speculative;
607 int src_prob;
608 bblst split_bbs;
609 bblst update_bbs;
610 }
611 candidate;
612
613 static candidate *candidate_table;
614
615 /* A speculative motion requires checking live information on the path
616 from 'source' to 'target'. The split blocks are those to be checked.
617 After a speculative motion, live information should be modified in
618 the 'update' blocks.
619
620 Lists of split and update blocks for each candidate of the current
621 target are in array bblst_table */
622 static int *bblst_table, bblst_size, bblst_last;
623
624 #define IS_VALID(src) ( candidate_table[src].is_valid )
625 #define IS_SPECULATIVE(src) ( candidate_table[src].is_speculative )
626 #define SRC_PROB(src) ( candidate_table[src].src_prob )
627
628 /* The bb being currently scheduled. */
629 static int target_bb;
630
631 /* List of edges. */
632 typedef bitlst edgelst;
633
634 /* target info functions */
635 static void split_edges PROTO ((int, int, edgelst *));
636 static void compute_trg_info PROTO ((int));
637 void debug_candidate PROTO ((int));
638 void debug_candidates PROTO ((int));
639
640
641 /* Bit-set of bbs, where bit 'i' stands for bb 'i'. */
642 typedef bitset bbset;
643
644 /* Number of words of the bbset. */
645 static int bbset_size;
646
647 /* Dominators array: dom[i] contains the bbset of dominators of
648 bb i in the region. */
649 static bbset *dom;
650
651 /* bb 0 is the only region entry */
652 #define IS_RGN_ENTRY(bb) (!bb)
653
654 /* Is bb_src dominated by bb_trg. */
655 #define IS_DOMINATED(bb_src, bb_trg) \
656 ( bitset_member (dom[bb_src], bb_trg, bbset_size) )
657
658 /* Probability: Prob[i] is a float in [0, 1] which is the probability
659 of bb i relative to the region entry. */
660 static float *prob;
661
662 /* The probability of bb_src, relative to bb_trg. Note, that while the
663 'prob[bb]' is a float in [0, 1], this macro returns an integer
664 in [0, 100]. */
665 #define GET_SRC_PROB(bb_src, bb_trg) ((int) (100.0 * (prob[bb_src] / \
666 prob[bb_trg])))
667
668 /* Bit-set of edges, where bit i stands for edge i. */
669 typedef bitset edgeset;
670
671 /* Number of edges in the region. */
672 static int rgn_nr_edges;
673
674 /* Array of size rgn_nr_edges. */
675 static int *rgn_edges;
676
677 /* Number of words in an edgeset. */
678 static int edgeset_size;
679
680 /* Mapping from each edge in the graph to its number in the rgn. */
681 static int *edge_to_bit;
682 #define EDGE_TO_BIT(edge) (edge_to_bit[edge])
683
684 /* The split edges of a source bb is different for each target
685 bb. In order to compute this efficiently, the 'potential-split edges'
686 are computed for each bb prior to scheduling a region. This is actually
687 the split edges of each bb relative to the region entry.
688
689 pot_split[bb] is the set of potential split edges of bb. */
690 static edgeset *pot_split;
691
692 /* For every bb, a set of its ancestor edges. */
693 static edgeset *ancestor_edges;
694
695 static void compute_dom_prob_ps PROTO ((int));
696
697 #define ABS_VALUE(x) (((x)<0)?(-(x)):(x))
698 #define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (INSN_BLOCK (INSN))))
699 #define IS_SPECULATIVE_INSN(INSN) (IS_SPECULATIVE (BLOCK_TO_BB (INSN_BLOCK (INSN))))
700 #define INSN_BB(INSN) (BLOCK_TO_BB (INSN_BLOCK (INSN)))
701
702 /* parameters affecting the decision of rank_for_schedule() */
703 #define MIN_DIFF_PRIORITY 2
704 #define MIN_PROBABILITY 40
705 #define MIN_PROB_DIFF 10
706
707 /* speculative scheduling functions */
708 static int check_live_1 PROTO ((int, rtx));
709 static void update_live_1 PROTO ((int, rtx));
710 static int check_live PROTO ((rtx, int, int));
711 static void update_live PROTO ((rtx, int, int));
712 static void set_spec_fed PROTO ((rtx));
713 static int is_pfree PROTO ((rtx, int, int));
714 static int find_conditional_protection PROTO ((rtx, int));
715 static int is_conditionally_protected PROTO ((rtx, int, int));
716 static int may_trap_exp PROTO ((rtx, int));
717 static int classify_insn PROTO ((rtx));
718 static int is_exception_free PROTO ((rtx, int, int));
719
720 static char find_insn_mem_list PROTO ((rtx, rtx, rtx, rtx));
721 static void compute_block_forward_dependences PROTO ((int));
722 static void init_rgn_data_dependences PROTO ((int));
723 static void add_branch_dependences PROTO ((rtx, rtx));
724 static void compute_block_backward_dependences PROTO ((int));
725 void debug_dependencies PROTO ((void));
726
727 /* Notes handling mechanism:
728 =========================
729 Generally, NOTES are saved before scheduling and restored after scheduling.
730 The scheduler distinguishes between three types of notes:
731
732 (1) LINE_NUMBER notes, generated and used for debugging. Here,
733 before scheduling a region, a pointer to the LINE_NUMBER note is
734 added to the insn following it (in save_line_notes()), and the note
735 is removed (in rm_line_notes() and unlink_line_notes()). After
736 scheduling the region, this pointer is used for regeneration of
737 the LINE_NUMBER note (in restore_line_notes()).
738
739 (2) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
740 Before scheduling a region, a pointer to the note is added to the insn
741 that follows or precedes it. (This happens as part of the data dependence
742 computation). After scheduling an insn, the pointer contained in it is
743 used for regenerating the corresponding note (in reemit_notes).
744
745 (3) All other notes (e.g. INSN_DELETED): Before scheduling a block,
746 these notes are put in a list (in rm_other_notes() and
747 unlink_other_notes ()). After scheduling the block, these notes are
748 inserted at the beginning of the block (in schedule_block()). */
749
750 static rtx unlink_other_notes PROTO ((rtx, rtx));
751 static rtx unlink_line_notes PROTO ((rtx, rtx));
752 static void rm_line_notes PROTO ((int));
753 static void save_line_notes PROTO ((int));
754 static void restore_line_notes PROTO ((int));
755 static void rm_redundant_line_notes PROTO ((void));
756 static void rm_other_notes PROTO ((rtx, rtx));
757 static rtx reemit_notes PROTO ((rtx, rtx));
758
759 static void get_block_head_tail PROTO ((int, rtx *, rtx *));
760
761 static void find_pre_sched_live PROTO ((int));
762 static void find_post_sched_live PROTO ((int));
763 static void update_reg_usage PROTO ((void));
764
765 void debug_ready_list PROTO ((rtx[], int));
766 static void init_target_units PROTO (());
767 static void insn_print_units PROTO ((rtx));
768 static int get_visual_tbl_length PROTO (());
769 static void init_block_visualization PROTO (());
770 static void print_block_visualization PROTO ((int, char *));
771 static void visualize_scheduled_insns PROTO ((int, int));
772 static void visualize_no_unit PROTO ((rtx));
773 static void visualize_stall_cycles PROTO ((int, int));
774 static void print_exp PROTO ((char *, rtx, int));
775 static void print_value PROTO ((char *, rtx, int));
776 static void print_pattern PROTO ((char *, rtx, int));
777 static void print_insn PROTO ((char *, rtx, int));
778 void debug_reg_vector PROTO ((regset));
779
780 static rtx move_insn1 PROTO ((rtx, rtx));
781 static rtx move_insn PROTO ((rtx, rtx));
782 static rtx group_leader PROTO ((rtx));
783 static int set_priorities PROTO ((int));
784 static void init_rtx_vector PROTO ((rtx **, rtx *, int, int));
785 static void schedule_region PROTO ((int));
786 static void split_block_insns PROTO ((int));
787
788 #endif /* INSN_SCHEDULING */
789 \f
790 #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
791
792 /* Helper functions for instruction scheduling. */
793
794 /* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
795 LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the type
796 of dependence that this link represents. */
797
798 static void
799 add_dependence (insn, elem, dep_type)
800 rtx insn;
801 rtx elem;
802 enum reg_note dep_type;
803 {
804 rtx link, next;
805
806 /* Don't depend an insn on itself. */
807 if (insn == elem)
808 return;
809
810 /* If elem is part of a sequence that must be scheduled together, then
811 make the dependence point to the last insn of the sequence.
812 When HAVE_cc0, it is possible for NOTEs to exist between users and
813 setters of the condition codes, so we must skip past notes here.
814 Otherwise, NOTEs are impossible here. */
815
816 next = NEXT_INSN (elem);
817
818 #ifdef HAVE_cc0
819 while (next && GET_CODE (next) == NOTE)
820 next = NEXT_INSN (next);
821 #endif
822
823 if (next && SCHED_GROUP_P (next)
824 && GET_CODE (next) != CODE_LABEL)
825 {
826 /* Notes will never intervene here though, so don't bother checking
827 for them. */
828 /* We must reject CODE_LABELs, so that we don't get confused by one
829 that has LABEL_PRESERVE_P set, which is represented by the same
830 bit in the rtl as SCHED_GROUP_P. A CODE_LABEL can never be
831 SCHED_GROUP_P. */
832 while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next))
833 && GET_CODE (NEXT_INSN (next)) != CODE_LABEL)
834 next = NEXT_INSN (next);
835
836 /* Again, don't depend an insn on itself. */
837 if (insn == next)
838 return;
839
840 /* Make the dependence to NEXT, the last insn of the group, instead
841 of the original ELEM. */
842 elem = next;
843 }
844
845 #ifdef INSN_SCHEDULING
846 /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
847 No need for interblock dependences with calls, since
848 calls are not moved between blocks. Note: the edge where
849 elem is a CALL is still required. */
850 if (GET_CODE (insn) == CALL_INSN
851 && (INSN_BB (elem) != INSN_BB (insn)))
852 return;
853
854 #endif
855
856 /* Check that we don't already have this dependence. */
857 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
858 if (XEXP (link, 0) == elem)
859 {
860 /* If this is a more restrictive type of dependence than the existing
861 one, then change the existing dependence to this type. */
862 if ((int) dep_type < (int) REG_NOTE_KIND (link))
863 PUT_REG_NOTE_KIND (link, dep_type);
864 return;
865 }
866 /* Might want to check one level of transitivity to save conses. */
867
868 link = rtx_alloc (INSN_LIST);
869 /* Insn dependency, not data dependency. */
870 PUT_REG_NOTE_KIND (link, dep_type);
871 XEXP (link, 0) = elem;
872 XEXP (link, 1) = LOG_LINKS (insn);
873 LOG_LINKS (insn) = link;
874 }
875
876 /* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
877 of INSN. Abort if not found. */
878
879 static void
880 remove_dependence (insn, elem)
881 rtx insn;
882 rtx elem;
883 {
884 rtx prev, link;
885 int found = 0;
886
887 for (prev = 0, link = LOG_LINKS (insn); link;
888 prev = link, link = XEXP (link, 1))
889 {
890 if (XEXP (link, 0) == elem)
891 {
892 if (prev)
893 XEXP (prev, 1) = XEXP (link, 1);
894 else
895 LOG_LINKS (insn) = XEXP (link, 1);
896 found = 1;
897 }
898 }
899
900 if (!found)
901 abort ();
902 return;
903 }
904 \f
905 #ifndef INSN_SCHEDULING
906 void
907 schedule_insns (dump_file)
908 FILE *dump_file;
909 {
910 }
911 #else
912 #ifndef __GNUC__
913 #define __inline
914 #endif
915
916 /* Computation of memory dependencies. */
917
918 /* The *_insns and *_mems are paired lists. Each pending memory operation
919 will have a pointer to the MEM rtx on one list and a pointer to the
920 containing insn on the other list in the same place in the list. */
921
922 /* We can't use add_dependence like the old code did, because a single insn
923 may have multiple memory accesses, and hence needs to be on the list
924 once for each memory access. Add_dependence won't let you add an insn
925 to a list more than once. */
926
927 /* An INSN_LIST containing all insns with pending read operations. */
928 static rtx pending_read_insns;
929
930 /* An EXPR_LIST containing all MEM rtx's which are pending reads. */
931 static rtx pending_read_mems;
932
933 /* An INSN_LIST containing all insns with pending write operations. */
934 static rtx pending_write_insns;
935
936 /* An EXPR_LIST containing all MEM rtx's which are pending writes. */
937 static rtx pending_write_mems;
938
939 /* Indicates the combined length of the two pending lists. We must prevent
940 these lists from ever growing too large since the number of dependencies
941 produced is at least O(N*N), and execution time is at least O(4*N*N), as
942 a function of the length of these pending lists. */
943
944 static int pending_lists_length;
945
946 /* An INSN_LIST containing all INSN_LISTs allocated but currently unused. */
947
948 static rtx unused_insn_list;
949
950 /* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused. */
951
952 static rtx unused_expr_list;
953
954 /* The last insn upon which all memory references must depend.
955 This is an insn which flushed the pending lists, creating a dependency
956 between it and all previously pending memory references. This creates
957 a barrier (or a checkpoint) which no memory reference is allowed to cross.
958
959 This includes all non constant CALL_INSNs. When we do interprocedural
960 alias analysis, this restriction can be relaxed.
961 This may also be an INSN that writes memory if the pending lists grow
962 too large. */
963
964 static rtx last_pending_memory_flush;
965
966 /* The last function call we have seen. All hard regs, and, of course,
967 the last function call, must depend on this. */
968
969 static rtx last_function_call;
970
971 /* The LOG_LINKS field of this is a list of insns which use a pseudo register
972 that does not already cross a call. We create dependencies between each
973 of those insn and the next call insn, to ensure that they won't cross a call
974 after scheduling is done. */
975
976 static rtx sched_before_next_call;
977
978 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
979 so that insns independent of the last scheduled insn will be preferred
980 over dependent instructions. */
981
982 static rtx last_scheduled_insn;
983
984 /* Data structures for the computation of data dependences in a regions. We
985 keep one copy of each of the declared above variables for each bb in the
986 region. Before analyzing the data dependences for a bb, its variables
987 are initialized as a function of the variables of its predecessors. When
988 the analysis for a bb completes, we save the contents of each variable X
989 to a corresponding bb_X[bb] variable. For example, pending_read_insns is
990 copied to bb_pending_read_insns[bb]. Another change is that few
991 variables are now a list of insns rather than a single insn:
992 last_pending_memory_flash, last_function_call, reg_last_sets. The
993 manipulation of these variables was changed appropriately. */
994
995 static rtx **bb_reg_last_uses;
996 static rtx **bb_reg_last_sets;
997
998 static rtx *bb_pending_read_insns;
999 static rtx *bb_pending_read_mems;
1000 static rtx *bb_pending_write_insns;
1001 static rtx *bb_pending_write_mems;
1002 static int *bb_pending_lists_length;
1003
1004 static rtx *bb_last_pending_memory_flush;
1005 static rtx *bb_last_function_call;
1006 static rtx *bb_sched_before_next_call;
1007
1008 /* functions for construction of the control flow graph. */
1009
1010 /* Return 1 if control flow graph should not be constructed, 0 otherwise.
1011 Estimate in nr_edges the number of edges on the graph.
1012 We decide not to build the control flow graph if there is possibly more
1013 than one entry to the function, or if computed branches exist. */
1014
1015 static char
1016 is_cfg_nonregular ()
1017 {
1018 int b;
1019 rtx insn;
1020 RTX_CODE code;
1021
1022 rtx nonlocal_label_list = nonlocal_label_rtx_list ();
1023
1024 /* check for non local labels */
1025 if (nonlocal_label_list)
1026 {
1027 return 1;
1028 }
1029
1030 /* check for labels which cannot be deleted */
1031 if (forced_labels)
1032 {
1033 return 1;
1034 }
1035
1036 /* check for labels which probably cannot be deleted */
1037 if (exception_handler_labels)
1038 {
1039 return 1;
1040 }
1041
1042 /* check for labels referred to other thn by jumps */
1043 for (b = 0; b < n_basic_blocks; b++)
1044 for (insn = basic_block_head[b];; insn = NEXT_INSN (insn))
1045 {
1046 code = GET_CODE (insn);
1047 if (GET_RTX_CLASS (code) == 'i')
1048 {
1049 rtx note;
1050
1051 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
1052 if (REG_NOTE_KIND (note) == REG_LABEL)
1053 {
1054 return 1;
1055 }
1056 }
1057
1058 if (insn == basic_block_end[b])
1059 break;
1060 }
1061
1062 nr_edges = 0;
1063
1064 /* check for computed branches */
1065 for (b = 0; b < n_basic_blocks; b++)
1066 {
1067 for (insn = basic_block_head[b];; insn = NEXT_INSN (insn))
1068 {
1069
1070 if (GET_CODE (insn) == JUMP_INSN)
1071 {
1072 rtx pat = PATTERN (insn);
1073 int i;
1074
1075 if (GET_CODE (pat) == PARALLEL)
1076 {
1077 int len = XVECLEN (pat, 0);
1078 int has_use_labelref = 0;
1079
1080 for (i = len - 1; i >= 0; i--)
1081 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
1082 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
1083 == LABEL_REF))
1084 {
1085 nr_edges++;
1086 has_use_labelref = 1;
1087 }
1088
1089 if (!has_use_labelref)
1090 for (i = len - 1; i >= 0; i--)
1091 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
1092 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
1093 && uses_reg_or_mem (SET_SRC (XVECEXP (pat, 0, i))))
1094 {
1095 return 1;
1096 }
1097 }
1098 /* check for branch table */
1099 else if (GET_CODE (pat) == ADDR_VEC
1100 || GET_CODE (pat) == ADDR_DIFF_VEC)
1101 {
1102 int diff_vec_p = GET_CODE (pat) == ADDR_DIFF_VEC;
1103 int len = XVECLEN (pat, diff_vec_p);
1104
1105 nr_edges += len;
1106 }
1107 else
1108 {
1109 /* check for computed branch */
1110 if (GET_CODE (pat) == SET
1111 && SET_DEST (pat) == pc_rtx
1112 && uses_reg_or_mem (SET_SRC (pat)))
1113 {
1114 return 1;
1115 }
1116 }
1117 }
1118
1119 if (insn == basic_block_end[b])
1120 break;
1121 }
1122 }
1123
1124 /* count for the fallthrough edges */
1125 for (b = 0; b < n_basic_blocks; b++)
1126 {
1127 for (insn = PREV_INSN (basic_block_head[b]);
1128 insn && GET_CODE (insn) == NOTE; insn = PREV_INSN (insn))
1129 ;
1130
1131 if (!insn && b != 0)
1132 nr_edges++;
1133 else if (insn && GET_CODE (insn) != BARRIER)
1134 nr_edges++;
1135 }
1136
1137 nr_edges++;
1138
1139 return 0;
1140 }
1141
1142
1143 /* Returns 1 if x uses a reg or a mem (function was taken from flow.c).
1144 x is a target of a jump. Used for the detection of computed
1145 branches. For each label seen, updates the edges estimation
1146 counter nr_edges. */
1147
1148 static int
1149 uses_reg_or_mem (x)
1150 rtx x;
1151 {
1152 enum rtx_code code = GET_CODE (x);
1153 int i, j;
1154 char *fmt;
1155
1156 if (code == REG)
1157 return 1;
1158
1159 if (code == MEM
1160 && !(GET_CODE (XEXP (x, 0)) == SYMBOL_REF
1161 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0))))
1162 return 1;
1163
1164 if (code == IF_THEN_ELSE)
1165 {
1166 if (uses_reg_or_mem (XEXP (x, 1))
1167 || uses_reg_or_mem (XEXP (x, 2)))
1168 return 1;
1169 else
1170 return 0;
1171 }
1172
1173 if (code == LABEL_REF)
1174 {
1175 nr_edges++;
1176
1177 return 0;
1178 }
1179
1180 fmt = GET_RTX_FORMAT (code);
1181 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1182 {
1183 if (fmt[i] == 'e'
1184 && uses_reg_or_mem (XEXP (x, i)))
1185 return 1;
1186
1187 if (fmt[i] == 'E')
1188 for (j = 0; j < XVECLEN (x, i); j++)
1189 if (uses_reg_or_mem (XVECEXP (x, i, j)))
1190 return 1;
1191 }
1192
1193 return 0;
1194 }
1195
1196
1197 /* Print the control flow graph, for debugging purposes.
1198 Callable from the debugger. */
1199
1200 void
1201 debug_control_flow ()
1202 {
1203 int i, e, next;
1204
1205 fprintf (dump, ";; --------- CONTROL FLOW GRAPH --------- \n\n");
1206
1207 for (i = 0; i < n_basic_blocks; i++)
1208 {
1209 fprintf (dump, ";;\tBasic block %d: first insn %d, last %d.\n",
1210 i,
1211 INSN_UID (basic_block_head[i]),
1212 INSN_UID (basic_block_end[i]));
1213
1214 fprintf (dump, ";;\tPredecessor blocks:");
1215 for (e = IN_EDGES (i); e; e = next)
1216 {
1217 fprintf (dump, " %d", FROM_BLOCK (e));
1218
1219 next = NEXT_IN (e);
1220
1221 if (next == IN_EDGES (i))
1222 break;
1223 }
1224
1225 fprintf (dump, "\n;;\tSuccesor blocks:");
1226 for (e = OUT_EDGES (i); e; e = next)
1227 {
1228 fprintf (dump, " %d", TO_BLOCK (e));
1229
1230 next = NEXT_OUT (e);
1231
1232 if (next == OUT_EDGES (i))
1233 break;
1234 }
1235
1236 fprintf (dump, " \n\n");
1237
1238 }
1239 }
1240
1241
1242 /* build the control flow graph. (also set nr_edges accurately) */
1243
1244 static void
1245 build_control_flow ()
1246 {
1247 int i;
1248
1249 nr_edges = 0;
1250 for (i = 0; i < n_basic_blocks; i++)
1251 {
1252 rtx insn;
1253
1254 insn = basic_block_end[i];
1255 if (GET_CODE (insn) == JUMP_INSN)
1256 {
1257 build_jmp_edges (PATTERN (insn), i);
1258 }
1259
1260 for (insn = PREV_INSN (basic_block_head[i]);
1261 insn && GET_CODE (insn) == NOTE; insn = PREV_INSN (insn))
1262 ;
1263
1264 /* build fallthrough edges */
1265 if (!insn && i != 0)
1266 new_edge (i - 1, i);
1267 else if (insn && GET_CODE (insn) != BARRIER)
1268 new_edge (i - 1, i);
1269 }
1270
1271 /* increment by 1, since edge 0 is unused. */
1272 nr_edges++;
1273
1274 }
1275
1276
1277 /* construct edges in the control flow graph, from 'source' block, to
1278 blocks refered to by 'pattern'. */
1279
1280 static
1281 void
1282 build_jmp_edges (pattern, source)
1283 rtx pattern;
1284 int source;
1285 {
1286 register RTX_CODE code;
1287 register int i;
1288 register char *fmt;
1289
1290 code = GET_CODE (pattern);
1291
1292 if (code == LABEL_REF)
1293 {
1294 register rtx label = XEXP (pattern, 0);
1295 register int target;
1296
1297 /* This can happen as a result of a syntax error
1298 and a diagnostic has already been printed. */
1299 if (INSN_UID (label) == 0)
1300 return;
1301
1302 target = INSN_BLOCK (label);
1303 new_edge (source, target);
1304
1305 return;
1306 }
1307
1308 /* proper handling of ADDR_DIFF_VEC: do not add a non-existing edge
1309 from the block containing the branch-on-table, to itself. */
1310 if (code == ADDR_VEC
1311 || code == ADDR_DIFF_VEC)
1312 {
1313 int diff_vec_p = GET_CODE (pattern) == ADDR_DIFF_VEC;
1314 int len = XVECLEN (pattern, diff_vec_p);
1315 int k;
1316
1317 for (k = 0; k < len; k++)
1318 {
1319 rtx tem = XVECEXP (pattern, diff_vec_p, k);
1320
1321 build_jmp_edges (tem, source);
1322 }
1323 }
1324
1325 fmt = GET_RTX_FORMAT (code);
1326 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1327 {
1328 if (fmt[i] == 'e')
1329 build_jmp_edges (XEXP (pattern, i), source);
1330 if (fmt[i] == 'E')
1331 {
1332 register int j;
1333 for (j = 0; j < XVECLEN (pattern, i); j++)
1334 build_jmp_edges (XVECEXP (pattern, i, j), source);
1335 }
1336 }
1337 }
1338
1339
1340 /* construct an edge in the control flow graph, from 'source' to 'target'. */
1341
1342 static void
1343 new_edge (source, target)
1344 int source, target;
1345 {
1346 int e, next_edge;
1347 int curr_edge, fst_edge;
1348
1349 /* check for duplicates */
1350 fst_edge = curr_edge = OUT_EDGES (source);
1351 while (curr_edge)
1352 {
1353 if (FROM_BLOCK (curr_edge) == source
1354 && TO_BLOCK (curr_edge) == target)
1355 {
1356 return;
1357 }
1358
1359 curr_edge = NEXT_OUT (curr_edge);
1360
1361 if (fst_edge == curr_edge)
1362 break;
1363 }
1364
1365 e = ++nr_edges;
1366
1367 FROM_BLOCK (e) = source;
1368 TO_BLOCK (e) = target;
1369
1370 if (OUT_EDGES (source))
1371 {
1372 next_edge = NEXT_OUT (OUT_EDGES (source));
1373 NEXT_OUT (OUT_EDGES (source)) = e;
1374 NEXT_OUT (e) = next_edge;
1375 }
1376 else
1377 {
1378 OUT_EDGES (source) = e;
1379 NEXT_OUT (e) = e;
1380 }
1381
1382 if (IN_EDGES (target))
1383 {
1384 next_edge = NEXT_IN (IN_EDGES (target));
1385 NEXT_IN (IN_EDGES (target)) = e;
1386 NEXT_IN (e) = next_edge;
1387 }
1388 else
1389 {
1390 IN_EDGES (target) = e;
1391 NEXT_IN (e) = e;
1392 }
1393 }
1394
1395
1396 /* BITSET macros for operations on the control flow graph. */
1397
1398 /* Compute bitwise union of two bitsets. */
1399 #define BITSET_UNION(set1, set2, len) \
1400 do { register bitset tp = set1, sp = set2; \
1401 register int i; \
1402 for (i = 0; i < len; i++) \
1403 *(tp++) |= *(sp++); } while (0)
1404
1405 /* Compute bitwise intersection of two bitsets. */
1406 #define BITSET_INTER(set1, set2, len) \
1407 do { register bitset tp = set1, sp = set2; \
1408 register int i; \
1409 for (i = 0; i < len; i++) \
1410 *(tp++) &= *(sp++); } while (0)
1411
1412 /* Compute bitwise difference of two bitsets. */
1413 #define BITSET_DIFFER(set1, set2, len) \
1414 do { register bitset tp = set1, sp = set2; \
1415 register int i; \
1416 for (i = 0; i < len; i++) \
1417 *(tp++) &= ~*(sp++); } while (0)
1418
1419 /* Inverts every bit of bitset 'set' */
1420 #define BITSET_INVERT(set, len) \
1421 do { register bitset tmpset = set; \
1422 register int i; \
1423 for (i = 0; i < len; i++, tmpset++) \
1424 *tmpset = ~*tmpset; } while (0)
1425
1426 /* Turn on the index'th bit in bitset set. */
1427 #define BITSET_ADD(set, index, len) \
1428 { \
1429 if (index >= HOST_BITS_PER_WIDE_INT * len) \
1430 abort (); \
1431 else \
1432 set[index/HOST_BITS_PER_WIDE_INT] |= \
1433 1 << (index % HOST_BITS_PER_WIDE_INT); \
1434 }
1435
1436 /* Turn off the index'th bit in set. */
1437 #define BITSET_REMOVE(set, index, len) \
1438 { \
1439 if (index >= HOST_BITS_PER_WIDE_INT * len) \
1440 abort (); \
1441 else \
1442 set[index/HOST_BITS_PER_WIDE_INT] &= \
1443 ~(1 << (index%HOST_BITS_PER_WIDE_INT)); \
1444 }
1445
1446
1447 /* Check if the index'th bit in bitset set is on. */
1448
1449 static char
1450 bitset_member (set, index, len)
1451 bitset set;
1452 int index, len;
1453 {
1454 if (index >= HOST_BITS_PER_WIDE_INT * len)
1455 abort ();
1456 return (set[index / HOST_BITS_PER_WIDE_INT] &
1457 1 << (index % HOST_BITS_PER_WIDE_INT)) ? 1 : 0;
1458 }
1459
1460
1461 /* Translate a bit-set SET to a list BL of the bit-set members. */
1462
1463 static void
1464 extract_bitlst (set, len, bl)
1465 bitset set;
1466 int len;
1467 bitlst *bl;
1468 {
1469 int i, j, offset;
1470 unsigned HOST_WIDE_INT word;
1471
1472 /* bblst table space is reused in each call to extract_bitlst */
1473 bitlst_table_last = 0;
1474
1475 bl->first_member = &bitlst_table[bitlst_table_last];
1476 bl->nr_members = 0;
1477
1478 for (i = 0; i < len; i++)
1479 {
1480 word = set[i];
1481 offset = i * HOST_BITS_PER_WIDE_INT;
1482 for (j = 0; word; j++)
1483 {
1484 if (word & 1)
1485 {
1486 bitlst_table[bitlst_table_last++] = offset;
1487 (bl->nr_members)++;
1488 }
1489 word >>= 1;
1490 ++offset;
1491 }
1492 }
1493
1494 }
1495
1496
1497 /* functions for the construction of regions */
1498
1499 /* Print the regions, for debugging purposes. Callable from debugger. */
1500
1501 void
1502 debug_regions ()
1503 {
1504 int rgn, bb;
1505
1506 fprintf (dump, "\n;; ------------ REGIONS ----------\n\n");
1507 for (rgn = 0; rgn < nr_regions; rgn++)
1508 {
1509 fprintf (dump, ";;\trgn %d nr_blocks %d:\n", rgn,
1510 rgn_table[rgn].rgn_nr_blocks);
1511 fprintf (dump, ";;\tbb/block: ");
1512
1513 for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
1514 {
1515 current_blocks = RGN_BLOCKS (rgn);
1516
1517 if (bb != BLOCK_TO_BB (BB_TO_BLOCK (bb)))
1518 abort ();
1519
1520 fprintf (dump, " %d/%d ", bb, BB_TO_BLOCK (bb));
1521 }
1522
1523 fprintf (dump, "\n\n");
1524 }
1525 }
1526
1527
1528 /* Build a single block region for each basic block in the function.
1529 This allows for using the same code for interblock and basic block
1530 scheduling. */
1531
1532 static void
1533 find_single_block_region ()
1534 {
1535 int i;
1536
1537 for (i = 0; i < n_basic_blocks; i++)
1538 {
1539 rgn_bb_table[i] = i;
1540 RGN_NR_BLOCKS (i) = 1;
1541 RGN_BLOCKS (i) = i;
1542 CONTAINING_RGN (i) = i;
1543 BLOCK_TO_BB (i) = 0;
1544 }
1545 nr_regions = n_basic_blocks;
1546 }
1547
1548
1549 /* Update number of blocks and the estimate for number of insns
1550 in the region. Return 1 if the region is "too large" for interblock
1551 scheduling (compile time considerations), otherwise return 0. */
1552
1553 static int
1554 too_large (block, num_bbs, num_insns)
1555 int block, *num_bbs, *num_insns;
1556 {
1557 (*num_bbs)++;
1558 (*num_insns) += (INSN_LUID (basic_block_end[block]) -
1559 INSN_LUID (basic_block_head[block]));
1560 if ((*num_bbs > max_rgn_blocks) || (*num_insns > max_rgn_insns))
1561 return 1;
1562 else
1563 return 0;
1564 }
1565
1566
1567 /* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
1568 is still an inner loop. Put in max_hdr[blk] the header of the most inner
1569 loop containing blk. */
1570 #define UPDATE_LOOP_RELATIONS(blk, hdr) \
1571 { \
1572 if (max_hdr[blk] == -1) \
1573 max_hdr[blk] = hdr; \
1574 else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \
1575 inner[hdr] = 0; \
1576 else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \
1577 { \
1578 inner[max_hdr[blk]] = 0; \
1579 max_hdr[blk] = hdr; \
1580 } \
1581 }
1582
1583
1584 /* Find regions for interblock scheduling: a loop-free procedure, a reducible
1585 inner loop, or a basic block not contained in any other region.
1586 The procedures control flow graph is traversed twice.
1587 First traversal, a DFS, finds the headers of inner loops in the graph,
1588 and verifies that there are no unreacable blocks.
1589 Second traversal processes headers of inner loops, checking that the
1590 loop is reducible. The loop blocks that form a region are put into the
1591 region's blocks list in topological order.
1592
1593 The following variables are changed by the function: rgn_nr, rgn_table,
1594 rgn_bb_table, block_to_bb and containing_rgn. */
1595
1596 static void
1597 find_rgns ()
1598 {
1599 int *max_hdr, *dfs_nr, *stack, *queue, *degree;
1600 char *header, *inner, *passed, *in_stack, *in_queue, no_loops = 1;
1601 int node, child, loop_head, i, j, fst_edge, head, tail;
1602 int count = 0, sp, idx = 0, current_edge = out_edges[0];
1603 int num_bbs, num_insns;
1604 int too_large_failure;
1605 char *reachable;
1606
1607 /*
1608 The following data structures are computed by the first traversal and
1609 are used by the second traversal:
1610 header[i] - flag set if the block i is the header of a loop.
1611 inner[i] - initially set. It is reset if the the block i is the header
1612 of a non-inner loop.
1613 max_hdr[i] - the header of the inner loop containing block i.
1614 (for a block i not in an inner loop it may be -1 or the
1615 header of the most inner loop containing the block).
1616
1617 These data structures are used by the first traversal only:
1618 stack - non-recursive DFS implementation which uses a stack of edges.
1619 sp - top of the stack of edges
1620 dfs_nr[i] - the DFS ordering of block i.
1621 in_stack[i] - flag set if the block i is in the DFS stack.
1622
1623 These data structures are used by the second traversal only:
1624 queue - queue containing the blocks of the current region.
1625 head and tail - queue boundaries.
1626 in_queue[i] - flag set if the block i is in queue */
1627
1628 /* function's inner arrays allocation and initialization */
1629 max_hdr = (int *) alloca (n_basic_blocks * sizeof (int));
1630 dfs_nr = (int *) alloca (n_basic_blocks * sizeof (int));
1631 bzero ((int *) dfs_nr, n_basic_blocks * sizeof (int));
1632 stack = (int *) alloca (nr_edges * sizeof (int));
1633 queue = (int *) alloca (n_basic_blocks * sizeof (int));
1634
1635 inner = (char *) alloca (n_basic_blocks * sizeof (char));
1636 header = (char *) alloca (n_basic_blocks * sizeof (char));
1637 bzero ((char *) header, n_basic_blocks * sizeof (char));
1638 passed = (char *) alloca (nr_edges * sizeof (char));
1639 bzero ((char *) passed, nr_edges * sizeof (char));
1640 in_stack = (char *) alloca (nr_edges * sizeof (char));
1641 bzero ((char *) in_stack, nr_edges * sizeof (char));
1642 reachable = (char *) alloca (n_basic_blocks * sizeof (char));
1643 bzero ((char *) reachable, n_basic_blocks * sizeof (char));
1644
1645 in_queue = (char *) alloca (n_basic_blocks * sizeof (char));
1646
1647 for (i = 0; i < n_basic_blocks; i++)
1648 {
1649 inner[i] = 1;
1650 max_hdr[i] = -1;
1651 }
1652
1653 /* First traversal: DFS, finds inner loops in control flow graph */
1654
1655 reachable[0] = 1;
1656 sp = -1;
1657 while (1)
1658 {
1659 if (current_edge == 0 || passed[current_edge])
1660 {
1661 /* Here, if current_edge < 0, this is a leaf block.
1662 Otherwise current_edge was already passed. Note that in
1663 the latter case, not only current_edge but also all its
1664 NEXT_OUT edges are also passed. We have to "climb up on
1665 edges in the stack", looking for the first (already
1666 passed) edge whose NEXT_OUT was not passed yet. */
1667
1668 while (sp >= 0 && (current_edge == 0 || passed[current_edge]))
1669 {
1670 current_edge = stack[sp--];
1671 node = FROM_BLOCK (current_edge);
1672 child = TO_BLOCK (current_edge);
1673 in_stack[child] = 0;
1674 if (max_hdr[child] >= 0 && in_stack[max_hdr[child]])
1675 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
1676 current_edge = NEXT_OUT (current_edge);
1677 }
1678
1679 /* stack empty - the whole graph is traversed. */
1680 if (sp < 0 && passed[current_edge])
1681 break;
1682 continue;
1683 }
1684
1685 node = FROM_BLOCK (current_edge);
1686 dfs_nr[node] = ++count;
1687 in_stack[node] = 1;
1688 child = TO_BLOCK (current_edge);
1689 reachable[child] = 1;
1690
1691 /* found a loop header */
1692 if (in_stack[child])
1693 {
1694 no_loops = 0;
1695 header[child] = 1;
1696 max_hdr[child] = child;
1697 UPDATE_LOOP_RELATIONS (node, child);
1698 passed[current_edge] = 1;
1699 current_edge = NEXT_OUT (current_edge);
1700 continue;
1701 }
1702
1703 /* the child was already visited once, no need to go down from
1704 it, everything is traversed there. */
1705 if (dfs_nr[child])
1706 {
1707 if (max_hdr[child] >= 0 && in_stack[max_hdr[child]])
1708 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
1709 passed[current_edge] = 1;
1710 current_edge = NEXT_OUT (current_edge);
1711 continue;
1712 }
1713
1714 /* this is a step down in the dfs traversal */
1715 stack[++sp] = current_edge;
1716 passed[current_edge] = 1;
1717 current_edge = OUT_EDGES (child);
1718 } /* while (1); */
1719
1720 /* if there are unreachable blocks, or more than one entry to
1721 the subroutine, give up on interblock scheduling */
1722 for (i = 1; i < n_basic_blocks; i++)
1723 {
1724 if (reachable[i] == 0)
1725 {
1726 find_single_block_region ();
1727 if (sched_verbose >= 3)
1728 fprintf (stderr, "sched: warning: found an unreachable block %d \n", i);
1729 return;
1730 }
1731 }
1732
1733 /* Second travsersal: find reducible inner loops, and sort
1734 topologically the blocks of each region */
1735 degree = dfs_nr; /* reuse dfs_nr array - it is not needed anymore */
1736 bzero ((char *) in_queue, n_basic_blocks * sizeof (char));
1737
1738 if (no_loops)
1739 header[0] = 1;
1740
1741 /* compute the in-degree of every block in the graph */
1742 for (i = 0; i < n_basic_blocks; i++)
1743 {
1744 fst_edge = IN_EDGES (i);
1745 if (fst_edge > 0)
1746 {
1747 degree[i] = 1;
1748 current_edge = NEXT_IN (fst_edge);
1749 while (fst_edge != current_edge)
1750 {
1751 ++degree[i];
1752 current_edge = NEXT_IN (current_edge);
1753 }
1754 }
1755 else
1756 degree[i] = 0;
1757 }
1758
1759 /* pass through all graph blocks, looking for headers of inner loops */
1760 for (i = 0; i < n_basic_blocks; i++)
1761 {
1762
1763 if (header[i] && inner[i])
1764 {
1765
1766 /* i is a header of a potentially reducible inner loop, or
1767 block 0 in a subroutine with no loops at all */
1768 head = tail = -1;
1769 too_large_failure = 0;
1770 loop_head = max_hdr[i];
1771
1772 /* decrease in_degree of all i's successors, (this is needed
1773 for the topological ordering) */
1774 fst_edge = current_edge = OUT_EDGES (i);
1775 if (fst_edge > 0)
1776 {
1777 do
1778 {
1779 --degree[TO_BLOCK (current_edge)];
1780 current_edge = NEXT_OUT (current_edge);
1781 }
1782 while (fst_edge != current_edge);
1783 }
1784
1785 /* estimate # insns, and count # blocks in the region. */
1786 num_bbs = 1;
1787 num_insns = INSN_LUID (basic_block_end[i]) - INSN_LUID (basic_block_head[i]);
1788
1789
1790 /* find all loop latches, if it is a true loop header, or
1791 all leaves if the graph has no loops at all */
1792 if (no_loops)
1793 {
1794 for (j = 0; j < n_basic_blocks; j++)
1795 if (out_edges[j] == 0) /* a leaf */
1796 {
1797 queue[++tail] = j;
1798 in_queue[j] = 1;
1799
1800 if (too_large (j, &num_bbs, &num_insns))
1801 {
1802 too_large_failure = 1;
1803 break;
1804 }
1805 }
1806 }
1807 else
1808 {
1809 fst_edge = current_edge = IN_EDGES (i);
1810 do
1811 {
1812 node = FROM_BLOCK (current_edge);
1813 if (max_hdr[node] == loop_head && node != i) /* a latch */
1814 {
1815 queue[++tail] = node;
1816 in_queue[node] = 1;
1817
1818 if (too_large (node, &num_bbs, &num_insns))
1819 {
1820 too_large_failure = 1;
1821 break;
1822 }
1823 }
1824 current_edge = NEXT_IN (current_edge);
1825 }
1826 while (fst_edge != current_edge);
1827 }
1828
1829 /* Put in queue[] all blocks that belong to the loop. Check
1830 that the loop is reducible, traversing back from the loop
1831 latches up to the loop header. */
1832 while (head < tail && !too_large_failure)
1833 {
1834 child = queue[++head];
1835 fst_edge = current_edge = IN_EDGES (child);
1836 do
1837 {
1838 node = FROM_BLOCK (current_edge);
1839
1840 if (max_hdr[node] != loop_head)
1841 { /* another entry to loop, it is irreducible */
1842 tail = -1;
1843 break;
1844 }
1845 else if (!in_queue[node] && node != i)
1846 {
1847 queue[++tail] = node;
1848 in_queue[node] = 1;
1849
1850 if (too_large (node, &num_bbs, &num_insns))
1851 {
1852 too_large_failure = 1;
1853 break;
1854 }
1855 }
1856 current_edge = NEXT_IN (current_edge);
1857 }
1858 while (fst_edge != current_edge);
1859 }
1860
1861 if (tail >= 0 && !too_large_failure)
1862 {
1863 /* Place the loop header into list of region blocks */
1864 degree[i] = -1;
1865 rgn_bb_table[idx] = i;
1866 RGN_NR_BLOCKS (nr_regions) = num_bbs;
1867 RGN_BLOCKS (nr_regions) = idx++;
1868 CONTAINING_RGN (i) = nr_regions;
1869 BLOCK_TO_BB (i) = count = 0;
1870
1871 /* remove blocks from queue[], (in topological order), when
1872 their in_degree becomes 0. We scan the queue over and
1873 over again until it is empty. Note: there may be a more
1874 efficient way to do it. */
1875 while (tail >= 0)
1876 {
1877 if (head < 0)
1878 head = tail;
1879 child = queue[head];
1880 if (degree[child] == 0)
1881 {
1882 degree[child] = -1;
1883 rgn_bb_table[idx++] = child;
1884 BLOCK_TO_BB (child) = ++count;
1885 CONTAINING_RGN (child) = nr_regions;
1886 queue[head] = queue[tail--];
1887 fst_edge = current_edge = OUT_EDGES (child);
1888
1889 if (fst_edge > 0)
1890 {
1891 do
1892 {
1893 --degree[TO_BLOCK (current_edge)];
1894 current_edge = NEXT_OUT (current_edge);
1895 }
1896 while (fst_edge != current_edge);
1897 }
1898 }
1899 else
1900 --head;
1901 }
1902 ++nr_regions;
1903 }
1904 }
1905 }
1906
1907 /* define each of all other blocks as a region itself */
1908 for (i = 0; i < n_basic_blocks; i++)
1909 if (degree[i] >= 0)
1910 {
1911 rgn_bb_table[idx] = i;
1912 RGN_NR_BLOCKS (nr_regions) = 1;
1913 RGN_BLOCKS (nr_regions) = idx++;
1914 CONTAINING_RGN (i) = nr_regions++;
1915 BLOCK_TO_BB (i) = 0;
1916 }
1917
1918 } /* find_rgns */
1919
1920
1921 /* functions for regions scheduling information */
1922
1923 /* Compute dominators, probability, and potential-split-edges of bb.
1924 Assume that these values were already computed for bb's predecessors. */
1925
1926 static void
1927 compute_dom_prob_ps (bb)
1928 int bb;
1929 {
1930 int nxt_in_edge, fst_in_edge, pred;
1931 int fst_out_edge, nxt_out_edge, nr_out_edges, nr_rgn_out_edges;
1932
1933 prob[bb] = 0.0;
1934 if (IS_RGN_ENTRY (bb))
1935 {
1936 BITSET_ADD (dom[bb], 0, bbset_size);
1937 prob[bb] = 1.0;
1938 return;
1939 }
1940
1941 fst_in_edge = nxt_in_edge = IN_EDGES (BB_TO_BLOCK (bb));
1942
1943 /* intialize dom[bb] to '111..1' */
1944 BITSET_INVERT (dom[bb], bbset_size);
1945
1946 do
1947 {
1948 pred = FROM_BLOCK (nxt_in_edge);
1949 BITSET_INTER (dom[bb], dom[BLOCK_TO_BB (pred)], bbset_size);
1950
1951 BITSET_UNION (ancestor_edges[bb], ancestor_edges[BLOCK_TO_BB (pred)],
1952 edgeset_size);
1953
1954 BITSET_ADD (ancestor_edges[bb], EDGE_TO_BIT (nxt_in_edge), edgeset_size);
1955
1956 nr_out_edges = 1;
1957 nr_rgn_out_edges = 0;
1958 fst_out_edge = OUT_EDGES (pred);
1959 nxt_out_edge = NEXT_OUT (fst_out_edge);
1960 BITSET_UNION (pot_split[bb], pot_split[BLOCK_TO_BB (pred)],
1961 edgeset_size);
1962
1963 BITSET_ADD (pot_split[bb], EDGE_TO_BIT (fst_out_edge), edgeset_size);
1964
1965 /* the successor doesn't belong the region? */
1966 if (CONTAINING_RGN (TO_BLOCK (fst_out_edge)) !=
1967 CONTAINING_RGN (BB_TO_BLOCK (bb)))
1968 ++nr_rgn_out_edges;
1969
1970 while (fst_out_edge != nxt_out_edge)
1971 {
1972 ++nr_out_edges;
1973 /* the successor doesn't belong the region? */
1974 if (CONTAINING_RGN (TO_BLOCK (nxt_out_edge)) !=
1975 CONTAINING_RGN (BB_TO_BLOCK (bb)))
1976 ++nr_rgn_out_edges;
1977 BITSET_ADD (pot_split[bb], EDGE_TO_BIT (nxt_out_edge), edgeset_size);
1978 nxt_out_edge = NEXT_OUT (nxt_out_edge);
1979
1980 }
1981
1982 /* now nr_rgn_out_edges is the number of region-exit edges from pred,
1983 and nr_out_edges will be the number of pred out edges not leaving
1984 the region. */
1985 nr_out_edges -= nr_rgn_out_edges;
1986 if (nr_rgn_out_edges > 0)
1987 prob[bb] += 0.9 * prob[BLOCK_TO_BB (pred)] / nr_out_edges;
1988 else
1989 prob[bb] += prob[BLOCK_TO_BB (pred)] / nr_out_edges;
1990 nxt_in_edge = NEXT_IN (nxt_in_edge);
1991 }
1992 while (fst_in_edge != nxt_in_edge);
1993
1994 BITSET_ADD (dom[bb], bb, bbset_size);
1995 BITSET_DIFFER (pot_split[bb], ancestor_edges[bb], edgeset_size);
1996
1997 if (sched_verbose >= 2)
1998 fprintf (dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb), (int) (100.0 * prob[bb]));
1999 } /* compute_dom_prob_ps */
2000
2001 /* functions for target info */
2002
2003 /* Compute in BL the list of split-edges of bb_src relatively to bb_trg.
2004 Note that bb_trg dominates bb_src. */
2005
2006 static void
2007 split_edges (bb_src, bb_trg, bl)
2008 int bb_src;
2009 int bb_trg;
2010 edgelst *bl;
2011 {
2012 int es = edgeset_size;
2013 edgeset src = (edgeset) alloca (es * sizeof (HOST_WIDE_INT));
2014
2015 while (es--)
2016 src[es] = (pot_split[bb_src])[es];
2017 BITSET_DIFFER (src, pot_split[bb_trg], edgeset_size);
2018 extract_bitlst (src, edgeset_size, bl);
2019 }
2020
2021
2022 /* Find the valid candidate-source-blocks for the target block TRG, compute
2023 their probability, and check if they are speculative or not.
2024 For speculative sources, compute their update-blocks and split-blocks. */
2025
2026 static void
2027 compute_trg_info (trg)
2028 int trg;
2029 {
2030 register candidate *sp;
2031 edgelst el;
2032 int check_block, update_idx;
2033 int i, j, k, fst_edge, nxt_edge;
2034
2035 /* define some of the fields for the target bb as well */
2036 sp = candidate_table + trg;
2037 sp->is_valid = 1;
2038 sp->is_speculative = 0;
2039 sp->src_prob = 100;
2040
2041 for (i = trg + 1; i < current_nr_blocks; i++)
2042 {
2043 sp = candidate_table + i;
2044
2045 sp->is_valid = IS_DOMINATED (i, trg);
2046 if (sp->is_valid)
2047 {
2048 sp->src_prob = GET_SRC_PROB (i, trg);
2049 sp->is_valid = (sp->src_prob >= MIN_PROBABILITY);
2050 }
2051
2052 if (sp->is_valid)
2053 {
2054 split_edges (i, trg, &el);
2055 sp->is_speculative = (el.nr_members) ? 1 : 0;
2056 if (sp->is_speculative && !flag_schedule_speculative)
2057 sp->is_valid = 0;
2058 }
2059
2060 if (sp->is_valid)
2061 {
2062 sp->split_bbs.first_member = &bblst_table[bblst_last];
2063 sp->split_bbs.nr_members = el.nr_members;
2064 for (j = 0; j < el.nr_members; bblst_last++, j++)
2065 bblst_table[bblst_last] =
2066 TO_BLOCK (rgn_edges[el.first_member[j]]);
2067 sp->update_bbs.first_member = &bblst_table[bblst_last];
2068 update_idx = 0;
2069 for (j = 0; j < el.nr_members; j++)
2070 {
2071 check_block = FROM_BLOCK (rgn_edges[el.first_member[j]]);
2072 fst_edge = nxt_edge = OUT_EDGES (check_block);
2073 do
2074 {
2075 for (k = 0; k < el.nr_members; k++)
2076 if (EDGE_TO_BIT (nxt_edge) == el.first_member[k])
2077 break;
2078
2079 if (k >= el.nr_members)
2080 {
2081 bblst_table[bblst_last++] = TO_BLOCK (nxt_edge);
2082 update_idx++;
2083 }
2084
2085 nxt_edge = NEXT_OUT (nxt_edge);
2086 }
2087 while (fst_edge != nxt_edge);
2088 }
2089 sp->update_bbs.nr_members = update_idx;
2090
2091 }
2092 else
2093 {
2094 sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0;
2095
2096 sp->is_speculative = 0;
2097 sp->src_prob = 0;
2098 }
2099 }
2100 } /* compute_trg_info */
2101
2102
2103 /* Print candidates info, for debugging purposes. Callable from debugger. */
2104
2105 void
2106 debug_candidate (i)
2107 int i;
2108 {
2109 if (!candidate_table[i].is_valid)
2110 return;
2111
2112 if (candidate_table[i].is_speculative)
2113 {
2114 int j;
2115 fprintf (dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i);
2116
2117 fprintf (dump, "split path: ");
2118 for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)
2119 {
2120 int b = candidate_table[i].split_bbs.first_member[j];
2121
2122 fprintf (dump, " %d ", b);
2123 }
2124 fprintf (dump, "\n");
2125
2126 fprintf (dump, "update path: ");
2127 for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)
2128 {
2129 int b = candidate_table[i].update_bbs.first_member[j];
2130
2131 fprintf (dump, " %d ", b);
2132 }
2133 fprintf (dump, "\n");
2134 }
2135 else
2136 {
2137 fprintf (dump, " src %d equivalent\n", BB_TO_BLOCK (i));
2138 }
2139 }
2140
2141
2142 /* Print candidates info, for debugging purposes. Callable from debugger. */
2143
2144 void
2145 debug_candidates (trg)
2146 int trg;
2147 {
2148 int i;
2149
2150 fprintf (dump, "----------- candidate table: target: b=%d bb=%d ---\n",
2151 BB_TO_BLOCK (trg), trg);
2152 for (i = trg + 1; i < current_nr_blocks; i++)
2153 debug_candidate (i);
2154 }
2155
2156
2157 /* functions for speculative scheduing */
2158
2159 /* Return 0 if x is a set of a register alive in the beginning of one
2160 of the split-blocks of src, otherwise return 1. */
2161
2162 static int
2163 check_live_1 (src, x)
2164 int src;
2165 rtx x;
2166 {
2167 register i;
2168 register int regno;
2169 register rtx reg = SET_DEST (x);
2170
2171 if (reg == 0)
2172 return 1;
2173
2174 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
2175 || GET_CODE (reg) == SIGN_EXTRACT
2176 || GET_CODE (reg) == STRICT_LOW_PART)
2177 reg = XEXP (reg, 0);
2178
2179 if (GET_CODE (reg) != REG)
2180 return 1;
2181
2182 regno = REGNO (reg);
2183
2184 if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
2185 {
2186 /* Global registers are assumed live */
2187 return 0;
2188 }
2189 else
2190 {
2191 if (regno < FIRST_PSEUDO_REGISTER)
2192 {
2193 /* check for hard registers */
2194 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
2195 while (--j >= 0)
2196 {
2197 for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
2198 {
2199 int b = candidate_table[src].split_bbs.first_member[i];
2200
2201 if (REGNO_REG_SET_P (basic_block_live_at_start[b], regno + j))
2202 {
2203 return 0;
2204 }
2205 }
2206 }
2207 }
2208 else
2209 {
2210 /* check for psuedo registers */
2211 for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
2212 {
2213 int b = candidate_table[src].split_bbs.first_member[i];
2214
2215 if (REGNO_REG_SET_P (basic_block_live_at_start[b], regno))
2216 {
2217 return 0;
2218 }
2219 }
2220 }
2221 }
2222
2223 return 1;
2224 }
2225
2226
2227 /* If x is a set of a register R, mark that R is alive in the beginning
2228 of every update-block of src. */
2229
2230 static void
2231 update_live_1 (src, x)
2232 int src;
2233 rtx x;
2234 {
2235 register i;
2236 register int regno;
2237 register rtx reg = SET_DEST (x);
2238
2239 if (reg == 0)
2240 return;
2241
2242 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
2243 || GET_CODE (reg) == SIGN_EXTRACT
2244 || GET_CODE (reg) == STRICT_LOW_PART)
2245 reg = XEXP (reg, 0);
2246
2247 if (GET_CODE (reg) != REG)
2248 return;
2249
2250 /* Global registers are always live, so the code below does not apply
2251 to them. */
2252
2253 regno = REGNO (reg);
2254
2255 if (regno >= FIRST_PSEUDO_REGISTER || !global_regs[regno])
2256 {
2257 if (regno < FIRST_PSEUDO_REGISTER)
2258 {
2259 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
2260 while (--j >= 0)
2261 {
2262 for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
2263 {
2264 int b = candidate_table[src].update_bbs.first_member[i];
2265
2266 SET_REGNO_REG_SET (basic_block_live_at_start[b], regno + j);
2267 }
2268 }
2269 }
2270 else
2271 {
2272 for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
2273 {
2274 int b = candidate_table[src].update_bbs.first_member[i];
2275
2276 SET_REGNO_REG_SET (basic_block_live_at_start[b], regno);
2277 }
2278 }
2279 }
2280 }
2281
2282
2283 /* Return 1 if insn can be speculatively moved from block src to trg,
2284 otherwise return 0. Called before first insertion of insn to
2285 ready-list or before the scheduling. */
2286
2287 static int
2288 check_live (insn, src, trg)
2289 rtx insn;
2290 int src;
2291 int trg;
2292 {
2293 /* find the registers set by instruction */
2294 if (GET_CODE (PATTERN (insn)) == SET
2295 || GET_CODE (PATTERN (insn)) == CLOBBER)
2296 return check_live_1 (src, PATTERN (insn));
2297 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2298 {
2299 int j;
2300 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2301 if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2302 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2303 && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j)))
2304 return 0;
2305
2306 return 1;
2307 }
2308
2309 return 1;
2310 }
2311
2312
2313 /* Update the live registers info after insn was moved speculatively from
2314 block src to trg. */
2315
2316 static void
2317 update_live (insn, src, trg)
2318 rtx insn;
2319 int src, trg;
2320 {
2321 /* find the registers set by instruction */
2322 if (GET_CODE (PATTERN (insn)) == SET
2323 || GET_CODE (PATTERN (insn)) == CLOBBER)
2324 update_live_1 (src, PATTERN (insn));
2325 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2326 {
2327 int j;
2328 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2329 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2330 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2331 update_live_1 (src, XVECEXP (PATTERN (insn), 0, j));
2332 }
2333 }
2334
2335 /* Exception Free Loads:
2336
2337 We define five classes of speculative loads: IFREE, IRISKY,
2338 PFREE, PRISKY, and MFREE.
2339
2340 IFREE loads are loads that are proved to be exception-free, just
2341 by examining the load insn. Examples for such loads are loads
2342 from TOC and loads of global data.
2343
2344 IRISKY loads are loads that are proved to be exception-risky,
2345 just by examining the load insn. Examples for such loads are
2346 volatile loads and loads from shared memory.
2347
2348 PFREE loads are loads for which we can prove, by examining other
2349 insns, that they are exception-free. Currently, this class consists
2350 of loads for which we are able to find a "similar load", either in
2351 the target block, or, if only one split-block exists, in that split
2352 block. Load2 is similar to load1 if both have same single base
2353 register. We identify only part of the similar loads, by finding
2354 an insn upon which both load1 and load2 have a DEF-USE dependence.
2355
2356 PRISKY loads are loads for which we can prove, by examining other
2357 insns, that they are exception-risky. Currently we have two proofs for
2358 such loads. The first proof detects loads that are probably guarded by a
2359 test on the memory address. This proof is based on the
2360 backward and forward data dependence information for the region.
2361 Let load-insn be the examined load.
2362 Load-insn is PRISKY iff ALL the following hold:
2363
2364 - insn1 is not in the same block as load-insn
2365 - there is a DEF-USE dependence chain (insn1, ..., load-insn)
2366 - test-insn is either a compare or a branch, not in the same block as load-insn
2367 - load-insn is reachable from test-insn
2368 - there is a DEF-USE dependence chain (insn1, ..., test-insn)
2369
2370 This proof might fail when the compare and the load are fed
2371 by an insn not in the region. To solve this, we will add to this
2372 group all loads that have no input DEF-USE dependence.
2373
2374 The second proof detects loads that are directly or indirectly
2375 fed by a speculative load. This proof is affected by the
2376 scheduling process. We will use the flag fed_by_spec_load.
2377 Initially, all insns have this flag reset. After a speculative
2378 motion of an insn, if insn is either a load, or marked as
2379 fed_by_spec_load, we will also mark as fed_by_spec_load every
2380 insn1 for which a DEF-USE dependence (insn, insn1) exists. A
2381 load which is fed_by_spec_load is also PRISKY.
2382
2383 MFREE (maybe-free) loads are all the remaining loads. They may be
2384 exception-free, but we cannot prove it.
2385
2386 Now, all loads in IFREE and PFREE classes are considered
2387 exception-free, while all loads in IRISKY and PRISKY classes are
2388 considered exception-risky. As for loads in the MFREE class,
2389 these are considered either exception-free or exception-risky,
2390 depending on whether we are pessimistic or optimistic. We have
2391 to take the pessimistic approach to assure the safety of
2392 speculative scheduling, but we can take the optimistic approach
2393 by invoking the -fsched_spec_load_dangerous option. */
2394
2395 enum INSN_TRAP_CLASS
2396 {
2397 TRAP_FREE = 0, IFREE = 1, PFREE_CANDIDATE = 2,
2398 PRISKY_CANDIDATE = 3, IRISKY = 4, TRAP_RISKY = 5
2399 };
2400
2401 #define WORST_CLASS(class1, class2) \
2402 ((class1 > class2) ? class1 : class2)
2403
2404 /* Indexed by INSN_UID, and set if there's DEF-USE dependence between */
2405 /* some speculatively moved load insn and this one. */
2406 char *fed_by_spec_load;
2407 char *is_load_insn;
2408
2409 /* Non-zero if block bb_to is equal to, or reachable from block bb_from. */
2410 #define IS_REACHABLE(bb_from, bb_to) \
2411 (bb_from == bb_to \
2412 || IS_RGN_ENTRY (bb_from) \
2413 || (bitset_member (ancestor_edges[bb_to], \
2414 EDGE_TO_BIT (IN_EDGES (BB_TO_BLOCK (bb_from))), \
2415 edgeset_size)))
2416 #define FED_BY_SPEC_LOAD(insn) (fed_by_spec_load[INSN_UID (insn)])
2417 #define IS_LOAD_INSN(insn) (is_load_insn[INSN_UID (insn)])
2418
2419 /* Non-zero iff the address is comprised from at most 1 register */
2420 #define CONST_BASED_ADDRESS_P(x) \
2421 (GET_CODE (x) == REG \
2422 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
2423 || (GET_CODE (x) == LO_SUM)) \
2424 && (GET_CODE (XEXP (x, 0)) == CONST_INT \
2425 || GET_CODE (XEXP (x, 1)) == CONST_INT)))
2426
2427 /* Turns on the fed_by_spec_load flag for insns fed by load_insn. */
2428
2429 static void
2430 set_spec_fed (load_insn)
2431 rtx load_insn;
2432 {
2433 rtx link;
2434
2435 for (link = INSN_DEPEND (load_insn); link; link = XEXP (link, 1))
2436 if (GET_MODE (link) == VOIDmode)
2437 FED_BY_SPEC_LOAD (XEXP (link, 0)) = 1;
2438 } /* set_spec_fed */
2439
2440 /* On the path from the insn to load_insn_bb, find a conditional branch */
2441 /* depending on insn, that guards the speculative load. */
2442
2443 static int
2444 find_conditional_protection (insn, load_insn_bb)
2445 rtx insn;
2446 int load_insn_bb;
2447 {
2448 rtx link;
2449
2450 /* iterate through DEF-USE forward dependences */
2451 for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
2452 {
2453 rtx next = XEXP (link, 0);
2454 if ((CONTAINING_RGN (INSN_BLOCK (next)) ==
2455 CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
2456 && IS_REACHABLE (INSN_BB (next), load_insn_bb)
2457 && load_insn_bb != INSN_BB (next)
2458 && GET_MODE (link) == VOIDmode
2459 && (GET_CODE (next) == JUMP_INSN
2460 || find_conditional_protection (next, load_insn_bb)))
2461 return 1;
2462 }
2463 return 0;
2464 } /* find_conditional_protection */
2465
2466 /* Returns 1 if the same insn1 that participates in the computation
2467 of load_insn's address is feeding a conditional branch that is
2468 guarding on load_insn. This is true if we find a the two DEF-USE
2469 chains:
2470 insn1 -> ... -> conditional-branch
2471 insn1 -> ... -> load_insn,
2472 and if a flow path exist:
2473 insn1 -> ... -> conditional-branch -> ... -> load_insn,
2474 and if insn1 is on the path
2475 region-entry -> ... -> bb_trg -> ... load_insn.
2476
2477 Locate insn1 by climbing on LOG_LINKS from load_insn.
2478 Locate the branch by following INSN_DEPEND from insn1. */
2479
2480 static int
2481 is_conditionally_protected (load_insn, bb_src, bb_trg)
2482 rtx load_insn;
2483 int bb_src, bb_trg;
2484 {
2485 rtx link;
2486
2487 for (link = LOG_LINKS (load_insn); link; link = XEXP (link, 1))
2488 {
2489 rtx insn1 = XEXP (link, 0);
2490
2491 /* must be a DEF-USE dependence upon non-branch */
2492 if (GET_MODE (link) != VOIDmode
2493 || GET_CODE (insn1) == JUMP_INSN)
2494 continue;
2495
2496 /* must exist a path: region-entry -> ... -> bb_trg -> ... load_insn */
2497 if (INSN_BB (insn1) == bb_src
2498 || (CONTAINING_RGN (INSN_BLOCK (insn1))
2499 != CONTAINING_RGN (BB_TO_BLOCK (bb_src)))
2500 || (!IS_REACHABLE (bb_trg, INSN_BB (insn1))
2501 && !IS_REACHABLE (INSN_BB (insn1), bb_trg)))
2502 continue;
2503
2504 /* now search for the conditional-branch */
2505 if (find_conditional_protection (insn1, bb_src))
2506 return 1;
2507
2508 /* recursive step: search another insn1, "above" current insn1. */
2509 return is_conditionally_protected (insn1, bb_src, bb_trg);
2510 }
2511
2512 /* the chain does not exsist */
2513 return 0;
2514 } /* is_conditionally_protected */
2515
2516 /* Returns 1 if a clue for "similar load" 'insn2' is found, and hence
2517 load_insn can move speculatively from bb_src to bb_trg. All the
2518 following must hold:
2519
2520 (1) both loads have 1 base register (PFREE_CANDIDATEs).
2521 (2) load_insn and load1 have a def-use dependence upon
2522 the same insn 'insn1'.
2523 (3) either load2 is in bb_trg, or:
2524 - there's only one split-block, and
2525 - load1 is on the escape path, and
2526
2527 From all these we can conclude that the two loads access memory
2528 addresses that differ at most by a constant, and hence if moving
2529 load_insn would cause an exception, it would have been caused by
2530 load2 anyhow. */
2531
2532 static int
2533 is_pfree (load_insn, bb_src, bb_trg)
2534 rtx load_insn;
2535 int bb_src, bb_trg;
2536 {
2537 rtx back_link;
2538 register candidate *candp = candidate_table + bb_src;
2539
2540 if (candp->split_bbs.nr_members != 1)
2541 /* must have exactly one escape block */
2542 return 0;
2543
2544 for (back_link = LOG_LINKS (load_insn);
2545 back_link; back_link = XEXP (back_link, 1))
2546 {
2547 rtx insn1 = XEXP (back_link, 0);
2548
2549 if (GET_MODE (back_link) == VOIDmode)
2550 {
2551 /* found a DEF-USE dependence (insn1, load_insn) */
2552 rtx fore_link;
2553
2554 for (fore_link = INSN_DEPEND (insn1);
2555 fore_link; fore_link = XEXP (fore_link, 1))
2556 {
2557 rtx insn2 = XEXP (fore_link, 0);
2558 if (GET_MODE (fore_link) == VOIDmode)
2559 {
2560 /* found a DEF-USE dependence (insn1, insn2) */
2561 if (classify_insn (insn2) != PFREE_CANDIDATE)
2562 /* insn2 not guaranteed to be a 1 base reg load */
2563 continue;
2564
2565 if (INSN_BB (insn2) == bb_trg)
2566 /* insn2 is the similar load, in the target block */
2567 return 1;
2568
2569 if (*(candp->split_bbs.first_member) == INSN_BLOCK (insn2))
2570 /* insn2 is a similar load, in a split-block */
2571 return 1;
2572 }
2573 }
2574 }
2575 }
2576
2577 /* couldn't find a similar load */
2578 return 0;
2579 } /* is_pfree */
2580
2581 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
2582 as found by analyzing insn's expression. */
2583
2584 static int
2585 may_trap_exp (x, is_store)
2586 rtx x;
2587 int is_store;
2588 {
2589 enum rtx_code code;
2590
2591 if (x == 0)
2592 return TRAP_FREE;
2593 code = GET_CODE (x);
2594 if (is_store)
2595 {
2596 if (code == MEM)
2597 return TRAP_RISKY;
2598 else
2599 return TRAP_FREE;
2600 }
2601 if (code == MEM)
2602 {
2603 /* The insn uses memory */
2604 /* a volatile load */
2605 if (MEM_VOLATILE_P (x))
2606 return IRISKY;
2607 /* an exception-free load */
2608 if (!may_trap_p (x))
2609 return IFREE;
2610 /* a load with 1 base register, to be further checked */
2611 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
2612 return PFREE_CANDIDATE;
2613 /* no info on the load, to be further checked */
2614 return PRISKY_CANDIDATE;
2615 }
2616 else
2617 {
2618 char *fmt;
2619 int i, insn_class = TRAP_FREE;
2620
2621 /* neither store nor load, check if it may cause a trap */
2622 if (may_trap_p (x))
2623 return TRAP_RISKY;
2624 /* recursive step: walk the insn... */
2625 fmt = GET_RTX_FORMAT (code);
2626 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2627 {
2628 if (fmt[i] == 'e')
2629 {
2630 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
2631 insn_class = WORST_CLASS (insn_class, tmp_class);
2632 }
2633 else if (fmt[i] == 'E')
2634 {
2635 int j;
2636 for (j = 0; j < XVECLEN (x, i); j++)
2637 {
2638 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
2639 insn_class = WORST_CLASS (insn_class, tmp_class);
2640 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
2641 break;
2642 }
2643 }
2644 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
2645 break;
2646 }
2647 return insn_class;
2648 }
2649 } /* may_trap_exp */
2650
2651
2652 /* Classifies insn for the purpose of verifying that it can be
2653 moved speculatively, by examining it's patterns, returning:
2654 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
2655 TRAP_FREE: non-load insn.
2656 IFREE: load from a globaly safe location.
2657 IRISKY: volatile load.
2658 PFREE_CANDIDATE, PRISKY_CANDIDATE: load that need to be checked for
2659 being either PFREE or PRISKY. */
2660
2661 static int
2662 classify_insn (insn)
2663 rtx insn;
2664 {
2665 rtx pat = PATTERN (insn);
2666 int tmp_class = TRAP_FREE;
2667 int insn_class = TRAP_FREE;
2668 enum rtx_code code;
2669
2670 if (GET_CODE (pat) == PARALLEL)
2671 {
2672 int i, len = XVECLEN (pat, 0);
2673
2674 for (i = len - 1; i >= 0; i--)
2675 {
2676 code = GET_CODE (XVECEXP (pat, 0, i));
2677 switch (code)
2678 {
2679 case CLOBBER:
2680 /* test if it is a 'store' */
2681 tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
2682 break;
2683 case SET:
2684 /* test if it is a store */
2685 tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
2686 if (tmp_class == TRAP_RISKY)
2687 break;
2688 /* test if it is a load */
2689 tmp_class =
2690 WORST_CLASS (tmp_class,
2691 may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)), 0));
2692 default:;
2693 }
2694 insn_class = WORST_CLASS (insn_class, tmp_class);
2695 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
2696 break;
2697 }
2698 }
2699 else
2700 {
2701 code = GET_CODE (pat);
2702 switch (code)
2703 {
2704 case CLOBBER:
2705 /* test if it is a 'store' */
2706 tmp_class = may_trap_exp (XEXP (pat, 0), 1);
2707 break;
2708 case SET:
2709 /* test if it is a store */
2710 tmp_class = may_trap_exp (SET_DEST (pat), 1);
2711 if (tmp_class == TRAP_RISKY)
2712 break;
2713 /* test if it is a load */
2714 tmp_class =
2715 WORST_CLASS (tmp_class,
2716 may_trap_exp (SET_SRC (pat), 0));
2717 default:;
2718 }
2719 insn_class = tmp_class;
2720 }
2721
2722 return insn_class;
2723
2724 } /* classify_insn */
2725
2726 /* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
2727 a load moved speculatively, or if load_insn is protected by
2728 a compare on load_insn's address). */
2729
2730 static int
2731 is_prisky (load_insn, bb_src, bb_trg)
2732 rtx load_insn;
2733 int bb_src, bb_trg;
2734 {
2735 if (FED_BY_SPEC_LOAD (load_insn))
2736 return 1;
2737
2738 if (LOG_LINKS (load_insn) == NULL)
2739 /* dependence may 'hide' out of the region. */
2740 return 1;
2741
2742 if (is_conditionally_protected (load_insn, bb_src, bb_trg))
2743 return 1;
2744
2745 return 0;
2746 } /* is_prisky */
2747
2748 /* Insn is a candidate to be moved speculatively from bb_src to bb_trg.
2749 Return 1 if insn is exception-free (and the motion is valid)
2750 and 0 otherwise. */
2751
2752 static int
2753 is_exception_free (insn, bb_src, bb_trg)
2754 rtx insn;
2755 int bb_src, bb_trg;
2756 {
2757 int insn_class = classify_insn (insn);
2758
2759 /* handle non-load insns */
2760 switch (insn_class)
2761 {
2762 case TRAP_FREE:
2763 return 1;
2764 case TRAP_RISKY:
2765 return 0;
2766 default:;
2767 }
2768
2769 /* handle loads */
2770 if (!flag_schedule_speculative_load)
2771 return 0;
2772 IS_LOAD_INSN (insn) = 1;
2773 switch (insn_class)
2774 {
2775 case IFREE:
2776 return (1);
2777 case IRISKY:
2778 return 0;
2779 case PFREE_CANDIDATE:
2780 if (is_pfree (insn, bb_src, bb_trg))
2781 return 1;
2782 /* don't 'break' here: PFREE-candidate is also PRISKY-candidate */
2783 case PRISKY_CANDIDATE:
2784 if (!flag_schedule_speculative_load_dangerous
2785 || is_prisky (insn, bb_src, bb_trg))
2786 return 0;
2787 break;
2788 default:;
2789 }
2790
2791 return flag_schedule_speculative_load_dangerous;
2792 } /* is_exception_free */
2793
2794
2795 /* Process an insn's memory dependencies. There are four kinds of
2796 dependencies:
2797
2798 (0) read dependence: read follows read
2799 (1) true dependence: read follows write
2800 (2) anti dependence: write follows read
2801 (3) output dependence: write follows write
2802
2803 We are careful to build only dependencies which actually exist, and
2804 use transitivity to avoid building too many links. */
2805 \f
2806 /* Return the INSN_LIST containing INSN in LIST, or NULL
2807 if LIST does not contain INSN. */
2808
2809 __inline static rtx
2810 find_insn_list (insn, list)
2811 rtx insn;
2812 rtx list;
2813 {
2814 while (list)
2815 {
2816 if (XEXP (list, 0) == insn)
2817 return list;
2818 list = XEXP (list, 1);
2819 }
2820 return 0;
2821 }
2822
2823
2824 /* Return 1 if the pair (insn, x) is found in (LIST, LIST1), or 0 otherwise. */
2825
2826 __inline static char
2827 find_insn_mem_list (insn, x, list, list1)
2828 rtx insn, x;
2829 rtx list, list1;
2830 {
2831 while (list)
2832 {
2833 if (XEXP (list, 0) == insn
2834 && XEXP (list1, 0) == x)
2835 return 1;
2836 list = XEXP (list, 1);
2837 list1 = XEXP (list1, 1);
2838 }
2839 return 0;
2840 }
2841
2842
2843 /* Compute the function units used by INSN. This caches the value
2844 returned by function_units_used. A function unit is encoded as the
2845 unit number if the value is non-negative and the compliment of a
2846 mask if the value is negative. A function unit index is the
2847 non-negative encoding. */
2848
2849 __inline static int
2850 insn_unit (insn)
2851 rtx insn;
2852 {
2853 register int unit = INSN_UNIT (insn);
2854
2855 if (unit == 0)
2856 {
2857 recog_memoized (insn);
2858
2859 /* A USE insn, or something else we don't need to understand.
2860 We can't pass these directly to function_units_used because it will
2861 trigger a fatal error for unrecognizable insns. */
2862 if (INSN_CODE (insn) < 0)
2863 unit = -1;
2864 else
2865 {
2866 unit = function_units_used (insn);
2867 /* Increment non-negative values so we can cache zero. */
2868 if (unit >= 0)
2869 unit++;
2870 }
2871 /* We only cache 16 bits of the result, so if the value is out of
2872 range, don't cache it. */
2873 if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT
2874 || unit >= 0
2875 || (~unit & ((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0)
2876 INSN_UNIT (insn) = unit;
2877 }
2878 return (unit > 0 ? unit - 1 : unit);
2879 }
2880
2881 /* Compute the blockage range for executing INSN on UNIT. This caches
2882 the value returned by the blockage_range_function for the unit.
2883 These values are encoded in an int where the upper half gives the
2884 minimum value and the lower half gives the maximum value. */
2885
2886 __inline static unsigned int
2887 blockage_range (unit, insn)
2888 int unit;
2889 rtx insn;
2890 {
2891 unsigned int blockage = INSN_BLOCKAGE (insn);
2892 unsigned int range;
2893
2894 if (UNIT_BLOCKED (blockage) != unit + 1)
2895 {
2896 range = function_units[unit].blockage_range_function (insn);
2897 /* We only cache the blockage range for one unit and then only if
2898 the values fit. */
2899 if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS)
2900 INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range);
2901 }
2902 else
2903 range = BLOCKAGE_RANGE (blockage);
2904
2905 return range;
2906 }
2907
2908 /* A vector indexed by function unit instance giving the last insn to use
2909 the unit. The value of the function unit instance index for unit U
2910 instance I is (U + I * FUNCTION_UNITS_SIZE). */
2911 static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];
2912
2913 /* A vector indexed by function unit instance giving the minimum time when
2914 the unit will unblock based on the maximum blockage cost. */
2915 static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];
2916
2917 /* A vector indexed by function unit number giving the number of insns
2918 that remain to use the unit. */
2919 static int unit_n_insns[FUNCTION_UNITS_SIZE];
2920
2921 /* Reset the function unit state to the null state. */
2922
2923 static void
2924 clear_units ()
2925 {
2926 bzero ((char *) unit_last_insn, sizeof (unit_last_insn));
2927 bzero ((char *) unit_tick, sizeof (unit_tick));
2928 bzero ((char *) unit_n_insns, sizeof (unit_n_insns));
2929 }
2930
2931 /* Return the issue-delay of an insn */
2932
2933 __inline static int
2934 insn_issue_delay (insn)
2935 rtx insn;
2936 {
2937 rtx link;
2938 int i, delay = 0;
2939 int unit = insn_unit (insn);
2940
2941 /* efficiency note: in fact, we are working 'hard' to compute a
2942 value that was available in md file, and is not available in
2943 function_units[] structure. It would be nice to have this
2944 value there, too. */
2945 if (unit >= 0)
2946 {
2947 if (function_units[unit].blockage_range_function &&
2948 function_units[unit].blockage_function)
2949 delay = function_units[unit].blockage_function (insn, insn);
2950 }
2951 else
2952 for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
2953 if ((unit & 1) != 0 && function_units[i].blockage_range_function
2954 && function_units[i].blockage_function)
2955 delay = MAX (delay, function_units[i].blockage_function (insn, insn));
2956
2957 return delay;
2958 }
2959
2960 /* Return the actual hazard cost of executing INSN on the unit UNIT,
2961 instance INSTANCE at time CLOCK if the previous actual hazard cost
2962 was COST. */
2963
2964 __inline static int
2965 actual_hazard_this_instance (unit, instance, insn, clock, cost)
2966 int unit, instance, clock, cost;
2967 rtx insn;
2968 {
2969 int tick = unit_tick[instance]; /* issue time of the last issued insn */
2970
2971 if (tick - clock > cost)
2972 {
2973 /* The scheduler is operating forward, so unit's last insn is the
2974 executing insn and INSN is the candidate insn. We want a
2975 more exact measure of the blockage if we execute INSN at CLOCK
2976 given when we committed the execution of the unit's last insn.
2977
2978 The blockage value is given by either the unit's max blockage
2979 constant, blockage range function, or blockage function. Use
2980 the most exact form for the given unit. */
2981
2982 if (function_units[unit].blockage_range_function)
2983 {
2984 if (function_units[unit].blockage_function)
2985 tick += (function_units[unit].blockage_function
2986 (unit_last_insn[instance], insn)
2987 - function_units[unit].max_blockage);
2988 else
2989 tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn))
2990 - function_units[unit].max_blockage);
2991 }
2992 if (tick - clock > cost)
2993 cost = tick - clock;
2994 }
2995 return cost;
2996 }
2997
2998 /* Record INSN as having begun execution on the units encoded by UNIT at
2999 time CLOCK. */
3000
3001 __inline static void
3002 schedule_unit (unit, insn, clock)
3003 int unit, clock;
3004 rtx insn;
3005 {
3006 int i;
3007
3008 if (unit >= 0)
3009 {
3010 int instance = unit;
3011 #if MAX_MULTIPLICITY > 1
3012 /* Find the first free instance of the function unit and use that
3013 one. We assume that one is free. */
3014 for (i = function_units[unit].multiplicity - 1; i > 0; i--)
3015 {
3016 if (!actual_hazard_this_instance (unit, instance, insn, clock, 0))
3017 break;
3018 instance += FUNCTION_UNITS_SIZE;
3019 }
3020 #endif
3021 unit_last_insn[instance] = insn;
3022 unit_tick[instance] = (clock + function_units[unit].max_blockage);
3023 }
3024 else
3025 for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
3026 if ((unit & 1) != 0)
3027 schedule_unit (i, insn, clock);
3028 }
3029
3030 /* Return the actual hazard cost of executing INSN on the units encoded by
3031 UNIT at time CLOCK if the previous actual hazard cost was COST. */
3032
3033 __inline static int
3034 actual_hazard (unit, insn, clock, cost)
3035 int unit, clock, cost;
3036 rtx insn;
3037 {
3038 int i;
3039
3040 if (unit >= 0)
3041 {
3042 /* Find the instance of the function unit with the minimum hazard. */
3043 int instance = unit;
3044 int best_cost = actual_hazard_this_instance (unit, instance, insn,
3045 clock, cost);
3046 int this_cost;
3047
3048 #if MAX_MULTIPLICITY > 1
3049 if (best_cost > cost)
3050 {
3051 for (i = function_units[unit].multiplicity - 1; i > 0; i--)
3052 {
3053 instance += FUNCTION_UNITS_SIZE;
3054 this_cost = actual_hazard_this_instance (unit, instance, insn,
3055 clock, cost);
3056 if (this_cost < best_cost)
3057 {
3058 best_cost = this_cost;
3059 if (this_cost <= cost)
3060 break;
3061 }
3062 }
3063 }
3064 #endif
3065 cost = MAX (cost, best_cost);
3066 }
3067 else
3068 for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
3069 if ((unit & 1) != 0)
3070 cost = actual_hazard (i, insn, clock, cost);
3071
3072 return cost;
3073 }
3074
3075 /* Return the potential hazard cost of executing an instruction on the
3076 units encoded by UNIT if the previous potential hazard cost was COST.
3077 An insn with a large blockage time is chosen in preference to one
3078 with a smaller time; an insn that uses a unit that is more likely
3079 to be used is chosen in preference to one with a unit that is less
3080 used. We are trying to minimize a subsequent actual hazard. */
3081
3082 __inline static int
3083 potential_hazard (unit, insn, cost)
3084 int unit, cost;
3085 rtx insn;
3086 {
3087 int i, ncost;
3088 unsigned int minb, maxb;
3089
3090 if (unit >= 0)
3091 {
3092 minb = maxb = function_units[unit].max_blockage;
3093 if (maxb > 1)
3094 {
3095 if (function_units[unit].blockage_range_function)
3096 {
3097 maxb = minb = blockage_range (unit, insn);
3098 maxb = MAX_BLOCKAGE_COST (maxb);
3099 minb = MIN_BLOCKAGE_COST (minb);
3100 }
3101
3102 if (maxb > 1)
3103 {
3104 /* Make the number of instructions left dominate. Make the
3105 minimum delay dominate the maximum delay. If all these
3106 are the same, use the unit number to add an arbitrary
3107 ordering. Other terms can be added. */
3108 ncost = minb * 0x40 + maxb;
3109 ncost *= (unit_n_insns[unit] - 1) * 0x1000 + unit;
3110 if (ncost > cost)
3111 cost = ncost;
3112 }
3113 }
3114 }
3115 else
3116 for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
3117 if ((unit & 1) != 0)
3118 cost = potential_hazard (i, insn, cost);
3119
3120 return cost;
3121 }
3122
3123 /* Compute cost of executing INSN given the dependence LINK on the insn USED.
3124 This is the number of cycles between instruction issue and
3125 instruction results. */
3126
3127 __inline static int
3128 insn_cost (insn, link, used)
3129 rtx insn, link, used;
3130 {
3131 register int cost = INSN_COST (insn);
3132
3133 if (cost == 0)
3134 {
3135 recog_memoized (insn);
3136
3137 /* A USE insn, or something else we don't need to understand.
3138 We can't pass these directly to result_ready_cost because it will
3139 trigger a fatal error for unrecognizable insns. */
3140 if (INSN_CODE (insn) < 0)
3141 {
3142 INSN_COST (insn) = 1;
3143 return 1;
3144 }
3145 else
3146 {
3147 cost = result_ready_cost (insn);
3148
3149 if (cost < 1)
3150 cost = 1;
3151
3152 INSN_COST (insn) = cost;
3153 }
3154 }
3155
3156 /* in this case estimate cost without caring how insn is used. */
3157 if (link == 0 && used == 0)
3158 return cost;
3159
3160 /* A USE insn should never require the value used to be computed. This
3161 allows the computation of a function's result and parameter values to
3162 overlap the return and call. */
3163 recog_memoized (used);
3164 if (INSN_CODE (used) < 0)
3165 LINK_COST_FREE (link) = 1;
3166
3167 /* If some dependencies vary the cost, compute the adjustment. Most
3168 commonly, the adjustment is complete: either the cost is ignored
3169 (in the case of an output- or anti-dependence), or the cost is
3170 unchanged. These values are cached in the link as LINK_COST_FREE
3171 and LINK_COST_ZERO. */
3172
3173 if (LINK_COST_FREE (link))
3174 cost = 1;
3175 #ifdef ADJUST_COST
3176 else if (!LINK_COST_ZERO (link))
3177 {
3178 int ncost = cost;
3179
3180 ADJUST_COST (used, link, insn, ncost);
3181 if (ncost <= 1)
3182 LINK_COST_FREE (link) = ncost = 1;
3183 if (cost == ncost)
3184 LINK_COST_ZERO (link) = 1;
3185 cost = ncost;
3186 }
3187 #endif
3188 return cost;
3189 }
3190
3191 /* Compute the priority number for INSN. */
3192
3193 static int
3194 priority (insn)
3195 rtx insn;
3196 {
3197 int this_priority;
3198 rtx link;
3199
3200 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
3201 return 0;
3202
3203 if ((this_priority = INSN_PRIORITY (insn)) == 0)
3204 {
3205 if (INSN_DEPEND (insn) == 0)
3206 this_priority = insn_cost (insn, 0, 0);
3207 else
3208 for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
3209 {
3210 rtx next;
3211 int next_priority;
3212
3213 next = XEXP (link, 0);
3214
3215 /* critical path is meaningful in block boundaries only */
3216 if (INSN_BLOCK (next) != INSN_BLOCK (insn))
3217 continue;
3218
3219 next_priority = insn_cost (insn, link, next) + priority (next);
3220 if (next_priority > this_priority)
3221 this_priority = next_priority;
3222 }
3223 INSN_PRIORITY (insn) = this_priority;
3224 }
3225 return this_priority;
3226 }
3227 \f
3228
3229 /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
3230 them to the unused_*_list variables, so that they can be reused. */
3231
3232 __inline static void
3233 free_pnd_lst (listp, unused_listp)
3234 rtx *listp, *unused_listp;
3235 {
3236 register rtx link, prev_link;
3237
3238 if (*listp == 0)
3239 return;
3240
3241 prev_link = *listp;
3242 link = XEXP (prev_link, 1);
3243
3244 while (link)
3245 {
3246 prev_link = link;
3247 link = XEXP (link, 1);
3248 }
3249
3250 XEXP (prev_link, 1) = *unused_listp;
3251 *unused_listp = *listp;
3252 *listp = 0;
3253 }
3254
3255 static void
3256 free_pending_lists ()
3257 {
3258
3259
3260 if (current_nr_blocks <= 1)
3261 {
3262 free_pnd_lst (&pending_read_insns, &unused_insn_list);
3263 free_pnd_lst (&pending_write_insns, &unused_insn_list);
3264 free_pnd_lst (&pending_read_mems, &unused_expr_list);
3265 free_pnd_lst (&pending_write_mems, &unused_expr_list);
3266 }
3267 else
3268 {
3269 /* interblock scheduling */
3270 int bb;
3271
3272 for (bb = 0; bb < current_nr_blocks; bb++)
3273 {
3274 free_pnd_lst (&bb_pending_read_insns[bb], &unused_insn_list);
3275 free_pnd_lst (&bb_pending_write_insns[bb], &unused_insn_list);
3276 free_pnd_lst (&bb_pending_read_mems[bb], &unused_expr_list);
3277 free_pnd_lst (&bb_pending_write_mems[bb], &unused_expr_list);
3278 }
3279 }
3280 }
3281
3282 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
3283 The MEM is a memory reference contained within INSN, which we are saving
3284 so that we can do memory aliasing on it. */
3285
3286 static void
3287 add_insn_mem_dependence (insn_list, mem_list, insn, mem)
3288 rtx *insn_list, *mem_list, insn, mem;
3289 {
3290 register rtx link;
3291
3292 if (unused_insn_list)
3293 {
3294 link = unused_insn_list;
3295 unused_insn_list = XEXP (link, 1);
3296 }
3297 else
3298 link = rtx_alloc (INSN_LIST);
3299 XEXP (link, 0) = insn;
3300 XEXP (link, 1) = *insn_list;
3301 *insn_list = link;
3302
3303 if (unused_expr_list)
3304 {
3305 link = unused_expr_list;
3306 unused_expr_list = XEXP (link, 1);
3307 }
3308 else
3309 link = rtx_alloc (EXPR_LIST);
3310 XEXP (link, 0) = mem;
3311 XEXP (link, 1) = *mem_list;
3312 *mem_list = link;
3313
3314 pending_lists_length++;
3315 }
3316 \f
3317
3318 /* Make a dependency between every memory reference on the pending lists
3319 and INSN, thus flushing the pending lists. If ONLY_WRITE, don't flush
3320 the read list. */
3321
3322 static void
3323 flush_pending_lists (insn, only_write)
3324 rtx insn;
3325 int only_write;
3326 {
3327 rtx u;
3328 rtx link;
3329
3330 while (pending_read_insns && ! only_write)
3331 {
3332 add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);
3333
3334 link = pending_read_insns;
3335 pending_read_insns = XEXP (pending_read_insns, 1);
3336 XEXP (link, 1) = unused_insn_list;
3337 unused_insn_list = link;
3338
3339 link = pending_read_mems;
3340 pending_read_mems = XEXP (pending_read_mems, 1);
3341 XEXP (link, 1) = unused_expr_list;
3342 unused_expr_list = link;
3343 }
3344 while (pending_write_insns)
3345 {
3346 add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);
3347
3348 link = pending_write_insns;
3349 pending_write_insns = XEXP (pending_write_insns, 1);
3350 XEXP (link, 1) = unused_insn_list;
3351 unused_insn_list = link;
3352
3353 link = pending_write_mems;
3354 pending_write_mems = XEXP (pending_write_mems, 1);
3355 XEXP (link, 1) = unused_expr_list;
3356 unused_expr_list = link;
3357 }
3358 pending_lists_length = 0;
3359
3360 /* last_pending_memory_flush is now a list of insns */
3361 for (u = last_pending_memory_flush; u; u = XEXP (u, 1))
3362 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3363
3364 last_pending_memory_flush =
3365 gen_rtx (INSN_LIST, VOIDmode, insn, 0);
3366 }
3367
3368 /* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
3369 by the write to the destination of X, and reads of everything mentioned. */
3370
3371 static void
3372 sched_analyze_1 (x, insn)
3373 rtx x;
3374 rtx insn;
3375 {
3376 register int regno;
3377 register rtx dest = SET_DEST (x);
3378
3379 if (dest == 0)
3380 return;
3381
3382 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
3383 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3384 {
3385 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3386 {
3387 /* The second and third arguments are values read by this insn. */
3388 sched_analyze_2 (XEXP (dest, 1), insn);
3389 sched_analyze_2 (XEXP (dest, 2), insn);
3390 }
3391 dest = SUBREG_REG (dest);
3392 }
3393
3394 if (GET_CODE (dest) == REG)
3395 {
3396 register int i;
3397
3398 regno = REGNO (dest);
3399
3400 /* A hard reg in a wide mode may really be multiple registers.
3401 If so, mark all of them just like the first. */
3402 if (regno < FIRST_PSEUDO_REGISTER)
3403 {
3404 i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
3405 while (--i >= 0)
3406 {
3407 rtx u;
3408
3409 for (u = reg_last_uses[regno + i]; u; u = XEXP (u, 1))
3410 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3411 reg_last_uses[regno + i] = 0;
3412
3413 for (u = reg_last_sets[regno + i]; u; u = XEXP (u, 1))
3414 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
3415
3416 SET_REGNO_REG_SET (reg_pending_sets, regno + i);
3417
3418 if ((call_used_regs[regno + i] || global_regs[regno + i]))
3419 /* Function calls clobber all call_used regs. */
3420 for (u = last_function_call; u; u = XEXP (u, 1))
3421 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3422 }
3423 }
3424 else
3425 {
3426 rtx u;
3427
3428 for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
3429 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3430 reg_last_uses[regno] = 0;
3431
3432 for (u = reg_last_sets[regno]; u; u = XEXP (u, 1))
3433 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
3434
3435 SET_REGNO_REG_SET (reg_pending_sets, regno);
3436
3437 /* Pseudos that are REG_EQUIV to something may be replaced
3438 by that during reloading. We need only add dependencies for
3439 the address in the REG_EQUIV note. */
3440 if (!reload_completed
3441 && reg_known_equiv_p[regno]
3442 && GET_CODE (reg_known_value[regno]) == MEM)
3443 sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);
3444
3445 /* Don't let it cross a call after scheduling if it doesn't
3446 already cross one. */
3447
3448 if (REG_N_CALLS_CROSSED (regno) == 0)
3449 for (u = last_function_call; u; u = XEXP (u, 1))
3450 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3451 }
3452 }
3453 else if (GET_CODE (dest) == MEM)
3454 {
3455 /* Writing memory. */
3456
3457 if (pending_lists_length > 32)
3458 {
3459 /* Flush all pending reads and writes to prevent the pending lists
3460 from getting any larger. Insn scheduling runs too slowly when
3461 these lists get long. The number 32 was chosen because it
3462 seems like a reasonable number. When compiling GCC with itself,
3463 this flush occurs 8 times for sparc, and 10 times for m88k using
3464 the number 32. */
3465 flush_pending_lists (insn, 0);
3466 }
3467 else
3468 {
3469 rtx u;
3470 rtx pending, pending_mem;
3471
3472 pending = pending_read_insns;
3473 pending_mem = pending_read_mems;
3474 while (pending)
3475 {
3476 /* If a dependency already exists, don't create a new one. */
3477 if (!find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
3478 if (anti_dependence (XEXP (pending_mem, 0), dest))
3479 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
3480
3481 pending = XEXP (pending, 1);
3482 pending_mem = XEXP (pending_mem, 1);
3483 }
3484
3485 pending = pending_write_insns;
3486 pending_mem = pending_write_mems;
3487 while (pending)
3488 {
3489 /* If a dependency already exists, don't create a new one. */
3490 if (!find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
3491 if (output_dependence (XEXP (pending_mem, 0), dest))
3492 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
3493
3494 pending = XEXP (pending, 1);
3495 pending_mem = XEXP (pending_mem, 1);
3496 }
3497
3498 for (u = last_pending_memory_flush; u; u = XEXP (u, 1))
3499 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3500
3501 add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
3502 insn, dest);
3503 }
3504 sched_analyze_2 (XEXP (dest, 0), insn);
3505 }
3506
3507 /* Analyze reads. */
3508 if (GET_CODE (x) == SET)
3509 sched_analyze_2 (SET_SRC (x), insn);
3510 }
3511
3512 /* Analyze the uses of memory and registers in rtx X in INSN. */
3513
3514 static void
3515 sched_analyze_2 (x, insn)
3516 rtx x;
3517 rtx insn;
3518 {
3519 register int i;
3520 register int j;
3521 register enum rtx_code code;
3522 register char *fmt;
3523
3524 if (x == 0)
3525 return;
3526
3527 code = GET_CODE (x);
3528
3529 switch (code)
3530 {
3531 case CONST_INT:
3532 case CONST_DOUBLE:
3533 case SYMBOL_REF:
3534 case CONST:
3535 case LABEL_REF:
3536 /* Ignore constants. Note that we must handle CONST_DOUBLE here
3537 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
3538 this does not mean that this insn is using cc0. */
3539 return;
3540
3541 #ifdef HAVE_cc0
3542 case CC0:
3543 {
3544 rtx link, prev;
3545
3546 /* User of CC0 depends on immediately preceding insn. */
3547 SCHED_GROUP_P (insn) = 1;
3548
3549 /* There may be a note before this insn now, but all notes will
3550 be removed before we actually try to schedule the insns, so
3551 it won't cause a problem later. We must avoid it here though. */
3552 prev = prev_nonnote_insn (insn);
3553
3554 /* Make a copy of all dependencies on the immediately previous insn,
3555 and add to this insn. This is so that all the dependencies will
3556 apply to the group. Remove an explicit dependence on this insn
3557 as SCHED_GROUP_P now represents it. */
3558
3559 if (find_insn_list (prev, LOG_LINKS (insn)))
3560 remove_dependence (insn, prev);
3561
3562 for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
3563 add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
3564
3565 return;
3566 }
3567 #endif
3568
3569 case REG:
3570 {
3571 rtx u;
3572 int regno = REGNO (x);
3573 if (regno < FIRST_PSEUDO_REGISTER)
3574 {
3575 int i;
3576
3577 i = HARD_REGNO_NREGS (regno, GET_MODE (x));
3578 while (--i >= 0)
3579 {
3580 reg_last_uses[regno + i]
3581 = gen_rtx (INSN_LIST, VOIDmode,
3582 insn, reg_last_uses[regno + i]);
3583
3584 for (u = reg_last_sets[regno + i]; u; u = XEXP (u, 1))
3585 add_dependence (insn, XEXP (u, 0), 0);
3586
3587 if ((call_used_regs[regno + i] || global_regs[regno + i]))
3588 /* Function calls clobber all call_used regs. */
3589 for (u = last_function_call; u; u = XEXP (u, 1))
3590 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3591 }
3592 }
3593 else
3594 {
3595 reg_last_uses[regno]
3596 = gen_rtx (INSN_LIST, VOIDmode, insn, reg_last_uses[regno]);
3597
3598 for (u = reg_last_sets[regno]; u; u = XEXP (u, 1))
3599 add_dependence (insn, XEXP (u, 0), 0);
3600
3601 /* Pseudos that are REG_EQUIV to something may be replaced
3602 by that during reloading. We need only add dependencies for
3603 the address in the REG_EQUIV note. */
3604 if (!reload_completed
3605 && reg_known_equiv_p[regno]
3606 && GET_CODE (reg_known_value[regno]) == MEM)
3607 sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);
3608
3609 /* If the register does not already cross any calls, then add this
3610 insn to the sched_before_next_call list so that it will still
3611 not cross calls after scheduling. */
3612 if (REG_N_CALLS_CROSSED (regno) == 0)
3613 add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
3614 }
3615 return;
3616 }
3617
3618 case MEM:
3619 {
3620 /* Reading memory. */
3621 rtx u;
3622 rtx pending, pending_mem;
3623
3624 pending = pending_read_insns;
3625 pending_mem = pending_read_mems;
3626 while (pending)
3627 {
3628 /* If a dependency already exists, don't create a new one. */
3629 if (!find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
3630 if (read_dependence (XEXP (pending_mem, 0), x))
3631 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
3632
3633 pending = XEXP (pending, 1);
3634 pending_mem = XEXP (pending_mem, 1);
3635 }
3636
3637 pending = pending_write_insns;
3638 pending_mem = pending_write_mems;
3639 while (pending)
3640 {
3641 /* If a dependency already exists, don't create a new one. */
3642 if (!find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
3643 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
3644 x, rtx_varies_p))
3645 add_dependence (insn, XEXP (pending, 0), 0);
3646
3647 pending = XEXP (pending, 1);
3648 pending_mem = XEXP (pending_mem, 1);
3649 }
3650
3651 for (u = last_pending_memory_flush; u; u = XEXP (u, 1))
3652 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3653
3654 /* Always add these dependencies to pending_reads, since
3655 this insn may be followed by a write. */
3656 add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
3657 insn, x);
3658
3659 /* Take advantage of tail recursion here. */
3660 sched_analyze_2 (XEXP (x, 0), insn);
3661 return;
3662 }
3663
3664 case ASM_OPERANDS:
3665 case ASM_INPUT:
3666 case UNSPEC_VOLATILE:
3667 case TRAP_IF:
3668 {
3669 rtx u;
3670
3671 /* Traditional and volatile asm instructions must be considered to use
3672 and clobber all hard registers, all pseudo-registers and all of
3673 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
3674
3675 Consider for instance a volatile asm that changes the fpu rounding
3676 mode. An insn should not be moved across this even if it only uses
3677 pseudo-regs because it might give an incorrectly rounded result. */
3678 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
3679 {
3680 int max_reg = max_reg_num ();
3681 for (i = 0; i < max_reg; i++)
3682 {
3683 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
3684 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3685 reg_last_uses[i] = 0;
3686
3687 /* reg_last_sets[r] is now a list of insns */
3688 for (u = reg_last_sets[i]; u; u = XEXP (u, 1))
3689 add_dependence (insn, XEXP (u, 0), 0);
3690 }
3691 reg_pending_sets_all = 1;
3692
3693 flush_pending_lists (insn, 0);
3694 }
3695
3696 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
3697 We can not just fall through here since then we would be confused
3698 by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
3699 traditional asms unlike their normal usage. */
3700
3701 if (code == ASM_OPERANDS)
3702 {
3703 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
3704 sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
3705 return;
3706 }
3707 break;
3708 }
3709
3710 case PRE_DEC:
3711 case POST_DEC:
3712 case PRE_INC:
3713 case POST_INC:
3714 /* These both read and modify the result. We must handle them as writes
3715 to get proper dependencies for following instructions. We must handle
3716 them as reads to get proper dependencies from this to previous
3717 instructions. Thus we need to pass them to both sched_analyze_1
3718 and sched_analyze_2. We must call sched_analyze_2 first in order
3719 to get the proper antecedent for the read. */
3720 sched_analyze_2 (XEXP (x, 0), insn);
3721 sched_analyze_1 (x, insn);
3722 return;
3723 }
3724
3725 /* Other cases: walk the insn. */
3726 fmt = GET_RTX_FORMAT (code);
3727 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3728 {
3729 if (fmt[i] == 'e')
3730 sched_analyze_2 (XEXP (x, i), insn);
3731 else if (fmt[i] == 'E')
3732 for (j = 0; j < XVECLEN (x, i); j++)
3733 sched_analyze_2 (XVECEXP (x, i, j), insn);
3734 }
3735 }
3736
3737 /* Analyze an INSN with pattern X to find all dependencies. */
3738
3739 static void
3740 sched_analyze_insn (x, insn, loop_notes)
3741 rtx x, insn;
3742 rtx loop_notes;
3743 {
3744 register RTX_CODE code = GET_CODE (x);
3745 rtx link;
3746 int maxreg = max_reg_num ();
3747 int i;
3748
3749 if (code == SET || code == CLOBBER)
3750 sched_analyze_1 (x, insn);
3751 else if (code == PARALLEL)
3752 {
3753 register int i;
3754 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3755 {
3756 code = GET_CODE (XVECEXP (x, 0, i));
3757 if (code == SET || code == CLOBBER)
3758 sched_analyze_1 (XVECEXP (x, 0, i), insn);
3759 else
3760 sched_analyze_2 (XVECEXP (x, 0, i), insn);
3761 }
3762 }
3763 else
3764 sched_analyze_2 (x, insn);
3765
3766 /* Mark registers CLOBBERED or used by called function. */
3767 if (GET_CODE (insn) == CALL_INSN)
3768 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
3769 {
3770 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
3771 sched_analyze_1 (XEXP (link, 0), insn);
3772 else
3773 sched_analyze_2 (XEXP (link, 0), insn);
3774 }
3775
3776 /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic block, then
3777 we must be sure that no instructions are scheduled across it.
3778 Otherwise, the reg_n_refs info (which depends on loop_depth) would
3779 become incorrect. */
3780
3781 if (loop_notes)
3782 {
3783 int max_reg = max_reg_num ();
3784 rtx link;
3785
3786 for (i = 0; i < max_reg; i++)
3787 {
3788 rtx u;
3789 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
3790 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3791 reg_last_uses[i] = 0;
3792
3793 /* reg_last_sets[r] is now a list of insns */
3794 for (u = reg_last_sets[i]; u; u = XEXP (u, 1))
3795 add_dependence (insn, XEXP (u, 0), 0);
3796 }
3797 reg_pending_sets_all = 1;
3798
3799 flush_pending_lists (insn, 0);
3800
3801 link = loop_notes;
3802 while (XEXP (link, 1))
3803 link = XEXP (link, 1);
3804 XEXP (link, 1) = REG_NOTES (insn);
3805 REG_NOTES (insn) = loop_notes;
3806 }
3807
3808 /* After reload, it is possible for an instruction to have a REG_DEAD note
3809 for a register that actually dies a few instructions earlier. For
3810 example, this can happen with SECONDARY_MEMORY_NEEDED reloads.
3811 In this case, we must consider the insn to use the register mentioned
3812 in the REG_DEAD note. Otherwise, we may accidentally move this insn
3813 after another insn that sets the register, thus getting obviously invalid
3814 rtl. This confuses reorg which believes that REG_DEAD notes are still
3815 meaningful.
3816
3817 ??? We would get better code if we fixed reload to put the REG_DEAD
3818 notes in the right places, but that may not be worth the effort. */
3819
3820 if (reload_completed)
3821 {
3822 rtx note;
3823
3824 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
3825 if (REG_NOTE_KIND (note) == REG_DEAD)
3826 sched_analyze_2 (XEXP (note, 0), insn);
3827 }
3828
3829 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
3830 {
3831 /* reg_last_sets[r] is now a list of insns */
3832 reg_last_sets[i]
3833 = gen_rtx (INSN_LIST, VOIDmode, insn, 0);
3834 });
3835 CLEAR_REG_SET (reg_pending_sets);
3836
3837 if (reg_pending_sets_all)
3838 {
3839 for (i = 0; i < maxreg; i++)
3840
3841 /* reg_last_sets[r] is now a list of insns */
3842 reg_last_sets[i]
3843 = gen_rtx (INSN_LIST, VOIDmode, insn, 0);
3844
3845 reg_pending_sets_all = 0;
3846 }
3847
3848 /* Handle function calls and function returns created by the epilogue
3849 threading code. */
3850 if (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN)
3851 {
3852 rtx dep_insn;
3853 rtx prev_dep_insn;
3854
3855 /* When scheduling instructions, we make sure calls don't lose their
3856 accompanying USE insns by depending them one on another in order.
3857
3858 Also, we must do the same thing for returns created by the epilogue
3859 threading code. Note this code works only in this special case,
3860 because other passes make no guarantee that they will never emit
3861 an instruction between a USE and a RETURN. There is such a guarantee
3862 for USE instructions immediately before a call. */
3863
3864 prev_dep_insn = insn;
3865 dep_insn = PREV_INSN (insn);
3866 while (GET_CODE (dep_insn) == INSN
3867 && GET_CODE (PATTERN (dep_insn)) == USE
3868 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == REG)
3869 {
3870 SCHED_GROUP_P (prev_dep_insn) = 1;
3871
3872 /* Make a copy of all dependencies on dep_insn, and add to insn.
3873 This is so that all of the dependencies will apply to the
3874 group. */
3875
3876 for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
3877 add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
3878
3879 prev_dep_insn = dep_insn;
3880 dep_insn = PREV_INSN (dep_insn);
3881 }
3882 }
3883 }
3884
3885 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
3886 for every dependency. */
3887
3888 static void
3889 sched_analyze (head, tail)
3890 rtx head, tail;
3891 {
3892 register rtx insn;
3893 register rtx u;
3894 rtx loop_notes = 0;
3895
3896 for (insn = head;; insn = NEXT_INSN (insn))
3897 {
3898 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
3899 {
3900 sched_analyze_insn (PATTERN (insn), insn, loop_notes);
3901 loop_notes = 0;
3902 }
3903 else if (GET_CODE (insn) == CALL_INSN)
3904 {
3905 rtx x;
3906 register int i;
3907
3908 CANT_MOVE (insn) = 1;
3909
3910 /* Any instruction using a hard register which may get clobbered
3911 by a call needs to be marked as dependent on this call.
3912 This prevents a use of a hard return reg from being moved
3913 past a void call (i.e. it does not explicitly set the hard
3914 return reg). */
3915
3916 /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
3917 all registers, not just hard registers, may be clobbered by this
3918 call. */
3919
3920 /* Insn, being a CALL_INSN, magically depends on
3921 `last_function_call' already. */
3922
3923 if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
3924 && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
3925 {
3926 int max_reg = max_reg_num ();
3927 for (i = 0; i < max_reg; i++)
3928 {
3929 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
3930 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3931
3932 reg_last_uses[i] = 0;
3933
3934 /* reg_last_sets[r] is now a list of insns */
3935 for (u = reg_last_sets[i]; u; u = XEXP (u, 1))
3936 add_dependence (insn, XEXP (u, 0), 0);
3937 }
3938 reg_pending_sets_all = 1;
3939
3940 /* Add a pair of fake REG_NOTE which we will later
3941 convert back into a NOTE_INSN_SETJMP note. See
3942 reemit_notes for why we use a pair of NOTEs. */
3943 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_DEAD,
3944 GEN_INT (0),
3945 REG_NOTES (insn));
3946 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_DEAD,
3947 GEN_INT (NOTE_INSN_SETJMP),
3948 REG_NOTES (insn));
3949 }
3950 else
3951 {
3952 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3953 if (call_used_regs[i] || global_regs[i])
3954 {
3955 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
3956 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3957 reg_last_uses[i] = 0;
3958
3959 /* reg_last_sets[r] is now a list of insns */
3960 for (u = reg_last_sets[i]; u; u = XEXP (u, 1))
3961 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3962
3963 SET_REGNO_REG_SET (reg_pending_sets, i);
3964 }
3965 }
3966
3967 /* For each insn which shouldn't cross a call, add a dependence
3968 between that insn and this call insn. */
3969 x = LOG_LINKS (sched_before_next_call);
3970 while (x)
3971 {
3972 add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
3973 x = XEXP (x, 1);
3974 }
3975 LOG_LINKS (sched_before_next_call) = 0;
3976
3977 sched_analyze_insn (PATTERN (insn), insn, loop_notes);
3978 loop_notes = 0;
3979
3980 /* In the absence of interprocedural alias analysis, we must flush
3981 all pending reads and writes, and start new dependencies starting
3982 from here. But only flush writes for constant calls (which may
3983 be passed a pointer to something we haven't written yet). */
3984 flush_pending_lists (insn, CONST_CALL_P (insn));
3985
3986 /* Depend this function call (actually, the user of this
3987 function call) on all hard register clobberage. */
3988
3989 /* last_function_call is now a list of insns */
3990 last_function_call
3991 = gen_rtx (INSN_LIST, VOIDmode, insn, 0);
3992 }
3993
3994 /* See comments on reemit_notes as to why we do this. */
3995 else if (GET_CODE (insn) == NOTE
3996 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
3997 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
3998 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
3999 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
4000 || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
4001 && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
4002 {
4003 loop_notes = gen_rtx (EXPR_LIST, REG_DEAD,
4004 GEN_INT (NOTE_BLOCK_NUMBER (insn)), loop_notes);
4005 loop_notes = gen_rtx (EXPR_LIST, REG_DEAD,
4006 GEN_INT (NOTE_LINE_NUMBER (insn)), loop_notes);
4007 CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
4008 }
4009
4010 if (insn == tail)
4011 return;
4012 }
4013 abort ();
4014 }
4015 \f
4016 /* Called when we see a set of a register. If death is true, then we are
4017 scanning backwards. Mark that register as unborn. If nobody says
4018 otherwise, that is how things will remain. If death is false, then we
4019 are scanning forwards. Mark that register as being born. */
4020
4021 static void
4022 sched_note_set (b, x, death)
4023 int b;
4024 rtx x;
4025 int death;
4026 {
4027 register int regno;
4028 register rtx reg = SET_DEST (x);
4029 int subreg_p = 0;
4030
4031 if (reg == 0)
4032 return;
4033
4034 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
4035 || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
4036 {
4037 /* Must treat modification of just one hardware register of a multi-reg
4038 value or just a byte field of a register exactly the same way that
4039 mark_set_1 in flow.c does, i.e. anything except a paradoxical subreg
4040 does not kill the entire register. */
4041 if (GET_CODE (reg) != SUBREG
4042 || REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg))
4043 subreg_p = 1;
4044
4045 reg = SUBREG_REG (reg);
4046 }
4047
4048 if (GET_CODE (reg) != REG)
4049 return;
4050
4051 /* Global registers are always live, so the code below does not apply
4052 to them. */
4053
4054 regno = REGNO (reg);
4055 if (regno >= FIRST_PSEUDO_REGISTER || !global_regs[regno])
4056 {
4057 if (death)
4058 {
4059 /* If we only set part of the register, then this set does not
4060 kill it. */
4061 if (subreg_p)
4062 return;
4063
4064 /* Try killing this register. */
4065 if (regno < FIRST_PSEUDO_REGISTER)
4066 {
4067 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
4068 while (--j >= 0)
4069 {
4070 CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
4071 }
4072 }
4073 else
4074 {
4075 /* Recompute REG_BASIC_BLOCK as we update all the other
4076 dataflow information. */
4077 if (sched_reg_basic_block[regno] == REG_BLOCK_UNKNOWN)
4078 sched_reg_basic_block[regno] = current_block_num;
4079 else if (sched_reg_basic_block[regno] != current_block_num)
4080 sched_reg_basic_block[regno] = REG_BLOCK_GLOBAL;
4081
4082 CLEAR_REGNO_REG_SET (bb_live_regs, regno);
4083 }
4084 }
4085 else
4086 {
4087 /* Make the register live again. */
4088 if (regno < FIRST_PSEUDO_REGISTER)
4089 {
4090 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
4091 while (--j >= 0)
4092 {
4093 SET_REGNO_REG_SET (bb_live_regs, regno + j);
4094 }
4095 }
4096 else
4097 {
4098 SET_REGNO_REG_SET (bb_live_regs, regno);
4099 }
4100 }
4101 }
4102 }
4103 \f
4104 /* Macros and functions for keeping the priority queue sorted, and
4105 dealing with queueing and dequeueing of instructions. */
4106
4107 #define SCHED_SORT(READY, N_READY) \
4108 do { if ((N_READY) == 2) \
4109 swap_sort (READY, N_READY); \
4110 else if ((N_READY) > 2) \
4111 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
4112 while (0)
4113
4114 /* Returns a positive value if x is preferred; returns a negative value if
4115 y is preferred. Should never return 0, since that will make the sort
4116 unstable. */
4117
4118 static int
4119 rank_for_schedule (x, y)
4120 rtx *x, *y;
4121 {
4122 rtx tmp = *y;
4123 rtx tmp2 = *x;
4124 rtx link;
4125 int tmp_class, tmp2_class;
4126 int val, priority_val, spec_val, prob_val, weight_val;
4127
4128
4129 /* schedule reverse is a stress test of the scheduler correctness,
4130 controlled by -fsched-reverse option. */
4131 if ((reload_completed && flag_schedule_reverse_after_reload) ||
4132 (!reload_completed && flag_schedule_reverse_before_reload))
4133 return INSN_LUID (tmp2) - INSN_LUID (tmp);
4134
4135 /* prefer insn with higher priority */
4136 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
4137 if (priority_val)
4138 return priority_val;
4139
4140 /* prefer an insn with smaller contribution to registers-pressure */
4141 if (!reload_completed &&
4142 (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
4143 return (weight_val);
4144
4145 /* some comparison make sense in interblock scheduling only */
4146 if (INSN_BB (tmp) != INSN_BB (tmp2))
4147 {
4148 /* prefer an inblock motion on an interblock motion */
4149 if ((INSN_BB (tmp2) == target_bb) && (INSN_BB (tmp) != target_bb))
4150 return 1;
4151 if ((INSN_BB (tmp) == target_bb) && (INSN_BB (tmp2) != target_bb))
4152 return -1;
4153
4154 /* prefer a useful motion on a speculative one */
4155 if ((spec_val = IS_SPECULATIVE_INSN (tmp) - IS_SPECULATIVE_INSN (tmp2)))
4156 return (spec_val);
4157
4158 /* prefer a more probable (speculative) insn */
4159 prob_val = INSN_PROBABILITY (tmp2) - INSN_PROBABILITY (tmp);
4160 if (prob_val)
4161 return (prob_val);
4162 }
4163
4164 /* compare insns based on their relation to the last-scheduled-insn */
4165 if (last_scheduled_insn)
4166 {
4167 /* Classify the instructions into three classes:
4168 1) Data dependent on last schedule insn.
4169 2) Anti/Output dependent on last scheduled insn.
4170 3) Independent of last scheduled insn, or has latency of one.
4171 Choose the insn from the highest numbered class if different. */
4172 link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn));
4173 if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1)
4174 tmp_class = 3;
4175 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
4176 tmp_class = 1;
4177 else
4178 tmp_class = 2;
4179
4180 link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn));
4181 if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1)
4182 tmp2_class = 3;
4183 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
4184 tmp2_class = 1;
4185 else
4186 tmp2_class = 2;
4187
4188 if ((val = tmp2_class - tmp_class))
4189 return val;
4190 }
4191
4192 /* If insns are equally good, sort by INSN_LUID (original insn order),
4193 so that we make the sort stable. This minimizes instruction movement,
4194 thus minimizing sched's effect on debugging and cross-jumping. */
4195 return INSN_LUID (tmp) - INSN_LUID (tmp2);
4196 }
4197
4198 /* Resort the array A in which only element at index N may be out of order. */
4199
4200 __inline static void
4201 swap_sort (a, n)
4202 rtx *a;
4203 int n;
4204 {
4205 rtx insn = a[n - 1];
4206 int i = n - 2;
4207
4208 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
4209 {
4210 a[i + 1] = a[i];
4211 i -= 1;
4212 }
4213 a[i + 1] = insn;
4214 }
4215
4216 static int max_priority;
4217
4218 /* Add INSN to the insn queue so that it can be executed at least
4219 N_CYCLES after the currently executing insn. Preserve insns
4220 chain for debugging purposes. */
4221
4222 __inline static void
4223 queue_insn (insn, n_cycles)
4224 rtx insn;
4225 int n_cycles;
4226 {
4227 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
4228 rtx link = rtx_alloc (INSN_LIST);
4229 XEXP (link, 0) = insn;
4230 XEXP (link, 1) = insn_queue[next_q];
4231 insn_queue[next_q] = link;
4232 q_size += 1;
4233
4234 if (sched_verbose >= 2)
4235 {
4236 fprintf (dump, ";;\t\tReady-->Q: insn %d: ", INSN_UID (insn));
4237
4238 if (INSN_BB (insn) != target_bb)
4239 fprintf (dump, "(b%d) ", INSN_BLOCK (insn));
4240
4241 fprintf (dump, "queued for %d cycles.\n", n_cycles);
4242 }
4243
4244 }
4245
4246 /* Return nonzero if PAT is the pattern of an insn which makes a
4247 register live. */
4248
4249 __inline static int
4250 birthing_insn_p (pat)
4251 rtx pat;
4252 {
4253 int j;
4254
4255 if (reload_completed == 1)
4256 return 0;
4257
4258 if (GET_CODE (pat) == SET
4259 && GET_CODE (SET_DEST (pat)) == REG)
4260 {
4261 rtx dest = SET_DEST (pat);
4262 int i = REGNO (dest);
4263
4264 /* It would be more accurate to use refers_to_regno_p or
4265 reg_mentioned_p to determine when the dest is not live before this
4266 insn. */
4267
4268 if (REGNO_REG_SET_P (bb_live_regs, i))
4269 return (REG_N_SETS (i) == 1);
4270
4271 return 0;
4272 }
4273 if (GET_CODE (pat) == PARALLEL)
4274 {
4275 for (j = 0; j < XVECLEN (pat, 0); j++)
4276 if (birthing_insn_p (XVECEXP (pat, 0, j)))
4277 return 1;
4278 }
4279 return 0;
4280 }
4281
4282 /* PREV is an insn that is ready to execute. Adjust its priority if that
4283 will help shorten register lifetimes. */
4284
4285 __inline static void
4286 adjust_priority (prev)
4287 rtx prev;
4288 {
4289 /* Trying to shorten register lives after reload has completed
4290 is useless and wrong. It gives inaccurate schedules. */
4291 if (reload_completed == 0)
4292 {
4293 rtx note;
4294 int n_deaths = 0;
4295
4296 /* ??? This code has no effect, because REG_DEAD notes are removed
4297 before we ever get here. */
4298 for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
4299 if (REG_NOTE_KIND (note) == REG_DEAD)
4300 n_deaths += 1;
4301
4302 /* Defer scheduling insns which kill registers, since that
4303 shortens register lives. Prefer scheduling insns which
4304 make registers live for the same reason. */
4305 switch (n_deaths)
4306 {
4307 default:
4308 INSN_PRIORITY (prev) >>= 3;
4309 break;
4310 case 3:
4311 INSN_PRIORITY (prev) >>= 2;
4312 break;
4313 case 2:
4314 case 1:
4315 INSN_PRIORITY (prev) >>= 1;
4316 break;
4317 case 0:
4318 if (birthing_insn_p (PATTERN (prev)))
4319 {
4320 int max = max_priority;
4321
4322 if (max > INSN_PRIORITY (prev))
4323 INSN_PRIORITY (prev) = max;
4324 }
4325 break;
4326 }
4327 #ifdef ADJUST_PRIORITY
4328 ADJUST_PRIORITY (prev);
4329 #endif
4330 }
4331 }
4332
4333 /* INSN is the "currently executing insn". Launch each insn which was
4334 waiting on INSN. READY is a vector of insns which are ready to fire.
4335 N_READY is the number of elements in READY. CLOCK is the current
4336 cycle. */
4337
4338 static int
4339 schedule_insn (insn, ready, n_ready, clock)
4340 rtx insn;
4341 rtx *ready;
4342 int n_ready;
4343 int clock;
4344 {
4345 rtx link;
4346 int unit;
4347
4348 unit = insn_unit (insn);
4349
4350 if (sched_verbose >= 2)
4351 {
4352 fprintf (dump, ";;\t\t--> scheduling insn <<<%d>>> on unit ", INSN_UID (insn));
4353 insn_print_units (insn);
4354 fprintf (dump, "\n");
4355 }
4356
4357 if (sched_verbose && unit == -1)
4358 visualize_no_unit (insn);
4359
4360 if (MAX_BLOCKAGE > 1 || issue_rate > 1 || sched_verbose)
4361 schedule_unit (unit, insn, clock);
4362
4363 if (INSN_DEPEND (insn) == 0)
4364 return n_ready;
4365
4366 /* This is used by the function adjust_priority above. */
4367 if (n_ready > 0)
4368 max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
4369 else
4370 max_priority = INSN_PRIORITY (insn);
4371
4372 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
4373 {
4374 rtx next = XEXP (link, 0);
4375 int cost = insn_cost (insn, link, next);
4376
4377 INSN_TICK (next) = MAX (INSN_TICK (next), clock + cost);
4378
4379 if ((INSN_DEP_COUNT (next) -= 1) == 0)
4380 {
4381 int effective_cost = INSN_TICK (next) - clock;
4382
4383 /* For speculative insns, before inserting to ready/queue,
4384 check live, exception-free, and issue-delay */
4385 if (INSN_BB (next) != target_bb
4386 && (!IS_VALID (INSN_BB (next))
4387 || CANT_MOVE (next)
4388 || (IS_SPECULATIVE_INSN (next)
4389 && (insn_issue_delay (next) > 3
4390 || !check_live (next, INSN_BB (next), target_bb)
4391 || !is_exception_free (next, INSN_BB (next), target_bb)))))
4392 continue;
4393
4394 if (sched_verbose >= 2)
4395 {
4396 fprintf (dump, ";;\t\tdependences resolved: insn %d ", INSN_UID (next));
4397
4398 if (current_nr_blocks > 1 && INSN_BB (next) != target_bb)
4399 fprintf (dump, "/b%d ", INSN_BLOCK (next));
4400
4401 if (effective_cost <= 1)
4402 fprintf (dump, "into ready\n");
4403 else
4404 fprintf (dump, "into queue with cost=%d\n", effective_cost);
4405 }
4406
4407 /* Adjust the priority of NEXT and either put it on the ready
4408 list or queue it. */
4409 adjust_priority (next);
4410 if (effective_cost <= 1)
4411 ready[n_ready++] = next;
4412 else
4413 queue_insn (next, effective_cost);
4414 }
4415 }
4416
4417 return n_ready;
4418 }
4419
4420
4421 /* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
4422 dead_notes list. */
4423
4424 static void
4425 create_reg_dead_note (reg, insn)
4426 rtx reg, insn;
4427 {
4428 rtx link;
4429
4430 /* The number of registers killed after scheduling must be the same as the
4431 number of registers killed before scheduling. The number of REG_DEAD
4432 notes may not be conserved, i.e. two SImode hard register REG_DEAD notes
4433 might become one DImode hard register REG_DEAD note, but the number of
4434 registers killed will be conserved.
4435
4436 We carefully remove REG_DEAD notes from the dead_notes list, so that
4437 there will be none left at the end. If we run out early, then there
4438 is a bug somewhere in flow, combine and/or sched. */
4439
4440 if (dead_notes == 0)
4441 {
4442 if (current_nr_blocks <= 1)
4443 abort ();
4444 else
4445 {
4446 link = rtx_alloc (EXPR_LIST);
4447 PUT_REG_NOTE_KIND (link, REG_DEAD);
4448 }
4449 }
4450 else
4451 {
4452 /* Number of regs killed by REG. */
4453 int regs_killed = (REGNO (reg) >= FIRST_PSEUDO_REGISTER ? 1
4454 : HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)));
4455 /* Number of regs killed by REG_DEAD notes taken off the list. */
4456 int reg_note_regs;
4457
4458 link = dead_notes;
4459 reg_note_regs = (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
4460 : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
4461 GET_MODE (XEXP (link, 0))));
4462 while (reg_note_regs < regs_killed)
4463 {
4464 link = XEXP (link, 1);
4465 reg_note_regs += (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
4466 : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
4467 GET_MODE (XEXP (link, 0))));
4468 }
4469 dead_notes = XEXP (link, 1);
4470
4471 /* If we took too many regs kills off, put the extra ones back. */
4472 while (reg_note_regs > regs_killed)
4473 {
4474 rtx temp_reg, temp_link;
4475
4476 temp_reg = gen_rtx (REG, word_mode, 0);
4477 temp_link = rtx_alloc (EXPR_LIST);
4478 PUT_REG_NOTE_KIND (temp_link, REG_DEAD);
4479 XEXP (temp_link, 0) = temp_reg;
4480 XEXP (temp_link, 1) = dead_notes;
4481 dead_notes = temp_link;
4482 reg_note_regs--;
4483 }
4484 }
4485
4486 XEXP (link, 0) = reg;
4487 XEXP (link, 1) = REG_NOTES (insn);
4488 REG_NOTES (insn) = link;
4489 }
4490
4491 /* Subroutine on attach_deaths_insn--handles the recursive search
4492 through INSN. If SET_P is true, then x is being modified by the insn. */
4493
4494 static void
4495 attach_deaths (x, insn, set_p)
4496 rtx x;
4497 rtx insn;
4498 int set_p;
4499 {
4500 register int i;
4501 register int j;
4502 register enum rtx_code code;
4503 register char *fmt;
4504
4505 if (x == 0)
4506 return;
4507
4508 code = GET_CODE (x);
4509
4510 switch (code)
4511 {
4512 case CONST_INT:
4513 case CONST_DOUBLE:
4514 case LABEL_REF:
4515 case SYMBOL_REF:
4516 case CONST:
4517 case CODE_LABEL:
4518 case PC:
4519 case CC0:
4520 /* Get rid of the easy cases first. */
4521 return;
4522
4523 case REG:
4524 {
4525 /* If the register dies in this insn, queue that note, and mark
4526 this register as needing to die. */
4527 /* This code is very similar to mark_used_1 (if set_p is false)
4528 and mark_set_1 (if set_p is true) in flow.c. */
4529
4530 register int regno;
4531 int some_needed;
4532 int all_needed;
4533
4534 if (set_p)
4535 return;
4536
4537 regno = REGNO (x);
4538 all_needed = some_needed = REGNO_REG_SET_P (old_live_regs, regno);
4539 if (regno < FIRST_PSEUDO_REGISTER)
4540 {
4541 int n;
4542
4543 n = HARD_REGNO_NREGS (regno, GET_MODE (x));
4544 while (--n > 0)
4545 {
4546 int needed = (REGNO_REG_SET_P (old_live_regs, regno + n));
4547 some_needed |= needed;
4548 all_needed &= needed;
4549 }
4550 }
4551
4552 /* If it wasn't live before we started, then add a REG_DEAD note.
4553 We must check the previous lifetime info not the current info,
4554 because we may have to execute this code several times, e.g.
4555 once for a clobber (which doesn't add a note) and later
4556 for a use (which does add a note).
4557
4558 Always make the register live. We must do this even if it was
4559 live before, because this may be an insn which sets and uses
4560 the same register, in which case the register has already been
4561 killed, so we must make it live again.
4562
4563 Global registers are always live, and should never have a REG_DEAD
4564 note added for them, so none of the code below applies to them. */
4565
4566 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
4567 {
4568 /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
4569 STACK_POINTER_REGNUM, since these are always considered to be
4570 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
4571 if (regno != FRAME_POINTER_REGNUM
4572 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4573 && ! (regno == HARD_FRAME_POINTER_REGNUM)
4574 #endif
4575 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
4576 && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
4577 #endif
4578 && regno != STACK_POINTER_REGNUM)
4579 {
4580 /* ??? It is perhaps a dead_or_set_p bug that it does
4581 not check for REG_UNUSED notes itself. This is necessary
4582 for the case where the SET_DEST is a subreg of regno, as
4583 dead_or_set_p handles subregs specially. */
4584 if (! all_needed && ! dead_or_set_p (insn, x)
4585 && ! find_reg_note (insn, REG_UNUSED, x))
4586 {
4587 /* Check for the case where the register dying partially
4588 overlaps the register set by this insn. */
4589 if (regno < FIRST_PSEUDO_REGISTER
4590 && HARD_REGNO_NREGS (regno, GET_MODE (x)) > 1)
4591 {
4592 int n = HARD_REGNO_NREGS (regno, GET_MODE (x));
4593 while (--n >= 0)
4594 some_needed |= dead_or_set_regno_p (insn, regno + n);
4595 }
4596
4597 /* If none of the words in X is needed, make a REG_DEAD
4598 note. Otherwise, we must make partial REG_DEAD
4599 notes. */
4600 if (! some_needed)
4601 create_reg_dead_note (x, insn);
4602 else
4603 {
4604 int i;
4605
4606 /* Don't make a REG_DEAD note for a part of a
4607 register that is set in the insn. */
4608 for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
4609 i >= 0; i--)
4610 if (! REGNO_REG_SET_P (old_live_regs, regno+i)
4611 && ! dead_or_set_regno_p (insn, regno + i))
4612 create_reg_dead_note (gen_rtx (REG,
4613 reg_raw_mode[regno + i],
4614 regno + i),
4615 insn);
4616 }
4617 }
4618 }
4619
4620 if (regno < FIRST_PSEUDO_REGISTER)
4621 {
4622 int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
4623 while (--j >= 0)
4624 {
4625 SET_REGNO_REG_SET (bb_live_regs, regno + j);
4626 }
4627 }
4628 else
4629 {
4630 /* Recompute REG_BASIC_BLOCK as we update all the other
4631 dataflow information. */
4632 if (sched_reg_basic_block[regno] == REG_BLOCK_UNKNOWN)
4633 sched_reg_basic_block[regno] = current_block_num;
4634 else if (sched_reg_basic_block[regno] != current_block_num)
4635 sched_reg_basic_block[regno] = REG_BLOCK_GLOBAL;
4636
4637 SET_REGNO_REG_SET (bb_live_regs, regno);
4638 }
4639 }
4640 return;
4641 }
4642
4643 case MEM:
4644 /* Handle tail-recursive case. */
4645 attach_deaths (XEXP (x, 0), insn, 0);
4646 return;
4647
4648 case SUBREG:
4649 case STRICT_LOW_PART:
4650 /* These two cases preserve the value of SET_P, so handle them
4651 separately. */
4652 attach_deaths (XEXP (x, 0), insn, set_p);
4653 return;
4654
4655 case ZERO_EXTRACT:
4656 case SIGN_EXTRACT:
4657 /* This case preserves the value of SET_P for the first operand, but
4658 clears it for the other two. */
4659 attach_deaths (XEXP (x, 0), insn, set_p);
4660 attach_deaths (XEXP (x, 1), insn, 0);
4661 attach_deaths (XEXP (x, 2), insn, 0);
4662 return;
4663
4664 default:
4665 /* Other cases: walk the insn. */
4666 fmt = GET_RTX_FORMAT (code);
4667 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4668 {
4669 if (fmt[i] == 'e')
4670 attach_deaths (XEXP (x, i), insn, 0);
4671 else if (fmt[i] == 'E')
4672 for (j = 0; j < XVECLEN (x, i); j++)
4673 attach_deaths (XVECEXP (x, i, j), insn, 0);
4674 }
4675 }
4676 }
4677
4678 /* After INSN has executed, add register death notes for each register
4679 that is dead after INSN. */
4680
4681 static void
4682 attach_deaths_insn (insn)
4683 rtx insn;
4684 {
4685 rtx x = PATTERN (insn);
4686 register RTX_CODE code = GET_CODE (x);
4687 rtx link;
4688
4689 if (code == SET)
4690 {
4691 attach_deaths (SET_SRC (x), insn, 0);
4692
4693 /* A register might die here even if it is the destination, e.g.
4694 it is the target of a volatile read and is otherwise unused.
4695 Hence we must always call attach_deaths for the SET_DEST. */
4696 attach_deaths (SET_DEST (x), insn, 1);
4697 }
4698 else if (code == PARALLEL)
4699 {
4700 register int i;
4701 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4702 {
4703 code = GET_CODE (XVECEXP (x, 0, i));
4704 if (code == SET)
4705 {
4706 attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
4707
4708 attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
4709 }
4710 /* Flow does not add REG_DEAD notes to registers that die in
4711 clobbers, so we can't either. */
4712 else if (code != CLOBBER)
4713 attach_deaths (XVECEXP (x, 0, i), insn, 0);
4714 }
4715 }
4716 /* If this is a CLOBBER, only add REG_DEAD notes to registers inside a
4717 MEM being clobbered, just like flow. */
4718 else if (code == CLOBBER && GET_CODE (XEXP (x, 0)) == MEM)
4719 attach_deaths (XEXP (XEXP (x, 0), 0), insn, 0);
4720 /* Otherwise don't add a death note to things being clobbered. */
4721 else if (code != CLOBBER)
4722 attach_deaths (x, insn, 0);
4723
4724 /* Make death notes for things used in the called function. */
4725 if (GET_CODE (insn) == CALL_INSN)
4726 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
4727 attach_deaths (XEXP (XEXP (link, 0), 0), insn,
4728 GET_CODE (XEXP (link, 0)) == CLOBBER);
4729 }
4730
4731 /* functions for handlnig of notes */
4732
4733 /* Delete notes beginning with INSN and put them in the chain
4734 of notes ended by NOTE_LIST.
4735 Returns the insn following the notes. */
4736
4737 static rtx
4738 unlink_other_notes (insn, tail)
4739 rtx insn, tail;
4740 {
4741 rtx prev = PREV_INSN (insn);
4742
4743 while (insn != tail && GET_CODE (insn) == NOTE)
4744 {
4745 rtx next = NEXT_INSN (insn);
4746 /* Delete the note from its current position. */
4747 if (prev)
4748 NEXT_INSN (prev) = next;
4749 if (next)
4750 PREV_INSN (next) = prev;
4751
4752 /* Don't save away NOTE_INSN_SETJMPs, because they must remain
4753 immediately after the call they follow. We use a fake
4754 (REG_DEAD (const_int -1)) note to remember them.
4755 Likewise with NOTE_INSN_{LOOP,EHREGION}_{BEG, END}. */
4756 if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_SETJMP
4757 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG
4758 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_END
4759 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
4760 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
4761 {
4762 /* Insert the note at the end of the notes list. */
4763 PREV_INSN (insn) = note_list;
4764 if (note_list)
4765 NEXT_INSN (note_list) = insn;
4766 note_list = insn;
4767 }
4768
4769 insn = next;
4770 }
4771 return insn;
4772 }
4773
4774 /* Delete line notes beginning with INSN. Record line-number notes so
4775 they can be reused. Returns the insn following the notes. */
4776
4777 static rtx
4778 unlink_line_notes (insn, tail)
4779 rtx insn, tail;
4780 {
4781 rtx prev = PREV_INSN (insn);
4782
4783 while (insn != tail && GET_CODE (insn) == NOTE)
4784 {
4785 rtx next = NEXT_INSN (insn);
4786
4787 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
4788 {
4789 /* Delete the note from its current position. */
4790 if (prev)
4791 NEXT_INSN (prev) = next;
4792 if (next)
4793 PREV_INSN (next) = prev;
4794
4795 /* Record line-number notes so they can be reused. */
4796 LINE_NOTE (insn) = insn;
4797 }
4798 else
4799 prev = insn;
4800
4801 insn = next;
4802 }
4803 return insn;
4804 }
4805
4806 /* Return the head and tail pointers of BB. */
4807
4808 __inline static void
4809 get_block_head_tail (bb, headp, tailp)
4810 int bb;
4811 rtx *headp;
4812 rtx *tailp;
4813 {
4814
4815 rtx head = *headp;
4816 rtx tail = *tailp;
4817 int b;
4818
4819 b = BB_TO_BLOCK (bb);
4820
4821 /* HEAD and TAIL delimit the basic block being scheduled. */
4822 head = basic_block_head[b];
4823 tail = basic_block_end[b];
4824
4825 /* Don't include any notes or labels at the beginning of the
4826 basic block, or notes at the ends of basic blocks. */
4827 while (head != tail)
4828 {
4829 if (GET_CODE (head) == NOTE)
4830 head = NEXT_INSN (head);
4831 else if (GET_CODE (tail) == NOTE)
4832 tail = PREV_INSN (tail);
4833 else if (GET_CODE (head) == CODE_LABEL)
4834 head = NEXT_INSN (head);
4835 else
4836 break;
4837 }
4838
4839 *headp = head;
4840 *tailp = tail;
4841 }
4842
4843 /* Delete line notes from bb. Save them so they can be later restored
4844 (in restore_line_notes ()). */
4845
4846 static void
4847 rm_line_notes (bb)
4848 int bb;
4849 {
4850 rtx next_tail;
4851 rtx tail;
4852 rtx head;
4853 rtx insn;
4854
4855 get_block_head_tail (bb, &head, &tail);
4856
4857 if (head == tail
4858 && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
4859 return;
4860
4861 next_tail = NEXT_INSN (tail);
4862 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4863 {
4864 rtx prev;
4865
4866 /* Farm out notes, and maybe save them in NOTE_LIST.
4867 This is needed to keep the debugger from
4868 getting completely deranged. */
4869 if (GET_CODE (insn) == NOTE)
4870 {
4871 prev = insn;
4872 insn = unlink_line_notes (insn, next_tail);
4873
4874 if (prev == tail)
4875 abort ();
4876 if (prev == head)
4877 abort ();
4878 if (insn == next_tail)
4879 abort ();
4880 }
4881 }
4882 }
4883
4884 /* Save line number notes for each insn in bb. */
4885
4886 static void
4887 save_line_notes (bb)
4888 int bb;
4889 {
4890 rtx head, tail;
4891 rtx next_tail;
4892
4893 /* We must use the true line number for the first insn in the block
4894 that was computed and saved at the start of this pass. We can't
4895 use the current line number, because scheduling of the previous
4896 block may have changed the current line number. */
4897
4898 rtx line = line_note_head[BB_TO_BLOCK (bb)];
4899 rtx insn;
4900
4901 get_block_head_tail (bb, &head, &tail);
4902 next_tail = NEXT_INSN (tail);
4903
4904 for (insn = basic_block_head[BB_TO_BLOCK (bb)];
4905 insn != next_tail;
4906 insn = NEXT_INSN (insn))
4907 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
4908 line = insn;
4909 else
4910 LINE_NOTE (insn) = line;
4911 }
4912
4913
4914 /* After bb was scheduled, insert line notes into the insns list. */
4915
4916 static void
4917 restore_line_notes (bb)
4918 int bb;
4919 {
4920 rtx line, note, prev, new;
4921 int added_notes = 0;
4922 int b;
4923 rtx head, next_tail, insn;
4924
4925 b = BB_TO_BLOCK (bb);
4926
4927 head = basic_block_head[b];
4928 next_tail = NEXT_INSN (basic_block_end[b]);
4929
4930 /* Determine the current line-number. We want to know the current
4931 line number of the first insn of the block here, in case it is
4932 different from the true line number that was saved earlier. If
4933 different, then we need a line number note before the first insn
4934 of this block. If it happens to be the same, then we don't want to
4935 emit another line number note here. */
4936 for (line = head; line; line = PREV_INSN (line))
4937 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
4938 break;
4939
4940 /* Walk the insns keeping track of the current line-number and inserting
4941 the line-number notes as needed. */
4942 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4943 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
4944 line = insn;
4945 /* This used to emit line number notes before every non-deleted note.
4946 However, this confuses a debugger, because line notes not separated
4947 by real instructions all end up at the same address. I can find no
4948 use for line number notes before other notes, so none are emitted. */
4949 else if (GET_CODE (insn) != NOTE
4950 && (note = LINE_NOTE (insn)) != 0
4951 && note != line
4952 && (line == 0
4953 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
4954 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
4955 {
4956 line = note;
4957 prev = PREV_INSN (insn);
4958 if (LINE_NOTE (note))
4959 {
4960 /* Re-use the original line-number note. */
4961 LINE_NOTE (note) = 0;
4962 PREV_INSN (note) = prev;
4963 NEXT_INSN (prev) = note;
4964 PREV_INSN (insn) = note;
4965 NEXT_INSN (note) = insn;
4966 }
4967 else
4968 {
4969 added_notes++;
4970 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
4971 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
4972 RTX_INTEGRATED_P (new) = RTX_INTEGRATED_P (note);
4973 }
4974 }
4975 if (sched_verbose && added_notes)
4976 fprintf (dump, ";; added %d line-number notes\n", added_notes);
4977 }
4978
4979 /* After scheduling the function, delete redundant line notes from the
4980 insns list. */
4981
4982 static void
4983 rm_redundant_line_notes ()
4984 {
4985 rtx line = 0;
4986 rtx insn = get_insns ();
4987 int active_insn = 0;
4988 int notes = 0;
4989
4990 /* Walk the insns deleting redundant line-number notes. Many of these
4991 are already present. The remainder tend to occur at basic
4992 block boundaries. */
4993 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
4994 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
4995 {
4996 /* If there are no active insns following, INSN is redundant. */
4997 if (active_insn == 0)
4998 {
4999 notes++;
5000 NOTE_SOURCE_FILE (insn) = 0;
5001 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
5002 }
5003 /* If the line number is unchanged, LINE is redundant. */
5004 else if (line
5005 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
5006 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
5007 {
5008 notes++;
5009 NOTE_SOURCE_FILE (line) = 0;
5010 NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
5011 line = insn;
5012 }
5013 else
5014 line = insn;
5015 active_insn = 0;
5016 }
5017 else if (!((GET_CODE (insn) == NOTE
5018 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
5019 || (GET_CODE (insn) == INSN
5020 && (GET_CODE (PATTERN (insn)) == USE
5021 || GET_CODE (PATTERN (insn)) == CLOBBER))))
5022 active_insn++;
5023
5024 if (sched_verbose && notes)
5025 fprintf (dump, ";; deleted %d line-number notes\n", notes);
5026 }
5027
5028 /* Delete notes between head and tail and put them in the chain
5029 of notes ended by NOTE_LIST. */
5030
5031 static void
5032 rm_other_notes (head, tail)
5033 rtx head;
5034 rtx tail;
5035 {
5036 rtx next_tail;
5037 rtx insn;
5038
5039 if (head == tail
5040 && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
5041 return;
5042
5043 next_tail = NEXT_INSN (tail);
5044 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5045 {
5046 rtx prev;
5047
5048 /* Farm out notes, and maybe save them in NOTE_LIST.
5049 This is needed to keep the debugger from
5050 getting completely deranged. */
5051 if (GET_CODE (insn) == NOTE)
5052 {
5053 prev = insn;
5054
5055 insn = unlink_other_notes (insn, next_tail);
5056
5057 if (prev == tail)
5058 abort ();
5059 if (prev == head)
5060 abort ();
5061 if (insn == next_tail)
5062 abort ();
5063 }
5064 }
5065 }
5066
5067 /* Constructor for `sometimes' data structure. */
5068
5069 static int
5070 new_sometimes_live (regs_sometimes_live, regno, sometimes_max)
5071 struct sometimes *regs_sometimes_live;
5072 int regno;
5073 int sometimes_max;
5074 {
5075 register struct sometimes *p;
5076
5077 /* There should never be a register greater than max_regno here. If there
5078 is, it means that a define_split has created a new pseudo reg. This
5079 is not allowed, since there will not be flow info available for any
5080 new register, so catch the error here. */
5081 if (regno >= max_regno)
5082 abort ();
5083
5084 p = &regs_sometimes_live[sometimes_max];
5085 p->regno = regno;
5086 p->live_length = 0;
5087 p->calls_crossed = 0;
5088 sometimes_max++;
5089 return sometimes_max;
5090 }
5091
5092 /* Count lengths of all regs we are currently tracking,
5093 and find new registers no longer live. */
5094
5095 static void
5096 finish_sometimes_live (regs_sometimes_live, sometimes_max)
5097 struct sometimes *regs_sometimes_live;
5098 int sometimes_max;
5099 {
5100 int i;
5101
5102 for (i = 0; i < sometimes_max; i++)
5103 {
5104 register struct sometimes *p = &regs_sometimes_live[i];
5105 int regno = p->regno;
5106
5107 sched_reg_live_length[regno] += p->live_length;
5108 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
5109 }
5110 }
5111
5112 /* functions for computation of registers live/usage info */
5113
5114 /* It is assumed that prior to scheduling basic_block_live_at_start (b)
5115 contains the registers that are alive at the entry to b.
5116
5117 Two passes follow: The first pass is performed before the scheduling
5118 of a region. It scans each block of the region forward, computing
5119 the set of registers alive at the end of the basic block and
5120 discard REG_DEAD notes (done by find_pre_sched_live ()).
5121
5122 The second path is invoked after scheduling all region blocks.
5123 It scans each block of the region backward, a block being traversed
5124 only after its succesors in the region. When the set of registers
5125 live at the end of a basic block may be changed by the scheduling
5126 (this may happen for multiple blocks region), it is computed as
5127 the union of the registers live at the start of its succesors.
5128 The last-use information is updated by inserting REG_DEAD notes.
5129 (done by find_post_sched_live ()) */
5130
5131 /* Scan all the insns to be scheduled, removing register death notes.
5132 Register death notes end up in DEAD_NOTES.
5133 Recreate the register life information for the end of this basic
5134 block. */
5135
5136 static void
5137 find_pre_sched_live (bb)
5138 int bb;
5139 {
5140 rtx insn, next_tail, head, tail;
5141 int b = BB_TO_BLOCK (bb);
5142
5143 get_block_head_tail (bb, &head, &tail);
5144 COPY_REG_SET (bb_live_regs, basic_block_live_at_start[b]);
5145 next_tail = NEXT_INSN (tail);
5146
5147 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5148 {
5149 rtx prev, next, link;
5150 int reg_weight = 0;
5151
5152 /* Handle register life information. */
5153 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
5154 {
5155 /* See if the register gets born here. */
5156 /* We must check for registers being born before we check for
5157 registers dying. It is possible for a register to be born and
5158 die in the same insn, e.g. reading from a volatile memory
5159 location into an otherwise unused register. Such a register
5160 must be marked as dead after this insn. */
5161 if (GET_CODE (PATTERN (insn)) == SET
5162 || GET_CODE (PATTERN (insn)) == CLOBBER)
5163 {
5164 sched_note_set (b, PATTERN (insn), 0);
5165 reg_weight++;
5166 }
5167
5168 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
5169 {
5170 int j;
5171 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
5172 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
5173 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
5174 {
5175 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
5176 reg_weight++;
5177 }
5178
5179 /* ??? This code is obsolete and should be deleted. It
5180 is harmless though, so we will leave it in for now. */
5181 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
5182 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
5183 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
5184 }
5185
5186 /* Each call cobbers (makes live) all call-clobbered regs
5187 that are not global or fixed. Note that the function-value
5188 reg is a call_clobbered reg. */
5189 if (GET_CODE (insn) == CALL_INSN)
5190 {
5191 int j;
5192 for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
5193 if (call_used_regs[j] && !global_regs[j]
5194 && ! fixed_regs[j])
5195 {
5196 SET_REGNO_REG_SET (bb_live_regs, j);
5197 #if 0
5198 CLEAR_REGNO_REG_SET (bb_dead_regs, j);
5199 #endif
5200 }
5201 }
5202
5203 /* Need to know what registers this insn kills. */
5204 for (prev = 0, link = REG_NOTES (insn); link; link = next)
5205 {
5206 next = XEXP (link, 1);
5207 if ((REG_NOTE_KIND (link) == REG_DEAD
5208 || REG_NOTE_KIND (link) == REG_UNUSED)
5209 /* Verify that the REG_NOTE has a valid value. */
5210 && GET_CODE (XEXP (link, 0)) == REG)
5211 {
5212 register int regno = REGNO (XEXP (link, 0));
5213
5214 reg_weight--;
5215
5216 /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
5217 alone. */
5218 if (REG_NOTE_KIND (link) == REG_DEAD)
5219 {
5220 if (prev)
5221 XEXP (prev, 1) = next;
5222 else
5223 REG_NOTES (insn) = next;
5224 XEXP (link, 1) = dead_notes;
5225 dead_notes = link;
5226 }
5227 else
5228 prev = link;
5229
5230 if (regno < FIRST_PSEUDO_REGISTER)
5231 {
5232 int j = HARD_REGNO_NREGS (regno,
5233 GET_MODE (XEXP (link, 0)));
5234 while (--j >= 0)
5235 {
5236 CLEAR_REGNO_REG_SET (bb_live_regs, regno+j);
5237 }
5238 }
5239 else
5240 {
5241 CLEAR_REGNO_REG_SET (bb_live_regs, regno);
5242 }
5243 }
5244 else
5245 prev = link;
5246 }
5247 }
5248
5249 INSN_REG_WEIGHT (insn) = reg_weight;
5250 }
5251 }
5252
5253 /* Update register life and usage information for block bb
5254 after scheduling. Put register dead notes back in the code. */
5255
5256 static void
5257 find_post_sched_live (bb)
5258 int bb;
5259 {
5260 int sometimes_max;
5261 int j, i;
5262 int b;
5263 rtx insn;
5264 rtx head, tail, prev_head, next_tail;
5265
5266 register struct sometimes *regs_sometimes_live;
5267
5268 b = BB_TO_BLOCK (bb);
5269
5270 /* compute live regs at the end of bb as a function of its successors. */
5271 if (current_nr_blocks > 1)
5272 {
5273 int e;
5274 int first_edge;
5275
5276 first_edge = e = OUT_EDGES (b);
5277 CLEAR_REG_SET (bb_live_regs);
5278
5279 if (e)
5280 do
5281 {
5282 int b_succ;
5283
5284 b_succ = TO_BLOCK (e);
5285 IOR_REG_SET (bb_live_regs, basic_block_live_at_start[b_succ]);
5286 e = NEXT_OUT (e);
5287 }
5288 while (e != first_edge);
5289 }
5290
5291 get_block_head_tail (bb, &head, &tail);
5292 next_tail = NEXT_INSN (tail);
5293 prev_head = PREV_INSN (head);
5294
5295 for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
5296 if (REGNO_REG_SET_P (bb_live_regs, i))
5297 sched_reg_basic_block[i] = REG_BLOCK_GLOBAL;
5298
5299 /* if the block is empty, same regs are alive at its end and its start.
5300 since this is not guaranteed after interblock scheduling, make sure they
5301 are truly identical. */
5302 if (NEXT_INSN (prev_head) == tail
5303 && (GET_RTX_CLASS (GET_CODE (tail)) != 'i'))
5304 {
5305 if (current_nr_blocks > 1)
5306 COPY_REG_SET (basic_block_live_at_start[b], bb_live_regs);
5307
5308 return;
5309 }
5310
5311 b = BB_TO_BLOCK (bb);
5312 current_block_num = b;
5313
5314 /* Keep track of register lives. */
5315 old_live_regs = ALLOCA_REG_SET ();
5316 regs_sometimes_live
5317 = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
5318 sometimes_max = 0;
5319
5320 /* initiate "sometimes" data, starting with registers live at end */
5321 sometimes_max = 0;
5322 COPY_REG_SET (old_live_regs, bb_live_regs);
5323 EXECUTE_IF_SET_IN_REG_SET (bb_live_regs, 0, j,
5324 {
5325 sometimes_max
5326 = new_sometimes_live (regs_sometimes_live,
5327 j, sometimes_max);
5328 });
5329
5330 /* scan insns back, computing regs live info */
5331 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
5332 {
5333 /* First we kill registers set by this insn, and then we
5334 make registers used by this insn live. This is the opposite
5335 order used above because we are traversing the instructions
5336 backwards. */
5337
5338 /* Strictly speaking, we should scan REG_UNUSED notes and make
5339 every register mentioned there live, however, we will just
5340 kill them again immediately below, so there doesn't seem to
5341 be any reason why we bother to do this. */
5342
5343 /* See if this is the last notice we must take of a register. */
5344 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
5345 continue;
5346
5347 if (GET_CODE (PATTERN (insn)) == SET
5348 || GET_CODE (PATTERN (insn)) == CLOBBER)
5349 sched_note_set (b, PATTERN (insn), 1);
5350 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
5351 {
5352 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
5353 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
5354 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
5355 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 1);
5356 }
5357
5358 /* This code keeps life analysis information up to date. */
5359 if (GET_CODE (insn) == CALL_INSN)
5360 {
5361 register struct sometimes *p;
5362
5363 /* A call kills all call used registers that are not
5364 global or fixed, except for those mentioned in the call
5365 pattern which will be made live again later. */
5366 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5367 if (call_used_regs[i] && ! global_regs[i]
5368 && ! fixed_regs[i])
5369 {
5370 CLEAR_REGNO_REG_SET (bb_live_regs, i);
5371 #if 0
5372 SET_REGNO_REG_SET (bb_dead_regs, i);
5373 #endif
5374 }
5375
5376 /* Regs live at the time of a call instruction must not
5377 go in a register clobbered by calls. Record this for
5378 all regs now live. Note that insns which are born or
5379 die in a call do not cross a call, so this must be done
5380 after the killings (above) and before the births
5381 (below). */
5382 p = regs_sometimes_live;
5383 for (i = 0; i < sometimes_max; i++, p++)
5384 if (REGNO_REG_SET_P (bb_live_regs, p->regno))
5385 p->calls_crossed += 1;
5386 }
5387
5388 /* Make every register used live, and add REG_DEAD notes for
5389 registers which were not live before we started. */
5390 attach_deaths_insn (insn);
5391
5392 /* Find registers now made live by that instruction. */
5393 EXECUTE_IF_AND_COMPL_IN_REG_SET (bb_live_regs, old_live_regs, 0, j,
5394 {
5395 sometimes_max
5396 = new_sometimes_live (regs_sometimes_live,
5397 j, sometimes_max);
5398 });
5399 IOR_REG_SET (old_live_regs, bb_live_regs);
5400
5401 /* Count lengths of all regs we are worrying about now,
5402 and handle registers no longer live. */
5403
5404 for (i = 0; i < sometimes_max; i++)
5405 {
5406 register struct sometimes *p = &regs_sometimes_live[i];
5407 int regno = p->regno;
5408
5409 p->live_length += 1;
5410
5411 if (!REGNO_REG_SET_P (bb_live_regs, regno))
5412 {
5413 /* This is the end of one of this register's lifetime
5414 segments. Save the lifetime info collected so far,
5415 and clear its bit in the old_live_regs entry. */
5416 sched_reg_live_length[regno] += p->live_length;
5417 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
5418 CLEAR_REGNO_REG_SET (old_live_regs, p->regno);
5419
5420 /* Delete the reg_sometimes_live entry for this reg by
5421 copying the last entry over top of it. */
5422 *p = regs_sometimes_live[--sometimes_max];
5423 /* ...and decrement i so that this newly copied entry
5424 will be processed. */
5425 i--;
5426 }
5427 }
5428 }
5429
5430 finish_sometimes_live (regs_sometimes_live, sometimes_max);
5431
5432 /* In interblock scheduling, basic_block_live_at_start may have changed. */
5433 if (current_nr_blocks > 1)
5434 COPY_REG_SET (basic_block_live_at_start[b], bb_live_regs);
5435
5436
5437 FREE_REG_SET (old_live_regs);
5438 } /* find_post_sched_live */
5439
5440 /* After scheduling the subroutine, restore information about uses of
5441 registers. */
5442
5443 static void
5444 update_reg_usage ()
5445 {
5446 int regno;
5447
5448 if (n_basic_blocks > 0)
5449 for (regno = FIRST_PSEUDO_REGISTER; regno < max_regno; regno++)
5450 if (REGNO_REG_SET_P (basic_block_live_at_start[0], regno))
5451 sched_reg_basic_block[regno] = REG_BLOCK_GLOBAL;
5452
5453 for (regno = 0; regno < max_regno; regno++)
5454 if (sched_reg_live_length[regno])
5455 {
5456 if (sched_verbose)
5457 {
5458 if (REG_LIVE_LENGTH (regno) > sched_reg_live_length[regno])
5459 fprintf (dump,
5460 ";; register %d life shortened from %d to %d\n",
5461 regno, REG_LIVE_LENGTH (regno),
5462 sched_reg_live_length[regno]);
5463 /* Negative values are special; don't overwrite the current
5464 reg_live_length value if it is negative. */
5465 else if (REG_LIVE_LENGTH (regno) < sched_reg_live_length[regno]
5466 && REG_LIVE_LENGTH (regno) >= 0)
5467 fprintf (dump,
5468 ";; register %d life extended from %d to %d\n",
5469 regno, REG_LIVE_LENGTH (regno),
5470 sched_reg_live_length[regno]);
5471
5472 if (!REG_N_CALLS_CROSSED (regno)
5473 && sched_reg_n_calls_crossed[regno])
5474 fprintf (dump,
5475 ";; register %d now crosses calls\n", regno);
5476 else if (REG_N_CALLS_CROSSED (regno)
5477 && !sched_reg_n_calls_crossed[regno]
5478 && REG_BASIC_BLOCK (regno) != REG_BLOCK_GLOBAL)
5479 fprintf (dump,
5480 ";; register %d no longer crosses calls\n", regno);
5481
5482 if (REG_BASIC_BLOCK (regno) != sched_reg_basic_block[regno]
5483 && sched_reg_basic_block[regno] != REG_BLOCK_UNKNOWN
5484 && REG_BASIC_BLOCK(regno) != REG_BLOCK_UNKNOWN)
5485 fprintf (dump,
5486 ";; register %d changed basic block from %d to %d\n",
5487 regno, REG_BASIC_BLOCK(regno),
5488 sched_reg_basic_block[regno]);
5489
5490 }
5491 /* Negative values are special; don't overwrite the current
5492 reg_live_length value if it is negative. */
5493 if (REG_LIVE_LENGTH (regno) >= 0)
5494 REG_LIVE_LENGTH (regno) = sched_reg_live_length[regno];
5495
5496 if (sched_reg_basic_block[regno] != REG_BLOCK_UNKNOWN
5497 && REG_BASIC_BLOCK(regno) != REG_BLOCK_UNKNOWN)
5498 REG_BASIC_BLOCK(regno) = sched_reg_basic_block[regno];
5499
5500 /* We can't change the value of reg_n_calls_crossed to zero for
5501 pseudos which are live in more than one block.
5502
5503 This is because combine might have made an optimization which
5504 invalidated basic_block_live_at_start and reg_n_calls_crossed,
5505 but it does not update them. If we update reg_n_calls_crossed
5506 here, the two variables are now inconsistent, and this might
5507 confuse the caller-save code into saving a register that doesn't
5508 need to be saved. This is only a problem when we zero calls
5509 crossed for a pseudo live in multiple basic blocks.
5510
5511 Alternatively, we could try to correctly update basic block live
5512 at start here in sched, but that seems complicated.
5513
5514 Note: it is possible that a global register became local, as result
5515 of interblock motion, but will remain marked as a global register. */
5516 if (sched_reg_n_calls_crossed[regno]
5517 || REG_BASIC_BLOCK (regno) != REG_BLOCK_GLOBAL)
5518 REG_N_CALLS_CROSSED (regno) = sched_reg_n_calls_crossed[regno];
5519
5520 }
5521 }
5522
5523 /* Scheduling clock, modified in schedule_block() and queue_to_ready () */
5524 static int clock_var;
5525
5526 /* Move insns that became ready to fire from queue to ready list. */
5527
5528 static int
5529 queue_to_ready (ready, n_ready)
5530 rtx ready[];
5531 int n_ready;
5532 {
5533 rtx insn;
5534 rtx link;
5535
5536 q_ptr = NEXT_Q (q_ptr);
5537
5538 /* Add all pending insns that can be scheduled without stalls to the
5539 ready list. */
5540 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
5541 {
5542
5543 insn = XEXP (link, 0);
5544 q_size -= 1;
5545
5546 if (sched_verbose >= 2)
5547 fprintf (dump, ";;\t\tQ-->Ready: insn %d: ", INSN_UID (insn));
5548
5549 if (sched_verbose >= 2 && INSN_BB (insn) != target_bb)
5550 fprintf (dump, "(b%d) ", INSN_BLOCK (insn));
5551
5552 ready[n_ready++] = insn;
5553 if (sched_verbose >= 2)
5554 fprintf (dump, "moving to ready without stalls\n");
5555 }
5556 insn_queue[q_ptr] = 0;
5557
5558 /* If there are no ready insns, stall until one is ready and add all
5559 of the pending insns at that point to the ready list. */
5560 if (n_ready == 0)
5561 {
5562 register int stalls;
5563
5564 for (stalls = 1; stalls < INSN_QUEUE_SIZE; stalls++)
5565 {
5566 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5567 {
5568 for (; link; link = XEXP (link, 1))
5569 {
5570 insn = XEXP (link, 0);
5571 q_size -= 1;
5572
5573 if (sched_verbose >= 2)
5574 fprintf (dump, ";;\t\tQ-->Ready: insn %d: ", INSN_UID (insn));
5575
5576 if (sched_verbose >= 2 && INSN_BB (insn) != target_bb)
5577 fprintf (dump, "(b%d) ", INSN_BLOCK (insn));
5578
5579 ready[n_ready++] = insn;
5580 if (sched_verbose >= 2)
5581 fprintf (dump, "moving to ready with %d stalls\n", stalls);
5582 }
5583 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
5584
5585 if (n_ready)
5586 break;
5587 }
5588 }
5589
5590 if (sched_verbose && stalls)
5591 visualize_stall_cycles (BB_TO_BLOCK (target_bb), stalls);
5592 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
5593 clock_var += stalls;
5594 }
5595 return n_ready;
5596 }
5597
5598 /* Print the ready list for debugging purposes. Callable from debugger. */
5599
5600 extern void
5601 debug_ready_list (ready, n_ready)
5602 rtx ready[];
5603 int n_ready;
5604 {
5605 int i;
5606
5607 for (i = 0; i < n_ready; i++)
5608 {
5609 fprintf (dump, " %d", INSN_UID (ready[i]));
5610 if (current_nr_blocks > 1 && INSN_BB (ready[i]) != target_bb)
5611 fprintf (dump, "/b%d", INSN_BLOCK (ready[i]));
5612 }
5613 fprintf (dump, "\n");
5614 }
5615
5616 /* Print names of units on which insn can/should execute, for debugging. */
5617
5618 static void
5619 insn_print_units (insn)
5620 rtx insn;
5621 {
5622 int i;
5623 int unit = insn_unit (insn);
5624
5625 if (unit == -1)
5626 fprintf (dump, "none");
5627 else if (unit >= 0)
5628 fprintf (dump, "%s", function_units[unit].name);
5629 else
5630 {
5631 fprintf (dump, "[");
5632 for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
5633 if (unit & 1)
5634 {
5635 fprintf (dump, "%s", function_units[i].name);
5636 if (unit != 1)
5637 fprintf (dump, " ");
5638 }
5639 fprintf (dump, "]");
5640 }
5641 }
5642
5643 /* MAX_VISUAL_LINES is the maximum number of lines in visualization table
5644 of a basic block. If more lines are needed, table is splitted to two.
5645 n_visual_lines is the number of lines printed so far for a block.
5646 visual_tbl contains the block visualization info.
5647 vis_no_unit holds insns in a cycle that are not mapped to any unit. */
5648 #define MAX_VISUAL_LINES 100
5649 #define INSN_LEN 30
5650 int n_visual_lines;
5651 char *visual_tbl;
5652 int n_vis_no_unit;
5653 rtx vis_no_unit[10];
5654
5655 /* Finds units that are in use in this fuction. Required only
5656 for visualization. */
5657
5658 static void
5659 init_target_units ()
5660 {
5661 rtx insn;
5662 int unit;
5663
5664 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
5665 {
5666 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
5667 continue;
5668
5669 unit = insn_unit (insn);
5670
5671 if (unit < 0)
5672 target_units |= ~unit;
5673 else
5674 target_units |= (1 << unit);
5675 }
5676 }
5677
5678 /* Return the length of the visualization table */
5679
5680 static int
5681 get_visual_tbl_length ()
5682 {
5683 int unit, i;
5684 int n, n1;
5685 char *s;
5686
5687 /* compute length of one field in line */
5688 s = (char *) alloca (INSN_LEN + 5);
5689 sprintf (s, " %33s", "uname");
5690 n1 = strlen (s);
5691
5692 /* compute length of one line */
5693 n = strlen (";; ");
5694 n += n1;
5695 for (unit = 0; unit < FUNCTION_UNITS_SIZE; unit++)
5696 if (function_units[unit].bitmask & target_units)
5697 for (i = 0; i < function_units[unit].multiplicity; i++)
5698 n += n1;
5699 n += n1;
5700 n += strlen ("\n") + 2;
5701
5702 /* compute length of visualization string */
5703 return (MAX_VISUAL_LINES * n);
5704 }
5705
5706 /* Init block visualization debugging info */
5707
5708 static void
5709 init_block_visualization ()
5710 {
5711 strcpy (visual_tbl, "");
5712 n_visual_lines = 0;
5713 n_vis_no_unit = 0;
5714 }
5715
5716 #define BUF_LEN 256
5717
5718 /* This recognizes rtx, I classified as expressions. These are always */
5719 /* represent some action on values or results of other expression, */
5720 /* that may be stored in objects representing values. */
5721
5722 static void
5723 print_exp (buf, x, verbose)
5724 char *buf;
5725 rtx x;
5726 int verbose;
5727 {
5728 char t1[BUF_LEN], t2[BUF_LEN], t3[BUF_LEN];
5729
5730 switch (GET_CODE (x))
5731 {
5732 case PLUS:
5733 print_value (t1, XEXP (x, 0), verbose);
5734 print_value (t2, XEXP (x, 1), verbose);
5735 sprintf (buf, "%s+%s", t1, t2);
5736 break;
5737 case LO_SUM:
5738 print_value (t1, XEXP (x, 0), verbose);
5739 print_value (t2, XEXP (x, 1), verbose);
5740 sprintf (buf, "%sl+%s", t1, t2);
5741 break;
5742 case MINUS:
5743 print_value (t1, XEXP (x, 0), verbose);
5744 print_value (t2, XEXP (x, 1), verbose);
5745 sprintf (buf, "%s-%s", t1, t2);
5746 break;
5747 case COMPARE:
5748 print_value (t1, XEXP (x, 0), verbose);
5749 print_value (t2, XEXP (x, 1), verbose);
5750 sprintf (buf, "%s??%s", t1, t2);
5751 break;
5752 case NEG:
5753 print_value (t1, XEXP (x, 0), verbose);
5754 sprintf (buf, "-%s", t1);
5755 break;
5756 case MULT:
5757 print_value (t1, XEXP (x, 0), verbose);
5758 print_value (t2, XEXP (x, 1), verbose);
5759 sprintf (buf, "%s*%s", t1, t2);
5760 break;
5761 case DIV:
5762 print_value (t1, XEXP (x, 0), verbose);
5763 print_value (t2, XEXP (x, 1), verbose);
5764 sprintf (buf, "%s/%s", t1, t2);
5765 break;
5766 case UDIV:
5767 print_value (t1, XEXP (x, 0), verbose);
5768 print_value (t2, XEXP (x, 1), verbose);
5769 sprintf (buf, "%su/%s", t1, t2);
5770 break;
5771 case MOD:
5772 print_value (t1, XEXP (x, 0), verbose);
5773 print_value (t2, XEXP (x, 1), verbose);
5774 sprintf (buf, "%s%%%s", t1, t2);
5775 break;
5776 case UMOD:
5777 print_value (t1, XEXP (x, 0), verbose);
5778 print_value (t2, XEXP (x, 1), verbose);
5779 sprintf (buf, "%su%%%s", t1, t2);
5780 break;
5781 case SMIN:
5782 print_value (t1, XEXP (x, 0), verbose);
5783 print_value (t2, XEXP (x, 1), verbose);
5784 sprintf (buf, "smin (%s, %s)", t1, t2);
5785 break;
5786 case SMAX:
5787 print_value (t1, XEXP (x, 0), verbose);
5788 print_value (t2, XEXP (x, 1), verbose);
5789 sprintf (buf, "smax(%s,%s)", t1, t2);
5790 break;
5791 case UMIN:
5792 print_value (t1, XEXP (x, 0), verbose);
5793 print_value (t2, XEXP (x, 1), verbose);
5794 sprintf (buf, "umin (%s, %s)", t1, t2);
5795 break;
5796 case UMAX:
5797 print_value (t1, XEXP (x, 0), verbose);
5798 print_value (t2, XEXP (x, 1), verbose);
5799 sprintf (buf, "umax(%s,%s)", t1, t2);
5800 break;
5801 case NOT:
5802 print_value (t1, XEXP (x, 0), verbose);
5803 sprintf (buf, "!%s", t1);
5804 break;
5805 case AND:
5806 print_value (t1, XEXP (x, 0), verbose);
5807 print_value (t2, XEXP (x, 1), verbose);
5808 sprintf (buf, "%s&%s", t1, t2);
5809 break;
5810 case IOR:
5811 print_value (t1, XEXP (x, 0), verbose);
5812 print_value (t2, XEXP (x, 1), verbose);
5813 sprintf (buf, "%s|%s", t1, t2);
5814 break;
5815 case XOR:
5816 print_value (t1, XEXP (x, 0), verbose);
5817 print_value (t2, XEXP (x, 1), verbose);
5818 sprintf (buf, "%s^%s", t1, t2);
5819 break;
5820 case ASHIFT:
5821 print_value (t1, XEXP (x, 0), verbose);
5822 print_value (t2, XEXP (x, 1), verbose);
5823 sprintf (buf, "%s<<%s", t1, t2);
5824 break;
5825 case LSHIFTRT:
5826 print_value (t1, XEXP (x, 0), verbose);
5827 print_value (t2, XEXP (x, 1), verbose);
5828 sprintf (buf, "%s0>%s", t1, t2);
5829 break;
5830 case ASHIFTRT:
5831 print_value (t1, XEXP (x, 0), verbose);
5832 print_value (t2, XEXP (x, 1), verbose);
5833 sprintf (buf, "%s>>%s", t1, t2);
5834 break;
5835 case ROTATE:
5836 print_value (t1, XEXP (x, 0), verbose);
5837 print_value (t2, XEXP (x, 1), verbose);
5838 sprintf (buf, "%s<-<%s", t1, t2);
5839 break;
5840 case ROTATERT:
5841 print_value (t1, XEXP (x, 0), verbose);
5842 print_value (t2, XEXP (x, 1), verbose);
5843 sprintf (buf, "%s>->%s", t1, t2);
5844 break;
5845 case ABS:
5846 print_value (t1, XEXP (x, 0), verbose);
5847 sprintf (buf, "abs(%s)", t1);
5848 break;
5849 case SQRT:
5850 print_value (t1, XEXP (x, 0), verbose);
5851 sprintf (buf, "sqrt(%s)", t1);
5852 break;
5853 case FFS:
5854 print_value (t1, XEXP (x, 0), verbose);
5855 sprintf (buf, "ffs(%s)", t1);
5856 break;
5857 case EQ:
5858 print_value (t1, XEXP (x, 0), verbose);
5859 print_value (t2, XEXP (x, 1), verbose);
5860 sprintf (buf, "%s == %s", t1, t2);
5861 break;
5862 case NE:
5863 print_value (t1, XEXP (x, 0), verbose);
5864 print_value (t2, XEXP (x, 1), verbose);
5865 sprintf (buf, "%s!=%s", t1, t2);
5866 break;
5867 case GT:
5868 print_value (t1, XEXP (x, 0), verbose);
5869 print_value (t2, XEXP (x, 1), verbose);
5870 sprintf (buf, "%s>%s", t1, t2);
5871 break;
5872 case GTU:
5873 print_value (t1, XEXP (x, 0), verbose);
5874 print_value (t2, XEXP (x, 1), verbose);
5875 sprintf (buf, "%s>u%s", t1, t2);
5876 break;
5877 case LT:
5878 print_value (t1, XEXP (x, 0), verbose);
5879 print_value (t2, XEXP (x, 1), verbose);
5880 sprintf (buf, "%s<%s", t1, t2);
5881 break;
5882 case LTU:
5883 print_value (t1, XEXP (x, 0), verbose);
5884 print_value (t2, XEXP (x, 1), verbose);
5885 sprintf (buf, "%s<u%s", t1, t2);
5886 break;
5887 case GE:
5888 print_value (t1, XEXP (x, 0), verbose);
5889 print_value (t2, XEXP (x, 1), verbose);
5890 sprintf (buf, "%s>=%s", t1, t2);
5891 break;
5892 case GEU:
5893 print_value (t1, XEXP (x, 0), verbose);
5894 print_value (t2, XEXP (x, 1), verbose);
5895 sprintf (buf, "%s>=u%s", t1, t2);
5896 break;
5897 case LE:
5898 print_value (t1, XEXP (x, 0), verbose);
5899 print_value (t2, XEXP (x, 1), verbose);
5900 sprintf (buf, "%s<=%s", t1, t2);
5901 break;
5902 case LEU:
5903 print_value (t1, XEXP (x, 0), verbose);
5904 print_value (t2, XEXP (x, 1), verbose);
5905 sprintf (buf, "%s<=u%s", t1, t2);
5906 break;
5907 case SIGN_EXTRACT:
5908 print_value (t1, XEXP (x, 0), verbose);
5909 print_value (t2, XEXP (x, 1), verbose);
5910 print_value (t3, XEXP (x, 2), verbose);
5911 if (verbose)
5912 sprintf (buf, "sign_extract(%s,%s,%s)", t1, t2, t3);
5913 else
5914 sprintf (buf, "sxt(%s,%s,%s)", t1, t2, t3);
5915 break;
5916 case ZERO_EXTRACT:
5917 print_value (t1, XEXP (x, 0), verbose);
5918 print_value (t2, XEXP (x, 1), verbose);
5919 print_value (t3, XEXP (x, 2), verbose);
5920 if (verbose)
5921 sprintf (buf, "zero_extract(%s,%s,%s)", t1, t2, t3);
5922 else
5923 sprintf (buf, "zxt(%s,%s,%s)", t1, t2, t3);
5924 break;
5925 case SIGN_EXTEND:
5926 print_value (t1, XEXP (x, 0), verbose);
5927 if (verbose)
5928 sprintf (buf, "sign_extend(%s)", t1);
5929 else
5930 sprintf (buf, "sxn(%s)", t1);
5931 break;
5932 case ZERO_EXTEND:
5933 print_value (t1, XEXP (x, 0), verbose);
5934 if (verbose)
5935 sprintf (buf, "zero_extend(%s)", t1);
5936 else
5937 sprintf (buf, "zxn(%s)", t1);
5938 break;
5939 case FLOAT_EXTEND:
5940 print_value (t1, XEXP (x, 0), verbose);
5941 if (verbose)
5942 sprintf (buf, "float_extend(%s)", t1);
5943 else
5944 sprintf (buf, "fxn(%s)", t1);
5945 break;
5946 case TRUNCATE:
5947 print_value (t1, XEXP (x, 0), verbose);
5948 if (verbose)
5949 sprintf (buf, "trunc(%s)", t1);
5950 else
5951 sprintf (buf, "trn(%s)", t1);
5952 break;
5953 case FLOAT_TRUNCATE:
5954 print_value (t1, XEXP (x, 0), verbose);
5955 if (verbose)
5956 sprintf (buf, "float_trunc(%s)", t1);
5957 else
5958 sprintf (buf, "ftr(%s)", t1);
5959 break;
5960 case FLOAT:
5961 print_value (t1, XEXP (x, 0), verbose);
5962 if (verbose)
5963 sprintf (buf, "float(%s)", t1);
5964 else
5965 sprintf (buf, "flt(%s)", t1);
5966 break;
5967 case UNSIGNED_FLOAT:
5968 print_value (t1, XEXP (x, 0), verbose);
5969 if (verbose)
5970 sprintf (buf, "uns_float(%s)", t1);
5971 else
5972 sprintf (buf, "ufl(%s)", t1);
5973 break;
5974 case FIX:
5975 print_value (t1, XEXP (x, 0), verbose);
5976 sprintf (buf, "fix(%s)", t1);
5977 break;
5978 case UNSIGNED_FIX:
5979 print_value (t1, XEXP (x, 0), verbose);
5980 if (verbose)
5981 sprintf (buf, "uns_fix(%s)", t1);
5982 else
5983 sprintf (buf, "ufx(%s)", t1);
5984 break;
5985 case PRE_DEC:
5986 print_value (t1, XEXP (x, 0), verbose);
5987 sprintf (buf, "--%s", t1);
5988 break;
5989 case PRE_INC:
5990 print_value (t1, XEXP (x, 0), verbose);
5991 sprintf (buf, "++%s", t1);
5992 break;
5993 case POST_DEC:
5994 print_value (t1, XEXP (x, 0), verbose);
5995 sprintf (buf, "%s--", t1);
5996 break;
5997 case POST_INC:
5998 print_value (t1, XEXP (x, 0), verbose);
5999 sprintf (buf, "%s++", t1);
6000 break;
6001 case CALL:
6002 print_value (t1, XEXP (x, 0), verbose);
6003 if (verbose)
6004 {
6005 print_value (t2, XEXP (x, 1), verbose);
6006 sprintf (buf, "call %s argc:%s", t1, t2);
6007 }
6008 else
6009 sprintf (buf, "call %s", t1);
6010 break;
6011 case IF_THEN_ELSE:
6012 print_exp (t1, XEXP (x, 0), verbose);
6013 print_value (t2, XEXP (x, 1), verbose);
6014 print_value (t3, XEXP (x, 2), verbose);
6015 sprintf (buf, "{(%s)?%s:%s}", t1, t2, t3);
6016 break;
6017 case TRAP_IF:
6018 print_value (t1, TRAP_CONDITION (x), verbose);
6019 sprintf (buf, "trap_if %s", t1);
6020 break;
6021 case UNSPEC:
6022 {
6023 int i;
6024
6025 sprintf (t1, "unspec{");
6026 for (i = 0; i < XVECLEN (x, 0); i++)
6027 {
6028 print_pattern (t2, XVECEXP (x, 0, i), verbose);
6029 sprintf (t3, "%s%s;", t1, t2);
6030 strcpy (t1, t3);
6031 }
6032 sprintf (buf, "%s}", t1);
6033 }
6034 break;
6035 case UNSPEC_VOLATILE:
6036 {
6037 int i;
6038
6039 sprintf (t1, "unspec/v{");
6040 for (i = 0; i < XVECLEN (x, 0); i++)
6041 {
6042 print_pattern (t2, XVECEXP (x, 0, i), verbose);
6043 sprintf (t3, "%s%s;", t1, t2);
6044 strcpy (t1, t3);
6045 }
6046 sprintf (buf, "%s}", t1);
6047 }
6048 break;
6049 default:
6050 /* if (verbose) debug_rtx (x); else sprintf (buf, "$$$"); */
6051 sprintf (buf, "$$$");
6052 }
6053 } /* print_exp */
6054
6055 /* Prints rtxes, i customly classified as values. They're constants, */
6056 /* registers, labels, symbols and memory accesses. */
6057
6058 static void
6059 print_value (buf, x, verbose)
6060 char *buf;
6061 rtx x;
6062 int verbose;
6063 {
6064 char t[BUF_LEN];
6065
6066 switch (GET_CODE (x))
6067 {
6068 case CONST_INT:
6069 sprintf (buf, "%Xh", INTVAL (x));
6070 break;
6071 case CONST_DOUBLE:
6072 print_value (t, XEXP (x, 0), verbose);
6073 sprintf (buf, "<%s>", t);
6074 break;
6075 case CONST_STRING:
6076 sprintf (buf, "\"%s\"", (char *) XEXP (x, 0));
6077 break;
6078 case SYMBOL_REF:
6079 sprintf (buf, "`%s'", (char *) XEXP (x, 0));
6080 break;
6081 case LABEL_REF:
6082 sprintf (buf, "L%d", INSN_UID (XEXP (x, 0)));
6083 break;
6084 case CONST:
6085 print_value (buf, XEXP (x, 0), verbose);
6086 break;
6087 case HIGH:
6088 print_value (buf, XEXP (x, 0), verbose);
6089 break;
6090 case REG:
6091 if (GET_MODE (x) == SFmode
6092 || GET_MODE (x) == DFmode
6093 || GET_MODE (x) == XFmode
6094 || GET_MODE (x) == TFmode)
6095 strcpy (t, "fr");
6096 else
6097 strcpy (t, "r");
6098 sprintf (buf, "%s%d", t, (int) XEXP (x, 0));
6099 break;
6100 case SUBREG:
6101 print_value (t, XEXP (x, 0), verbose);
6102 sprintf (buf, "%s#%d", t, (int) XEXP (x, 1));
6103 break;
6104 case SCRATCH:
6105 sprintf (buf, "scratch");
6106 break;
6107 case CC0:
6108 sprintf (buf, "cc0");
6109 break;
6110 case PC:
6111 sprintf (buf, "pc");
6112 break;
6113 case MEM:
6114 print_value (t, XEXP (x, 0), verbose);
6115 sprintf (buf, "[%s]", t);
6116 break;
6117 default:
6118 print_exp (buf, x, verbose);
6119 }
6120 } /* print_value */
6121
6122 /* The next step in insn detalization, its pattern recognition */
6123
6124 static void
6125 print_pattern (buf, x, verbose)
6126 char *buf;
6127 rtx x;
6128 int verbose;
6129 {
6130 char t1[BUF_LEN], t2[BUF_LEN], t3[BUF_LEN];
6131
6132 switch (GET_CODE (x))
6133 {
6134 case SET:
6135 print_value (t1, SET_DEST (x), verbose);
6136 print_value (t2, SET_SRC (x), verbose);
6137 sprintf (buf, "%s=%s", t1, t2);
6138 break;
6139 case RETURN:
6140 sprintf (buf, "return");
6141 break;
6142 case CALL:
6143 print_exp (buf, x, verbose);
6144 break;
6145 case CLOBBER:
6146 print_value (t1, XEXP (x, 0), verbose);
6147 sprintf (buf, "clobber %s", t1);
6148 break;
6149 case USE:
6150 print_value (t1, XEXP (x, 0), verbose);
6151 sprintf (buf, "use %s", t1);
6152 break;
6153 case PARALLEL:
6154 {
6155 int i;
6156
6157 sprintf (t1, "{");
6158 for (i = 0; i < XVECLEN (x, 0); i++)
6159 {
6160 print_pattern (t2, XVECEXP (x, 0, i), verbose);
6161 sprintf (t3, "%s%s;", t1, t2);
6162 strcpy (t1, t3);
6163 }
6164 sprintf (buf, "%s}", t1);
6165 }
6166 break;
6167 case SEQUENCE:
6168 {
6169 int i;
6170
6171 sprintf (t1, "%%{");
6172 for (i = 0; i < XVECLEN (x, 0); i++)
6173 {
6174 print_insn (t2, XVECEXP (x, 0, i), verbose);
6175 sprintf (t3, "%s%s;", t1, t2);
6176 strcpy (t1, t3);
6177 }
6178 sprintf (buf, "%s%%}", t1);
6179 }
6180 break;
6181 case ASM_INPUT:
6182 sprintf (buf, "asm {%s}", XEXP (x, 0));
6183 break;
6184 case ADDR_VEC:
6185 break;
6186 case ADDR_DIFF_VEC:
6187 print_value (buf, XEXP (x, 0), verbose);
6188 break;
6189 case TRAP_IF:
6190 print_value (t1, TRAP_CONDITION (x), verbose);
6191 sprintf (buf, "trap_if %s", t1);
6192 break;
6193 case UNSPEC:
6194 {
6195 int i;
6196
6197 sprintf (t1, "unspec{");
6198 for (i = 0; i < XVECLEN (x, 0); i++)
6199 {
6200 print_pattern (t2, XVECEXP (x, 0, i), verbose);
6201 sprintf (t3, "%s%s;", t1, t2);
6202 strcpy (t1, t3);
6203 }
6204 sprintf (buf, "%s}", t1);
6205 }
6206 break;
6207 case UNSPEC_VOLATILE:
6208 {
6209 int i;
6210
6211 sprintf (t1, "unspec/v{");
6212 for (i = 0; i < XVECLEN (x, 0); i++)
6213 {
6214 print_pattern (t2, XVECEXP (x, 0, i), verbose);
6215 sprintf (t3, "%s%s;", t1, t2);
6216 strcpy (t1, t3);
6217 }
6218 sprintf (buf, "%s}", t1);
6219 }
6220 break;
6221 default:
6222 print_value (buf, x, verbose);
6223 }
6224 } /* print_pattern */
6225
6226 /* This is the main function in rtl visualization mechanism. It
6227 accepts an rtx and tries to recognize it as an insn, then prints it
6228 properly in human readable form, resembling assembler mnemonics. */
6229 /* For every insn it prints its UID and BB the insn belongs */
6230 /* too. (probably the last "option" should be extended somehow, since */
6231 /* it depends now on sched.c inner variables ...) */
6232
6233 static void
6234 print_insn (buf, x, verbose)
6235 char *buf;
6236 rtx x;
6237 int verbose;
6238 {
6239 char t[BUF_LEN];
6240 rtx insn = x;
6241
6242 switch (GET_CODE (x))
6243 {
6244 case INSN:
6245 print_pattern (t, PATTERN (x), verbose);
6246 if (verbose)
6247 sprintf (buf, "b%d: i% 4d: %s", INSN_BB (x),
6248 INSN_UID (x), t);
6249 else
6250 sprintf (buf, "%-4d %s", INSN_UID (x), t);
6251 break;
6252 case JUMP_INSN:
6253 print_pattern (t, PATTERN (x), verbose);
6254 if (verbose)
6255 sprintf (buf, "b%d: i% 4d: jump %s", INSN_BB (x),
6256 INSN_UID (x), t);
6257 else
6258 sprintf (buf, "%-4d %s", INSN_UID (x), t);
6259 break;
6260 case CALL_INSN:
6261 x = PATTERN (insn);
6262 if (GET_CODE (x) == PARALLEL)
6263 {
6264 x = XVECEXP (x, 0, 0);
6265 print_pattern (t, x, verbose);
6266 }
6267 else
6268 strcpy (t, "call <...>");
6269 if (verbose)
6270 sprintf (buf, "b%d: i% 4d: %s", INSN_BB (insn),
6271 INSN_UID (insn), t);
6272 else
6273 sprintf (buf, "%-4d %s", INSN_UID (insn), t);
6274 break;
6275 case CODE_LABEL:
6276 sprintf (buf, "L%d:", INSN_UID (x));
6277 break;
6278 case BARRIER:
6279 sprintf (buf, "i% 4d: barrier", INSN_UID (x));
6280 break;
6281 case NOTE:
6282 if (NOTE_LINE_NUMBER (x) > 0)
6283 sprintf (buf, "%4d note \"%s\" %d", INSN_UID (x),
6284 NOTE_SOURCE_FILE (x), NOTE_LINE_NUMBER (x));
6285 else
6286 sprintf (buf, "%4d %s", INSN_UID (x),
6287 GET_NOTE_INSN_NAME (NOTE_LINE_NUMBER (x)));
6288 break;
6289 default:
6290 if (verbose)
6291 {
6292 sprintf (buf, "Not an INSN at all\n");
6293 debug_rtx (x);
6294 }
6295 else
6296 sprintf (buf, "i%-4d <What?>", INSN_UID (x));
6297 }
6298 } /* print_insn */
6299
6300 void
6301 print_insn_chain (rtx_first)
6302 rtx rtx_first;
6303 {
6304 register rtx tmp_rtx;
6305 char str[BUF_LEN];
6306
6307 strcpy (str, "(nil)\n");
6308 if (rtx_first != 0)
6309 switch (GET_CODE (rtx_first))
6310 {
6311 case INSN:
6312 case JUMP_INSN:
6313 case CALL_INSN:
6314 case NOTE:
6315 case CODE_LABEL:
6316 case BARRIER:
6317 for (tmp_rtx = rtx_first; tmp_rtx != NULL;
6318 tmp_rtx = NEXT_INSN (tmp_rtx))
6319 {
6320 print_insn (str, tmp_rtx, 0);
6321 printf ("%s\n", str);
6322 }
6323 break;
6324 default:
6325 print_insn (str, rtx_first, 0);
6326 printf ("%s\n", str);
6327 }
6328 } /* print_insn_chain */
6329
6330 /* Print visualization debugging info */
6331
6332 static void
6333 print_block_visualization (b, s)
6334 int b;
6335 char *s;
6336 {
6337 int unit, i;
6338 char *names; /* names of units */
6339 char *delim; /* separation line */
6340
6341 /* print header */
6342 fprintf (dump, "\n;; ==================== scheduling visualization for block %d %s \n", b, s);
6343
6344 /* Print names of units */
6345 names = (char *) alloca (256);
6346 delim = (char *) alloca (256);
6347 sprintf (names, ";; %-8s", "clock");
6348 sprintf (delim, ";; %-8s", "=====");
6349 for (unit = 0; unit < FUNCTION_UNITS_SIZE; unit++)
6350 if (function_units[unit].bitmask & target_units)
6351 for (i = 0; i < function_units[unit].multiplicity; i++)
6352 {
6353 sprintf (names + strlen (names), " %-33s", function_units[unit].name);
6354 sprintf (delim + strlen (delim), " %-33s", "==============================");
6355 }
6356 sprintf (names + strlen (names), " %-8s", "no-unit");
6357 sprintf (delim + strlen (delim), " %-8s", "=======");
6358 fprintf (dump, "\n%s\n%s\n", names, delim);
6359
6360 /* Print insns in each cycle */
6361 fprintf (dump, "%s\n", visual_tbl);
6362 }
6363
6364 /* Print insns in the 'no_unit' column of visualization */
6365
6366 static void
6367 visualize_no_unit (insn)
6368 rtx insn;
6369 {
6370 vis_no_unit[n_vis_no_unit] = insn;
6371 n_vis_no_unit++;
6372 }
6373
6374 /* Print insns scheduled in clock, for visualization. */
6375
6376 static void
6377 visualize_scheduled_insns (b, clock)
6378 int b, clock;
6379 {
6380 int i, unit;
6381
6382 /* if no more room, split table into two */
6383 if (n_visual_lines >= MAX_VISUAL_LINES)
6384 {
6385 print_block_visualization (b, "(incomplete)");
6386 init_block_visualization ();
6387 }
6388
6389 n_visual_lines++;
6390
6391 sprintf (visual_tbl + strlen (visual_tbl), ";; %-8d", clock);
6392 for (unit = 0; unit < FUNCTION_UNITS_SIZE; unit++)
6393 if (function_units[unit].bitmask & target_units)
6394 for (i = 0; i < function_units[unit].multiplicity; i++)
6395 {
6396 int instance = unit + i * FUNCTION_UNITS_SIZE;
6397 rtx insn = unit_last_insn[instance];
6398
6399 /* print insns that still keep the unit busy */
6400 if (insn &&
6401 actual_hazard_this_instance (unit, instance, insn, clock, 0))
6402 {
6403 char str[BUF_LEN];
6404 print_insn (str, insn, 0);
6405 str[INSN_LEN] = '\0';
6406 sprintf (visual_tbl + strlen (visual_tbl), " %-33s", str);
6407 }
6408 else
6409 sprintf (visual_tbl + strlen (visual_tbl), " %-33s", "------------------------------");
6410 }
6411
6412 /* print insns that are not assigned to any unit */
6413 for (i = 0; i < n_vis_no_unit; i++)
6414 sprintf (visual_tbl + strlen (visual_tbl), " %-8d",
6415 INSN_UID (vis_no_unit[i]));
6416 n_vis_no_unit = 0;
6417
6418 sprintf (visual_tbl + strlen (visual_tbl), "\n");
6419 }
6420
6421 /* Print stalled cycles */
6422
6423 static void
6424 visualize_stall_cycles (b, stalls)
6425 int b, stalls;
6426 {
6427 int i;
6428
6429 /* if no more room, split table into two */
6430 if (n_visual_lines >= MAX_VISUAL_LINES)
6431 {
6432 print_block_visualization (b, "(incomplete)");
6433 init_block_visualization ();
6434 }
6435
6436 n_visual_lines++;
6437
6438 sprintf (visual_tbl + strlen (visual_tbl), ";; ");
6439 for (i = 0; i < stalls; i++)
6440 sprintf (visual_tbl + strlen (visual_tbl), ".");
6441 sprintf (visual_tbl + strlen (visual_tbl), "\n");
6442 }
6443
6444 /* move_insn1: Remove INSN from insn chain, and link it after LAST insn */
6445
6446 static rtx
6447 move_insn1 (insn, last)
6448 rtx insn, last;
6449 {
6450 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
6451 PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
6452
6453 NEXT_INSN (insn) = NEXT_INSN (last);
6454 PREV_INSN (NEXT_INSN (last)) = insn;
6455
6456 NEXT_INSN (last) = insn;
6457 PREV_INSN (insn) = last;
6458
6459 return insn;
6460 }
6461
6462 /* Search INSN for fake REG_DEAD note pairs for NOTE_INSN_SETJMP,
6463 NOTE_INSN_{LOOP,EHREGION}_{BEG,END}; and convert them back into
6464 NOTEs. The REG_DEAD note following first one is contains the saved
6465 value for NOTE_BLOCK_NUMBER which is useful for
6466 NOTE_INSN_EH_REGION_{BEG,END} NOTEs. LAST is the last instruction
6467 output by the instruction scheduler. Return the new value of LAST. */
6468
6469 static rtx
6470 reemit_notes (insn, last)
6471 rtx insn;
6472 rtx last;
6473 {
6474 rtx note, retval;
6475
6476 retval = last;
6477 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
6478 {
6479 if (REG_NOTE_KIND (note) == REG_DEAD
6480 && GET_CODE (XEXP (note, 0)) == CONST_INT)
6481 {
6482 if (INTVAL (XEXP (note, 0)) == NOTE_INSN_SETJMP)
6483 {
6484 retval = emit_note_after (INTVAL (XEXP (note, 0)), insn);
6485 CONST_CALL_P (retval) = CONST_CALL_P (note);
6486 remove_note (insn, note);
6487 note = XEXP (note, 1);
6488 }
6489 else
6490 {
6491 last = emit_note_before (INTVAL (XEXP (note, 0)), last);
6492 remove_note (insn, note);
6493 note = XEXP (note, 1);
6494 NOTE_BLOCK_NUMBER (last) = INTVAL (XEXP (note, 0));
6495 }
6496 remove_note (insn, note);
6497 }
6498 }
6499 return retval;
6500 }
6501
6502 /* Move INSN, and all insns which should be issued before it,
6503 due to SCHED_GROUP_P flag. Reemit notes if needed. */
6504
6505 static rtx
6506 move_insn (insn, last)
6507 rtx insn, last;
6508 {
6509 rtx new_last = insn;
6510
6511 while (SCHED_GROUP_P (insn))
6512 {
6513 rtx prev = PREV_INSN (insn);
6514 move_insn1 (insn, last);
6515 insn = prev;
6516 }
6517
6518 move_insn1 (insn, last);
6519 return reemit_notes (new_last, new_last);
6520 }
6521
6522 /* Return an insn which represents a SCHED_GROUP, which is
6523 the last insn in the group. */
6524
6525 static rtx
6526 group_leader (insn)
6527 rtx insn;
6528 {
6529 rtx prev;
6530
6531 do
6532 {
6533 prev = insn;
6534 insn = next_nonnote_insn (insn);
6535 }
6536 while (insn && SCHED_GROUP_P (insn) && (GET_CODE (insn) != CODE_LABEL));
6537
6538 return prev;
6539 }
6540
6541 /* Use forward list scheduling to rearrange insns of block BB in region RGN,
6542 possibly bringing insns from subsequent blocks in the same region.
6543 Return number of insns scheduled. */
6544
6545 static int
6546 schedule_block (bb, rgn, rgn_n_insns)
6547 int bb;
6548 int rgn;
6549 int rgn_n_insns;
6550 {
6551 /* Local variables. */
6552 rtx insn, last;
6553 rtx *ready;
6554 int i;
6555 int n_ready = 0;
6556 int can_issue_more;
6557
6558 /* flow block of this bb */
6559 int b = BB_TO_BLOCK (bb);
6560
6561 /* target_n_insns == number of insns in b before scheduling starts.
6562 sched_target_n_insns == how many of b's insns were scheduled.
6563 sched_n_insns == how many insns were scheduled in b */
6564 int target_n_insns = 0;
6565 int sched_target_n_insns = 0;
6566 int sched_n_insns = 0;
6567
6568 #define NEED_NOTHING 0
6569 #define NEED_HEAD 1
6570 #define NEED_TAIL 2
6571 int new_needs;
6572
6573 /* head/tail info for this block */
6574 rtx prev_head;
6575 rtx next_tail;
6576 rtx head;
6577 rtx tail;
6578 int bb_src;
6579
6580 /* At the start of a function, before reload has run, don't delay getting
6581 parameters from hard registers into pseudo registers. */
6582 if (reload_completed == 0 && b == 0)
6583 {
6584 head = basic_block_head[b];
6585 tail = basic_block_end[b];
6586
6587 while (head != tail
6588 && GET_CODE (head) == NOTE
6589 && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
6590 head = NEXT_INSN (head);
6591
6592 while (head != tail
6593 && GET_CODE (head) == INSN
6594 && GET_CODE (PATTERN (head)) == SET)
6595 {
6596 rtx link;
6597 rtx src = SET_SRC (PATTERN (head));
6598 while (GET_CODE (src) == SUBREG
6599 || GET_CODE (src) == SIGN_EXTEND
6600 || GET_CODE (src) == ZERO_EXTEND
6601 || GET_CODE (src) == SIGN_EXTRACT
6602 || GET_CODE (src) == ZERO_EXTRACT)
6603 src = XEXP (src, 0);
6604 if (GET_CODE (src) != REG
6605 || REGNO (src) >= FIRST_PSEUDO_REGISTER)
6606 break;
6607
6608 for (link = INSN_DEPEND (head); link != 0; link = XEXP (link, 1))
6609 INSN_DEP_COUNT (XEXP (link, 0)) -= 1;
6610
6611 if (GET_CODE (head) != NOTE)
6612 sched_n_insns++;
6613
6614 head = NEXT_INSN (head);
6615 }
6616
6617 /* Don't include any notes or labels at the beginning of the
6618 basic block, or notes at the ends of basic blocks. */
6619 while (head != tail)
6620 {
6621 if (GET_CODE (head) == NOTE)
6622 head = NEXT_INSN (head);
6623 else if (GET_CODE (tail) == NOTE)
6624 tail = PREV_INSN (tail);
6625 else if (GET_CODE (head) == CODE_LABEL)
6626 head = NEXT_INSN (head);
6627 else
6628 break;
6629 }
6630 }
6631 else
6632 get_block_head_tail (bb, &head, &tail);
6633
6634 next_tail = NEXT_INSN (tail);
6635 prev_head = PREV_INSN (head);
6636
6637 /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
6638 to schedule this block. */
6639 if (head == tail
6640 && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
6641 return (sched_n_insns);
6642
6643 /* debug info */
6644 if (sched_verbose)
6645 {
6646 fprintf (dump, ";; ======================================================\n");
6647 fprintf (dump,
6648 ";; -- basic block %d from %d to %d -- %s reload\n",
6649 b, INSN_UID (basic_block_head[b]),
6650 INSN_UID (basic_block_end[b]),
6651 (reload_completed ? "after" : "before"));
6652 fprintf (dump, ";; ======================================================\n");
6653 if (sched_debug_count >= 0)
6654 fprintf (dump, ";;\t -- sched_debug_count=%d\n", sched_debug_count);
6655 fprintf (dump, "\n");
6656
6657 visual_tbl = (char *) alloca (get_visual_tbl_length ());
6658 init_block_visualization ();
6659 }
6660
6661 /* remove remaining note insns from the block, save them in
6662 note_list. These notes are restored at the end of
6663 schedule_block (). */
6664 note_list = 0;
6665 rm_other_notes (head, tail);
6666
6667 target_bb = bb;
6668
6669 /* prepare current target block info */
6670 if (current_nr_blocks > 1)
6671 {
6672 candidate_table = (candidate *) alloca (current_nr_blocks * sizeof (candidate));
6673
6674 bblst_last = 0;
6675 /* ??? It is not clear why bblst_size is computed this way. The original
6676 number was clearly too small as it resulted in compiler failures.
6677 Multiplying by the original number by 2 (to account for update_bbs
6678 members) seems to be a reasonable solution. */
6679 /* ??? Or perhaps there is a bug somewhere else in this file? */
6680 bblst_size = (current_nr_blocks - bb) * rgn_nr_edges * 2;
6681 bblst_table = (int *) alloca (bblst_size * sizeof (int));
6682
6683 bitlst_table_last = 0;
6684 bitlst_table_size = rgn_nr_edges;
6685 bitlst_table = (int *) alloca (rgn_nr_edges * sizeof (int));
6686
6687 compute_trg_info (bb);
6688 }
6689
6690 clear_units ();
6691
6692 /* Allocate the ready list */
6693 ready = (rtx *) alloca ((rgn_n_insns + 1) * sizeof (rtx));
6694
6695 /* Print debugging information. */
6696 if (sched_verbose >= 5)
6697 debug_dependencies ();
6698
6699
6700 /* Initialize ready list with all 'ready' insns in target block.
6701 Count number of insns in the target block being scheduled. */
6702 n_ready = 0;
6703 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6704 {
6705 rtx next;
6706
6707 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
6708 continue;
6709 next = NEXT_INSN (insn);
6710
6711 if (INSN_DEP_COUNT (insn) == 0
6712 && (SCHED_GROUP_P (next) == 0 || GET_RTX_CLASS (GET_CODE (next)) != 'i'))
6713 ready[n_ready++] = insn;
6714 if (!(SCHED_GROUP_P (insn)))
6715 target_n_insns++;
6716 }
6717
6718 /* Add to ready list all 'ready' insns in valid source blocks.
6719 For speculative insns, check-live, exception-free, and
6720 issue-delay. */
6721 for (bb_src = bb + 1; bb_src < current_nr_blocks; bb_src++)
6722 if (IS_VALID (bb_src))
6723 {
6724 rtx src_head;
6725 rtx src_next_tail;
6726 rtx tail, head;
6727
6728 get_block_head_tail (bb_src, &head, &tail);
6729 src_next_tail = NEXT_INSN (tail);
6730 src_head = head;
6731
6732 if (head == tail
6733 && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
6734 continue;
6735
6736 for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
6737 {
6738 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
6739 continue;
6740
6741 if (!CANT_MOVE (insn)
6742 && (!IS_SPECULATIVE_INSN (insn)
6743 || (insn_issue_delay (insn) <= 3
6744 && check_live (insn, bb_src, target_bb)
6745 && is_exception_free (insn, bb_src, target_bb))))
6746
6747 {
6748 rtx next;
6749
6750 next = NEXT_INSN (insn);
6751 if (INSN_DEP_COUNT (insn) == 0
6752 && (SCHED_GROUP_P (next) == 0
6753 || GET_RTX_CLASS (GET_CODE (next)) != 'i'))
6754 ready[n_ready++] = insn;
6755 }
6756 }
6757 }
6758
6759 /* no insns scheduled in this block yet */
6760 last_scheduled_insn = 0;
6761
6762 /* Sort the ready list */
6763 SCHED_SORT (ready, n_ready);
6764
6765 if (sched_verbose >= 2)
6766 {
6767 fprintf (dump, ";;\t\tReady list initially: ");
6768 debug_ready_list (ready, n_ready);
6769 }
6770
6771 /* Q_SIZE is the total number of insns in the queue. */
6772 q_ptr = 0;
6773 q_size = 0;
6774 clock_var = 0;
6775 bzero ((char *) insn_queue, sizeof (insn_queue));
6776
6777 /* We start inserting insns after PREV_HEAD. */
6778 last = prev_head;
6779
6780 /* Initialize INSN_QUEUE, LIST and NEW_NEEDS. */
6781 new_needs = (NEXT_INSN (prev_head) == basic_block_head[b]
6782 ? NEED_HEAD : NEED_NOTHING);
6783 if (PREV_INSN (next_tail) == basic_block_end[b])
6784 new_needs |= NEED_TAIL;
6785
6786 /* loop until all the insns in BB are scheduled. */
6787 while (sched_target_n_insns < target_n_insns)
6788 {
6789 int b1;
6790
6791 #ifdef INTERBLOCK_DEBUG
6792 if (sched_debug_count == 0)
6793 break;
6794 #endif
6795 clock_var++;
6796
6797 /* Add to the ready list all pending insns that can be issued now.
6798 If there are no ready insns, increment clock until one
6799 is ready and add all pending insns at that point to the ready
6800 list. */
6801 n_ready = queue_to_ready (ready, n_ready);
6802
6803 if (n_ready == 0)
6804 abort ();
6805
6806 if (sched_verbose >= 2)
6807 {
6808 fprintf (dump, ";;\t\tReady list after queue_to_ready: ");
6809 debug_ready_list (ready, n_ready);
6810 }
6811
6812 /* Sort the ready list. */
6813 SCHED_SORT (ready, n_ready);
6814
6815 if (sched_verbose)
6816 {
6817 fprintf (dump, ";;\tReady list (t =%3d): ", clock_var);
6818 debug_ready_list (ready, n_ready);
6819 }
6820
6821 /* Issue insns from ready list.
6822 It is important to count down from n_ready, because n_ready may change
6823 as insns are issued. */
6824 can_issue_more = issue_rate;
6825 for (i = n_ready - 1; i >= 0 && can_issue_more; i--)
6826 {
6827 rtx insn = ready[i];
6828 int cost = actual_hazard (insn_unit (insn), insn, clock_var, 0);
6829
6830 if (cost > 1)
6831 {
6832 queue_insn (insn, cost);
6833 ready[i] = ready[--n_ready]; /* remove insn from ready list */
6834 }
6835 else if (cost == 0)
6836 {
6837 #ifdef INTERBLOCK_DEBUG
6838 if (sched_debug_count == 0)
6839 break;
6840 #endif
6841
6842 /* an interblock motion? */
6843 if (INSN_BB (insn) != target_bb)
6844 {
6845 if (IS_SPECULATIVE_INSN (insn))
6846 {
6847
6848 if (!check_live (insn, INSN_BB (insn), target_bb))
6849 {
6850 /* speculative motion, live check failed, remove
6851 insn from ready list */
6852 ready[i] = ready[--n_ready];
6853 continue;
6854 }
6855 update_live (insn, INSN_BB (insn), target_bb);
6856
6857 /* for speculative load, mark insns fed by it. */
6858 if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn))
6859 set_spec_fed (insn);
6860
6861 nr_spec++;
6862 }
6863 nr_inter++;
6864
6865 /* update source block boundaries */
6866 b1 = INSN_BLOCK (insn);
6867 if (insn == basic_block_head[b1]
6868 && insn == basic_block_end[b1])
6869 {
6870 emit_note_after (NOTE_INSN_DELETED, basic_block_head[b1]);
6871 basic_block_end[b1] = basic_block_head[b1] = NEXT_INSN (insn);
6872 }
6873 else if (insn == basic_block_end[b1])
6874 {
6875 basic_block_end[b1] = PREV_INSN (insn);
6876 }
6877 else if (insn == basic_block_head[b1])
6878 {
6879 basic_block_head[b1] = NEXT_INSN (insn);
6880 }
6881 }
6882 else
6883 {
6884 /* in block motion */
6885 sched_target_n_insns++;
6886 }
6887
6888 last_scheduled_insn = insn;
6889 last = move_insn (insn, last);
6890 sched_n_insns++;
6891
6892 can_issue_more--;
6893
6894 #ifdef INTERBLOCK_DEBUG
6895 if (sched_debug_count > 0)
6896 sched_debug_count--;
6897 #endif
6898
6899 n_ready = schedule_insn (insn, ready, n_ready, clock_var);
6900
6901 /* remove insn from ready list */
6902 ready[i] = ready[--n_ready];
6903
6904 /* close this block after scheduling its jump */
6905 if (GET_CODE (last_scheduled_insn) == JUMP_INSN)
6906 break;
6907 }
6908 }
6909
6910 /* debug info */
6911 if (sched_verbose)
6912 {
6913 visualize_scheduled_insns (b, clock_var);
6914 #ifdef INTERBLOCK_DEBUG
6915 if (sched_debug_count == 0)
6916 fprintf (dump, "........ sched_debug_count == 0 .................\n");
6917 #endif
6918 }
6919 }
6920
6921 /* debug info */
6922 if (sched_verbose)
6923 {
6924 fprintf (dump, ";;\tReady list (final): ");
6925 debug_ready_list (ready, n_ready);
6926 print_block_visualization (b, "");
6927 }
6928
6929 /* Sanity check -- queue must be empty now. Meaningless if region has
6930 multiple bbs, or if scheduling stopped by sched_debug_count. */
6931 if (current_nr_blocks > 1)
6932 #ifdef INTERBLOCK_DEBUG
6933 if (sched_debug_count != 0)
6934 #endif
6935 if (!flag_schedule_interblock && q_size != 0)
6936 abort ();
6937
6938 /* update head/tail boundaries. */
6939 head = NEXT_INSN (prev_head);
6940 tail = last;
6941
6942 #ifdef INTERBLOCK_DEBUG
6943 if (sched_debug_count == 0)
6944 /* compensate for stopping scheduling prematurely */
6945 for (i = sched_target_n_insns; i < target_n_insns; i++)
6946 tail = move_insn (group_leader (NEXT_INSN (tail)), tail);
6947 #endif
6948
6949 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
6950 previously found among the insns. Insert them at the beginning
6951 of the insns. */
6952 if (note_list != 0)
6953 {
6954 rtx note_head = note_list;
6955
6956 while (PREV_INSN (note_head))
6957 {
6958 note_head = PREV_INSN (note_head);
6959 }
6960
6961 PREV_INSN (note_head) = PREV_INSN (head);
6962 NEXT_INSN (PREV_INSN (head)) = note_head;
6963 PREV_INSN (head) = note_list;
6964 NEXT_INSN (note_list) = head;
6965 head = note_head;
6966 }
6967
6968 /* update target block boundaries. */
6969 if (new_needs & NEED_HEAD)
6970 basic_block_head[b] = head;
6971
6972 if (new_needs & NEED_TAIL)
6973 basic_block_end[b] = tail;
6974
6975 /* debugging */
6976 if (sched_verbose)
6977 {
6978 fprintf (dump, ";; total time = %d\n;; new basic block head = %d\n",
6979 clock_var, INSN_UID (basic_block_head[b]));
6980 fprintf (dump, ";; new basic block end = %d\n\n",
6981 INSN_UID (basic_block_end[b]));
6982 }
6983
6984 return (sched_n_insns);
6985 } /* schedule_block () */
6986 \f
6987
6988 /* print the bit-set of registers, S. callable from debugger */
6989
6990 extern void
6991 debug_reg_vector (s)
6992 regset s;
6993 {
6994 int regno;
6995
6996 EXECUTE_IF_SET_IN_REG_SET (s, 0, regno,
6997 {
6998 fprintf (dump, " %d", regno);
6999 });
7000
7001 fprintf (dump, "\n");
7002 }
7003
7004 /* Use the backward dependences from LOG_LINKS to build
7005 forward dependences in INSN_DEPEND. */
7006
7007 static void
7008 compute_block_forward_dependences (bb)
7009 int bb;
7010 {
7011 rtx insn, link;
7012 rtx tail, head;
7013 rtx next_tail;
7014 enum reg_note dep_type;
7015
7016 get_block_head_tail (bb, &head, &tail);
7017 next_tail = NEXT_INSN (tail);
7018 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7019 {
7020 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
7021 continue;
7022
7023 insn = group_leader (insn);
7024
7025 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
7026 {
7027 rtx x = group_leader (XEXP (link, 0));
7028 rtx new_link;
7029
7030 if (x != XEXP (link, 0))
7031 continue;
7032
7033 /* Ignore dependences upon deleted insn */
7034 if (GET_CODE (x) == NOTE || INSN_DELETED_P (x))
7035 continue;
7036 if (find_insn_list (insn, INSN_DEPEND (x)))
7037 continue;
7038
7039 new_link = rtx_alloc (INSN_LIST);
7040
7041 dep_type = REG_NOTE_KIND (link);
7042 PUT_REG_NOTE_KIND (new_link, dep_type);
7043
7044 XEXP (new_link, 0) = insn;
7045 XEXP (new_link, 1) = INSN_DEPEND (x);
7046
7047 INSN_DEPEND (x) = new_link;
7048 INSN_DEP_COUNT (insn) += 1;
7049 }
7050 }
7051 }
7052
7053 /* Initialize variables for region data dependence analysis.
7054 n_bbs is the number of region blocks */
7055
7056 __inline static void
7057 init_rgn_data_dependences (n_bbs)
7058 int n_bbs;
7059 {
7060 int bb;
7061
7062 /* variables for which one copy exists for each block */
7063 bzero ((char *) bb_pending_read_insns, n_bbs * sizeof (rtx));
7064 bzero ((char *) bb_pending_read_mems, n_bbs * sizeof (rtx));
7065 bzero ((char *) bb_pending_write_insns, n_bbs * sizeof (rtx));
7066 bzero ((char *) bb_pending_write_mems, n_bbs * sizeof (rtx));
7067 bzero ((char *) bb_pending_lists_length, n_bbs * sizeof (rtx));
7068 bzero ((char *) bb_last_pending_memory_flush, n_bbs * sizeof (rtx));
7069 bzero ((char *) bb_last_function_call, n_bbs * sizeof (rtx));
7070 bzero ((char *) bb_sched_before_next_call, n_bbs * sizeof (rtx));
7071
7072 /* Create an insn here so that we can hang dependencies off of it later. */
7073 for (bb = 0; bb < n_bbs; bb++)
7074 {
7075 bb_sched_before_next_call[bb] =
7076 gen_rtx (INSN, VOIDmode, 0, NULL_RTX, NULL_RTX,
7077 NULL_RTX, 0, NULL_RTX, 0);
7078 LOG_LINKS (bb_sched_before_next_call[bb]) = 0;
7079 }
7080 }
7081
7082 /* Add dependences so that branches are scheduled to run last in their block */
7083
7084 static void
7085 add_branch_dependences (head, tail)
7086 rtx head, tail;
7087 {
7088
7089 rtx insn, last;
7090
7091 /* For all branches, calls, uses, and cc0 setters, force them to remain
7092 in order at the end of the block by adding dependencies and giving
7093 the last a high priority. There may be notes present, and prev_head
7094 may also be a note.
7095
7096 Branches must obviously remain at the end. Calls should remain at the
7097 end since moving them results in worse register allocation. Uses remain
7098 at the end to ensure proper register allocation. cc0 setters remaim
7099 at the end because they can't be moved away from their cc0 user. */
7100 insn = tail;
7101 last = 0;
7102 while (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
7103 || (GET_CODE (insn) == INSN
7104 && (GET_CODE (PATTERN (insn)) == USE
7105 #ifdef HAVE_cc0
7106 || sets_cc0_p (PATTERN (insn))
7107 #endif
7108 ))
7109 || GET_CODE (insn) == NOTE)
7110 {
7111 if (GET_CODE (insn) != NOTE)
7112 {
7113 if (last != 0
7114 && !find_insn_list (insn, LOG_LINKS (last)))
7115 {
7116 add_dependence (last, insn, REG_DEP_ANTI);
7117 INSN_REF_COUNT (insn)++;
7118 }
7119
7120 CANT_MOVE (insn) = 1;
7121
7122 last = insn;
7123 /* Skip over insns that are part of a group. */
7124 while (SCHED_GROUP_P (insn))
7125 insn = prev_nonnote_insn (insn);
7126 }
7127
7128 /* Don't overrun the bounds of the basic block. */
7129 if (insn == head)
7130 break;
7131
7132 insn = PREV_INSN (insn);
7133 }
7134
7135 /* make sure these insns are scheduled last in their block */
7136 insn = last;
7137 if (insn != 0)
7138 while (insn != head)
7139 {
7140 insn = prev_nonnote_insn (insn);
7141
7142 if (INSN_REF_COUNT (insn) != 0)
7143 continue;
7144
7145 if (!find_insn_list (last, LOG_LINKS (insn)))
7146 add_dependence (last, insn, REG_DEP_ANTI);
7147 INSN_REF_COUNT (insn) = 1;
7148
7149 /* Skip over insns that are part of a group. */
7150 while (SCHED_GROUP_P (insn))
7151 insn = prev_nonnote_insn (insn);
7152 }
7153 }
7154
7155 /* Compute bacward dependences inside BB. In a multiple blocks region:
7156 (1) a bb is analyzed after its predecessors, and (2) the lists in
7157 effect at the end of bb (after analyzing for bb) are inherited by
7158 bb's successrs.
7159
7160 Specifically for reg-reg data dependences, the block insns are
7161 scanned by sched_analyze () top-to-bottom. Two lists are
7162 naintained by sched_analyze (): reg_last_defs[] for register DEFs,
7163 and reg_last_uses[] for register USEs.
7164
7165 When analysis is completed for bb, we update for its successors:
7166 ; - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
7167 ; - USES[succ] = Union (USES [succ], DEFS [bb])
7168
7169 The mechanism for computing mem-mem data dependence is very
7170 similar, and the result is interblock dependences in the region. */
7171
7172 static void
7173 compute_block_backward_dependences (bb)
7174 int bb;
7175 {
7176 int b;
7177 rtx x;
7178 rtx head, tail;
7179 int max_reg = max_reg_num ();
7180
7181 b = BB_TO_BLOCK (bb);
7182
7183 if (current_nr_blocks == 1)
7184 {
7185 reg_last_uses = (rtx *) alloca (max_reg * sizeof (rtx));
7186 reg_last_sets = (rtx *) alloca (max_reg * sizeof (rtx));
7187
7188 bzero ((char *) reg_last_uses, max_reg * sizeof (rtx));
7189 bzero ((char *) reg_last_sets, max_reg * sizeof (rtx));
7190
7191 pending_read_insns = 0;
7192 pending_read_mems = 0;
7193 pending_write_insns = 0;
7194 pending_write_mems = 0;
7195 pending_lists_length = 0;
7196 last_function_call = 0;
7197 last_pending_memory_flush = 0;
7198 sched_before_next_call
7199 = gen_rtx (INSN, VOIDmode, 0, NULL_RTX, NULL_RTX,
7200 NULL_RTX, 0, NULL_RTX, 0);
7201 LOG_LINKS (sched_before_next_call) = 0;
7202 }
7203 else
7204 {
7205 reg_last_uses = bb_reg_last_uses[bb];
7206 reg_last_sets = bb_reg_last_sets[bb];
7207
7208 pending_read_insns = bb_pending_read_insns[bb];
7209 pending_read_mems = bb_pending_read_mems[bb];
7210 pending_write_insns = bb_pending_write_insns[bb];
7211 pending_write_mems = bb_pending_write_mems[bb];
7212 pending_lists_length = bb_pending_lists_length[bb];
7213 last_function_call = bb_last_function_call[bb];
7214 last_pending_memory_flush = bb_last_pending_memory_flush[bb];
7215
7216 sched_before_next_call = bb_sched_before_next_call[bb];
7217 }
7218
7219 /* do the analysis for this block */
7220 get_block_head_tail (bb, &head, &tail);
7221 sched_analyze (head, tail);
7222 add_branch_dependences (head, tail);
7223
7224 if (current_nr_blocks > 1)
7225 {
7226 int e, first_edge;
7227 int b_succ, bb_succ;
7228 int reg;
7229 rtx link_insn, link_mem;
7230 rtx u;
7231
7232 /* these lists should point to the right place, for correct freeing later. */
7233 bb_pending_read_insns[bb] = pending_read_insns;
7234 bb_pending_read_mems[bb] = pending_read_mems;
7235 bb_pending_write_insns[bb] = pending_write_insns;
7236 bb_pending_write_mems[bb] = pending_write_mems;
7237
7238 /* bb's structures are inherited by it's successors */
7239 first_edge = e = OUT_EDGES (b);
7240 if (e > 0)
7241 do
7242 {
7243 b_succ = TO_BLOCK (e);
7244 bb_succ = BLOCK_TO_BB (b_succ);
7245
7246 /* only bbs "below" bb, in the same region, are interesting */
7247 if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
7248 || bb_succ <= bb)
7249 {
7250 e = NEXT_OUT (e);
7251 continue;
7252 }
7253
7254 for (reg = 0; reg < max_reg; reg++)
7255 {
7256
7257 /* reg-last-uses lists are inherited by bb_succ */
7258 for (u = reg_last_uses[reg]; u; u = XEXP (u, 1))
7259 {
7260 if (find_insn_list (XEXP (u, 0), (bb_reg_last_uses[bb_succ])[reg]))
7261 continue;
7262
7263 (bb_reg_last_uses[bb_succ])[reg]
7264 = gen_rtx (INSN_LIST, VOIDmode, XEXP (u, 0),
7265 (bb_reg_last_uses[bb_succ])[reg]);
7266 }
7267
7268 /* reg-last-defs lists are inherited by bb_succ */
7269 for (u = reg_last_sets[reg]; u; u = XEXP (u, 1))
7270 {
7271 if (find_insn_list (XEXP (u, 0), (bb_reg_last_sets[bb_succ])[reg]))
7272 continue;
7273
7274 (bb_reg_last_sets[bb_succ])[reg]
7275 = gen_rtx (INSN_LIST, VOIDmode, XEXP (u, 0),
7276 (bb_reg_last_sets[bb_succ])[reg]);
7277 }
7278 }
7279
7280 /* mem read/write lists are inherited by bb_succ */
7281 link_insn = pending_read_insns;
7282 link_mem = pending_read_mems;
7283 while (link_insn)
7284 {
7285 if (!(find_insn_mem_list (XEXP (link_insn, 0), XEXP (link_mem, 0),
7286 bb_pending_read_insns[bb_succ],
7287 bb_pending_read_mems[bb_succ])))
7288 add_insn_mem_dependence (&bb_pending_read_insns[bb_succ],
7289 &bb_pending_read_mems[bb_succ],
7290 XEXP (link_insn, 0), XEXP (link_mem, 0));
7291 link_insn = XEXP (link_insn, 1);
7292 link_mem = XEXP (link_mem, 1);
7293 }
7294
7295 link_insn = pending_write_insns;
7296 link_mem = pending_write_mems;
7297 while (link_insn)
7298 {
7299 if (!(find_insn_mem_list (XEXP (link_insn, 0), XEXP (link_mem, 0),
7300 bb_pending_write_insns[bb_succ],
7301 bb_pending_write_mems[bb_succ])))
7302 add_insn_mem_dependence (&bb_pending_write_insns[bb_succ],
7303 &bb_pending_write_mems[bb_succ],
7304 XEXP (link_insn, 0), XEXP (link_mem, 0));
7305
7306 link_insn = XEXP (link_insn, 1);
7307 link_mem = XEXP (link_mem, 1);
7308 }
7309
7310 /* last_function_call is inherited by bb_succ */
7311 for (u = last_function_call; u; u = XEXP (u, 1))
7312 {
7313 if (find_insn_list (XEXP (u, 0), bb_last_function_call[bb_succ]))
7314 continue;
7315
7316 bb_last_function_call[bb_succ]
7317 = gen_rtx (INSN_LIST, VOIDmode, XEXP (u, 0),
7318 bb_last_function_call[bb_succ]);
7319 }
7320
7321 /* last_pending_memory_flush is inherited by bb_succ */
7322 for (u = last_pending_memory_flush; u; u = XEXP (u, 1))
7323 {
7324 if (find_insn_list (XEXP (u, 0), bb_last_pending_memory_flush[bb_succ]))
7325 continue;
7326
7327 bb_last_pending_memory_flush[bb_succ]
7328 = gen_rtx (INSN_LIST, VOIDmode, XEXP (u, 0),
7329 bb_last_pending_memory_flush[bb_succ]);
7330 }
7331
7332 /* sched_before_next_call is inherited by bb_succ */
7333 x = LOG_LINKS (sched_before_next_call);
7334 for (; x; x = XEXP (x, 1))
7335 add_dependence (bb_sched_before_next_call[bb_succ],
7336 XEXP (x, 0), REG_DEP_ANTI);
7337
7338 e = NEXT_OUT (e);
7339 }
7340 while (e != first_edge);
7341 }
7342 }
7343
7344 /* Print dependences for debugging, callable from debugger */
7345
7346 void
7347 debug_dependencies ()
7348 {
7349 int bb;
7350
7351 fprintf (dump, ";; --------------- forward dependences: ------------ \n");
7352 for (bb = 0; bb < current_nr_blocks; bb++)
7353 {
7354 if (1)
7355 {
7356 rtx head, tail;
7357 rtx next_tail;
7358 rtx insn;
7359
7360 get_block_head_tail (bb, &head, &tail);
7361 next_tail = NEXT_INSN (tail);
7362 fprintf (dump, "\n;; --- Region Dependences --- b %d bb %d \n",
7363 BB_TO_BLOCK (bb), bb);
7364
7365 fprintf (dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
7366 "insn", "code", "bb", "dep", "prio", "cost", "blockage", "units");
7367 fprintf (dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
7368 "----", "----", "--", "---", "----", "----", "--------", "-----");
7369 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7370 {
7371 rtx link;
7372 int unit, range;
7373
7374 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
7375 {
7376 int n;
7377 fprintf (dump, ";; %6d ", INSN_UID (insn));
7378 if (GET_CODE (insn) == NOTE)
7379 {
7380 n = NOTE_LINE_NUMBER (insn);
7381 if (n < 0)
7382 fprintf (dump, "%s\n", GET_NOTE_INSN_NAME (n));
7383 else
7384 fprintf (dump, "line %d, file %s\n", n,
7385 NOTE_SOURCE_FILE (insn));
7386 }
7387 else
7388 fprintf (dump, " {%s}\n", GET_RTX_NAME (insn));
7389 continue;
7390 }
7391
7392 unit = insn_unit (insn);
7393 range = (unit < 0
7394 || function_units[unit].blockage_range_function == 0) ? 0 :
7395 function_units[unit].blockage_range_function (insn);
7396 fprintf (dump,
7397 ";; %s%5d%6d%6d%6d%6d%6d %3d -%3d ",
7398 (SCHED_GROUP_P (insn) ? "+" : " "),
7399 INSN_UID (insn),
7400 INSN_CODE (insn),
7401 INSN_BB (insn),
7402 INSN_DEP_COUNT (insn),
7403 INSN_PRIORITY (insn),
7404 insn_cost (insn, 0, 0),
7405 (int) MIN_BLOCKAGE_COST (range),
7406 (int) MAX_BLOCKAGE_COST (range));
7407 insn_print_units (insn);
7408 fprintf (dump, "\t: ");
7409 for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
7410 fprintf (dump, "%d ", INSN_UID (XEXP (link, 0)));
7411 fprintf (dump, "\n");
7412 }
7413 }
7414 }
7415 fprintf (dump, "\n");
7416 }
7417
7418 /* Set_priorities: compute priority of each insn in the block */
7419
7420 static int
7421 set_priorities (bb)
7422 int bb;
7423 {
7424 rtx insn;
7425 int n_insn;
7426
7427 rtx tail;
7428 rtx prev_head;
7429 rtx head;
7430
7431 get_block_head_tail (bb, &head, &tail);
7432 prev_head = PREV_INSN (head);
7433
7434 if (head == tail
7435 && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
7436 return 0;
7437
7438 n_insn = 0;
7439 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
7440 {
7441
7442 if (GET_CODE (insn) == NOTE)
7443 continue;
7444
7445 if (!(SCHED_GROUP_P (insn)))
7446 n_insn++;
7447 (void) priority (insn);
7448 }
7449
7450 return n_insn;
7451 }
7452
7453 /* Make each element of VECTOR point at an rtx-vector,
7454 taking the space for all those rtx-vectors from SPACE.
7455 SPACE is of type (rtx *), but it is really as long as NELTS rtx-vectors.
7456 BYTES_PER_ELT is the number of bytes in one rtx-vector.
7457 (this is the same as init_regset_vector () in flow.c) */
7458
7459 static void
7460 init_rtx_vector (vector, space, nelts, bytes_per_elt)
7461 rtx **vector;
7462 rtx *space;
7463 int nelts;
7464 int bytes_per_elt;
7465 {
7466 register int i;
7467 register rtx *p = space;
7468
7469 for (i = 0; i < nelts; i++)
7470 {
7471 vector[i] = p;
7472 p += bytes_per_elt / sizeof (*p);
7473 }
7474 }
7475
7476 /* Schedule a region. A region is either an inner loop, a loop-free
7477 subroutine, or a single basic block. Each bb in the region is
7478 scheduled after its flow predecessors. */
7479
7480 static void
7481 schedule_region (rgn)
7482 int rgn;
7483 {
7484 int bb;
7485 int rgn_n_insns = 0;
7486 int sched_rgn_n_insns = 0;
7487
7488 /* set variables for the current region */
7489 current_nr_blocks = RGN_NR_BLOCKS (rgn);
7490 current_blocks = RGN_BLOCKS (rgn);
7491
7492 reg_pending_sets = ALLOCA_REG_SET ();
7493 reg_pending_sets_all = 0;
7494
7495 /* initializations for region data dependence analyisis */
7496 if (current_nr_blocks > 1)
7497 {
7498 rtx *space;
7499 int maxreg = max_reg_num ();
7500
7501 bb_reg_last_uses = (rtx **) alloca (current_nr_blocks * sizeof (rtx *));
7502 space = (rtx *) alloca (current_nr_blocks * maxreg * sizeof (rtx));
7503 bzero ((char *) space, current_nr_blocks * maxreg * sizeof (rtx));
7504 init_rtx_vector (bb_reg_last_uses, space, current_nr_blocks, maxreg * sizeof (rtx *));
7505
7506 bb_reg_last_sets = (rtx **) alloca (current_nr_blocks * sizeof (rtx *));
7507 space = (rtx *) alloca (current_nr_blocks * maxreg * sizeof (rtx));
7508 bzero ((char *) space, current_nr_blocks * maxreg * sizeof (rtx));
7509 init_rtx_vector (bb_reg_last_sets, space, current_nr_blocks, maxreg * sizeof (rtx *));
7510
7511 bb_pending_read_insns = (rtx *) alloca (current_nr_blocks * sizeof (rtx));
7512 bb_pending_read_mems = (rtx *) alloca (current_nr_blocks * sizeof (rtx));
7513 bb_pending_write_insns = (rtx *) alloca (current_nr_blocks * sizeof (rtx));
7514 bb_pending_write_mems = (rtx *) alloca (current_nr_blocks * sizeof (rtx));
7515 bb_pending_lists_length = (int *) alloca (current_nr_blocks * sizeof (int));
7516 bb_last_pending_memory_flush = (rtx *) alloca (current_nr_blocks * sizeof (rtx));
7517 bb_last_function_call = (rtx *) alloca (current_nr_blocks * sizeof (rtx));
7518 bb_sched_before_next_call = (rtx *) alloca (current_nr_blocks * sizeof (rtx));
7519
7520 init_rgn_data_dependences (current_nr_blocks);
7521 }
7522
7523 /* compute LOG_LINKS */
7524 for (bb = 0; bb < current_nr_blocks; bb++)
7525 compute_block_backward_dependences (bb);
7526
7527 /* compute INSN_DEPEND */
7528 for (bb = current_nr_blocks - 1; bb >= 0; bb--)
7529 compute_block_forward_dependences (bb);
7530
7531 /* Delete line notes, compute live-regs at block end, and set priorities. */
7532 dead_notes = 0;
7533 for (bb = 0; bb < current_nr_blocks; bb++)
7534 {
7535 if (reload_completed == 0)
7536 find_pre_sched_live (bb);
7537
7538 if (write_symbols != NO_DEBUG)
7539 {
7540 save_line_notes (bb);
7541 rm_line_notes (bb);
7542 }
7543
7544 rgn_n_insns += set_priorities (bb);
7545 }
7546
7547 /* compute interblock info: probabilities, split-edges, dominators, etc. */
7548 if (current_nr_blocks > 1)
7549 {
7550 int i;
7551
7552 prob = (float *) alloca ((current_nr_blocks) * sizeof (float));
7553
7554 bbset_size = current_nr_blocks / HOST_BITS_PER_WIDE_INT + 1;
7555 dom = (bbset *) alloca (current_nr_blocks * sizeof (bbset));
7556 for (i = 0; i < current_nr_blocks; i++)
7557 {
7558 dom[i] = (bbset) alloca (bbset_size * sizeof (HOST_WIDE_INT));
7559 bzero ((char *) dom[i], bbset_size * sizeof (HOST_WIDE_INT));
7560 }
7561
7562 /* edge to bit */
7563 rgn_nr_edges = 0;
7564 edge_to_bit = (int *) alloca (nr_edges * sizeof (int));
7565 for (i = 1; i < nr_edges; i++)
7566 if (CONTAINING_RGN (FROM_BLOCK (i)) == rgn)
7567 EDGE_TO_BIT (i) = rgn_nr_edges++;
7568 rgn_edges = (int *) alloca (rgn_nr_edges * sizeof (int));
7569
7570 rgn_nr_edges = 0;
7571 for (i = 1; i < nr_edges; i++)
7572 if (CONTAINING_RGN (FROM_BLOCK (i)) == (rgn))
7573 rgn_edges[rgn_nr_edges++] = i;
7574
7575 /* split edges */
7576 edgeset_size = rgn_nr_edges / HOST_BITS_PER_WIDE_INT + 1;
7577 pot_split = (edgeset *) alloca (current_nr_blocks * sizeof (edgeset));
7578 ancestor_edges = (edgeset *) alloca (current_nr_blocks * sizeof (edgeset));
7579 for (i = 0; i < current_nr_blocks; i++)
7580 {
7581 pot_split[i] =
7582 (edgeset) alloca (edgeset_size * sizeof (HOST_WIDE_INT));
7583 bzero ((char *) pot_split[i],
7584 edgeset_size * sizeof (HOST_WIDE_INT));
7585 ancestor_edges[i] =
7586 (edgeset) alloca (edgeset_size * sizeof (HOST_WIDE_INT));
7587 bzero ((char *) ancestor_edges[i],
7588 edgeset_size * sizeof (HOST_WIDE_INT));
7589 }
7590
7591 /* compute probabilities, dominators, split_edges */
7592 for (bb = 0; bb < current_nr_blocks; bb++)
7593 compute_dom_prob_ps (bb);
7594 }
7595
7596 /* now we can schedule all blocks */
7597 for (bb = 0; bb < current_nr_blocks; bb++)
7598 {
7599 sched_rgn_n_insns += schedule_block (bb, rgn, rgn_n_insns);
7600
7601 #ifdef USE_C_ALLOCA
7602 alloca (0);
7603 #endif
7604 }
7605
7606 #ifdef INTERBLOCK_DEBUG
7607 if (sched_debug_count != 0)
7608 #endif
7609 /* sanity check: verify that all region insns were scheduled */
7610 if (sched_rgn_n_insns != rgn_n_insns)
7611 abort ();
7612
7613 /* update register life and usage information */
7614 if (reload_completed == 0)
7615 {
7616 for (bb = current_nr_blocks - 1; bb >= 0; bb--)
7617 find_post_sched_live (bb);
7618
7619 if (current_nr_blocks <= 1)
7620 /* Sanity check. There should be no REG_DEAD notes leftover at the end.
7621 In practice, this can occur as the result of bugs in flow, combine.c,
7622 and/or sched.c. The values of the REG_DEAD notes remaining are
7623 meaningless, because dead_notes is just used as a free list. */
7624 if (dead_notes != 0)
7625 abort ();
7626 }
7627
7628 /* restore line notes. */
7629 if (write_symbols != NO_DEBUG)
7630 {
7631 for (bb = 0; bb < current_nr_blocks; bb++)
7632 restore_line_notes (bb);
7633 }
7634
7635 /* Done with this region */
7636 free_pending_lists ();
7637
7638 FREE_REG_SET (reg_pending_sets);
7639 }
7640
7641 /* Subroutine of split_hard_reg_notes. Searches X for any reference to
7642 REGNO, returning the rtx of the reference found if any. Otherwise,
7643 returns 0. */
7644
7645 static rtx
7646 regno_use_in (regno, x)
7647 int regno;
7648 rtx x;
7649 {
7650 register char *fmt;
7651 int i, j;
7652 rtx tem;
7653
7654 if (GET_CODE (x) == REG && REGNO (x) == regno)
7655 return x;
7656
7657 fmt = GET_RTX_FORMAT (GET_CODE (x));
7658 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7659 {
7660 if (fmt[i] == 'e')
7661 {
7662 if ((tem = regno_use_in (regno, XEXP (x, i))))
7663 return tem;
7664 }
7665 else if (fmt[i] == 'E')
7666 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7667 if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
7668 return tem;
7669 }
7670
7671 return 0;
7672 }
7673
7674 /* Subroutine of update_flow_info. Determines whether any new REG_NOTEs are
7675 needed for the hard register mentioned in the note. This can happen
7676 if the reference to the hard register in the original insn was split into
7677 several smaller hard register references in the split insns. */
7678
7679 static void
7680 split_hard_reg_notes (note, first, last, orig_insn)
7681 rtx note, first, last, orig_insn;
7682 {
7683 rtx reg, temp, link;
7684 int n_regs, i, new_reg;
7685 rtx insn;
7686
7687 /* Assume that this is a REG_DEAD note. */
7688 if (REG_NOTE_KIND (note) != REG_DEAD)
7689 abort ();
7690
7691 reg = XEXP (note, 0);
7692
7693 n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
7694
7695 for (i = 0; i < n_regs; i++)
7696 {
7697 new_reg = REGNO (reg) + i;
7698
7699 /* Check for references to new_reg in the split insns. */
7700 for (insn = last;; insn = PREV_INSN (insn))
7701 {
7702 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7703 && (temp = regno_use_in (new_reg, PATTERN (insn))))
7704 {
7705 /* Create a new reg dead note ere. */
7706 link = rtx_alloc (EXPR_LIST);
7707 PUT_REG_NOTE_KIND (link, REG_DEAD);
7708 XEXP (link, 0) = temp;
7709 XEXP (link, 1) = REG_NOTES (insn);
7710 REG_NOTES (insn) = link;
7711
7712 /* If killed multiple registers here, then add in the excess. */
7713 i += HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) - 1;
7714
7715 break;
7716 }
7717 /* It isn't mentioned anywhere, so no new reg note is needed for
7718 this register. */
7719 if (insn == first)
7720 break;
7721 }
7722 }
7723 }
7724
7725 /* Subroutine of update_flow_info. Determines whether a SET or CLOBBER in an
7726 insn created by splitting needs a REG_DEAD or REG_UNUSED note added. */
7727
7728 static void
7729 new_insn_dead_notes (pat, insn, last, orig_insn)
7730 rtx pat, insn, last, orig_insn;
7731 {
7732 rtx dest, tem, set;
7733
7734 /* PAT is either a CLOBBER or a SET here. */
7735 dest = XEXP (pat, 0);
7736
7737 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
7738 || GET_CODE (dest) == STRICT_LOW_PART
7739 || GET_CODE (dest) == SIGN_EXTRACT)
7740 dest = XEXP (dest, 0);
7741
7742 if (GET_CODE (dest) == REG)
7743 {
7744 for (tem = last; tem != insn; tem = PREV_INSN (tem))
7745 {
7746 if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
7747 && reg_overlap_mentioned_p (dest, PATTERN (tem))
7748 && (set = single_set (tem)))
7749 {
7750 rtx tem_dest = SET_DEST (set);
7751
7752 while (GET_CODE (tem_dest) == ZERO_EXTRACT
7753 || GET_CODE (tem_dest) == SUBREG
7754 || GET_CODE (tem_dest) == STRICT_LOW_PART
7755 || GET_CODE (tem_dest) == SIGN_EXTRACT)
7756 tem_dest = XEXP (tem_dest, 0);
7757
7758 if (!rtx_equal_p (tem_dest, dest))
7759 {
7760 /* Use the same scheme as combine.c, don't put both REG_DEAD
7761 and REG_UNUSED notes on the same insn. */
7762 if (!find_regno_note (tem, REG_UNUSED, REGNO (dest))
7763 && !find_regno_note (tem, REG_DEAD, REGNO (dest)))
7764 {
7765 rtx note = rtx_alloc (EXPR_LIST);
7766 PUT_REG_NOTE_KIND (note, REG_DEAD);
7767 XEXP (note, 0) = dest;
7768 XEXP (note, 1) = REG_NOTES (tem);
7769 REG_NOTES (tem) = note;
7770 }
7771 /* The reg only dies in one insn, the last one that uses
7772 it. */
7773 break;
7774 }
7775 else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
7776 /* We found an instruction that both uses the register,
7777 and sets it, so no new REG_NOTE is needed for this set. */
7778 break;
7779 }
7780 }
7781 /* If this is a set, it must die somewhere, unless it is the dest of
7782 the original insn, and hence is live after the original insn. Abort
7783 if it isn't supposed to be live after the original insn.
7784
7785 If this is a clobber, then just add a REG_UNUSED note. */
7786 if (tem == insn)
7787 {
7788 int live_after_orig_insn = 0;
7789 rtx pattern = PATTERN (orig_insn);
7790 int i;
7791
7792 if (GET_CODE (pat) == CLOBBER)
7793 {
7794 rtx note = rtx_alloc (EXPR_LIST);
7795 PUT_REG_NOTE_KIND (note, REG_UNUSED);
7796 XEXP (note, 0) = dest;
7797 XEXP (note, 1) = REG_NOTES (insn);
7798 REG_NOTES (insn) = note;
7799 return;
7800 }
7801
7802 /* The original insn could have multiple sets, so search the
7803 insn for all sets. */
7804 if (GET_CODE (pattern) == SET)
7805 {
7806 if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
7807 live_after_orig_insn = 1;
7808 }
7809 else if (GET_CODE (pattern) == PARALLEL)
7810 {
7811 for (i = 0; i < XVECLEN (pattern, 0); i++)
7812 if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
7813 && reg_overlap_mentioned_p (dest,
7814 SET_DEST (XVECEXP (pattern,
7815 0, i))))
7816 live_after_orig_insn = 1;
7817 }
7818
7819 if (!live_after_orig_insn)
7820 abort ();
7821 }
7822 }
7823 }
7824
7825 /* Subroutine of update_flow_info. Update the value of reg_n_sets for all
7826 registers modified by X. INC is -1 if the containing insn is being deleted,
7827 and is 1 if the containing insn is a newly generated insn. */
7828
7829 static void
7830 update_n_sets (x, inc)
7831 rtx x;
7832 int inc;
7833 {
7834 rtx dest = SET_DEST (x);
7835
7836 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
7837 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
7838 dest = SUBREG_REG (dest);
7839
7840 if (GET_CODE (dest) == REG)
7841 {
7842 int regno = REGNO (dest);
7843
7844 if (regno < FIRST_PSEUDO_REGISTER)
7845 {
7846 register int i;
7847 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
7848
7849 for (i = regno; i < endregno; i++)
7850 REG_N_SETS (i) += inc;
7851 }
7852 else
7853 REG_N_SETS (regno) += inc;
7854 }
7855 }
7856
7857 /* Updates all flow-analysis related quantities (including REG_NOTES) for
7858 the insns from FIRST to LAST inclusive that were created by splitting
7859 ORIG_INSN. NOTES are the original REG_NOTES. */
7860
7861 static void
7862 update_flow_info (notes, first, last, orig_insn)
7863 rtx notes;
7864 rtx first, last;
7865 rtx orig_insn;
7866 {
7867 rtx insn, note;
7868 rtx next;
7869 rtx orig_dest, temp;
7870 rtx set;
7871
7872 /* Get and save the destination set by the original insn. */
7873
7874 orig_dest = single_set (orig_insn);
7875 if (orig_dest)
7876 orig_dest = SET_DEST (orig_dest);
7877
7878 /* Move REG_NOTES from the original insn to where they now belong. */
7879
7880 for (note = notes; note; note = next)
7881 {
7882 next = XEXP (note, 1);
7883 switch (REG_NOTE_KIND (note))
7884 {
7885 case REG_DEAD:
7886 case REG_UNUSED:
7887 /* Move these notes from the original insn to the last new insn where
7888 the register is now set. */
7889
7890 for (insn = last;; insn = PREV_INSN (insn))
7891 {
7892 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7893 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
7894 {
7895 /* If this note refers to a multiple word hard register, it
7896 may have been split into several smaller hard register
7897 references, so handle it specially. */
7898 temp = XEXP (note, 0);
7899 if (REG_NOTE_KIND (note) == REG_DEAD
7900 && GET_CODE (temp) == REG
7901 && REGNO (temp) < FIRST_PSEUDO_REGISTER
7902 && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) > 1)
7903 split_hard_reg_notes (note, first, last, orig_insn);
7904 else
7905 {
7906 XEXP (note, 1) = REG_NOTES (insn);
7907 REG_NOTES (insn) = note;
7908 }
7909
7910 /* Sometimes need to convert REG_UNUSED notes to REG_DEAD
7911 notes. */
7912 /* ??? This won't handle multiple word registers correctly,
7913 but should be good enough for now. */
7914 if (REG_NOTE_KIND (note) == REG_UNUSED
7915 && !dead_or_set_p (insn, XEXP (note, 0)))
7916 PUT_REG_NOTE_KIND (note, REG_DEAD);
7917
7918 /* The reg only dies in one insn, the last one that uses
7919 it. */
7920 break;
7921 }
7922 /* It must die somewhere, fail it we couldn't find where it died.
7923
7924 If this is a REG_UNUSED note, then it must be a temporary
7925 register that was not needed by this instantiation of the
7926 pattern, so we can safely ignore it. */
7927 if (insn == first)
7928 {
7929 /* After reload, REG_DEAD notes come sometimes an
7930 instruction after the register actually dies. */
7931 if (reload_completed && REG_NOTE_KIND (note) == REG_DEAD)
7932 {
7933 XEXP (note, 1) = REG_NOTES (insn);
7934 REG_NOTES (insn) = note;
7935 break;
7936 }
7937
7938 if (REG_NOTE_KIND (note) != REG_UNUSED)
7939 abort ();
7940
7941 break;
7942 }
7943 }
7944 break;
7945
7946 case REG_WAS_0:
7947 /* This note applies to the dest of the original insn. Find the
7948 first new insn that now has the same dest, and move the note
7949 there. */
7950
7951 if (!orig_dest)
7952 abort ();
7953
7954 for (insn = first;; insn = NEXT_INSN (insn))
7955 {
7956 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7957 && (temp = single_set (insn))
7958 && rtx_equal_p (SET_DEST (temp), orig_dest))
7959 {
7960 XEXP (note, 1) = REG_NOTES (insn);
7961 REG_NOTES (insn) = note;
7962 /* The reg is only zero before one insn, the first that
7963 uses it. */
7964 break;
7965 }
7966 /* If this note refers to a multiple word hard
7967 register, it may have been split into several smaller
7968 hard register references. We could split the notes,
7969 but simply dropping them is good enough. */
7970 if (GET_CODE (orig_dest) == REG
7971 && REGNO (orig_dest) < FIRST_PSEUDO_REGISTER
7972 && HARD_REGNO_NREGS (REGNO (orig_dest),
7973 GET_MODE (orig_dest)) > 1)
7974 break;
7975 /* It must be set somewhere, fail if we couldn't find where it
7976 was set. */
7977 if (insn == last)
7978 abort ();
7979 }
7980 break;
7981
7982 case REG_EQUAL:
7983 case REG_EQUIV:
7984 /* A REG_EQUIV or REG_EQUAL note on an insn with more than one
7985 set is meaningless. Just drop the note. */
7986 if (!orig_dest)
7987 break;
7988
7989 case REG_NO_CONFLICT:
7990 /* These notes apply to the dest of the original insn. Find the last
7991 new insn that now has the same dest, and move the note there. */
7992
7993 if (!orig_dest)
7994 abort ();
7995
7996 for (insn = last;; insn = PREV_INSN (insn))
7997 {
7998 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7999 && (temp = single_set (insn))
8000 && rtx_equal_p (SET_DEST (temp), orig_dest))
8001 {
8002 XEXP (note, 1) = REG_NOTES (insn);
8003 REG_NOTES (insn) = note;
8004 /* Only put this note on one of the new insns. */
8005 break;
8006 }
8007
8008 /* The original dest must still be set someplace. Abort if we
8009 couldn't find it. */
8010 if (insn == first)
8011 {
8012 /* However, if this note refers to a multiple word hard
8013 register, it may have been split into several smaller
8014 hard register references. We could split the notes,
8015 but simply dropping them is good enough. */
8016 if (GET_CODE (orig_dest) == REG
8017 && REGNO (orig_dest) < FIRST_PSEUDO_REGISTER
8018 && HARD_REGNO_NREGS (REGNO (orig_dest),
8019 GET_MODE (orig_dest)) > 1)
8020 break;
8021 /* Likewise for multi-word memory references. */
8022 if (GET_CODE (orig_dest) == MEM
8023 && SIZE_FOR_MODE (orig_dest) > MOVE_MAX)
8024 break;
8025 abort ();
8026 }
8027 }
8028 break;
8029
8030 case REG_LIBCALL:
8031 /* Move a REG_LIBCALL note to the first insn created, and update
8032 the corresponding REG_RETVAL note. */
8033 XEXP (note, 1) = REG_NOTES (first);
8034 REG_NOTES (first) = note;
8035
8036 insn = XEXP (note, 0);
8037 note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
8038 if (note)
8039 XEXP (note, 0) = first;
8040 break;
8041
8042 case REG_EXEC_COUNT:
8043 /* Move a REG_EXEC_COUNT note to the first insn created. */
8044 XEXP (note, 1) = REG_NOTES (first);
8045 REG_NOTES (first) = note;
8046 break;
8047
8048 case REG_RETVAL:
8049 /* Move a REG_RETVAL note to the last insn created, and update
8050 the corresponding REG_LIBCALL note. */
8051 XEXP (note, 1) = REG_NOTES (last);
8052 REG_NOTES (last) = note;
8053
8054 insn = XEXP (note, 0);
8055 note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
8056 if (note)
8057 XEXP (note, 0) = last;
8058 break;
8059
8060 case REG_NONNEG:
8061 case REG_BR_PROB:
8062 /* This should be moved to whichever instruction is a JUMP_INSN. */
8063
8064 for (insn = last;; insn = PREV_INSN (insn))
8065 {
8066 if (GET_CODE (insn) == JUMP_INSN)
8067 {
8068 XEXP (note, 1) = REG_NOTES (insn);
8069 REG_NOTES (insn) = note;
8070 /* Only put this note on one of the new insns. */
8071 break;
8072 }
8073 /* Fail if we couldn't find a JUMP_INSN. */
8074 if (insn == first)
8075 abort ();
8076 }
8077 break;
8078
8079 case REG_INC:
8080 /* reload sometimes leaves obsolete REG_INC notes around. */
8081 if (reload_completed)
8082 break;
8083 /* This should be moved to whichever instruction now has the
8084 increment operation. */
8085 abort ();
8086
8087 case REG_LABEL:
8088 /* Should be moved to the new insn(s) which use the label. */
8089 for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
8090 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
8091 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
8092 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL,
8093 XEXP (note, 0), REG_NOTES (insn));
8094 break;
8095
8096 case REG_CC_SETTER:
8097 case REG_CC_USER:
8098 /* These two notes will never appear until after reorg, so we don't
8099 have to handle them here. */
8100 default:
8101 abort ();
8102 }
8103 }
8104
8105 /* Each new insn created, except the last, has a new set. If the destination
8106 is a register, then this reg is now live across several insns, whereas
8107 previously the dest reg was born and died within the same insn. To
8108 reflect this, we now need a REG_DEAD note on the insn where this
8109 dest reg dies.
8110
8111 Similarly, the new insns may have clobbers that need REG_UNUSED notes. */
8112
8113 for (insn = first; insn != last; insn = NEXT_INSN (insn))
8114 {
8115 rtx pat;
8116 int i;
8117
8118 pat = PATTERN (insn);
8119 if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
8120 new_insn_dead_notes (pat, insn, last, orig_insn);
8121 else if (GET_CODE (pat) == PARALLEL)
8122 {
8123 for (i = 0; i < XVECLEN (pat, 0); i++)
8124 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
8125 || GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
8126 new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
8127 }
8128 }
8129
8130 /* If any insn, except the last, uses the register set by the last insn,
8131 then we need a new REG_DEAD note on that insn. In this case, there
8132 would not have been a REG_DEAD note for this register in the original
8133 insn because it was used and set within one insn. */
8134
8135 set = single_set (last);
8136 if (set)
8137 {
8138 rtx dest = SET_DEST (set);
8139
8140 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
8141 || GET_CODE (dest) == STRICT_LOW_PART
8142 || GET_CODE (dest) == SIGN_EXTRACT)
8143 dest = XEXP (dest, 0);
8144
8145 if (GET_CODE (dest) == REG
8146 /* Global registers are always live, so the code below does not
8147 apply to them. */
8148 && (REGNO (dest) >= FIRST_PSEUDO_REGISTER
8149 || ! global_regs[REGNO (dest)]))
8150 {
8151 rtx stop_insn = PREV_INSN (first);
8152
8153 /* If the last insn uses the register that it is setting, then
8154 we don't want to put a REG_DEAD note there. Search backwards
8155 to find the first insn that sets but does not use DEST. */
8156
8157 insn = last;
8158 if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
8159 {
8160 for (insn = PREV_INSN (insn); insn != first;
8161 insn = PREV_INSN (insn))
8162 {
8163 if ((set = single_set (insn))
8164 && reg_mentioned_p (dest, SET_DEST (set))
8165 && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
8166 break;
8167 }
8168 }
8169
8170 /* Now find the first insn that uses but does not set DEST. */
8171
8172 for (insn = PREV_INSN (insn); insn != stop_insn;
8173 insn = PREV_INSN (insn))
8174 {
8175 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
8176 && reg_mentioned_p (dest, PATTERN (insn))
8177 && (set = single_set (insn)))
8178 {
8179 rtx insn_dest = SET_DEST (set);
8180
8181 while (GET_CODE (insn_dest) == ZERO_EXTRACT
8182 || GET_CODE (insn_dest) == SUBREG
8183 || GET_CODE (insn_dest) == STRICT_LOW_PART
8184 || GET_CODE (insn_dest) == SIGN_EXTRACT)
8185 insn_dest = XEXP (insn_dest, 0);
8186
8187 if (insn_dest != dest)
8188 {
8189 note = rtx_alloc (EXPR_LIST);
8190 PUT_REG_NOTE_KIND (note, REG_DEAD);
8191 XEXP (note, 0) = dest;
8192 XEXP (note, 1) = REG_NOTES (insn);
8193 REG_NOTES (insn) = note;
8194 /* The reg only dies in one insn, the last one
8195 that uses it. */
8196 break;
8197 }
8198 }
8199 }
8200 }
8201 }
8202
8203 /* If the original dest is modifying a multiple register target, and the
8204 original instruction was split such that the original dest is now set
8205 by two or more SUBREG sets, then the split insns no longer kill the
8206 destination of the original insn.
8207
8208 In this case, if there exists an instruction in the same basic block,
8209 before the split insn, which uses the original dest, and this use is
8210 killed by the original insn, then we must remove the REG_DEAD note on
8211 this insn, because it is now superfluous.
8212
8213 This does not apply when a hard register gets split, because the code
8214 knows how to handle overlapping hard registers properly. */
8215 if (orig_dest && GET_CODE (orig_dest) == REG)
8216 {
8217 int found_orig_dest = 0;
8218 int found_split_dest = 0;
8219
8220 for (insn = first;; insn = NEXT_INSN (insn))
8221 {
8222 set = single_set (insn);
8223 if (set)
8224 {
8225 if (GET_CODE (SET_DEST (set)) == REG
8226 && REGNO (SET_DEST (set)) == REGNO (orig_dest))
8227 {
8228 found_orig_dest = 1;
8229 break;
8230 }
8231 else if (GET_CODE (SET_DEST (set)) == SUBREG
8232 && SUBREG_REG (SET_DEST (set)) == orig_dest)
8233 {
8234 found_split_dest = 1;
8235 break;
8236 }
8237 }
8238
8239 if (insn == last)
8240 break;
8241 }
8242
8243 if (found_split_dest)
8244 {
8245 /* Search backwards from FIRST, looking for the first insn that uses
8246 the original dest. Stop if we pass a CODE_LABEL or a JUMP_INSN.
8247 If we find an insn, and it has a REG_DEAD note, then delete the
8248 note. */
8249
8250 for (insn = first; insn; insn = PREV_INSN (insn))
8251 {
8252 if (GET_CODE (insn) == CODE_LABEL
8253 || GET_CODE (insn) == JUMP_INSN)
8254 break;
8255 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
8256 && reg_mentioned_p (orig_dest, insn))
8257 {
8258 note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
8259 if (note)
8260 remove_note (insn, note);
8261 }
8262 }
8263 }
8264 else if (!found_orig_dest)
8265 {
8266 /* This should never happen. */
8267 abort ();
8268 }
8269 }
8270
8271 /* Update reg_n_sets. This is necessary to prevent local alloc from
8272 converting REG_EQUAL notes to REG_EQUIV when splitting has modified
8273 a reg from set once to set multiple times. */
8274
8275 {
8276 rtx x = PATTERN (orig_insn);
8277 RTX_CODE code = GET_CODE (x);
8278
8279 if (code == SET || code == CLOBBER)
8280 update_n_sets (x, -1);
8281 else if (code == PARALLEL)
8282 {
8283 int i;
8284 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8285 {
8286 code = GET_CODE (XVECEXP (x, 0, i));
8287 if (code == SET || code == CLOBBER)
8288 update_n_sets (XVECEXP (x, 0, i), -1);
8289 }
8290 }
8291
8292 for (insn = first;; insn = NEXT_INSN (insn))
8293 {
8294 x = PATTERN (insn);
8295 code = GET_CODE (x);
8296
8297 if (code == SET || code == CLOBBER)
8298 update_n_sets (x, 1);
8299 else if (code == PARALLEL)
8300 {
8301 int i;
8302 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8303 {
8304 code = GET_CODE (XVECEXP (x, 0, i));
8305 if (code == SET || code == CLOBBER)
8306 update_n_sets (XVECEXP (x, 0, i), 1);
8307 }
8308 }
8309
8310 if (insn == last)
8311 break;
8312 }
8313 }
8314 }
8315
8316 /* Do the splitting of insns in the block b. */
8317
8318 static void
8319 split_block_insns (b)
8320 int b;
8321 {
8322 rtx insn, next;
8323
8324 for (insn = basic_block_head[b];; insn = next)
8325 {
8326 rtx prev;
8327 rtx set;
8328
8329 /* Can't use `next_real_insn' because that
8330 might go across CODE_LABELS and short-out basic blocks. */
8331 next = NEXT_INSN (insn);
8332 if (GET_CODE (insn) != INSN)
8333 {
8334 if (insn == basic_block_end[b])
8335 break;
8336
8337 continue;
8338 }
8339
8340 /* Don't split no-op move insns. These should silently disappear
8341 later in final. Splitting such insns would break the code
8342 that handles REG_NO_CONFLICT blocks. */
8343 set = single_set (insn);
8344 if (set && rtx_equal_p (SET_SRC (set), SET_DEST (set)))
8345 {
8346 if (insn == basic_block_end[b])
8347 break;
8348
8349 /* Nops get in the way while scheduling, so delete them now if
8350 register allocation has already been done. It is too risky
8351 to try to do this before register allocation, and there are
8352 unlikely to be very many nops then anyways. */
8353 if (reload_completed)
8354 {
8355 PUT_CODE (insn, NOTE);
8356 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
8357 NOTE_SOURCE_FILE (insn) = 0;
8358 }
8359
8360 continue;
8361 }
8362
8363 /* Split insns here to get max fine-grain parallelism. */
8364 prev = PREV_INSN (insn);
8365 /* It is probably not worthwhile to try to split again in
8366 the second pass. However, if flag_schedule_insns is not set,
8367 the first and only (if any) scheduling pass is after reload. */
8368 if (reload_completed == 0 || ! flag_schedule_insns)
8369 {
8370 rtx last, first = PREV_INSN (insn);
8371 rtx notes = REG_NOTES (insn);
8372 last = try_split (PATTERN (insn), insn, 1);
8373 if (last != insn)
8374 {
8375 /* try_split returns the NOTE that INSN became. */
8376 first = NEXT_INSN (first);
8377 update_flow_info (notes, first, last, insn);
8378
8379 PUT_CODE (insn, NOTE);
8380 NOTE_SOURCE_FILE (insn) = 0;
8381 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
8382 if (insn == basic_block_head[b])
8383 basic_block_head[b] = first;
8384 if (insn == basic_block_end[b])
8385 {
8386 basic_block_end[b] = last;
8387 break;
8388 }
8389 }
8390 }
8391
8392 if (insn == basic_block_end[b])
8393 break;
8394 }
8395 }
8396
8397 /* The one entry point in this file. DUMP_FILE is the dump file for
8398 this pass. */
8399
8400 void
8401 schedule_insns (dump_file)
8402 FILE *dump_file;
8403 {
8404
8405 int max_uid;
8406 int b;
8407 int i;
8408 rtx insn;
8409 int rgn;
8410
8411 int luid;
8412
8413 /* disable speculative loads in their presence if cc0 defined */
8414 #ifdef HAVE_cc0
8415 flag_schedule_speculative_load = 0;
8416 #endif
8417
8418 /* Taking care of this degenerate case makes the rest of
8419 this code simpler. */
8420 if (n_basic_blocks == 0)
8421 return;
8422
8423 /* set dump and sched_verbose for the desired debugging output. If no
8424 dump-file was specified, but -fsched-verbose-N (any N), print to stderr.
8425 For -fsched-verbose-N, N>=10, print everything to stderr. */
8426 sched_verbose = sched_verbose_param;
8427 if (sched_verbose_param == 0 && dump_file)
8428 sched_verbose = 1;
8429 dump = ((sched_verbose_param >= 10 || !dump_file) ? stderr : dump_file);
8430
8431 nr_inter = 0;
8432 nr_spec = 0;
8433
8434 /* Initialize the unused_*_lists. We can't use the ones left over from
8435 the previous function, because gcc has freed that memory. We can use
8436 the ones left over from the first sched pass in the second pass however,
8437 so only clear them on the first sched pass. The first pass is before
8438 reload if flag_schedule_insns is set, otherwise it is afterwards. */
8439
8440 if (reload_completed == 0 || !flag_schedule_insns)
8441 {
8442 unused_insn_list = 0;
8443 unused_expr_list = 0;
8444 }
8445
8446 /* initialize issue_rate */
8447 issue_rate = get_issue_rate ();
8448
8449 /* do the splitting first for all blocks */
8450 for (b = 0; b < n_basic_blocks; b++)
8451 split_block_insns (b);
8452
8453 max_uid = (get_max_uid () + 1);
8454
8455 cant_move = (char *) alloca (max_uid * sizeof (char));
8456 bzero ((char *) cant_move, max_uid * sizeof (char));
8457
8458 fed_by_spec_load = (char *) alloca (max_uid * sizeof (char));
8459 bzero ((char *) fed_by_spec_load, max_uid * sizeof (char));
8460
8461 is_load_insn = (char *) alloca (max_uid * sizeof (char));
8462 bzero ((char *) is_load_insn, max_uid * sizeof (char));
8463
8464 insn_orig_block = (int *) alloca (max_uid * sizeof (int));
8465 insn_luid = (int *) alloca (max_uid * sizeof (int));
8466
8467 luid = 0;
8468 for (b = 0; b < n_basic_blocks; b++)
8469 for (insn = basic_block_head[b];; insn = NEXT_INSN (insn))
8470 {
8471 INSN_BLOCK (insn) = b;
8472 INSN_LUID (insn) = luid++;
8473
8474 if (insn == basic_block_end[b])
8475 break;
8476 }
8477
8478 /* after reload, remove inter-blocks dependences computed before reload. */
8479 if (reload_completed)
8480 {
8481 int b;
8482 rtx insn;
8483
8484 for (b = 0; b < n_basic_blocks; b++)
8485 for (insn = basic_block_head[b];; insn = NEXT_INSN (insn))
8486 {
8487 rtx link;
8488
8489 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
8490 {
8491 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
8492 {
8493 rtx x = XEXP (link, 0);
8494
8495 if (INSN_BLOCK (x) != b)
8496 remove_dependence (insn, x);
8497 }
8498 }
8499
8500 if (insn == basic_block_end[b])
8501 break;
8502 }
8503 }
8504
8505 nr_regions = 0;
8506 rgn_table = (region *) alloca ((n_basic_blocks) * sizeof (region));
8507 rgn_bb_table = (int *) alloca ((n_basic_blocks) * sizeof (int));
8508 block_to_bb = (int *) alloca ((n_basic_blocks) * sizeof (int));
8509 containing_rgn = (int *) alloca ((n_basic_blocks) * sizeof (int));
8510
8511 /* compute regions for scheduling */
8512 if (reload_completed
8513 || n_basic_blocks == 1
8514 || !flag_schedule_interblock)
8515 {
8516 find_single_block_region ();
8517 }
8518 else
8519 {
8520 /* an estimation for nr_edges is computed in is_cfg_nonregular () */
8521 nr_edges = 0;
8522
8523 /* verify that a 'good' control flow graph can be built */
8524 if (is_cfg_nonregular ()
8525 || nr_edges <= 1)
8526 {
8527 find_single_block_region ();
8528 }
8529 else
8530 {
8531 /* build control flow graph */
8532 in_edges = (int *) alloca (n_basic_blocks * sizeof (int));
8533 out_edges = (int *) alloca (n_basic_blocks * sizeof (int));
8534 bzero ((char *) in_edges, n_basic_blocks * sizeof (int));
8535 bzero ((char *) out_edges, n_basic_blocks * sizeof (int));
8536
8537 edge_table =
8538 (edge *) alloca ((nr_edges) * sizeof (edge));
8539 bzero ((char *) edge_table,
8540 ((nr_edges) * sizeof (edge)));
8541 build_control_flow ();
8542
8543 /* identify reducible inner loops and compute regions */
8544 find_rgns ();
8545
8546 if (sched_verbose >= 3)
8547 {
8548 debug_control_flow ();
8549 debug_regions ();
8550 }
8551
8552 }
8553 }
8554
8555 /* Allocate data for this pass. See comments, above,
8556 for what these vectors do. */
8557 insn_priority = (int *) alloca (max_uid * sizeof (int));
8558 insn_reg_weight = (int *) alloca (max_uid * sizeof (int));
8559 insn_tick = (int *) alloca (max_uid * sizeof (int));
8560 insn_costs = (short *) alloca (max_uid * sizeof (short));
8561 insn_units = (short *) alloca (max_uid * sizeof (short));
8562 insn_blockage = (unsigned int *) alloca (max_uid * sizeof (unsigned int));
8563 insn_ref_count = (int *) alloca (max_uid * sizeof (int));
8564
8565 /* Allocate for forward dependencies */
8566 insn_dep_count = (int *) alloca (max_uid * sizeof (int));
8567 insn_depend = (rtx *) alloca (max_uid * sizeof (rtx));
8568
8569 if (reload_completed == 0)
8570 {
8571 int i;
8572
8573 sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
8574 sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
8575 sched_reg_basic_block = (int *) alloca (max_regno * sizeof (int));
8576 bb_live_regs = ALLOCA_REG_SET ();
8577 bzero ((char *) sched_reg_n_calls_crossed, max_regno * sizeof (int));
8578 bzero ((char *) sched_reg_live_length, max_regno * sizeof (int));
8579
8580 for (i = 0; i < max_regno; i++)
8581 sched_reg_basic_block[i] = REG_BLOCK_UNKNOWN;
8582 }
8583 else
8584 {
8585 sched_reg_n_calls_crossed = 0;
8586 sched_reg_live_length = 0;
8587 bb_live_regs = 0;
8588 }
8589 init_alias_analysis ();
8590
8591 if (write_symbols != NO_DEBUG)
8592 {
8593 rtx line;
8594
8595 line_note = (rtx *) alloca (max_uid * sizeof (rtx));
8596 bzero ((char *) line_note, max_uid * sizeof (rtx));
8597 line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
8598 bzero ((char *) line_note_head, n_basic_blocks * sizeof (rtx));
8599
8600 /* Save-line-note-head:
8601 Determine the line-number at the start of each basic block.
8602 This must be computed and saved now, because after a basic block's
8603 predecessor has been scheduled, it is impossible to accurately
8604 determine the correct line number for the first insn of the block. */
8605
8606 for (b = 0; b < n_basic_blocks; b++)
8607 for (line = basic_block_head[b]; line; line = PREV_INSN (line))
8608 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
8609 {
8610 line_note_head[b] = line;
8611 break;
8612 }
8613 }
8614
8615 bzero ((char *) insn_priority, max_uid * sizeof (int));
8616 bzero ((char *) insn_reg_weight, max_uid * sizeof (int));
8617 bzero ((char *) insn_tick, max_uid * sizeof (int));
8618 bzero ((char *) insn_costs, max_uid * sizeof (short));
8619 bzero ((char *) insn_units, max_uid * sizeof (short));
8620 bzero ((char *) insn_blockage, max_uid * sizeof (unsigned int));
8621 bzero ((char *) insn_ref_count, max_uid * sizeof (int));
8622
8623 /* Initialize for forward dependencies */
8624 bzero ((char *) insn_depend, max_uid * sizeof (rtx));
8625 bzero ((char *) insn_dep_count, max_uid * sizeof (int));
8626
8627 /* Find units used in this fuction, for visualization */
8628 if (sched_verbose)
8629 init_target_units ();
8630
8631 /* ??? Add a NOTE after the last insn of the last basic block. It is not
8632 known why this is done. */
8633
8634 insn = basic_block_end[n_basic_blocks - 1];
8635 if (NEXT_INSN (insn) == 0
8636 || (GET_CODE (insn) != NOTE
8637 && GET_CODE (insn) != CODE_LABEL
8638 /* Don't emit a NOTE if it would end up between an unconditional
8639 jump and a BARRIER. */
8640 && !(GET_CODE (insn) == JUMP_INSN
8641 && GET_CODE (NEXT_INSN (insn)) == BARRIER)))
8642 emit_note_after (NOTE_INSN_DELETED, basic_block_end[n_basic_blocks - 1]);
8643
8644 /* Schedule every region in the subroutine */
8645 for (rgn = 0; rgn < nr_regions; rgn++)
8646 {
8647 schedule_region (rgn);
8648
8649 #ifdef USE_C_ALLOCA
8650 alloca (0);
8651 #endif
8652 }
8653
8654 /* Reposition the prologue and epilogue notes in case we moved the
8655 prologue/epilogue insns. */
8656 if (reload_completed)
8657 reposition_prologue_and_epilogue_notes (get_insns ());
8658
8659 /* delete redundant line notes. */
8660 if (write_symbols != NO_DEBUG)
8661 rm_redundant_line_notes ();
8662
8663 /* Update information about uses of registers in the subroutine. */
8664 if (reload_completed == 0)
8665 update_reg_usage ();
8666
8667 if (sched_verbose)
8668 {
8669 if (reload_completed == 0 && flag_schedule_interblock)
8670 {
8671 fprintf (dump, "\n;; Procedure interblock/speculative motions == %d/%d \n",
8672 nr_inter, nr_spec);
8673 }
8674 else
8675 {
8676 if (nr_inter > 0)
8677 abort ();
8678 }
8679 fprintf (dump, "\n\n");
8680 }
8681
8682 if (bb_live_regs)
8683 FREE_REG_SET (bb_live_regs);
8684 }
8685 #endif /* INSN_SCHEDULING */
This page took 0.42443 seconds and 4 git commands to generate.