/* Instruction scheduling pass.
   Copyright (C) 1992, 93-98, 1999 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

   This file is part of GNU CC.

   GNU CC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GNU CC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GNU CC; see the file COPYING.  If not, write to the Free
   Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */


/* Instruction scheduling pass.

   This pass implements list scheduling within basic blocks.  It is
   run twice: (1) after flow analysis, but before register allocation,
   and (2) after register allocation.

   The first run performs interblock scheduling, moving insns between
   different blocks in the same "region", and the second runs only
   basic block scheduling.

   Interblock motions performed are useful motions and speculative
   motions, including speculative loads.  Motions requiring code
   duplication are not supported.  The identification of motion type
   and the check for validity of speculative motions requires
   construction and analysis of the function's control flow graph.
   The scheduler works as follows:

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   Function unit conflicts are resolved during forward list scheduling
   by tracking the time when each insn is committed to the schedule
   and from that, the time the function units it uses must be free.
   As insns on the ready list are considered for scheduling, those
   that would result in a blockage of the already committed insns are
   queued until no blockage will result.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block over interblock motion, ties broken by
   4.  prefer useful over speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, ties broken by
   7.  choose the insn which has the most insns dependent on it, or finally
   8.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To a first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_backward_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS backward
   dependences.  LOG_LINKS are translated into INSN_DEPEND forward
   dependences for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live register constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BLOCK_HEAD,
   BLOCK_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.

   The main entry point for this pass is schedule_insns(), called for
   each function.  The work of the scheduler is organized in three
   levels: (1) function level: insns are subject to splitting,
   control-flow-graph is constructed, regions are computed (after
   reload, each region is of one block), (2) region level: control
   flow graph attributes required for interblock scheduling are
   computed (dominators, reachability, etc.), data dependences and
   priorities are computed, and (3) block level: insns in the block
   are actually scheduled.  */
\f
#include "config.h"
#include "system.h"
#include "toplev.h"
#include "rtl.h"
#include "basic-block.h"
#include "regs.h"
#include "function.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"

extern char *reg_known_equiv_p;
extern rtx *reg_known_value;

#ifdef INSN_SCHEDULING

/* target_units bitmask has 1 for each unit in the cpu.  It should be
   possible to compute this variable from the machine description.
   But currently it is computed by examining the insn list.  Since
   this is only needed for visualization, it seems an acceptable
   solution.  (For understanding the mapping of bits to units, see
   definition of function_units[] in "insn-attrtab.c".)  */

static int target_units = 0;

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

static int issue_rate;

#ifndef ISSUE_RATE
#define ISSUE_RATE 1
#endif

/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose-N:
   N>0 and no -dSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

#define MAX_RGN_BLOCKS 10
#define MAX_RGN_INSNS 100

static int sched_verbose_param = 0;
static int sched_verbose = 0;

/* nr_inter/spec count interblock/speculative motion for the function.  */
static int nr_inter, nr_spec;


/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dRS).  */
static FILE *dump = 0;

/* fix_sched_param() is called from toplev.c upon detection
   of the -fsched-***-N options.  */

void
fix_sched_param (param, val)
     char *param, *val;
{
  if (!strcmp (param, "verbose"))
    sched_verbose_param = atoi (val);
  else
    warning ("fix_sched_param: unknown param: %s", param);
}


/* Arrays set up by scheduling for the same respective purposes as
   similar-named arrays set up by flow analysis.  We work with these
   arrays during the scheduling pass so we can compare values against
   unscheduled code.

   Values of these arrays are copied at the end of this pass into the
   arrays set up by flow analysis.  */
static int *sched_reg_n_calls_crossed;
static int *sched_reg_live_length;
static int *sched_reg_basic_block;

/* We need to know the current block number during the post scheduling
   update of live register information so that we can also update
   REG_BASIC_BLOCK if a register changes blocks.  */
static int current_block_num;

/* Element N is the next insn that sets (hard or pseudo) register
   N within the current basic block; or zero, if there is no
   such insn.  Needed for new registers which may be introduced
   by splitting insns.  */
static rtx *reg_last_uses;
static rtx *reg_last_sets;
static rtx *reg_last_clobbers;
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static int reg_pending_sets_all;

/* Vector indexed by INSN_UID giving the original ordering of the insns.  */
static int *insn_luid;
#define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving each instruction a priority.  */
static int *insn_priority;
#define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])

static short *insn_costs;
#define INSN_COST(INSN) insn_costs[INSN_UID (INSN)]

/* Vector indexed by INSN_UID giving an encoding of the function units
   used.  */
static short *insn_units;
#define INSN_UNIT(INSN) insn_units[INSN_UID (INSN)]

/* Vector indexed by INSN_UID giving each instruction a register-weight.
   This weight is an estimate of the insn's contribution to register
   pressure.  */
static int *insn_reg_weight;
#define INSN_REG_WEIGHT(INSN) (insn_reg_weight[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving a list of insns which
   depend upon INSN.  Unlike LOG_LINKS, it represents forward dependences.  */
static rtx *insn_depend;
#define INSN_DEPEND(INSN) insn_depend[INSN_UID (INSN)]

/* Vector indexed by INSN_UID.  Initialized to the number of incoming
   edges in the forward dependence graph (= number of LOG_LINKS).  As
   scheduling proceeds, dependence counts are decreased.  An
   instruction moves to the ready list when its counter is zero.  */
static int *insn_dep_count;
#define INSN_DEP_COUNT(INSN) (insn_dep_count[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving an encoding of the blockage range
   function.  The unit and the range are encoded.  */
static unsigned int *insn_blockage;
#define INSN_BLOCKAGE(INSN) insn_blockage[INSN_UID (INSN)]
#define UNIT_BITS 5
#define BLOCKAGE_MASK ((1 << BLOCKAGE_BITS) - 1)
#define ENCODE_BLOCKAGE(U, R)                           \
  (((U) << BLOCKAGE_BITS                                \
    | MIN_BLOCKAGE_COST (R)) << BLOCKAGE_BITS           \
   | MAX_BLOCKAGE_COST (R))
#define UNIT_BLOCKED(B) ((B) >> (2 * BLOCKAGE_BITS))
#define BLOCKAGE_RANGE(B)                                                \
  (((((B) >> BLOCKAGE_BITS) & BLOCKAGE_MASK) << (HOST_BITS_PER_INT / 2)) \
   | ((B) & BLOCKAGE_MASK))

/* Encodings of the `<name>_unit_blockage_range' function.  */
#define MIN_BLOCKAGE_COST(R) ((R) >> (HOST_BITS_PER_INT / 2))
#define MAX_BLOCKAGE_COST(R) ((R) & ((1 << (HOST_BITS_PER_INT / 2)) - 1))
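
/* For illustration only: a hypothetical check of how the encoding
   round-trips, assuming BLOCKAGE_BITS == 5 (from genattr) and
   HOST_BITS_PER_INT == 32.  A range R packs its minimum cost in the
   high half-word and its maximum cost in the low half-word;
   ENCODE_BLOCKAGE then packs unit, min and max into one int.  */
#if 0
static void
example_blockage_encoding ()
{
  /* Unit 3, blockage range [1, 4]: min in the high half, max in the low.  */
  unsigned int r = (1 << (HOST_BITS_PER_INT / 2)) | 4;
  unsigned int b = ENCODE_BLOCKAGE (3, r);

  if (UNIT_BLOCKED (b) != 3
      || MIN_BLOCKAGE_COST (BLOCKAGE_RANGE (b)) != 1
      || MAX_BLOCKAGE_COST (BLOCKAGE_RANGE (b)) != 4)
    abort ();
}
#endif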

#define DONE_PRIORITY -1
#define MAX_PRIORITY 0x7fffffff
#define TAIL_PRIORITY 0x7ffffffe
#define LAUNCH_PRIORITY 0x7f000001
#define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
#define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)

/* Vector indexed by INSN_UID giving number of insns referring to this insn.  */
static int *insn_ref_count;
#define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving line-number note in effect for each
   insn.  For line-number notes, this indicates whether the note may be
   reused.  */
static rtx *line_note;
#define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

/* Regsets telling whether a given register is live or dead before the last
   scheduled insn.  Must scan the instructions once before scheduling to
   determine what registers are live or dead at the end of the block.  */
static regset bb_live_regs;

/* Regset telling whether a given register is live after the insn currently
   being scheduled.  Before processing an insn, this is equal to bb_live_regs
   above.  This is used so that we can find registers that are newly born/dead
   after processing an insn.  */
static regset old_live_regs;

/* The chain of REG_DEAD notes.  REG_DEAD notes are removed from all insns
   during the initial scan and reused later.  If there are not exactly as
   many REG_DEAD notes in the post scheduled code as there were in the
   prescheduled code then we trigger an abort because this indicates a bug.  */
static rtx dead_notes;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.  Insns may
   move from the "Ready" list to the "Queued" set if they are blocked
   due to a function unit conflict.

   The "Pending" list (P) consists of the insns on the INSN_DEPEND
   lists of the unscheduled insns, i.e., those that are ready, queued,
   and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transition (R->Q) is implemented in `queue_insn' when an
   insn is found to have a function unit conflict with the already
   committed insns.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  INSN_QUEUE_SIZE is a power of two larger than
   MAX_BLOCKAGE and MAX_READY_COST computed by genattr.c.  This is the
   longest time an insn may be queued.  */
static rtx insn_queue[INSN_QUEUE_SIZE];
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & (INSN_QUEUE_SIZE-1))
#define NEXT_Q_AFTER(X, C) (((X)+C) & (INSN_QUEUE_SIZE-1))
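
/* A sketch (not compiled; INSN and N_CYCLES are hypothetical locals)
   of how the queue is used, modelled on queue_insn declared above: an
   insn stalled for N_CYCLES goes into the slot the clock pointer will
   reach N_CYCLES ticks from now, and each tick drains slot
   NEXT_Q (q_ptr) back into the ready list.  */
#if 0
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  insn_queue[next_q] = alloc_INSN_LIST (insn, insn_queue[next_q]);
  q_size += 1;
#endif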

/* Vector indexed by INSN_UID giving the minimum clock tick at which
   the insn becomes ready.  This is used to note timing constraints for
   insns in the pending list.  */
static int *insn_tick;
#define INSN_TICK(INSN) (insn_tick[INSN_UID (INSN)])

/* Data structure for keeping track of register information
   during that register's life.  */

struct sometimes
  {
    int regno;
    int live_length;
    int calls_crossed;
  };

/* Forward declarations.  */
static void add_dependence PROTO ((rtx, rtx, enum reg_note));
static void remove_dependence PROTO ((rtx, rtx));
static rtx find_insn_list PROTO ((rtx, rtx));
static int insn_unit PROTO ((rtx));
static unsigned int blockage_range PROTO ((int, rtx));
static void clear_units PROTO ((void));
static int actual_hazard_this_instance PROTO ((int, int, rtx, int, int));
static void schedule_unit PROTO ((int, rtx, int));
static int actual_hazard PROTO ((int, rtx, int, int));
static int potential_hazard PROTO ((int, rtx, int));
static int insn_cost PROTO ((rtx, rtx, rtx));
static int priority PROTO ((rtx));
static void free_pending_lists PROTO ((void));
static void add_insn_mem_dependence PROTO ((rtx *, rtx *, rtx, rtx));
static void flush_pending_lists PROTO ((rtx, int));
static void sched_analyze_1 PROTO ((rtx, rtx));
static void sched_analyze_2 PROTO ((rtx, rtx));
static void sched_analyze_insn PROTO ((rtx, rtx, rtx));
static void sched_analyze PROTO ((rtx, rtx));
static void sched_note_set PROTO ((rtx, int));
static int rank_for_schedule PROTO ((const GENERIC_PTR, const GENERIC_PTR));
static void swap_sort PROTO ((rtx *, int));
static void queue_insn PROTO ((rtx, int));
static int schedule_insn PROTO ((rtx, rtx *, int, int));
static void create_reg_dead_note PROTO ((rtx, rtx));
static void attach_deaths PROTO ((rtx, rtx, int));
static void attach_deaths_insn PROTO ((rtx));
static int new_sometimes_live PROTO ((struct sometimes *, int, int));
static void finish_sometimes_live PROTO ((struct sometimes *, int));
static int schedule_block PROTO ((int, int));
static void split_hard_reg_notes PROTO ((rtx, rtx, rtx));
static void new_insn_dead_notes PROTO ((rtx, rtx, rtx, rtx));
static void update_n_sets PROTO ((rtx, int));
static char *safe_concat PROTO ((char *, char *, char *));
static int insn_issue_delay PROTO ((rtx));
static int birthing_insn_p PROTO ((rtx));
static void adjust_priority PROTO ((rtx));

/* Mapping of insns to their original block prior to scheduling.  */
static int *insn_orig_block;
#define INSN_BLOCK(insn) (insn_orig_block[INSN_UID (insn)])

/* Some insns (e.g. call) are not allowed to move across blocks.  */
static char *cant_move;
#define CANT_MOVE(insn) (cant_move[INSN_UID (insn)])

/* Control flow graph edges are kept in circular lists.  */
typedef struct
  {
    int from_block;
    int to_block;
    int next_in;
    int next_out;
  }
haifa_edge;
static haifa_edge *edge_table;

#define NEXT_IN(edge) (edge_table[edge].next_in)
#define NEXT_OUT(edge) (edge_table[edge].next_out)
#define FROM_BLOCK(edge) (edge_table[edge].from_block)
#define TO_BLOCK(edge) (edge_table[edge].to_block)

/* Number of edges in the control flow graph.  (In fact, larger than
   that by 1, since edge 0 is unused.)  */
static int nr_edges;

/* Circular list of incoming/outgoing edges of a block.  */
static int *in_edges;
static int *out_edges;

#define IN_EDGES(block) (in_edges[block])
#define OUT_EDGES(block) (out_edges[block])


static int is_cfg_nonregular PROTO ((void));
static int build_control_flow PROTO ((int_list_ptr *, int_list_ptr *,
				      int *, int *));
static void new_edge PROTO ((int, int));


/* A region is the main entity for interblock scheduling: insns
   are allowed to move between blocks in the same region, along
   control flow graph edges, in the 'up' direction.  */
typedef struct
  {
    int rgn_nr_blocks;		/* Number of blocks in region.  */
    int rgn_blocks;		/* Blocks in the region (actually index in rgn_bb_table).  */
  }
region;

/* Number of regions in the procedure.  */
static int nr_regions;

/* Table of region descriptions.  */
static region *rgn_table;

/* Array of lists of regions' blocks.  */
static int *rgn_bb_table;

/* Topological order of blocks in the region (if b2 is reachable from
   b1, block_to_bb[b2] > block_to_bb[b1]).  Note: A basic block is
   always referred to by either block or b, while its topological
   order name (in the region) is referred to by bb.  */
static int *block_to_bb;

/* The number of the region containing a block.  */
static int *containing_rgn;

#define RGN_NR_BLOCKS(rgn) (rgn_table[rgn].rgn_nr_blocks)
#define RGN_BLOCKS(rgn) (rgn_table[rgn].rgn_blocks)
#define BLOCK_TO_BB(block) (block_to_bb[block])
#define CONTAINING_RGN(block) (containing_rgn[block])
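
/* Illustrative sketch (RGN and BB are hypothetical locals): the
   blocks of region RGN occupy a contiguous slice of rgn_bb_table
   starting at index RGN_BLOCKS (rgn), so a region's blocks can be
   walked like this (cf. debug_regions below).  */
#if 0
  for (bb = 0; bb < RGN_NR_BLOCKS (rgn); bb++)
    {
      int block = rgn_bb_table[RGN_BLOCKS (rgn) + bb];
      /* Here BLOCK_TO_BB (block) == bb and CONTAINING_RGN (block) == rgn.  */
    }
#endif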

void debug_regions PROTO ((void));
static void find_single_block_region PROTO ((void));
static void find_rgns PROTO ((int_list_ptr *, int_list_ptr *,
			      int *, int *, sbitmap *));
static int too_large PROTO ((int, int *, int *));

extern void debug_live PROTO ((int, int));

/* Blocks of the current region being scheduled.  */
static int current_nr_blocks;
static int current_blocks;

/* The mapping from bb to block.  */
#define BB_TO_BLOCK(bb) (rgn_bb_table[current_blocks + (bb)])

/* Bit vectors and bitset operations are needed for computations on
   the control flow graph.  */

typedef unsigned HOST_WIDE_INT *bitset;
typedef struct
  {
    int *first_member;		/* Pointer to the list start in bitlst_table.  */
    int nr_members;		/* The number of members of the bit list.  */
  }
bitlst;

static int bitlst_table_last;
static int bitlst_table_size;
static int *bitlst_table;

static char bitset_member PROTO ((bitset, int, int));
static void extract_bitlst PROTO ((bitset, int, bitlst *));

/* Target info declarations.

   The block currently being scheduled is referred to as the "target" block,
   while other blocks in the region from which insns can be moved to the
   target are called "source" blocks.  The candidate structure holds info
   about such sources: are they valid?  Speculative?  Etc.  */
typedef bitlst bblst;
typedef struct
  {
    char is_valid;
    char is_speculative;
    int src_prob;
    bblst split_bbs;
    bblst update_bbs;
  }
candidate;

static candidate *candidate_table;

/* A speculative motion requires checking live information on the path
   from 'source' to 'target'.  The split blocks are those to be checked.
   After a speculative motion, live information should be modified in
   the 'update' blocks.

   Lists of split and update blocks for each candidate of the current
   target are in array bblst_table.  */
static int *bblst_table, bblst_size, bblst_last;

#define IS_VALID(src) ( candidate_table[src].is_valid )
#define IS_SPECULATIVE(src) ( candidate_table[src].is_speculative )
#define SRC_PROB(src) ( candidate_table[src].src_prob )

/* The bb being currently scheduled.  */
static int target_bb;

/* List of edges.  */
typedef bitlst edgelst;

/* Target info functions.  */
static void split_edges PROTO ((int, int, edgelst *));
static void compute_trg_info PROTO ((int));
void debug_candidate PROTO ((int));
void debug_candidates PROTO ((int));


/* Bit-set of bbs, where bit 'i' stands for bb 'i'.  */
typedef bitset bbset;

/* Number of words of the bbset.  */
static int bbset_size;

/* Dominators array: dom[i] contains the bbset of dominators of
   bb i in the region.  */
static bbset *dom;

/* bb 0 is the only region entry.  */
#define IS_RGN_ENTRY(bb) (!bb)

/* Is bb_src dominated by bb_trg?  */
#define IS_DOMINATED(bb_src, bb_trg)                    \
( bitset_member (dom[bb_src], bb_trg, bbset_size) )

/* Probability: Prob[i] is a float in [0, 1] which is the probability
   of bb i relative to the region entry.  */
static float *prob;

/* The probability of bb_src, relative to bb_trg.  Note, that while the
   'prob[bb]' is a float in [0, 1], this macro returns an integer
   in [0, 100].  */
#define GET_SRC_PROB(bb_src, bb_trg) ((int) (100.0 * (prob[bb_src] / \
						      prob[bb_trg])))
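
/* Worked example: with prob[bb_src] == 0.3 and prob[bb_trg] == 0.6,
   GET_SRC_PROB (bb_src, bb_trg) evaluates to (int) (100.0 * 0.5),
   i.e. 50, meaning bb_src is reached on about half of the paths
   through bb_trg.  */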

/* Bit-set of edges, where bit i stands for edge i.  */
typedef bitset edgeset;

/* Number of edges in the region.  */
static int rgn_nr_edges;

/* Array of size rgn_nr_edges.  */
static int *rgn_edges;

/* Number of words in an edgeset.  */
static int edgeset_size;

/* Mapping from each edge in the graph to its number in the rgn.  */
static int *edge_to_bit;
#define EDGE_TO_BIT(edge) (edge_to_bit[edge])

/* The split edges of a source bb are different for each target
   bb.  In order to compute this efficiently, the 'potential-split edges'
   are computed for each bb prior to scheduling a region.  This is actually
   the split edges of each bb relative to the region entry.

   pot_split[bb] is the set of potential split edges of bb.  */
static edgeset *pot_split;

/* For every bb, a set of its ancestor edges.  */
static edgeset *ancestor_edges;

static void compute_dom_prob_ps PROTO ((int));

#define ABS_VALUE(x) (((x)<0)?(-(x)):(x))
#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (INSN_BLOCK (INSN))))
#define IS_SPECULATIVE_INSN(INSN) (IS_SPECULATIVE (BLOCK_TO_BB (INSN_BLOCK (INSN))))
#define INSN_BB(INSN) (BLOCK_TO_BB (INSN_BLOCK (INSN)))

/* Parameters affecting the decision of rank_for_schedule().  */
#define MIN_DIFF_PRIORITY 2
#define MIN_PROBABILITY 40
#define MIN_PROB_DIFF 10

/* Speculative scheduling functions.  */
static int check_live_1 PROTO ((int, rtx));
static void update_live_1 PROTO ((int, rtx));
static int check_live PROTO ((rtx, int));
static void update_live PROTO ((rtx, int));
static void set_spec_fed PROTO ((rtx));
static int is_pfree PROTO ((rtx, int, int));
static int find_conditional_protection PROTO ((rtx, int));
static int is_conditionally_protected PROTO ((rtx, int, int));
static int may_trap_exp PROTO ((rtx, int));
static int haifa_classify_insn PROTO ((rtx));
static int is_prisky PROTO ((rtx, int, int));
static int is_exception_free PROTO ((rtx, int, int));

static char find_insn_mem_list PROTO ((rtx, rtx, rtx, rtx));
static void compute_block_forward_dependences PROTO ((int));
static void init_rgn_data_dependences PROTO ((int));
static void add_branch_dependences PROTO ((rtx, rtx));
static void compute_block_backward_dependences PROTO ((int));
void debug_dependencies PROTO ((void));

/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between three types of notes:

   (1) LINE_NUMBER notes, generated and used for debugging.  Here,
   before scheduling a region, a pointer to the LINE_NUMBER note is
   added to the insn following it (in save_line_notes()), and the note
   is removed (in rm_line_notes() and unlink_line_notes()).  After
   scheduling the region, this pointer is used for regeneration of
   the LINE_NUMBER note (in restore_line_notes()).

   (2) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation.)  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (3) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */

static rtx unlink_other_notes PROTO ((rtx, rtx));
static rtx unlink_line_notes PROTO ((rtx, rtx));
static void rm_line_notes PROTO ((int));
static void save_line_notes PROTO ((int));
static void restore_line_notes PROTO ((int));
static void rm_redundant_line_notes PROTO ((void));
static void rm_other_notes PROTO ((rtx, rtx));
static rtx reemit_notes PROTO ((rtx, rtx));

static void get_block_head_tail PROTO ((int, rtx *, rtx *));

static void find_pre_sched_live PROTO ((int));
static void find_post_sched_live PROTO ((int));
static void update_reg_usage PROTO ((void));
static int queue_to_ready PROTO ((rtx [], int));

static void debug_ready_list PROTO ((rtx[], int));
static void init_target_units PROTO ((void));
static void insn_print_units PROTO ((rtx));
static int get_visual_tbl_length PROTO ((void));
static void init_block_visualization PROTO ((void));
static void print_block_visualization PROTO ((int, char *));
static void visualize_scheduled_insns PROTO ((int, int));
static void visualize_no_unit PROTO ((rtx));
static void visualize_stall_cycles PROTO ((int, int));
static void print_exp PROTO ((char *, rtx, int));
static void print_value PROTO ((char *, rtx, int));
static void print_pattern PROTO ((char *, rtx, int));
static void print_insn PROTO ((char *, rtx, int));
void debug_reg_vector PROTO ((regset));

static rtx move_insn1 PROTO ((rtx, rtx));
static rtx move_insn PROTO ((rtx, rtx));
static rtx group_leader PROTO ((rtx));
static int set_priorities PROTO ((int));
static void init_rtx_vector PROTO ((rtx **, rtx *, int, int));
static void schedule_region PROTO ((int));

#endif /* INSN_SCHEDULING */
\f
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Helper functions for instruction scheduling.  */

/* An INSN_LIST containing all INSN_LISTs allocated but currently unused.  */
static rtx unused_insn_list;

/* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused.  */
static rtx unused_expr_list;

static void free_list PROTO ((rtx *, rtx *));
static rtx alloc_INSN_LIST PROTO ((rtx, rtx));
static rtx alloc_EXPR_LIST PROTO ((int, rtx, rtx));

static void
free_list (listp, unused_listp)
     rtx *listp, *unused_listp;
{
  register rtx link, prev_link;

  if (*listp == 0)
    return;

  prev_link = *listp;
  link = XEXP (prev_link, 1);

  while (link)
    {
      prev_link = link;
      link = XEXP (link, 1);
    }

  XEXP (prev_link, 1) = *unused_listp;
  *unused_listp = *listp;
  *listp = 0;
}
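
/* Usage sketch: a pending list is recycled onto the unused list
   rather than freed; this is essentially what free_pending_lists
   (declared above) does later in this file.  */
#if 0
  free_list (&pending_read_insns, &unused_insn_list);
  free_list (&pending_read_mems, &unused_expr_list);
  free_list (&pending_write_insns, &unused_insn_list);
  free_list (&pending_write_mems, &unused_expr_list);
#endif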

static rtx
alloc_INSN_LIST (val, next)
     rtx val, next;
{
  rtx r;

  if (unused_insn_list)
    {
      r = unused_insn_list;
      unused_insn_list = XEXP (r, 1);
      XEXP (r, 0) = val;
      XEXP (r, 1) = next;
      PUT_REG_NOTE_KIND (r, VOIDmode);
    }
  else
    r = gen_rtx_INSN_LIST (VOIDmode, val, next);

  return r;
}

static rtx
alloc_EXPR_LIST (kind, val, next)
     int kind;
     rtx val, next;
{
  rtx r;

  if (unused_expr_list)
    {
      r = unused_expr_list;
      unused_expr_list = XEXP (r, 1);
      XEXP (r, 0) = val;
      XEXP (r, 1) = next;
      PUT_REG_NOTE_KIND (r, kind);
    }
  else
    r = gen_rtx_EXPR_LIST (kind, val, next);

  return r;
}

/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

static void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* We can get a dependency on deleted insns due to optimizations in
     the register allocation and reloading or due to splitting.  Any
     such dependency is useless and can be ignored.  */
  if (GET_CODE (elem) == NOTE)
    return;

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */

  next = NEXT_INSN (elem);

#ifdef HAVE_cc0
  while (next && GET_CODE (next) == NOTE)
    next = NEXT_INSN (next);
#endif

  if (next && SCHED_GROUP_P (next)
      && GET_CODE (next) != CODE_LABEL)
    {
      /* Notes will never intervene here though, so don't bother checking
         for them.  */
      /* We must reject CODE_LABELs, so that we don't get confused by one
         that has LABEL_PRESERVE_P set, which is represented by the same
         bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
         SCHED_GROUP_P.  */
      while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next))
	     && GET_CODE (NEXT_INSN (next)) != CODE_LABEL)
	next = NEXT_INSN (next);

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
	return;

      /* Make the dependence to NEXT, the last insn of the group, instead
         of the original ELEM.  */
      elem = next;
    }

#ifdef INSN_SCHEDULING
  /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
     No need for interblock dependences with calls, since
     calls are not moved between blocks.  Note: the edge where
     elem is a CALL is still required.  */
  if (GET_CODE (insn) == CALL_INSN
      && (INSN_BB (elem) != INSN_BB (insn)))
    return;

#endif

  /* Check that we don't already have this dependence.  */
  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    if (XEXP (link, 0) == elem)
      {
	/* If this is a more restrictive type of dependence than the existing
	   one, then change the existing dependence to this type.  */
	if ((int) dep_type < (int) REG_NOTE_KIND (link))
	  PUT_REG_NOTE_KIND (link, dep_type);
	return;
      }
  /* Might want to check one level of transitivity to save conses.  */

  link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
  LOG_LINKS (insn) = link;

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);
}
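
/* A caller's-eye sketch (SETTER and USER are hypothetical locals):
   the dependence analysis below records a true dependence on a
   previous setter of a register, and an anti dependence on a previous
   user, roughly as

       add_dependence (insn, setter, 0);
       add_dependence (insn, user, REG_DEP_ANTI);

   A reg note kind of 0 denotes a true data dependence; REG_DEP_ANTI
   and REG_DEP_OUTPUT are the other two kinds.  */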

/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */

static void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link, next;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link; link = next)
    {
      next = XEXP (link, 1);
      if (XEXP (link, 0) == elem)
	{
	  if (prev)
	    XEXP (prev, 1) = next;
	  else
	    LOG_LINKS (insn) = next;

	  XEXP (link, 1) = unused_insn_list;
	  unused_insn_list = link;

	  found = 1;
	}
      else
	prev = link;
    }

  if (!found)
    abort ();
  return;
}
\f
#ifndef INSN_SCHEDULING
void
schedule_insns (dump_file)
     FILE *dump_file;
{
}
#else
#ifndef __GNUC__
#define __inline
#endif

#ifndef HAIFA_INLINE
#define HAIFA_INLINE __inline
#endif

/* Computation of memory dependencies.  */

/* The *_insns and *_mems are paired lists.  Each pending memory operation
   will have a pointer to the MEM rtx on one list and a pointer to the
   containing insn on the other list in the same place in the list.  */

/* We can't use add_dependence like the old code did, because a single insn
   may have multiple memory accesses, and hence needs to be on the list
   once for each memory access.  Add_dependence won't let you add an insn
   to a list more than once.  */

/* An INSN_LIST containing all insns with pending read operations.  */
static rtx pending_read_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
static rtx pending_read_mems;

/* An INSN_LIST containing all insns with pending write operations.  */
static rtx pending_write_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
static rtx pending_write_mems;

/* Indicates the combined length of the two pending lists.  We must prevent
   these lists from ever growing too large since the number of dependencies
   produced is at least O(N*N), and execution time is at least O(4*N*N), as
   a function of the length of these pending lists.  */

static int pending_lists_length;

/* The last insn upon which all memory references must depend.
   This is an insn which flushed the pending lists, creating a dependency
   between it and all previously pending memory references.  This creates
   a barrier (or a checkpoint) which no memory reference is allowed to cross.

   This includes all non-constant CALL_INSNs.  When we do interprocedural
   alias analysis, this restriction can be relaxed.
   This may also be an INSN that writes memory if the pending lists grow
   too large.  */

static rtx last_pending_memory_flush;

/* The last function call we have seen.  All hard regs, and, of course,
   the last function call, must depend on this.  */

static rtx last_function_call;

/* The LOG_LINKS field of this is a list of insns which use a pseudo register
   that does not already cross a call.  We create dependencies between each
   of those insns and the next call insn, to ensure that they won't cross a
   call after scheduling is done.  */

static rtx sched_before_next_call;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;

/* Data structures for the computation of data dependences in a region.  We
   keep one copy of each of the variables declared above for each bb in the
   region.  Before analyzing the data dependences for a bb, its variables
   are initialized as a function of the variables of its predecessors.  When
   the analysis for a bb completes, we save the contents of each variable X
   to a corresponding bb_X[bb] variable.  For example, pending_read_insns is
   copied to bb_pending_read_insns[bb].  Another change is that a few
   variables are now a list of insns rather than a single insn:
   last_pending_memory_flush, last_function_call, reg_last_sets.  The
   manipulation of these variables was changed appropriately.  */

static rtx **bb_reg_last_uses;
static rtx **bb_reg_last_sets;
static rtx **bb_reg_last_clobbers;

static rtx *bb_pending_read_insns;
static rtx *bb_pending_read_mems;
static rtx *bb_pending_write_insns;
static rtx *bb_pending_write_mems;
static int *bb_pending_lists_length;

static rtx *bb_last_pending_memory_flush;
static rtx *bb_last_function_call;
static rtx *bb_sched_before_next_call;

/* Functions for construction of the control flow graph.  */

/* Return 1 if control flow graph should not be constructed, 0 otherwise.

   We decide not to build the control flow graph if there is possibly more
   than one entry to the function, if computed branches exist, or if we
   have nonlocal gotos.  */

static int
is_cfg_nonregular ()
{
  int b;
  rtx insn;
  RTX_CODE code;

  /* If we have a label that could be the target of a nonlocal goto, then
     the cfg is not well structured.  */
  if (nonlocal_goto_handler_labels)
    return 1;

  /* If we have any forced labels, then the cfg is not well structured.  */
  if (forced_labels)
    return 1;

  /* If this function has a computed jump, then we consider the cfg
     not well structured.  */
  if (current_function_has_computed_jump)
    return 1;

  /* If we have exception handlers, then we consider the cfg not well
     structured.  ?!?  We should be able to handle this now that flow.c
     computes an accurate cfg for EH.  */
  if (exception_handler_labels)
    return 1;

  /* If we have non-jumping insns which refer to labels, then we consider
     the cfg not well structured.  */
  /* Check for labels referred to by something other than jumps.  */
  for (b = 0; b < n_basic_blocks; b++)
    for (insn = BLOCK_HEAD (b);; insn = NEXT_INSN (insn))
      {
	code = GET_CODE (insn);
	if (GET_RTX_CLASS (code) == 'i')
	  {
	    rtx note;

	    for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
	      if (REG_NOTE_KIND (note) == REG_LABEL)
		return 1;
	  }

	if (insn == BLOCK_END (b))
	  break;
      }

  /* All the tests passed.  Consider the cfg well structured.  */
  return 0;
}

/* Build the control flow graph and set nr_edges.

   Instead of trying to build a cfg ourselves, we rely on flow to
   do it for us.  Stamp out useless code (and bug) duplication.

   Return nonzero if an irregularity in the cfg is found which would
   prevent cross block scheduling.  */

static int
build_control_flow (s_preds, s_succs, num_preds, num_succs)
     int_list_ptr *s_preds;
     int_list_ptr *s_succs;
     int *num_preds;
     int *num_succs;
{
  int i;
  int_list_ptr succ;
  int unreachable;

  /* Count the number of edges in the cfg.  */
  nr_edges = 0;
  unreachable = 0;
  for (i = 0; i < n_basic_blocks; i++)
    {
      nr_edges += num_succs[i];

      /* Unreachable loops with more than one basic block are detected
	 during the DFS traversal in find_rgns.

	 Unreachable loops with a single block are detected here.  This
	 test is redundant with the one in find_rgns, but it's much
	 cheaper to go ahead and catch the trivial case here.  */
      if (num_preds[i] == 0
	  || (num_preds[i] == 1 && INT_LIST_VAL (s_preds[i]) == i))
	unreachable = 1;
    }

  /* Account for entry/exit edges.  */
  nr_edges += 2;

  in_edges = (int *) xmalloc (n_basic_blocks * sizeof (int));
  out_edges = (int *) xmalloc (n_basic_blocks * sizeof (int));
  bzero ((char *) in_edges, n_basic_blocks * sizeof (int));
  bzero ((char *) out_edges, n_basic_blocks * sizeof (int));

  edge_table = (haifa_edge *) xmalloc ((nr_edges) * sizeof (haifa_edge));
  bzero ((char *) edge_table, ((nr_edges) * sizeof (haifa_edge)));

  nr_edges = 0;
  for (i = 0; i < n_basic_blocks; i++)
    for (succ = s_succs[i]; succ; succ = succ->next)
      {
	if (INT_LIST_VAL (succ) != EXIT_BLOCK)
	  new_edge (i, INT_LIST_VAL (succ));
      }

  /* Increment by 1, since edge 0 is unused.  */
  nr_edges++;

  return unreachable;
}

/* Record an edge in the control flow graph from SOURCE to TARGET.

   In theory, this is redundant with the s_succs computed above, but
   we have not converted all of haifa to use information from the
   integer lists.  */

static void
new_edge (source, target)
     int source, target;
{
  int e, next_edge;
  int curr_edge, fst_edge;

  /* Check for duplicates.  */
  fst_edge = curr_edge = OUT_EDGES (source);
  while (curr_edge)
    {
      if (FROM_BLOCK (curr_edge) == source
	  && TO_BLOCK (curr_edge) == target)
	{
	  return;
	}

      curr_edge = NEXT_OUT (curr_edge);

      if (fst_edge == curr_edge)
	break;
    }

  e = ++nr_edges;

  FROM_BLOCK (e) = source;
  TO_BLOCK (e) = target;

  if (OUT_EDGES (source))
    {
      next_edge = NEXT_OUT (OUT_EDGES (source));
      NEXT_OUT (OUT_EDGES (source)) = e;
      NEXT_OUT (e) = next_edge;
    }
  else
    {
      OUT_EDGES (source) = e;
      NEXT_OUT (e) = e;
    }

  if (IN_EDGES (target))
    {
      next_edge = NEXT_IN (IN_EDGES (target));
      NEXT_IN (IN_EDGES (target)) = e;
      NEXT_IN (e) = next_edge;
    }
  else
    {
      IN_EDGES (target) = e;
      NEXT_IN (e) = e;
    }
}
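
/* Sketch: because the edge lists are circular, the successors of a
   block BLK (a hypothetical local) are visited with a do/while over
   NEXT_OUT, mirroring the duplicate check above.  */
#if 0
  int fst_edge, e;

  fst_edge = e = OUT_EDGES (blk);
  if (e)
    do
      {
	/* TO_BLOCK (e) is a successor of BLK.  */
	e = NEXT_OUT (e);
      }
    while (e != fst_edge);
#endif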


/* BITSET macros for operations on the control flow graph.  */

/* Compute bitwise union of two bitsets.  */
#define BITSET_UNION(set1, set2, len)                           \
do { register bitset tp = set1, sp = set2;                      \
     register int i;                                            \
     for (i = 0; i < len; i++)                                  \
       *(tp++) |= *(sp++); } while (0)

/* Compute bitwise intersection of two bitsets.  */
#define BITSET_INTER(set1, set2, len)                           \
do { register bitset tp = set1, sp = set2;                      \
     register int i;                                            \
     for (i = 0; i < len; i++)                                  \
       *(tp++) &= *(sp++); } while (0)

/* Compute bitwise difference of two bitsets.  */
#define BITSET_DIFFER(set1, set2, len)                          \
do { register bitset tp = set1, sp = set2;                      \
     register int i;                                            \
     for (i = 0; i < len; i++)                                  \
       *(tp++) &= ~*(sp++); } while (0)

/* Invert every bit of bitset 'set'.  */
#define BITSET_INVERT(set, len)                                 \
do { register bitset tmpset = set;                              \
     register int i;                                            \
     for (i = 0; i < len; i++, tmpset++)                        \
       *tmpset = ~*tmpset; } while (0)

/* Turn on the index'th bit in bitset set.  */
#define BITSET_ADD(set, index, len)                             \
{                                                               \
  if (index >= HOST_BITS_PER_WIDE_INT * len)                    \
    abort ();                                                   \
  else                                                          \
    set[index/HOST_BITS_PER_WIDE_INT] |=                        \
      1 << (index % HOST_BITS_PER_WIDE_INT);                    \
}

/* Turn off the index'th bit in set.  */
#define BITSET_REMOVE(set, index, len)                          \
{                                                               \
  if (index >= HOST_BITS_PER_WIDE_INT * len)                    \
    abort ();                                                   \
  else                                                          \
    set[index/HOST_BITS_PER_WIDE_INT] &=                        \
      ~(1 << (index%HOST_BITS_PER_WIDE_INT));                   \
}
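
/* Usage sketch (BB, PRED_BB, BB_SRC, BB_TRG hypothetical): the
   dominator computation later in the file combines these operations
   in essentially this way, accumulating a predecessor's dominator set
   and then testing membership (cf. the IS_DOMINATED macro above).  */
#if 0
  BITSET_UNION (dom[bb], dom[pred_bb], bbset_size);
  BITSET_ADD (dom[bb], bb, bbset_size);
  if (bitset_member (dom[bb_src], bb_trg, bbset_size))
    /* bb_trg dominates bb_src.  */ ;
#endif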


/* Check if the index'th bit in bitset set is on.  */

static char
bitset_member (set, index, len)
     bitset set;
     int index, len;
{
  if (index >= HOST_BITS_PER_WIDE_INT * len)
    abort ();
  return (set[index / HOST_BITS_PER_WIDE_INT] &
	  1 << (index % HOST_BITS_PER_WIDE_INT)) ? 1 : 0;
}


/* Translate a bit-set SET to a list BL of the bit-set members.  */

static void
extract_bitlst (set, len, bl)
     bitset set;
     int len;
     bitlst *bl;
{
  int i, j, offset;
  unsigned HOST_WIDE_INT word;

  /* bblst table space is reused in each call to extract_bitlst.  */
  bitlst_table_last = 0;

  bl->first_member = &bitlst_table[bitlst_table_last];
  bl->nr_members = 0;

  for (i = 0; i < len; i++)
    {
      word = set[i];
      offset = i * HOST_BITS_PER_WIDE_INT;
      for (j = 0; word; j++)
	{
	  if (word & 1)
	    {
	      bitlst_table[bitlst_table_last++] = offset;
	      (bl->nr_members)++;
	    }
	  word >>= 1;
	  ++offset;
	}
    }

}


/* Functions for the construction of regions.  */

/* Print the regions, for debugging purposes.  Callable from debugger.  */

void
debug_regions ()
{
  int rgn, bb;

  fprintf (dump, "\n;;   ------------ REGIONS ----------\n\n");
  for (rgn = 0; rgn < nr_regions; rgn++)
    {
      fprintf (dump, ";;\trgn %d nr_blocks %d:\n", rgn,
	       rgn_table[rgn].rgn_nr_blocks);
      fprintf (dump, ";;\tbb/block: ");

      for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
	{
	  current_blocks = RGN_BLOCKS (rgn);

	  if (bb != BLOCK_TO_BB (BB_TO_BLOCK (bb)))
	    abort ();

	  fprintf (dump, " %d/%d ", bb, BB_TO_BLOCK (bb));
	}

      fprintf (dump, "\n\n");
    }
}


/* Build a single block region for each basic block in the function.
   This allows for using the same code for interblock and basic block
   scheduling.  */

static void
find_single_block_region ()
{
  int i;

  for (i = 0; i < n_basic_blocks; i++)
    {
      rgn_bb_table[i] = i;
      RGN_NR_BLOCKS (i) = 1;
      RGN_BLOCKS (i) = i;
      CONTAINING_RGN (i) = i;
      BLOCK_TO_BB (i) = 0;
    }
  nr_regions = n_basic_blocks;
}


/* Update number of blocks and the estimate for number of insns
   in the region.  Return 1 if the region is "too large" for interblock
   scheduling (compile time considerations), otherwise return 0.  */

static int
too_large (block, num_bbs, num_insns)
     int block, *num_bbs, *num_insns;
{
  (*num_bbs)++;
  (*num_insns) += (INSN_LUID (BLOCK_END (block)) -
		   INSN_LUID (BLOCK_HEAD (block)));
  if ((*num_bbs > MAX_RGN_BLOCKS) || (*num_insns > MAX_RGN_INSNS))
    return 1;
  else
    return 0;
}
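
/* Worked example: with the limits defined near the top of this file
   (MAX_RGN_BLOCKS == 10, MAX_RGN_INSNS == 100), a candidate region is
   rejected as soon as it accumulates an 11th block, or as soon as its
   luid-based insn-count estimate exceeds 100.  */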


/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
   is still an inner loop.  Put in max_hdr[blk] the header of the most inner
   loop containing blk.  */
#define UPDATE_LOOP_RELATIONS(blk, hdr)                         \
{                                                               \
  if (max_hdr[blk] == -1)                                       \
    max_hdr[blk] = hdr;                                         \
  else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr])                  \
    RESET_BIT (inner, hdr);                                     \
  else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr])                  \
    {                                                           \
      RESET_BIT (inner, max_hdr[blk]);                          \
      max_hdr[blk] = hdr;                                       \
    }                                                           \
}
1438
1439
a2e68776
JL
1440/* Find regions for interblock scheduling.
1441
1442 A region for scheduling can be:
1443
1444 * A loop-free procedure, or
1445
1446 * A reducible inner loop, or
1447
1448 * A basic block not contained in any other region.
1449
1450
1451 ?!? In theory we could build other regions based on extended basic
1452 blocks or reverse extended basic blocks. Is it worth the trouble?
1453
1454 Loop blocks that form a region are put into the region's block list
1455 in topological order.
1456
1457 This procedure stores its results into the following global (ick) variables
1458
1459 * rgn_nr
1460 * rgn_table
1461 * rgn_bb_table
1462 * block_to_bb
1463 * containing region
1464
1465
1466 We use dominator relationships to avoid making regions out of non-reducible
1467 loops.
8c660648 1468
a2e68776
JL
1469 This procedure needs to be converted to work on pred/succ lists instead
1470 of edge tables. That would simplify it somewhat. */
8c660648
JL
1471
1472static void
a2e68776
JL
1473find_rgns (s_preds, s_succs, num_preds, num_succs, dom)
1474 int_list_ptr *s_preds;
1475 int_list_ptr *s_succs;
1476 int *num_preds;
1477 int *num_succs;
1478 sbitmap *dom;
8c660648
JL
1479{
1480 int *max_hdr, *dfs_nr, *stack, *queue, *degree;
a2e68776 1481 char no_loops = 1;
487a6e06 1482 int node, child, loop_head, i, head, tail;
8c660648 1483 int count = 0, sp, idx = 0, current_edge = out_edges[0];
15ebe47d 1484 int num_bbs, num_insns, unreachable;
8c660648 1485 int too_large_failure;
8c660648 1486
a2e68776
JL
1487 /* Note if an edge has been passed. */
1488 sbitmap passed;
1489
1490 /* Note if a block is a natural loop header. */
1491 sbitmap header;
1492
1493 /* Note if a block is an natural inner loop header. */
1494 sbitmap inner;
1495
1496 /* Note if a block is in the block queue. */
1497 sbitmap in_queue;
1498
cc132865
JL
1499 /* Note if a block is in the block queue. */
1500 sbitmap in_stack;
1501
a2e68776
JL
1502 /* Perform a DFS traversal of the cfg. Identify loop headers, inner loops
1503 and a mapping from block to its loop header (if the block is contained
1504 in a loop, else -1).
1505
1506 Store results in HEADER, INNER, and MAX_HDR respectively, these will
1507 be used as inputs to the second traversal.
1508
1509 STACK, SP and DFS_NR are only used during the first traversal. */
1510
1511 /* Allocate and initialize variables for the first traversal. */
8c660648
JL
1512 max_hdr = (int *) alloca (n_basic_blocks * sizeof (int));
1513 dfs_nr = (int *) alloca (n_basic_blocks * sizeof (int));
52b7724b 1514 bzero ((char *) dfs_nr, n_basic_blocks * sizeof (int));
8c660648 1515 stack = (int *) alloca (nr_edges * sizeof (int));
8c660648 1516
a2e68776
JL
1517 inner = sbitmap_alloc (n_basic_blocks);
1518 sbitmap_ones (inner);
1519
1520 header = sbitmap_alloc (n_basic_blocks);
1521 sbitmap_zero (header);
8c660648 1522
a2e68776
JL
1523 passed = sbitmap_alloc (nr_edges);
1524 sbitmap_zero (passed);
1525
1526 in_queue = sbitmap_alloc (n_basic_blocks);
1527 sbitmap_zero (in_queue);
8c660648 1528
cc132865
JL
1529 in_stack = sbitmap_alloc (n_basic_blocks);
1530 sbitmap_zero (in_stack);
1531
8c660648 1532 for (i = 0; i < n_basic_blocks; i++)
a2e68776 1533 max_hdr[i] = -1;
8c660648 1534
a2e68776 1535 /* DFS traversal to find inner loops in the cfg. */
8c660648 1536
8c660648
JL
1537 sp = -1;
1538 while (1)
1539 {
a2e68776 1540 if (current_edge == 0 || TEST_BIT (passed, current_edge))
8c660648 1541 {
a2e68776 1542 /* We have reached a leaf node or a node that was already
cc132865 1543 processed. Pop edges off the stack until we find
a2e68776
JL
1544 an edge that has not yet been processed. */
1545 while (sp >= 0
1546 && (current_edge == 0 || TEST_BIT (passed, current_edge)))
8c660648 1547 {
a2e68776 1548 /* Pop entry off the stack. */
8c660648
JL
1549 current_edge = stack[sp--];
1550 node = FROM_BLOCK (current_edge);
1551 child = TO_BLOCK (current_edge);
cc132865
JL
1552 RESET_BIT (in_stack, child);
1553 if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
8c660648
JL
1554 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
1555 current_edge = NEXT_OUT (current_edge);
1556 }
1557
a2e68776
JL
 1558	  /* See if we have finished the DFS tree traversal.  */
1559 if (sp < 0 && TEST_BIT (passed, current_edge))
8c660648 1560 break;
a2e68776
JL
1561
1562 /* Nope, continue the traversal with the popped node. */
8c660648
JL
1563 continue;
1564 }
1565
a2e68776 1566 /* Process a node. */
8c660648 1567 node = FROM_BLOCK (current_edge);
8c660648 1568 child = TO_BLOCK (current_edge);
cc132865 1569 SET_BIT (in_stack, node);
a2e68776 1570 dfs_nr[node] = ++count;
8c660648 1571
cc132865
JL
1572 /* If the successor is in the stack, then we've found a loop.
 1573	     Mark the loop; if it is not a natural loop, then it will
1574 be rejected during the second traversal. */
1575 if (TEST_BIT (in_stack, child))
8c660648
JL
1576 {
1577 no_loops = 0;
a2e68776 1578 SET_BIT (header, child);
8c660648 1579 UPDATE_LOOP_RELATIONS (node, child);
a2e68776 1580 SET_BIT (passed, current_edge);
8c660648
JL
1581 current_edge = NEXT_OUT (current_edge);
1582 continue;
1583 }
1584
a2e68776
JL
1585 /* If the child was already visited, then there is no need to visit
1586 it again. Just update the loop relationships and restart
1587 with a new edge. */
8c660648
JL
1588 if (dfs_nr[child])
1589 {
cc132865 1590 if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
8c660648 1591 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
a2e68776 1592 SET_BIT (passed, current_edge);
8c660648
JL
1593 current_edge = NEXT_OUT (current_edge);
1594 continue;
1595 }
1596
a2e68776 1597 /* Push an entry on the stack and continue DFS traversal. */
8c660648 1598 stack[++sp] = current_edge;
a2e68776 1599 SET_BIT (passed, current_edge);
8c660648 1600 current_edge = OUT_EDGES (child);
a2e68776 1601 }
8c660648 1602
15ebe47d
JL
1603 /* Another check for unreachable blocks. The earlier test in
1604 is_cfg_nonregular only finds unreachable blocks that do not
1605 form a loop.
a2e68776 1606
15ebe47d
JL
1607 The DFS traversal will mark every block that is reachable from
1608 the entry node by placing a nonzero value in dfs_nr. Thus if
1609 dfs_nr is zero for any block, then it must be unreachable. */
1610 unreachable = 0;
1611 for (i = 0; i < n_basic_blocks; i++)
1612 if (dfs_nr[i] == 0)
1613 {
1614 unreachable = 1;
1615 break;
1616 }
a2e68776
JL
1617
1618 /* Gross. To avoid wasting memory, the second pass uses the dfs_nr array
1619 to hold degree counts. */
1620 degree = dfs_nr;
8c660648 1621
a2e68776 1622 /* Compute the in-degree of every block in the graph */
8c660648 1623 for (i = 0; i < n_basic_blocks; i++)
a2e68776
JL
1624 degree[i] = num_preds[i];
1625
15ebe47d
JL
1626 /* Do not perform region scheduling if there are any unreachable
1627 blocks. */
1628 if (!unreachable)
8c660648 1629 {
15ebe47d
JL
1630 if (no_loops)
1631 SET_BIT (header, 0);
8c660648 1632
15ebe47d
JL
 1633      /* Second traversal: find reducible inner loops and topologically sort
 1634	 the blocks of each region.  */
8c660648 1635
15ebe47d 1636 queue = (int *) alloca (n_basic_blocks * sizeof (int));
8c660648 1637
cc132865
JL
1638 /* Find blocks which are inner loop headers. We still have non-reducible
1639 loops to consider at this point. */
15ebe47d
JL
1640 for (i = 0; i < n_basic_blocks; i++)
1641 {
1642 if (TEST_BIT (header, i) && TEST_BIT (inner, i))
1643 {
1644 int_list_ptr ps;
cc132865
JL
1645 int j;
1646
 1647	      /* Now check that the loop is reducible.  We do this separately
 1648	         from finding inner loops so that we do not find a reducible
 1649	         loop which contains an inner non-reducible loop.
 1650
 1651	         A simple way to find reducible/natural loops is to verify
1652 that each block in the loop is dominated by the loop
1653 header.
1654
1655 If there exists a block that is not dominated by the loop
1656 header, then the block is reachable from outside the loop
1657 and thus the loop is not a natural loop. */
1658 for (j = 0; j < n_basic_blocks; j++)
1659 {
1660 /* First identify blocks in the loop, except for the loop
1661 entry block. */
1662 if (i == max_hdr[j] && i != j)
1663 {
1664 /* Now verify that the block is dominated by the loop
1665 header. */
1666 if (!TEST_BIT (dom[j], i))
1667 break;
1668 }
1669 }
1670
 1671	      /* If we exited the loop early, then I is the header of a
 1672	         non-reducible loop and we should quit processing it now.  */
1673 if (j != n_basic_blocks)
1674 continue;
8c660648 1675
cc132865
JL
1676 /* I is a header of an inner loop, or block 0 in a subroutine
1677 with no loops at all. */
15ebe47d
JL
1678 head = tail = -1;
1679 too_large_failure = 0;
1680 loop_head = max_hdr[i];
8c660648 1681
15ebe47d 1682 /* Decrease degree of all I's successors for topological
a59bfd78 1683 ordering. */
15ebe47d
JL
1684 for (ps = s_succs[i]; ps; ps = ps->next)
1685 if (INT_LIST_VAL (ps) != EXIT_BLOCK
1686 && INT_LIST_VAL (ps) != ENTRY_BLOCK)
cc132865 1687 --degree[INT_LIST_VAL(ps)];
a2e68776 1688
15ebe47d
JL
1689 /* Estimate # insns, and count # blocks in the region. */
1690 num_bbs = 1;
3b413743
RH
1691 num_insns = (INSN_LUID (BLOCK_END (i))
1692 - INSN_LUID (BLOCK_HEAD (i)));
8c660648 1693
15ebe47d
JL
1694
 1695	      /* Find all loop latches (blocks with back edges to the loop
 1696	         header), or all the leaf blocks if the cfg has no loops.
1697
1698 Place those blocks into the queue. */
1699 if (no_loops)
1700 {
1701 for (j = 0; j < n_basic_blocks; j++)
1702 /* Leaf nodes have only a single successor which must
1703 be EXIT_BLOCK. */
1704 if (num_succs[j] == 1
1705 && INT_LIST_VAL (s_succs[j]) == EXIT_BLOCK)
8c660648 1706 {
15ebe47d
JL
1707 queue[++tail] = j;
1708 SET_BIT (in_queue, j);
1709
1710 if (too_large (j, &num_bbs, &num_insns))
1711 {
1712 too_large_failure = 1;
1713 break;
1714 }
8c660648 1715 }
15ebe47d
JL
1716 }
1717 else
8c660648 1718 {
15ebe47d 1719 int_list_ptr ps;
a2e68776 1720
15ebe47d 1721 for (ps = s_preds[i]; ps; ps = ps->next)
8c660648 1722 {
15ebe47d 1723 node = INT_LIST_VAL (ps);
8c660648 1724
15ebe47d
JL
1725 if (node == ENTRY_BLOCK || node == EXIT_BLOCK)
1726 continue;
1727
1728 if (max_hdr[node] == loop_head && node != i)
8c660648 1729 {
15ebe47d
JL
1730 /* This is a loop latch. */
1731 queue[++tail] = node;
1732 SET_BIT (in_queue, node);
1733
1734 if (too_large (node, &num_bbs, &num_insns))
1735 {
1736 too_large_failure = 1;
1737 break;
1738 }
8c660648 1739 }
15ebe47d 1740
8c660648 1741 }
8c660648 1742 }
8c660648 1743
15ebe47d 1744 /* Now add all the blocks in the loop to the queue.
a2e68776
JL
1745
1746 We know the loop is a natural loop; however the algorithm
1747 above will not always mark certain blocks as being in the
1748 loop. Consider:
1749 node children
1750 a b,c
1751 b c
1752 c a,d
1753 d b
1754
1755
1756 The algorithm in the DFS traversal may not mark B & D as part
 1757	     of the loop (i.e. they will not have max_hdr set to A).
1758
 1759	     We know they cannot be loop latches (else they would have
1760 had max_hdr set since they'd have a backedge to a dominator
1761 block). So we don't need them on the initial queue.
1762
1763 We know they are part of the loop because they are dominated
1764 by the loop header and can be reached by a backwards walk of
1765 the edges starting with nodes on the initial queue.
1766
1767 It is safe and desirable to include those nodes in the
1768 loop/scheduling region. To do so we would need to decrease
1769 the degree of a node if it is the target of a backedge
1770 within the loop itself as the node is placed in the queue.
1771
1772 We do not do this because I'm not sure that the actual
1773 scheduling code will properly handle this case. ?!? */
1774
15ebe47d 1775 while (head < tail && !too_large_failure)
8c660648 1776 {
15ebe47d
JL
1777 int_list_ptr ps;
1778 child = queue[++head];
8c660648 1779
15ebe47d 1780 for (ps = s_preds[child]; ps; ps = ps->next)
8c660648 1781 {
15ebe47d 1782 node = INT_LIST_VAL (ps);
8c660648 1783
15ebe47d
JL
1784 /* See discussion above about nodes not marked as in
1785 this loop during the initial DFS traversal. */
1786 if (node == ENTRY_BLOCK || node == EXIT_BLOCK
1787 || max_hdr[node] != loop_head)
8c660648 1788 {
15ebe47d 1789 tail = -1;
8c660648
JL
1790 break;
1791 }
15ebe47d
JL
1792 else if (!TEST_BIT (in_queue, node) && node != i)
1793 {
1794 queue[++tail] = node;
1795 SET_BIT (in_queue, node);
1796
1797 if (too_large (node, &num_bbs, &num_insns))
1798 {
1799 too_large_failure = 1;
1800 break;
1801 }
1802 }
8c660648 1803 }
8c660648 1804 }
8c660648 1805
15ebe47d
JL
1806 if (tail >= 0 && !too_large_failure)
1807 {
1808 /* Place the loop header into list of region blocks. */
1809 degree[i] = -1;
1810 rgn_bb_table[idx] = i;
1811 RGN_NR_BLOCKS (nr_regions) = num_bbs;
1812 RGN_BLOCKS (nr_regions) = idx++;
1813 CONTAINING_RGN (i) = nr_regions;
1814 BLOCK_TO_BB (i) = count = 0;
1815
1816 /* Remove blocks from queue[] when their in degree becomes
a2e68776
JL
1817 zero. Repeat until no blocks are left on the list. This
1818 produces a topological list of blocks in the region. */
15ebe47d 1819 while (tail >= 0)
8c660648 1820 {
15ebe47d
JL
1821 int_list_ptr ps;
1822
1823 if (head < 0)
1824 head = tail;
1825 child = queue[head];
1826 if (degree[child] == 0)
1827 {
1828 degree[child] = -1;
1829 rgn_bb_table[idx++] = child;
1830 BLOCK_TO_BB (child) = ++count;
1831 CONTAINING_RGN (child) = nr_regions;
1832 queue[head] = queue[tail--];
1833
1834 for (ps = s_succs[child]; ps; ps = ps->next)
1835 if (INT_LIST_VAL (ps) != ENTRY_BLOCK
1836 && INT_LIST_VAL (ps) != EXIT_BLOCK)
1837 --degree[INT_LIST_VAL (ps)];
1838 }
1839 else
1840 --head;
8c660648 1841 }
15ebe47d 1842 ++nr_regions;
8c660648 1843 }
8c660648
JL
1844 }
1845 }
1846 }
1847
a2e68776
JL
1848 /* Any block that did not end up in a region is placed into a region
1849 by itself. */
8c660648
JL
1850 for (i = 0; i < n_basic_blocks; i++)
1851 if (degree[i] >= 0)
1852 {
1853 rgn_bb_table[idx] = i;
1854 RGN_NR_BLOCKS (nr_regions) = 1;
1855 RGN_BLOCKS (nr_regions) = idx++;
1856 CONTAINING_RGN (i) = nr_regions++;
1857 BLOCK_TO_BB (i) = 0;
1858 }
1859
a2e68776
JL
1860 free (passed);
1861 free (header);
1862 free (inner);
1863 free (in_queue);
cc132865 1864 free (in_stack);
a2e68776 1865}
8c660648
JL
1866
1867
 1868/* functions for region scheduling information */
1869
1870/* Compute dominators, probability, and potential-split-edges of bb.
1871 Assume that these values were already computed for bb's predecessors. */
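
/* A worked example of the probability computation below, with
   illustrative numbers: suppose a predecessor P of bb has
   prob[P] = 0.8 and three outgoing edges, one of which leaves the
   region.  Then nr_rgn_out_edges = 1 and nr_out_edges = 3 - 1 = 2,
   so P contributes 0.9 * 0.8 / 2 = 0.36 to prob[bb].  Had no edge
   left the region, the contribution would have been 0.8 / 3.  */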
1872
1873static void
1874compute_dom_prob_ps (bb)
1875 int bb;
1876{
1877 int nxt_in_edge, fst_in_edge, pred;
1878 int fst_out_edge, nxt_out_edge, nr_out_edges, nr_rgn_out_edges;
1879
1880 prob[bb] = 0.0;
1881 if (IS_RGN_ENTRY (bb))
1882 {
1883 BITSET_ADD (dom[bb], 0, bbset_size);
1884 prob[bb] = 1.0;
1885 return;
1886 }
1887
1888 fst_in_edge = nxt_in_edge = IN_EDGES (BB_TO_BLOCK (bb));
1889
 1890  /* initialize dom[bb] to '111..1' */
1891 BITSET_INVERT (dom[bb], bbset_size);
1892
1893 do
1894 {
1895 pred = FROM_BLOCK (nxt_in_edge);
1896 BITSET_INTER (dom[bb], dom[BLOCK_TO_BB (pred)], bbset_size);
1897
1898 BITSET_UNION (ancestor_edges[bb], ancestor_edges[BLOCK_TO_BB (pred)],
1899 edgeset_size);
1900
1901 BITSET_ADD (ancestor_edges[bb], EDGE_TO_BIT (nxt_in_edge), edgeset_size);
1902
1903 nr_out_edges = 1;
1904 nr_rgn_out_edges = 0;
1905 fst_out_edge = OUT_EDGES (pred);
1906 nxt_out_edge = NEXT_OUT (fst_out_edge);
1907 BITSET_UNION (pot_split[bb], pot_split[BLOCK_TO_BB (pred)],
1908 edgeset_size);
1909
1910 BITSET_ADD (pot_split[bb], EDGE_TO_BIT (fst_out_edge), edgeset_size);
1911
 1912      /* the successor doesn't belong to the region? */
1913 if (CONTAINING_RGN (TO_BLOCK (fst_out_edge)) !=
1914 CONTAINING_RGN (BB_TO_BLOCK (bb)))
1915 ++nr_rgn_out_edges;
1916
1917 while (fst_out_edge != nxt_out_edge)
1918 {
1919 ++nr_out_edges;
 1920	  /* the successor doesn't belong to the region? */
1921 if (CONTAINING_RGN (TO_BLOCK (nxt_out_edge)) !=
1922 CONTAINING_RGN (BB_TO_BLOCK (bb)))
1923 ++nr_rgn_out_edges;
1924 BITSET_ADD (pot_split[bb], EDGE_TO_BIT (nxt_out_edge), edgeset_size);
1925 nxt_out_edge = NEXT_OUT (nxt_out_edge);
1926
1927 }
1928
1929 /* now nr_rgn_out_edges is the number of region-exit edges from pred,
1930 and nr_out_edges will be the number of pred out edges not leaving
1931 the region. */
1932 nr_out_edges -= nr_rgn_out_edges;
1933 if (nr_rgn_out_edges > 0)
1934 prob[bb] += 0.9 * prob[BLOCK_TO_BB (pred)] / nr_out_edges;
1935 else
1936 prob[bb] += prob[BLOCK_TO_BB (pred)] / nr_out_edges;
1937 nxt_in_edge = NEXT_IN (nxt_in_edge);
1938 }
1939 while (fst_in_edge != nxt_in_edge);
1940
1941 BITSET_ADD (dom[bb], bb, bbset_size);
1942 BITSET_DIFFER (pot_split[bb], ancestor_edges[bb], edgeset_size);
1943
1944 if (sched_verbose >= 2)
1945 fprintf (dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb), (int) (100.0 * prob[bb]));
1946} /* compute_dom_prob_ps */
1947
1948/* functions for target info */
1949
 1950/* Compute in BL the list of split-edges of bb_src relative to bb_trg.
1951 Note that bb_trg dominates bb_src. */
1952
1953static void
1954split_edges (bb_src, bb_trg, bl)
1955 int bb_src;
1956 int bb_trg;
1957 edgelst *bl;
1958{
1959 int es = edgeset_size;
1960 edgeset src = (edgeset) alloca (es * sizeof (HOST_WIDE_INT));
1961
1962 while (es--)
1963 src[es] = (pot_split[bb_src])[es];
1964 BITSET_DIFFER (src, pot_split[bb_trg], edgeset_size);
1965 extract_bitlst (src, edgeset_size, bl);
1966}
1967
1968
1969/* Find the valid candidate-source-blocks for the target block TRG, compute
1970 their probability, and check if they are speculative or not.
1971 For speculative sources, compute their update-blocks and split-blocks. */
1972
1973static void
1974compute_trg_info (trg)
1975 int trg;
1976{
1977 register candidate *sp;
1978 edgelst el;
1979 int check_block, update_idx;
1980 int i, j, k, fst_edge, nxt_edge;
1981
1982 /* define some of the fields for the target bb as well */
1983 sp = candidate_table + trg;
1984 sp->is_valid = 1;
1985 sp->is_speculative = 0;
1986 sp->src_prob = 100;
1987
1988 for (i = trg + 1; i < current_nr_blocks; i++)
1989 {
1990 sp = candidate_table + i;
1991
1992 sp->is_valid = IS_DOMINATED (i, trg);
1993 if (sp->is_valid)
1994 {
1995 sp->src_prob = GET_SRC_PROB (i, trg);
1996 sp->is_valid = (sp->src_prob >= MIN_PROBABILITY);
1997 }
1998
1999 if (sp->is_valid)
2000 {
2001 split_edges (i, trg, &el);
2002 sp->is_speculative = (el.nr_members) ? 1 : 0;
2003 if (sp->is_speculative && !flag_schedule_speculative)
2004 sp->is_valid = 0;
2005 }
2006
2007 if (sp->is_valid)
2008 {
2009 sp->split_bbs.first_member = &bblst_table[bblst_last];
2010 sp->split_bbs.nr_members = el.nr_members;
2011 for (j = 0; j < el.nr_members; bblst_last++, j++)
2012 bblst_table[bblst_last] =
2013 TO_BLOCK (rgn_edges[el.first_member[j]]);
2014 sp->update_bbs.first_member = &bblst_table[bblst_last];
2015 update_idx = 0;
2016 for (j = 0; j < el.nr_members; j++)
2017 {
2018 check_block = FROM_BLOCK (rgn_edges[el.first_member[j]]);
2019 fst_edge = nxt_edge = OUT_EDGES (check_block);
2020 do
2021 {
2022 for (k = 0; k < el.nr_members; k++)
2023 if (EDGE_TO_BIT (nxt_edge) == el.first_member[k])
2024 break;
2025
2026 if (k >= el.nr_members)
2027 {
2028 bblst_table[bblst_last++] = TO_BLOCK (nxt_edge);
2029 update_idx++;
2030 }
2031
2032 nxt_edge = NEXT_OUT (nxt_edge);
2033 }
2034 while (fst_edge != nxt_edge);
2035 }
2036 sp->update_bbs.nr_members = update_idx;
2037
2038 }
2039 else
2040 {
2041 sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0;
2042
2043 sp->is_speculative = 0;
2044 sp->src_prob = 0;
2045 }
2046 }
2047} /* compute_trg_info */
2048
2049
2050/* Print candidates info, for debugging purposes. Callable from debugger. */
2051
2052void
2053debug_candidate (i)
2054 int i;
2055{
2056 if (!candidate_table[i].is_valid)
2057 return;
2058
2059 if (candidate_table[i].is_speculative)
2060 {
2061 int j;
2062 fprintf (dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i);
2063
2064 fprintf (dump, "split path: ");
2065 for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)
2066 {
2067 int b = candidate_table[i].split_bbs.first_member[j];
2068
2069 fprintf (dump, " %d ", b);
2070 }
2071 fprintf (dump, "\n");
2072
2073 fprintf (dump, "update path: ");
2074 for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)
2075 {
2076 int b = candidate_table[i].update_bbs.first_member[j];
2077
2078 fprintf (dump, " %d ", b);
2079 }
2080 fprintf (dump, "\n");
2081 }
2082 else
2083 {
2084 fprintf (dump, " src %d equivalent\n", BB_TO_BLOCK (i));
2085 }
2086}
2087
2088
2089/* Print candidates info, for debugging purposes. Callable from debugger. */
2090
2091void
2092debug_candidates (trg)
2093 int trg;
2094{
2095 int i;
2096
2097 fprintf (dump, "----------- candidate table: target: b=%d bb=%d ---\n",
2098 BB_TO_BLOCK (trg), trg);
2099 for (i = trg + 1; i < current_nr_blocks; i++)
2100 debug_candidate (i);
2101}
2102
2103
 2104/* functions for speculative scheduling */
2105
2106/* Return 0 if x is a set of a register alive in the beginning of one
2107 of the split-blocks of src, otherwise return 1. */
2108
2109static int
2110check_live_1 (src, x)
2111 int src;
2112 rtx x;
2113{
5835e573 2114 register int i;
8c660648
JL
2115 register int regno;
2116 register rtx reg = SET_DEST (x);
2117
2118 if (reg == 0)
2119 return 1;
2120
2121 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
2122 || GET_CODE (reg) == SIGN_EXTRACT
2123 || GET_CODE (reg) == STRICT_LOW_PART)
2124 reg = XEXP (reg, 0);
2125
c0222c21
DM
2126 if (GET_CODE (reg) == PARALLEL
2127 && GET_MODE (reg) == BLKmode)
2128 {
2129 register int i;
2130 for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
2131 if (check_live_1 (src, XVECEXP (reg, 0, i)))
2132 return 1;
2133 return 0;
2134 }
2135
8c660648
JL
2136 if (GET_CODE (reg) != REG)
2137 return 1;
2138
2139 regno = REGNO (reg);
2140
2141 if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
2142 {
2143 /* Global registers are assumed live */
2144 return 0;
2145 }
2146 else
2147 {
2148 if (regno < FIRST_PSEUDO_REGISTER)
2149 {
2150 /* check for hard registers */
2151 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
2152 while (--j >= 0)
2153 {
2154 for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
2155 {
2156 int b = candidate_table[src].split_bbs.first_member[i];
2157
e881bb1b
RH
2158 if (REGNO_REG_SET_P (BASIC_BLOCK (b)->global_live_at_start,
2159 regno + j))
8c660648
JL
2160 {
2161 return 0;
2162 }
2163 }
2164 }
2165 }
2166 else
2167 {
 2168	  /* check for pseudo registers */
2169 for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
2170 {
2171 int b = candidate_table[src].split_bbs.first_member[i];
2172
e881bb1b 2173 if (REGNO_REG_SET_P (BASIC_BLOCK (b)->global_live_at_start, regno))
8c660648
JL
2174 {
2175 return 0;
2176 }
2177 }
2178 }
2179 }
2180
2181 return 1;
2182}
2183
2184
2185/* If x is a set of a register R, mark that R is alive in the beginning
2186 of every update-block of src. */
2187
2188static void
2189update_live_1 (src, x)
2190 int src;
2191 rtx x;
2192{
5835e573 2193 register int i;
8c660648
JL
2194 register int regno;
2195 register rtx reg = SET_DEST (x);
2196
2197 if (reg == 0)
2198 return;
2199
2200 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
2201 || GET_CODE (reg) == SIGN_EXTRACT
2202 || GET_CODE (reg) == STRICT_LOW_PART)
2203 reg = XEXP (reg, 0);
2204
c0222c21
DM
2205 if (GET_CODE (reg) == PARALLEL
2206 && GET_MODE (reg) == BLKmode)
2207 {
2208 register int i;
2209 for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
2210 update_live_1 (src, XVECEXP (reg, 0, i));
2211 return;
2212 }
2213
8c660648
JL
2214 if (GET_CODE (reg) != REG)
2215 return;
2216
2217 /* Global registers are always live, so the code below does not apply
2218 to them. */
2219
2220 regno = REGNO (reg);
2221
2222 if (regno >= FIRST_PSEUDO_REGISTER || !global_regs[regno])
2223 {
2224 if (regno < FIRST_PSEUDO_REGISTER)
2225 {
2226 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
2227 while (--j >= 0)
2228 {
2229 for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
2230 {
2231 int b = candidate_table[src].update_bbs.first_member[i];
2232
e881bb1b
RH
2233 SET_REGNO_REG_SET (BASIC_BLOCK (b)->global_live_at_start,
2234 regno + j);
8c660648
JL
2235 }
2236 }
2237 }
2238 else
2239 {
2240 for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
2241 {
2242 int b = candidate_table[src].update_bbs.first_member[i];
2243
e881bb1b 2244 SET_REGNO_REG_SET (BASIC_BLOCK (b)->global_live_at_start, regno);
8c660648
JL
2245 }
2246 }
2247 }
2248}
2249
2250
2251/* Return 1 if insn can be speculatively moved from block src to trg,
2252 otherwise return 0. Called before first insertion of insn to
2253 ready-list or before the scheduling. */
2254
2255static int
5835e573 2256check_live (insn, src)
8c660648
JL
2257 rtx insn;
2258 int src;
8c660648
JL
2259{
 2260  /* find the registers set by the instruction */
2261 if (GET_CODE (PATTERN (insn)) == SET
2262 || GET_CODE (PATTERN (insn)) == CLOBBER)
2263 return check_live_1 (src, PATTERN (insn));
2264 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2265 {
2266 int j;
2267 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2268 if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2269 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2270 && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j)))
2271 return 0;
2272
2273 return 1;
2274 }
2275
2276 return 1;
2277}
2278
2279
2280/* Update the live registers info after insn was moved speculatively from
2281 block src to trg. */
2282
2283static void
5835e573 2284update_live (insn, src)
8c660648 2285 rtx insn;
5835e573 2286 int src;
8c660648
JL
2287{
 2288  /* find the registers set by the instruction */
2289 if (GET_CODE (PATTERN (insn)) == SET
2290 || GET_CODE (PATTERN (insn)) == CLOBBER)
2291 update_live_1 (src, PATTERN (insn));
2292 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2293 {
2294 int j;
2295 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2296 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2297 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2298 update_live_1 (src, XVECEXP (PATTERN (insn), 0, j));
2299 }
2300}
2301
2302/* Exception Free Loads:
2303
2304 We define five classes of speculative loads: IFREE, IRISKY,
2305 PFREE, PRISKY, and MFREE.
2306
2307 IFREE loads are loads that are proved to be exception-free, just
2308 by examining the load insn. Examples for such loads are loads
2309 from TOC and loads of global data.
2310
2311 IRISKY loads are loads that are proved to be exception-risky,
2312 just by examining the load insn. Examples for such loads are
2313 volatile loads and loads from shared memory.
2314
2315 PFREE loads are loads for which we can prove, by examining other
2316 insns, that they are exception-free. Currently, this class consists
2317 of loads for which we are able to find a "similar load", either in
2318 the target block, or, if only one split-block exists, in that split
2319 block. Load2 is similar to load1 if both have same single base
2320 register. We identify only part of the similar loads, by finding
2321 an insn upon which both load1 and load2 have a DEF-USE dependence.
2322
2323 PRISKY loads are loads for which we can prove, by examining other
2324 insns, that they are exception-risky. Currently we have two proofs for
2325 such loads. The first proof detects loads that are probably guarded by a
2326 test on the memory address. This proof is based on the
2327 backward and forward data dependence information for the region.
2328 Let load-insn be the examined load.
2329 Load-insn is PRISKY iff ALL the following hold:
2330
2331 - insn1 is not in the same block as load-insn
2332 - there is a DEF-USE dependence chain (insn1, ..., load-insn)
2333 - test-insn is either a compare or a branch, not in the same block as load-insn
2334 - load-insn is reachable from test-insn
2335 - there is a DEF-USE dependence chain (insn1, ..., test-insn)
2336
2337 This proof might fail when the compare and the load are fed
2338 by an insn not in the region. To solve this, we will add to this
2339 group all loads that have no input DEF-USE dependence.
2340
2341 The second proof detects loads that are directly or indirectly
2342 fed by a speculative load. This proof is affected by the
2343 scheduling process. We will use the flag fed_by_spec_load.
2344 Initially, all insns have this flag reset. After a speculative
2345 motion of an insn, if insn is either a load, or marked as
2346 fed_by_spec_load, we will also mark as fed_by_spec_load every
2347 insn1 for which a DEF-USE dependence (insn, insn1) exists. A
2348 load which is fed_by_spec_load is also PRISKY.
2349
2350 MFREE (maybe-free) loads are all the remaining loads. They may be
2351 exception-free, but we cannot prove it.
2352
2353 Now, all loads in IFREE and PFREE classes are considered
2354 exception-free, while all loads in IRISKY and PRISKY classes are
2355 considered exception-risky. As for loads in the MFREE class,
2356 these are considered either exception-free or exception-risky,
2357 depending on whether we are pessimistic or optimistic. We have
2358 to take the pessimistic approach to assure the safety of
2359 speculative scheduling, but we can take the optimistic approach
2360 by invoking the -fsched_spec_load_dangerous option. */
2361
2362enum INSN_TRAP_CLASS
2363{
2364 TRAP_FREE = 0, IFREE = 1, PFREE_CANDIDATE = 2,
2365 PRISKY_CANDIDATE = 3, IRISKY = 4, TRAP_RISKY = 5
2366};
2367
2368#define WORST_CLASS(class1, class2) \
2369((class1 > class2) ? class1 : class2)
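
/* A small sketch of how WORST_CLASS is meant to be used; the helper
   name and the array representation are illustrative only.  Merging
   the classes of an insn's parts keeps the riskiest one, so a PARALLEL
   containing an IFREE load and a TRAP_RISKY store ends up TRAP_RISKY.  */
#if 0
static int
merge_trap_classes_sketch (classes, n)
     int *classes;		/* classes of the insn's parts */
     int n;
{
  int i, insn_class = TRAP_FREE;

  for (i = 0; i < n; i++)
    insn_class = WORST_CLASS (insn_class, classes[i]);
  return insn_class;
}
#endif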
2370
 2371/* Indexed by INSN_UID, and set if there's a DEF-USE dependence between
 2372   some speculatively moved load insn and this one.  */
2373char *fed_by_spec_load;
2374char *is_load_insn;
2375
2376/* Non-zero if block bb_to is equal to, or reachable from block bb_from. */
2377#define IS_REACHABLE(bb_from, bb_to) \
2378(bb_from == bb_to \
2379 || IS_RGN_ENTRY (bb_from) \
2380 || (bitset_member (ancestor_edges[bb_to], \
2381 EDGE_TO_BIT (IN_EDGES (BB_TO_BLOCK (bb_from))), \
2382 edgeset_size)))
2383#define FED_BY_SPEC_LOAD(insn) (fed_by_spec_load[INSN_UID (insn)])
2384#define IS_LOAD_INSN(insn) (is_load_insn[INSN_UID (insn)])
2385
 2386/* Non-zero iff the address is composed of at most 1 register.  */
2387#define CONST_BASED_ADDRESS_P(x) \
2388 (GET_CODE (x) == REG \
2389 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
2390 || (GET_CODE (x) == LO_SUM)) \
2391 && (GET_CODE (XEXP (x, 0)) == CONST_INT \
2392 || GET_CODE (XEXP (x, 1)) == CONST_INT)))
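
/* Illustrative examples of addresses accepted and rejected by
   CONST_BASED_ADDRESS_P above:

     (reg)                          accepted: a bare register
     (plus (reg) (const_int 8))     accepted: register plus constant
     (lo_sum (reg) (const_int 4))   accepted: LO_SUM with constant part
     (plus (reg) (reg))             rejected: two registers  */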
2393
2394/* Turns on the fed_by_spec_load flag for insns fed by load_insn. */
2395
2396static void
2397set_spec_fed (load_insn)
2398 rtx load_insn;
2399{
2400 rtx link;
2401
2402 for (link = INSN_DEPEND (load_insn); link; link = XEXP (link, 1))
2403 if (GET_MODE (link) == VOIDmode)
2404 FED_BY_SPEC_LOAD (XEXP (link, 0)) = 1;
2405} /* set_spec_fed */
2406
 2407/* On the path from the insn to load_insn_bb, find a conditional branch
 2408   depending on insn, that guards the speculative load.  */
2409
2410static int
2411find_conditional_protection (insn, load_insn_bb)
2412 rtx insn;
2413 int load_insn_bb;
2414{
2415 rtx link;
2416
2417 /* iterate through DEF-USE forward dependences */
2418 for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
2419 {
2420 rtx next = XEXP (link, 0);
2421 if ((CONTAINING_RGN (INSN_BLOCK (next)) ==
2422 CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
2423 && IS_REACHABLE (INSN_BB (next), load_insn_bb)
2424 && load_insn_bb != INSN_BB (next)
2425 && GET_MODE (link) == VOIDmode
2426 && (GET_CODE (next) == JUMP_INSN
2427 || find_conditional_protection (next, load_insn_bb)))
2428 return 1;
2429 }
2430 return 0;
2431} /* find_conditional_protection */
2432
2433/* Returns 1 if the same insn1 that participates in the computation
2434 of load_insn's address is feeding a conditional branch that is
 2435   guarding load_insn.  This is true if we find the two DEF-USE
2436 chains:
2437 insn1 -> ... -> conditional-branch
2438 insn1 -> ... -> load_insn,
 2439   and if a flow path exists:
2440 insn1 -> ... -> conditional-branch -> ... -> load_insn,
2441 and if insn1 is on the path
2442 region-entry -> ... -> bb_trg -> ... load_insn.
2443
2444 Locate insn1 by climbing on LOG_LINKS from load_insn.
2445 Locate the branch by following INSN_DEPEND from insn1. */
2446
2447static int
2448is_conditionally_protected (load_insn, bb_src, bb_trg)
2449 rtx load_insn;
2450 int bb_src, bb_trg;
2451{
2452 rtx link;
2453
2454 for (link = LOG_LINKS (load_insn); link; link = XEXP (link, 1))
2455 {
2456 rtx insn1 = XEXP (link, 0);
2457
2458 /* must be a DEF-USE dependence upon non-branch */
2459 if (GET_MODE (link) != VOIDmode
2460 || GET_CODE (insn1) == JUMP_INSN)
2461 continue;
2462
 2463      /* a path must exist: region-entry -> ... -> bb_trg -> ... -> load_insn */
2464 if (INSN_BB (insn1) == bb_src
2465 || (CONTAINING_RGN (INSN_BLOCK (insn1))
2466 != CONTAINING_RGN (BB_TO_BLOCK (bb_src)))
2467 || (!IS_REACHABLE (bb_trg, INSN_BB (insn1))
2468 && !IS_REACHABLE (INSN_BB (insn1), bb_trg)))
2469 continue;
2470
2471 /* now search for the conditional-branch */
2472 if (find_conditional_protection (insn1, bb_src))
2473 return 1;
2474
2475 /* recursive step: search another insn1, "above" current insn1. */
2476 return is_conditionally_protected (insn1, bb_src, bb_trg);
2477 }
2478
 2479  /* the chain does not exist */
2480 return 0;
2481} /* is_conditionally_protected */
2482
2483/* Returns 1 if a clue for "similar load" 'insn2' is found, and hence
2484 load_insn can move speculatively from bb_src to bb_trg. All the
2485 following must hold:
2486
2487 (1) both loads have 1 base register (PFREE_CANDIDATEs).
 2488   (2) load_insn and load2 have a def-use dependence upon
 2489       the same insn 'insn1'.
 2490   (3) either load2 is in bb_trg, or:
 2491       - there's only one split-block, and
 2492       - load2 is in that split-block.
2493
2494 From all these we can conclude that the two loads access memory
2495 addresses that differ at most by a constant, and hence if moving
2496 load_insn would cause an exception, it would have been caused by
2497 load2 anyhow. */
2498
2499static int
2500is_pfree (load_insn, bb_src, bb_trg)
2501 rtx load_insn;
2502 int bb_src, bb_trg;
2503{
2504 rtx back_link;
2505 register candidate *candp = candidate_table + bb_src;
2506
2507 if (candp->split_bbs.nr_members != 1)
2508 /* must have exactly one escape block */
2509 return 0;
2510
2511 for (back_link = LOG_LINKS (load_insn);
2512 back_link; back_link = XEXP (back_link, 1))
2513 {
2514 rtx insn1 = XEXP (back_link, 0);
2515
2516 if (GET_MODE (back_link) == VOIDmode)
2517 {
2518 /* found a DEF-USE dependence (insn1, load_insn) */
2519 rtx fore_link;
2520
2521 for (fore_link = INSN_DEPEND (insn1);
2522 fore_link; fore_link = XEXP (fore_link, 1))
2523 {
2524 rtx insn2 = XEXP (fore_link, 0);
2525 if (GET_MODE (fore_link) == VOIDmode)
2526 {
2527 /* found a DEF-USE dependence (insn1, insn2) */
ac957f13 2528 if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
8c660648
JL
2529 /* insn2 not guaranteed to be a 1 base reg load */
2530 continue;
2531
2532 if (INSN_BB (insn2) == bb_trg)
2533 /* insn2 is the similar load, in the target block */
2534 return 1;
2535
2536 if (*(candp->split_bbs.first_member) == INSN_BLOCK (insn2))
2537 /* insn2 is a similar load, in a split-block */
2538 return 1;
2539 }
2540 }
2541 }
2542 }
2543
2544 /* couldn't find a similar load */
2545 return 0;
2546} /* is_pfree */
2547
2548/* Returns a class that insn with GET_DEST(insn)=x may belong to,
2549 as found by analyzing insn's expression. */
2550
2551static int
2552may_trap_exp (x, is_store)
2553 rtx x;
2554 int is_store;
2555{
2556 enum rtx_code code;
2557
2558 if (x == 0)
2559 return TRAP_FREE;
2560 code = GET_CODE (x);
2561 if (is_store)
2562 {
2563 if (code == MEM)
2564 return TRAP_RISKY;
2565 else
2566 return TRAP_FREE;
2567 }
2568 if (code == MEM)
2569 {
2570 /* The insn uses memory */
2571 /* a volatile load */
2572 if (MEM_VOLATILE_P (x))
2573 return IRISKY;
2574 /* an exception-free load */
2575 if (!may_trap_p (x))
2576 return IFREE;
2577 /* a load with 1 base register, to be further checked */
2578 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
2579 return PFREE_CANDIDATE;
2580 /* no info on the load, to be further checked */
2581 return PRISKY_CANDIDATE;
2582 }
2583 else
2584 {
2585 char *fmt;
2586 int i, insn_class = TRAP_FREE;
2587
2588 /* neither store nor load, check if it may cause a trap */
2589 if (may_trap_p (x))
2590 return TRAP_RISKY;
2591 /* recursive step: walk the insn... */
2592 fmt = GET_RTX_FORMAT (code);
2593 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2594 {
2595 if (fmt[i] == 'e')
2596 {
2597 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
2598 insn_class = WORST_CLASS (insn_class, tmp_class);
2599 }
2600 else if (fmt[i] == 'E')
2601 {
2602 int j;
2603 for (j = 0; j < XVECLEN (x, i); j++)
2604 {
2605 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
2606 insn_class = WORST_CLASS (insn_class, tmp_class);
2607 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
2608 break;
2609 }
2610 }
2611 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
2612 break;
2613 }
2614 return insn_class;
2615 }
2616} /* may_trap_exp */
2617
2618
 2619/* Classifies insn for the purpose of verifying that it can be
 2620   moved speculatively, by examining its pattern, returning:
 2621   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
 2622   TRAP_FREE: non-load insn.
 2623   IFREE: load from a globally safe location.
 2624   IRISKY: volatile load.
 2625   PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
 2626   being either PFREE or PRISKY.  */
2627
2628static int
ac957f13 2629haifa_classify_insn (insn)
8c660648
JL
2630 rtx insn;
2631{
2632 rtx pat = PATTERN (insn);
2633 int tmp_class = TRAP_FREE;
2634 int insn_class = TRAP_FREE;
2635 enum rtx_code code;
2636
2637 if (GET_CODE (pat) == PARALLEL)
2638 {
2639 int i, len = XVECLEN (pat, 0);
2640
2641 for (i = len - 1; i >= 0; i--)
2642 {
2643 code = GET_CODE (XVECEXP (pat, 0, i));
2644 switch (code)
2645 {
2646 case CLOBBER:
2647 /* test if it is a 'store' */
2648 tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
2649 break;
2650 case SET:
2651 /* test if it is a store */
2652 tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
2653 if (tmp_class == TRAP_RISKY)
2654 break;
2655 /* test if it is a load */
2656 tmp_class =
2657 WORST_CLASS (tmp_class,
2658 may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)), 0));
e0cd0770
JC
2659 break;
2660 case TRAP_IF:
2661 tmp_class = TRAP_RISKY;
2662 break;
8c660648
JL
2663 default:;
2664 }
2665 insn_class = WORST_CLASS (insn_class, tmp_class);
2666 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
2667 break;
2668 }
2669 }
2670 else
2671 {
2672 code = GET_CODE (pat);
2673 switch (code)
2674 {
2675 case CLOBBER:
2676 /* test if it is a 'store' */
2677 tmp_class = may_trap_exp (XEXP (pat, 0), 1);
2678 break;
2679 case SET:
2680 /* test if it is a store */
2681 tmp_class = may_trap_exp (SET_DEST (pat), 1);
2682 if (tmp_class == TRAP_RISKY)
2683 break;
2684 /* test if it is a load */
2685 tmp_class =
2686 WORST_CLASS (tmp_class,
2687 may_trap_exp (SET_SRC (pat), 0));
e0cd0770
JC
2688 break;
2689 case TRAP_IF:
2690 tmp_class = TRAP_RISKY;
2691 break;
8c660648
JL
2692 default:;
2693 }
2694 insn_class = tmp_class;
2695 }
2696
2697 return insn_class;
2698
ac957f13 2699} /* haifa_classify_insn */
8c660648
JL
2700
2701/* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
2702 a load moved speculatively, or if load_insn is protected by
2703 a compare on load_insn's address). */
2704
2705static int
2706is_prisky (load_insn, bb_src, bb_trg)
2707 rtx load_insn;
2708 int bb_src, bb_trg;
2709{
2710 if (FED_BY_SPEC_LOAD (load_insn))
2711 return 1;
2712
2713 if (LOG_LINKS (load_insn) == NULL)
2714 /* dependence may 'hide' out of the region. */
2715 return 1;
2716
2717 if (is_conditionally_protected (load_insn, bb_src, bb_trg))
2718 return 1;
2719
2720 return 0;
2721} /* is_prisky */
2722
2723/* Insn is a candidate to be moved speculatively from bb_src to bb_trg.
2724 Return 1 if insn is exception-free (and the motion is valid)
2725 and 0 otherwise. */
2726
2727static int
2728is_exception_free (insn, bb_src, bb_trg)
2729 rtx insn;
2730 int bb_src, bb_trg;
2731{
ac957f13 2732 int insn_class = haifa_classify_insn (insn);
8c660648
JL
2733
2734 /* handle non-load insns */
2735 switch (insn_class)
2736 {
2737 case TRAP_FREE:
2738 return 1;
2739 case TRAP_RISKY:
2740 return 0;
2741 default:;
2742 }
2743
2744 /* handle loads */
2745 if (!flag_schedule_speculative_load)
2746 return 0;
2747 IS_LOAD_INSN (insn) = 1;
2748 switch (insn_class)
2749 {
2750 case IFREE:
 2751      return 1;
2752 case IRISKY:
2753 return 0;
2754 case PFREE_CANDIDATE:
2755 if (is_pfree (insn, bb_src, bb_trg))
2756 return 1;
2757 /* don't 'break' here: PFREE-candidate is also PRISKY-candidate */
2758 case PRISKY_CANDIDATE:
2759 if (!flag_schedule_speculative_load_dangerous
2760 || is_prisky (insn, bb_src, bb_trg))
2761 return 0;
2762 break;
2763 default:;
2764 }
2765
2766 return flag_schedule_speculative_load_dangerous;
2767} /* is_exception_free */
2768
2769
2770/* Process an insn's memory dependencies. There are four kinds of
2771 dependencies:
2772
2773 (0) read dependence: read follows read
2774 (1) true dependence: read follows write
2775 (2) anti dependence: write follows read
2776 (3) output dependence: write follows write
2777
2778 We are careful to build only dependencies which actually exist, and
2779 use transitivity to avoid building too many links. */
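
/* A sketch of the four dependence kinds above, written as plain C
   statements standing in for insns; all names are illustrative only.  */
#if 0
static void
dependence_kinds_sketch (mem, r1, r2)
     int *mem;
     int r1, r2;
{
  r1 = mem[0];	/* (0) read dependence:   a read ...            */
  r2 = mem[0];	/*     followed by another read.                */

  mem[0] = r1;	/* (1) true dependence:   a write ...           */
  r2 = mem[0];	/*     followed by a read of the same slot.     */

  r1 = mem[0];	/* (2) anti dependence:   a read ...            */
  mem[0] = r2;	/*     followed by a write.                     */

  mem[0] = r1;	/* (3) output dependence: a write ...           */
  mem[0] = r2;	/*     followed by another write.               */
}
#endif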
2780\f
2781/* Return the INSN_LIST containing INSN in LIST, or NULL
2782 if LIST does not contain INSN. */
2783
cbb13457 2784HAIFA_INLINE static rtx
8c660648
JL
2785find_insn_list (insn, list)
2786 rtx insn;
2787 rtx list;
2788{
2789 while (list)
2790 {
2791 if (XEXP (list, 0) == insn)
2792 return list;
2793 list = XEXP (list, 1);
2794 }
2795 return 0;
2796}
2797
2798
2799/* Return 1 if the pair (insn, x) is found in (LIST, LIST1), or 0 otherwise. */
2800
cbb13457 2801HAIFA_INLINE static char
8c660648
JL
2802find_insn_mem_list (insn, x, list, list1)
2803 rtx insn, x;
2804 rtx list, list1;
2805{
2806 while (list)
2807 {
2808 if (XEXP (list, 0) == insn
2809 && XEXP (list1, 0) == x)
2810 return 1;
2811 list = XEXP (list, 1);
2812 list1 = XEXP (list1, 1);
2813 }
2814 return 0;
2815}
2816
2817
2818/* Compute the function units used by INSN. This caches the value
2819 returned by function_units_used. A function unit is encoded as the
 2820   unit number if the value is non-negative and the complement of a
2821 mask if the value is negative. A function unit index is the
2822 non-negative encoding. */
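
/* A sketch of walking the encoding just described; the helper is
   hypothetical, but the decoding loop mirrors the `unit = ~unit'
   loops used elsewhere in this file.  A non-negative value names one
   unit; a negative value is the complement of a bit mask naming
   several.  */
#if 0
static void
for_each_unit_sketch (encoded, visit)
     int encoded;
     void (*visit) ();
{
  if (encoded >= 0)
    (*visit) (encoded);		/* a single unit, stored directly */
  else
    {
      int i, mask;

      /* Recover the mask and visit every set bit.  */
      for (i = 0, mask = ~encoded; mask; i++, mask >>= 1)
	if ((mask & 1) != 0)
	  (*visit) (i);
    }
}
#endif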
2823
cbb13457 2824HAIFA_INLINE static int
8c660648
JL
2825insn_unit (insn)
2826 rtx insn;
2827{
2828 register int unit = INSN_UNIT (insn);
2829
2830 if (unit == 0)
2831 {
2832 recog_memoized (insn);
2833
2834 /* A USE insn, or something else we don't need to understand.
2835 We can't pass these directly to function_units_used because it will
2836 trigger a fatal error for unrecognizable insns. */
2837 if (INSN_CODE (insn) < 0)
2838 unit = -1;
2839 else
2840 {
2841 unit = function_units_used (insn);
2842 /* Increment non-negative values so we can cache zero. */
2843 if (unit >= 0)
2844 unit++;
2845 }
2846 /* We only cache 16 bits of the result, so if the value is out of
2847 range, don't cache it. */
2848 if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT
2849 || unit >= 0
77f3d48a 2850 || (unit & ~((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0)
8c660648
JL
2851 INSN_UNIT (insn) = unit;
2852 }
2853 return (unit > 0 ? unit - 1 : unit);
2854}
2855
2856/* Compute the blockage range for executing INSN on UNIT. This caches
2857 the value returned by the blockage_range_function for the unit.
2858 These values are encoded in an int where the upper half gives the
2859 minimum value and the lower half gives the maximum value. */
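
/* A sketch of the packing just described, with hypothetical helpers;
   the file's own MIN_BLOCKAGE_COST and MAX_BLOCKAGE_COST macros
   extract the same two fields.  */
#if 0
#define SKETCH_HALF_BITS (HOST_BITS_PER_INT / 2)

static unsigned int
pack_range_sketch (min, max)
     unsigned int min, max;
{
  return (min << SKETCH_HALF_BITS) | max;	/* min above, max below */
}

static unsigned int
unpack_min_sketch (range)
     unsigned int range;
{
  return range >> SKETCH_HALF_BITS;
}

static unsigned int
unpack_max_sketch (range)
     unsigned int range;
{
  return range & ((1U << SKETCH_HALF_BITS) - 1);
}
#endif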
2860
cbb13457 2861HAIFA_INLINE static unsigned int
8c660648
JL
2862blockage_range (unit, insn)
2863 int unit;
2864 rtx insn;
2865{
2866 unsigned int blockage = INSN_BLOCKAGE (insn);
2867 unsigned int range;
2868
79c9824e 2869 if ((int) UNIT_BLOCKED (blockage) != unit + 1)
8c660648
JL
2870 {
2871 range = function_units[unit].blockage_range_function (insn);
2872 /* We only cache the blockage range for one unit and then only if
2873 the values fit. */
2874 if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS)
2875 INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range);
2876 }
2877 else
2878 range = BLOCKAGE_RANGE (blockage);
2879
2880 return range;
2881}
2882
2883/* A vector indexed by function unit instance giving the last insn to use
2884 the unit. The value of the function unit instance index for unit U
2885 instance I is (U + I * FUNCTION_UNITS_SIZE). */
2886static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];
2887
2888/* A vector indexed by function unit instance giving the minimum time when
2889 the unit will unblock based on the maximum blockage cost. */
2890static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];
2891
2892/* A vector indexed by function unit number giving the number of insns
2893 that remain to use the unit. */
2894static int unit_n_insns[FUNCTION_UNITS_SIZE];
2895
2896/* Reset the function unit state to the null state. */
2897
2898static void
2899clear_units ()
2900{
2901 bzero ((char *) unit_last_insn, sizeof (unit_last_insn));
2902 bzero ((char *) unit_tick, sizeof (unit_tick));
2903 bzero ((char *) unit_n_insns, sizeof (unit_n_insns));
2904}
2905
2906/* Return the issue-delay of an insn */
2907
cbb13457 2908HAIFA_INLINE static int
8c660648
JL
2909insn_issue_delay (insn)
2910 rtx insn;
2911{
8c660648
JL
2912 int i, delay = 0;
2913 int unit = insn_unit (insn);
2914
2915 /* efficiency note: in fact, we are working 'hard' to compute a
 2916     value that was available in the md file, and is not available in
 2917     the function_units[] structure.  It would be nice to have this
2918 value there, too. */
2919 if (unit >= 0)
2920 {
2921 if (function_units[unit].blockage_range_function &&
2922 function_units[unit].blockage_function)
2923 delay = function_units[unit].blockage_function (insn, insn);
2924 }
2925 else
2926 for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
2927 if ((unit & 1) != 0 && function_units[i].blockage_range_function
2928 && function_units[i].blockage_function)
2929 delay = MAX (delay, function_units[i].blockage_function (insn, insn));
2930
2931 return delay;
2932}
2933
2934/* Return the actual hazard cost of executing INSN on the unit UNIT,
2935 instance INSTANCE at time CLOCK if the previous actual hazard cost
2936 was COST. */
2937
cbb13457 2938HAIFA_INLINE static int
8c660648
JL
2939actual_hazard_this_instance (unit, instance, insn, clock, cost)
2940 int unit, instance, clock, cost;
2941 rtx insn;
2942{
2943 int tick = unit_tick[instance]; /* issue time of the last issued insn */
2944
2945 if (tick - clock > cost)
2946 {
2947 /* The scheduler is operating forward, so unit's last insn is the
2948 executing insn and INSN is the candidate insn. We want a
2949 more exact measure of the blockage if we execute INSN at CLOCK
2950 given when we committed the execution of the unit's last insn.
2951
2952 The blockage value is given by either the unit's max blockage
2953 constant, blockage range function, or blockage function. Use
2954 the most exact form for the given unit. */
2955
2956 if (function_units[unit].blockage_range_function)
2957 {
2958 if (function_units[unit].blockage_function)
2959 tick += (function_units[unit].blockage_function
2960 (unit_last_insn[instance], insn)
2961 - function_units[unit].max_blockage);
2962 else
2963 tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn))
2964 - function_units[unit].max_blockage);
2965 }
2966 if (tick - clock > cost)
2967 cost = tick - clock;
2968 }
2969 return cost;
2970}
2971
2972/* Record INSN as having begun execution on the units encoded by UNIT at
2973 time CLOCK. */
2974
cbb13457 2975HAIFA_INLINE static void
8c660648
JL
2976schedule_unit (unit, insn, clock)
2977 int unit, clock;
2978 rtx insn;
2979{
2980 int i;
2981
2982 if (unit >= 0)
2983 {
2984 int instance = unit;
2985#if MAX_MULTIPLICITY > 1
2986 /* Find the first free instance of the function unit and use that
2987 one. We assume that one is free. */
2988 for (i = function_units[unit].multiplicity - 1; i > 0; i--)
2989 {
2990 if (!actual_hazard_this_instance (unit, instance, insn, clock, 0))
2991 break;
2992 instance += FUNCTION_UNITS_SIZE;
2993 }
2994#endif
2995 unit_last_insn[instance] = insn;
2996 unit_tick[instance] = (clock + function_units[unit].max_blockage);
2997 }
2998 else
2999 for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
3000 if ((unit & 1) != 0)
3001 schedule_unit (i, insn, clock);
3002}
3003
3004/* Return the actual hazard cost of executing INSN on the units encoded by
3005 UNIT at time CLOCK if the previous actual hazard cost was COST. */
3006
cbb13457 3007HAIFA_INLINE static int
8c660648
JL
3008actual_hazard (unit, insn, clock, cost)
3009 int unit, clock, cost;
3010 rtx insn;
3011{
3012 int i;
3013
3014 if (unit >= 0)
3015 {
3016 /* Find the instance of the function unit with the minimum hazard. */
3017 int instance = unit;
3018 int best_cost = actual_hazard_this_instance (unit, instance, insn,
3019 clock, cost);
3020 int this_cost;
3021
3022#if MAX_MULTIPLICITY > 1
3023 if (best_cost > cost)
3024 {
3025 for (i = function_units[unit].multiplicity - 1; i > 0; i--)
3026 {
3027 instance += FUNCTION_UNITS_SIZE;
3028 this_cost = actual_hazard_this_instance (unit, instance, insn,
3029 clock, cost);
3030 if (this_cost < best_cost)
3031 {
3032 best_cost = this_cost;
3033 if (this_cost <= cost)
3034 break;
3035 }
3036 }
3037 }
3038#endif
3039 cost = MAX (cost, best_cost);
3040 }
3041 else
3042 for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
3043 if ((unit & 1) != 0)
3044 cost = actual_hazard (i, insn, clock, cost);
3045
3046 return cost;
3047}
3048
3049/* Return the potential hazard cost of executing an instruction on the
3050 units encoded by UNIT if the previous potential hazard cost was COST.
3051 An insn with a large blockage time is chosen in preference to one
3052 with a smaller time; an insn that uses a unit that is more likely
3053 to be used is chosen in preference to one with a unit that is less
3054 used. We are trying to minimize a subsequent actual hazard. */
3055
cbb13457 3056HAIFA_INLINE static int
8c660648
JL
3057potential_hazard (unit, insn, cost)
3058 int unit, cost;
3059 rtx insn;
3060{
3061 int i, ncost;
3062 unsigned int minb, maxb;
3063
3064 if (unit >= 0)
3065 {
3066 minb = maxb = function_units[unit].max_blockage;
3067 if (maxb > 1)
3068 {
3069 if (function_units[unit].blockage_range_function)
3070 {
3071 maxb = minb = blockage_range (unit, insn);
3072 maxb = MAX_BLOCKAGE_COST (maxb);
3073 minb = MIN_BLOCKAGE_COST (minb);
3074 }
3075
3076 if (maxb > 1)
3077 {
3078 /* Make the number of instructions left dominate. Make the
3079 minimum delay dominate the maximum delay. If all these
3080 are the same, use the unit number to add an arbitrary
3081 ordering. Other terms can be added. */
3082 ncost = minb * 0x40 + maxb;
3083 ncost *= (unit_n_insns[unit] - 1) * 0x1000 + unit;
3084 if (ncost > cost)
3085 cost = ncost;
3086 }
3087 }
3088 }
3089 else
3090 for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
3091 if ((unit & 1) != 0)
3092 cost = potential_hazard (i, insn, cost);
3093
3094 return cost;
3095}
3096
3097/* Compute cost of executing INSN given the dependence LINK on the insn USED.
3098 This is the number of cycles between instruction issue and
3099 instruction results. */
3100
cbb13457 3101HAIFA_INLINE static int
8c660648
JL
3102insn_cost (insn, link, used)
3103 rtx insn, link, used;
3104{
3105 register int cost = INSN_COST (insn);
3106
3107 if (cost == 0)
3108 {
3109 recog_memoized (insn);
3110
3111 /* A USE insn, or something else we don't need to understand.
3112 We can't pass these directly to result_ready_cost because it will
3113 trigger a fatal error for unrecognizable insns. */
3114 if (INSN_CODE (insn) < 0)
3115 {
3116 INSN_COST (insn) = 1;
3117 return 1;
3118 }
3119 else
3120 {
3121 cost = result_ready_cost (insn);
3122
3123 if (cost < 1)
3124 cost = 1;
3125
3126 INSN_COST (insn) = cost;
3127 }
3128 }
3129
3130 /* in this case estimate cost without caring how insn is used. */
3131 if (link == 0 && used == 0)
3132 return cost;
3133
3134 /* A USE insn should never require the value used to be computed. This
3135 allows the computation of a function's result and parameter values to
3136 overlap the return and call. */
3137 recog_memoized (used);
3138 if (INSN_CODE (used) < 0)
3139 LINK_COST_FREE (link) = 1;
3140
3141 /* If some dependencies vary the cost, compute the adjustment. Most
3142 commonly, the adjustment is complete: either the cost is ignored
3143 (in the case of an output- or anti-dependence), or the cost is
3144 unchanged. These values are cached in the link as LINK_COST_FREE
3145 and LINK_COST_ZERO. */
3146
3147 if (LINK_COST_FREE (link))
197043f5 3148 cost = 0;
8c660648
JL
3149#ifdef ADJUST_COST
3150 else if (!LINK_COST_ZERO (link))
3151 {
3152 int ncost = cost;
3153
3154 ADJUST_COST (used, link, insn, ncost);
197043f5
RH
3155 if (ncost < 1)
3156 {
3157 LINK_COST_FREE (link) = 1;
3158 ncost = 0;
3159 }
8c660648
JL
3160 if (cost == ncost)
3161 LINK_COST_ZERO (link) = 1;
3162 cost = ncost;
3163 }
3164#endif
3165 return cost;
3166}
3167
3168/* Compute the priority number for INSN. */
3169
3170static int
3171priority (insn)
3172 rtx insn;
3173{
3174 int this_priority;
3175 rtx link;
3176
3177 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
3178 return 0;
3179
3180 if ((this_priority = INSN_PRIORITY (insn)) == 0)
3181 {
3182 if (INSN_DEPEND (insn) == 0)
3183 this_priority = insn_cost (insn, 0, 0);
3184 else
3185 for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
3186 {
3187 rtx next;
3188 int next_priority;
3189
6d8ccdbb
JL
3190 if (RTX_INTEGRATED_P (link))
3191 continue;
3192
8c660648
JL
3193 next = XEXP (link, 0);
3194
 3195	    /* critical path is meaningful within block boundaries only */
3196 if (INSN_BLOCK (next) != INSN_BLOCK (insn))
3197 continue;
3198
3199 next_priority = insn_cost (insn, link, next) + priority (next);
3200 if (next_priority > this_priority)
3201 this_priority = next_priority;
3202 }
3203 INSN_PRIORITY (insn) = this_priority;
3204 }
3205 return this_priority;
3206}
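
/* A compact sketch of the critical-path recurrence implemented above:
   a node's priority is its own cost plus the largest priority among
   its successors.  The fixed-size DAG representation and names are
   illustrative only (the real code caches results in INSN_PRIORITY
   instead of recomputing).  */
#if 0
#define MAX_SUCCS_SKETCH 4

static int
priority_sketch (node, cost, succs, n_succs)
     int node;
     int *cost;				/* per-node cost */
     int (*succs)[MAX_SUCCS_SKETCH];	/* successor lists */
     int *n_succs;			/* successor counts */
{
  int j, best = 0;

  for (j = 0; j < n_succs[node]; j++)
    {
      int p = priority_sketch (succs[node][j], cost, succs, n_succs);
      if (p > best)
	best = p;
    }
  return cost[node] + best;
}
#endif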
3207\f
3208
3209/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
3210 them to the unused_*_list variables, so that they can be reused. */
3211
8c660648
JL
3212static void
3213free_pending_lists ()
3214{
8c660648
JL
3215 if (current_nr_blocks <= 1)
3216 {
ebb7b10b
RH
3217 free_list (&pending_read_insns, &unused_insn_list);
3218 free_list (&pending_write_insns, &unused_insn_list);
3219 free_list (&pending_read_mems, &unused_expr_list);
3220 free_list (&pending_write_mems, &unused_expr_list);
8c660648
JL
3221 }
3222 else
3223 {
3224 /* interblock scheduling */
3225 int bb;
3226
3227 for (bb = 0; bb < current_nr_blocks; bb++)
3228 {
ebb7b10b
RH
3229 free_list (&bb_pending_read_insns[bb], &unused_insn_list);
3230 free_list (&bb_pending_write_insns[bb], &unused_insn_list);
3231 free_list (&bb_pending_read_mems[bb], &unused_expr_list);
3232 free_list (&bb_pending_write_mems[bb], &unused_expr_list);
8c660648
JL
3233 }
3234 }
3235}
3236
3237/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
3238 The MEM is a memory reference contained within INSN, which we are saving
3239 so that we can do memory aliasing on it. */
3240
3241static void
3242add_insn_mem_dependence (insn_list, mem_list, insn, mem)
3243 rtx *insn_list, *mem_list, insn, mem;
3244{
3245 register rtx link;
3246
ebb7b10b 3247 link = alloc_INSN_LIST (insn, *insn_list);
8c660648
JL
3248 *insn_list = link;
3249
ebb7b10b 3250 link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
8c660648
JL
3251 *mem_list = link;
3252
3253 pending_lists_length++;
3254}
3255\f
3256
3257/* Make a dependency between every memory reference on the pending lists
3258 and INSN, thus flushing the pending lists. If ONLY_WRITE, don't flush
3259 the read list. */
3260
3261static void
3262flush_pending_lists (insn, only_write)
3263 rtx insn;
3264 int only_write;
3265{
3266 rtx u;
3267 rtx link;
3268
3269 while (pending_read_insns && ! only_write)
3270 {
3271 add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);
3272
3273 link = pending_read_insns;
3274 pending_read_insns = XEXP (pending_read_insns, 1);
3275 XEXP (link, 1) = unused_insn_list;
3276 unused_insn_list = link;
3277
3278 link = pending_read_mems;
3279 pending_read_mems = XEXP (pending_read_mems, 1);
3280 XEXP (link, 1) = unused_expr_list;
3281 unused_expr_list = link;
3282 }
3283 while (pending_write_insns)
3284 {
3285 add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);
3286
3287 link = pending_write_insns;
3288 pending_write_insns = XEXP (pending_write_insns, 1);
3289 XEXP (link, 1) = unused_insn_list;
3290 unused_insn_list = link;
3291
3292 link = pending_write_mems;
3293 pending_write_mems = XEXP (pending_write_mems, 1);
3294 XEXP (link, 1) = unused_expr_list;
3295 unused_expr_list = link;
3296 }
3297 pending_lists_length = 0;
3298
3299 /* last_pending_memory_flush is now a list of insns */
3300 for (u = last_pending_memory_flush; u; u = XEXP (u, 1))
3301 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3302
ebb7b10b
RH
3303 free_list (&last_pending_memory_flush, &unused_insn_list);
3304 last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
8c660648
JL
3305}
3306
3307/* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
3308 by the write to the destination of X, and reads of everything mentioned. */
3309
3310static void
3311sched_analyze_1 (x, insn)
3312 rtx x;
3313 rtx insn;
3314{
3315 register int regno;
3316 register rtx dest = SET_DEST (x);
28c95eff 3317 enum rtx_code code = GET_CODE (x);
8c660648
JL
3318
3319 if (dest == 0)
3320 return;
3321
c0222c21
DM
3322 if (GET_CODE (dest) == PARALLEL
3323 && GET_MODE (dest) == BLKmode)
3324 {
3325 register int i;
3326 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
3327 sched_analyze_1 (XVECEXP (dest, 0, i), insn);
3328 if (GET_CODE (x) == SET)
3329 sched_analyze_2 (SET_SRC (x), insn);
3330 return;
3331 }
3332
8c660648
JL
3333 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
3334 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3335 {
3336 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3337 {
3338 /* The second and third arguments are values read by this insn. */
3339 sched_analyze_2 (XEXP (dest, 1), insn);
3340 sched_analyze_2 (XEXP (dest, 2), insn);
3341 }
3342 dest = SUBREG_REG (dest);
3343 }
3344
3345 if (GET_CODE (dest) == REG)
3346 {
3347 register int i;
3348
3349 regno = REGNO (dest);
3350
3351 /* A hard reg in a wide mode may really be multiple registers.
3352 If so, mark all of them just like the first. */
3353 if (regno < FIRST_PSEUDO_REGISTER)
3354 {
3355 i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
3356 while (--i >= 0)
3357 {
3358 rtx u;
3359
3360 for (u = reg_last_uses[regno + i]; u; u = XEXP (u, 1))
3361 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
8c660648
JL
3362
3363 for (u = reg_last_sets[regno + i]; u; u = XEXP (u, 1))
3364 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
3365
28c95eff
RH
3366 /* Clobbers need not be ordered with respect to one another,
3367 but sets must be ordered with respect to a pending clobber. */
3368 if (code == SET)
3369 {
0adb548c 3370 free_list (&reg_last_uses[regno + i], &unused_insn_list);
28c95eff
RH
3371 for (u = reg_last_clobbers[regno + i]; u; u = XEXP (u, 1))
3372 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
3373 SET_REGNO_REG_SET (reg_pending_sets, regno + i);
3374 }
3375 else
3376 SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
8c660648 3377
28c95eff
RH
3378 /* Function calls clobber all call_used regs. */
3379 if (global_regs[regno + i]
3380 || (code == SET && call_used_regs[regno + i]))
8c660648
JL
3381 for (u = last_function_call; u; u = XEXP (u, 1))
3382 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3383 }
3384 }
3385 else
3386 {
3387 rtx u;
3388
3389 for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
3390 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3391
3392 for (u = reg_last_sets[regno]; u; u = XEXP (u, 1))
3393 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
3394
28c95eff 3395 if (code == SET)
7399257b 3396 {
0adb548c 3397 free_list (&reg_last_uses[regno], &unused_insn_list);
3398 for (u = reg_last_clobbers[regno]; u; u = XEXP (u, 1))
3399 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
3400 SET_REGNO_REG_SET (reg_pending_sets, regno);
3401 }
3402 else
3403 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
3404
3405 /* Pseudos that are REG_EQUIV to something may be replaced
3406 by that during reloading. We need only add dependencies for
3407 the address in the REG_EQUIV note. */
3408 if (!reload_completed
3409 && reg_known_equiv_p[regno]
3410 && GET_CODE (reg_known_value[regno]) == MEM)
3411 sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);
3412
3413 /* Don't let it cross a call after scheduling if it doesn't
3414 already cross one. */
3415
3416 if (REG_N_CALLS_CROSSED (regno) == 0)
3417 for (u = last_function_call; u; u = XEXP (u, 1))
3418 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3419 }
3420 }
3421 else if (GET_CODE (dest) == MEM)
3422 {
3423 /* Writing memory. */
3424
3425 if (pending_lists_length > 32)
3426 {
3427 /* Flush all pending reads and writes to prevent the pending lists
3428 from getting any larger. Insn scheduling runs too slowly when
3429 these lists get long. The number 32 was chosen because it
3430 seems like a reasonable number. When compiling GCC with itself,
3431 this flush occurs 8 times for sparc, and 10 times for m88k using
3432 the number 32. */
3433 flush_pending_lists (insn, 0);
3434 }
3435 else
3436 {
3437 rtx u;
3438 rtx pending, pending_mem;
3439
3440 pending = pending_read_insns;
3441 pending_mem = pending_read_mems;
3442 while (pending)
3443 {
3444 /* If a dependency already exists, don't create a new one. */
3445 if (!find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
3446 if (anti_dependence (XEXP (pending_mem, 0), dest))
3447 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
3448
3449 pending = XEXP (pending, 1);
3450 pending_mem = XEXP (pending_mem, 1);
3451 }
3452
3453 pending = pending_write_insns;
3454 pending_mem = pending_write_mems;
3455 while (pending)
3456 {
3457 /* If a dependency already exists, don't create a new one. */
3458 if (!find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
3459 if (output_dependence (XEXP (pending_mem, 0), dest))
3460 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
3461
3462 pending = XEXP (pending, 1);
3463 pending_mem = XEXP (pending_mem, 1);
3464 }
3465
3466 for (u = last_pending_memory_flush; u; u = XEXP (u, 1))
3467 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3468
3469 add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
3470 insn, dest);
3471 }
3472 sched_analyze_2 (XEXP (dest, 0), insn);
3473 }
3474
3475 /* Analyze reads. */
3476 if (GET_CODE (x) == SET)
3477 sched_analyze_2 (SET_SRC (x), insn);
3478}
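
/* Illustrative sketch (annotation added in this edit, not part of the
   original file): the dependence kinds sched_analyze_1 records for a
   register write.  A write after a read creates an anti dependence; a
   write after a write creates an output dependence.  The helper below is
   hypothetical and merely restates the hard-register loop above for a
   multi-word mode.  */
#if 0
static void
example_note_hard_reg_write (insn, regno, mode)
     rtx insn;
     int regno;
     enum machine_mode mode;
{
  int n = HARD_REGNO_NREGS (regno, mode);
  while (--n >= 0)
    {
      rtx u;

      /* Pending reads of the register must happen first: anti
         dependence.  */
      for (u = reg_last_uses[regno + n]; u; u = XEXP (u, 1))
        add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

      /* Earlier writes must happen first: output dependence.  */
      for (u = reg_last_sets[regno + n]; u; u = XEXP (u, 1))
        add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
    }
}
#endif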
3479
3480/* Analyze the uses of memory and registers in rtx X in INSN. */
3481
3482static void
3483sched_analyze_2 (x, insn)
3484 rtx x;
3485 rtx insn;
3486{
3487 register int i;
3488 register int j;
3489 register enum rtx_code code;
3490 register char *fmt;
3491
3492 if (x == 0)
3493 return;
3494
3495 code = GET_CODE (x);
3496
3497 switch (code)
3498 {
3499 case CONST_INT:
3500 case CONST_DOUBLE:
3501 case SYMBOL_REF:
3502 case CONST:
3503 case LABEL_REF:
3504 /* Ignore constants. Note that we must handle CONST_DOUBLE here
3505 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
3506 this does not mean that this insn is using cc0. */
3507 return;
3508
3509#ifdef HAVE_cc0
3510 case CC0:
3511 {
3512 rtx link, prev;
3513
3514 /* User of CC0 depends on immediately preceding insn. */
3515 SCHED_GROUP_P (insn) = 1;
3516
3517 /* There may be a note before this insn now, but all notes will
3518 be removed before we actually try to schedule the insns, so
3519 it won't cause a problem later. We must avoid it here though. */
3520 prev = prev_nonnote_insn (insn);
3521
3522 /* Make a copy of all dependencies on the immediately previous insn,
3523 and add to this insn. This is so that all the dependencies will
3524 apply to the group. Remove an explicit dependence on this insn
3525 as SCHED_GROUP_P now represents it. */
3526
3527 if (find_insn_list (prev, LOG_LINKS (insn)))
3528 remove_dependence (insn, prev);
3529
3530 for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
3531 add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
3532
3533 return;
3534 }
3535#endif
3536
3537 case REG:
3538 {
3539 rtx u;
3540 int regno = REGNO (x);
3541 if (regno < FIRST_PSEUDO_REGISTER)
3542 {
3543 int i;
3544
3545 i = HARD_REGNO_NREGS (regno, GET_MODE (x));
3546 while (--i >= 0)
3547 {
3548 reg_last_uses[regno + i]
ebb7b10b 3549 = alloc_INSN_LIST (insn, reg_last_uses[regno + i]);
3550
3551 for (u = reg_last_sets[regno + i]; u; u = XEXP (u, 1))
3552 add_dependence (insn, XEXP (u, 0), 0);
3553
3554 /* ??? This should never happen. */
3555 for (u = reg_last_clobbers[regno + i]; u; u = XEXP (u, 1))
3556 add_dependence (insn, XEXP (u, 0), 0);
3557
3558 if ((call_used_regs[regno + i] || global_regs[regno + i]))
3559 /* Function calls clobber all call_used regs. */
3560 for (u = last_function_call; u; u = XEXP (u, 1))
3561 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3562 }
3563 }
3564 else
3565 {
ebb7b10b 3566 reg_last_uses[regno] = alloc_INSN_LIST (insn, reg_last_uses[regno]);
3567
3568 for (u = reg_last_sets[regno]; u; u = XEXP (u, 1))
3569 add_dependence (insn, XEXP (u, 0), 0);
3570
3571 /* ??? This should never happen. */
3572 for (u = reg_last_clobbers[regno]; u; u = XEXP (u, 1))
3573 add_dependence (insn, XEXP (u, 0), 0);
3574
3575 /* Pseudos that are REG_EQUIV to something may be replaced
3576 by that during reloading. We need only add dependencies for
3577 the address in the REG_EQUIV note. */
3578 if (!reload_completed
3579 && reg_known_equiv_p[regno]
3580 && GET_CODE (reg_known_value[regno]) == MEM)
3581 sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);
3582
3583 /* If the register does not already cross any calls, then add this
3584 insn to the sched_before_next_call list so that it will still
3585 not cross calls after scheduling. */
3586 if (REG_N_CALLS_CROSSED (regno) == 0)
3587 add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
3588 }
3589 return;
3590 }
3591
3592 case MEM:
3593 {
3594 /* Reading memory. */
3595 rtx u;
3596 rtx pending, pending_mem;
3597
3598 pending = pending_read_insns;
3599 pending_mem = pending_read_mems;
3600 while (pending)
3601 {
3602 /* If a dependency already exists, don't create a new one. */
3603 if (!find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
3604 if (read_dependence (XEXP (pending_mem, 0), x))
3605 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
3606
3607 pending = XEXP (pending, 1);
3608 pending_mem = XEXP (pending_mem, 1);
3609 }
3610
3611 pending = pending_write_insns;
3612 pending_mem = pending_write_mems;
3613 while (pending)
3614 {
3615 /* If a dependency already exists, don't create a new one. */
3616 if (!find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
3617 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
3618 x, rtx_varies_p))
3619 add_dependence (insn, XEXP (pending, 0), 0);
3620
3621 pending = XEXP (pending, 1);
3622 pending_mem = XEXP (pending_mem, 1);
3623 }
3624
3625 for (u = last_pending_memory_flush; u; u = XEXP (u, 1))
3626 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3627
3628 /* Always add these dependencies to pending_reads, since
3629 this insn may be followed by a write. */
3630 add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
3631 insn, x);
3632
3633 /* Take advantage of tail recursion here. */
3634 sched_analyze_2 (XEXP (x, 0), insn);
3635 return;
3636 }
3637
3638 /* Force pending stores to memory in case a trap handler needs them. */
3639 case TRAP_IF:
3640 flush_pending_lists (insn, 1);
3641 break;
3642
3643 case ASM_OPERANDS:
3644 case ASM_INPUT:
3645 case UNSPEC_VOLATILE:
3646 {
3647 rtx u;
3648
3649 /* Traditional and volatile asm instructions must be considered to use
3650 and clobber all hard registers, all pseudo-registers and all of
3651 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
3652
3653 Consider for instance a volatile asm that changes the fpu rounding
3654 mode. An insn should not be moved across this even if it only uses
3655 pseudo-regs because it might give an incorrectly rounded result. */
3656 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
3657 {
3658 int max_reg = max_reg_num ();
3659 for (i = 0; i < max_reg; i++)
3660 {
3661 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
3662 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
0adb548c 3663 free_list (&reg_last_uses[i], &unused_insn_list);
8c660648 3664
3665 for (u = reg_last_sets[i]; u; u = XEXP (u, 1))
3666 add_dependence (insn, XEXP (u, 0), 0);
3667
3668 for (u = reg_last_clobbers[i]; u; u = XEXP (u, 1))
3669 add_dependence (insn, XEXP (u, 0), 0);
3670 }
3671 reg_pending_sets_all = 1;
3672
3673 flush_pending_lists (insn, 0);
3674 }
3675
3676 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
 3677 We cannot just fall through here since then we would be confused
 3678 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
 3679 a traditional asm, unlike its normal usage. */
3680
3681 if (code == ASM_OPERANDS)
3682 {
3683 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
3684 sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
3685 return;
3686 }
3687 break;
3688 }
3689
3690 case PRE_DEC:
3691 case POST_DEC:
3692 case PRE_INC:
3693 case POST_INC:
3694 /* These both read and modify the result. We must handle them as writes
3695 to get proper dependencies for following instructions. We must handle
3696 them as reads to get proper dependencies from this to previous
3697 instructions. Thus we need to pass them to both sched_analyze_1
3698 and sched_analyze_2. We must call sched_analyze_2 first in order
3699 to get the proper antecedent for the read. */
3700 sched_analyze_2 (XEXP (x, 0), insn);
3701 sched_analyze_1 (x, insn);
3702 return;
3703
3704 default:
3705 break;
3706 }
3707
3708 /* Other cases: walk the insn. */
3709 fmt = GET_RTX_FORMAT (code);
3710 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3711 {
3712 if (fmt[i] == 'e')
3713 sched_analyze_2 (XEXP (x, i), insn);
3714 else if (fmt[i] == 'E')
3715 for (j = 0; j < XVECLEN (x, i); j++)
3716 sched_analyze_2 (XVECEXP (x, i, j), insn);
3717 }
3718}
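
/* Illustrative sketch (annotation added in this edit): the
   GET_RTX_FORMAT walk that ends sched_analyze_2 is the standard idiom
   for visiting every sub-rtx.  The hypothetical helper below uses the
   same idiom to count REG references; 'e' marks an rtx operand, 'E' a
   vector of rtx operands, and other format codes carry no sub-rtx.  */
#if 0
static int
example_count_regs (x)
     rtx x;
{
  register int i, j;
  register char *fmt;
  int count = 0;

  if (x == 0)
    return 0;
  if (GET_CODE (x) == REG)
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        count += example_count_regs (XEXP (x, i));
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          count += example_count_regs (XVECEXP (x, i, j));
    }
  return count;
}
#endif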
3719
3720/* Analyze an INSN with pattern X to find all dependencies. */
3721
3722static void
3723sched_analyze_insn (x, insn, loop_notes)
3724 rtx x, insn;
3725 rtx loop_notes;
3726{
3727 register RTX_CODE code = GET_CODE (x);
3728 rtx link;
3729 int maxreg = max_reg_num ();
3730 int i;
3731
3732 if (code == SET || code == CLOBBER)
3733 sched_analyze_1 (x, insn);
3734 else if (code == PARALLEL)
3735 {
3736 register int i;
3737 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3738 {
3739 code = GET_CODE (XVECEXP (x, 0, i));
3740 if (code == SET || code == CLOBBER)
3741 sched_analyze_1 (XVECEXP (x, 0, i), insn);
3742 else
3743 sched_analyze_2 (XVECEXP (x, 0, i), insn);
3744 }
3745 }
3746 else
3747 sched_analyze_2 (x, insn);
3748
3749 /* Mark registers CLOBBERED or used by called function. */
3750 if (GET_CODE (insn) == CALL_INSN)
3751 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
3752 {
3753 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
3754 sched_analyze_1 (XEXP (link, 0), insn);
3755 else
3756 sched_analyze_2 (XEXP (link, 0), insn);
3757 }
3758
3759 /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
3760 block, then we must be sure that no instructions are scheduled across it.
3761 Otherwise, the reg_n_refs info (which depends on loop_depth) would
3762 become incorrect. */
3763
3764 if (loop_notes)
3765 {
3766 int max_reg = max_reg_num ();
1f1ed00c 3767 int schedule_barrier_found = 0;
3768 rtx link;
3769
3770 /* Update loop_notes with any notes from this insn. Also determine
3771 if any of the notes on the list correspond to instruction scheduling
 3772 barriers (loop, eh & setjmp notes, but not range notes). */
3773 link = loop_notes;
3774 while (XEXP (link, 1))
1f1ed00c 3775 {
3776 if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
3777 || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
3778 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
3779 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END
3780 || INTVAL (XEXP (link, 0)) == NOTE_INSN_SETJMP)
3781 schedule_barrier_found = 1;
3782
3783 link = XEXP (link, 1);
3784 }
3785 XEXP (link, 1) = REG_NOTES (insn);
3786 REG_NOTES (insn) = loop_notes;
3787
3788 /* Add dependencies if a scheduling barrier was found. */
3789 if (schedule_barrier_found)
3790 {
3791 for (i = 0; i < max_reg; i++)
3792 {
3793 rtx u;
3794 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
3795 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
0adb548c 3796 free_list (&reg_last_uses[i], &unused_insn_list);
1f1ed00c 3797
3798 for (u = reg_last_sets[i]; u; u = XEXP (u, 1))
3799 add_dependence (insn, XEXP (u, 0), 0);
3800
3801 for (u = reg_last_clobbers[i]; u; u = XEXP (u, 1))
3802 add_dependence (insn, XEXP (u, 0), 0);
3803 }
3804 reg_pending_sets_all = 1;
3805
3806 flush_pending_lists (insn, 0);
3807 }
3808
3809 }
3810
 3811 /* Accumulate clobbers until the next set so that it will be output dependent
 3812 on all of them.  At the next set we can clear the clobber list, since
 3813 subsequent sets will be output dependent on it. */
3814 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
3815 {
ebb7b10b 3816 free_list (&reg_last_sets[i], &unused_insn_list);
3817 free_list (&reg_last_clobbers[i],
3818 &unused_insn_list);
8c660648 3819 reg_last_sets[i]
ebb7b10b 3820 = alloc_INSN_LIST (insn, NULL_RTX);
8c660648 3821 });
3822 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
3823 {
3824 reg_last_clobbers[i]
3825 = alloc_INSN_LIST (insn, reg_last_clobbers[i]);
3826 });
8c660648 3827 CLEAR_REG_SET (reg_pending_sets);
28c95eff 3828 CLEAR_REG_SET (reg_pending_clobbers);
3829
3830 if (reg_pending_sets_all)
3831 {
3832 for (i = 0; i < maxreg; i++)
ebb7b10b 3833 {
3834 free_list (&reg_last_sets[i], &unused_insn_list);
3835 reg_last_sets[i] = alloc_INSN_LIST (insn, NULL_RTX);
3836 }
3837
3838 reg_pending_sets_all = 0;
3839 }
3840
3841 /* Handle function calls and function returns created by the epilogue
3842 threading code. */
3843 if (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN)
3844 {
3845 rtx dep_insn;
3846 rtx prev_dep_insn;
3847
 3848 /* When scheduling instructions, we make sure calls don't lose their
 3849 accompanying USE insns by making them depend on one another in order.
3850
3851 Also, we must do the same thing for returns created by the epilogue
3852 threading code. Note this code works only in this special case,
3853 because other passes make no guarantee that they will never emit
3854 an instruction between a USE and a RETURN. There is such a guarantee
3855 for USE instructions immediately before a call. */
3856
3857 prev_dep_insn = insn;
3858 dep_insn = PREV_INSN (insn);
3859 while (GET_CODE (dep_insn) == INSN
3860 && GET_CODE (PATTERN (dep_insn)) == USE
3861 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == REG)
3862 {
3863 SCHED_GROUP_P (prev_dep_insn) = 1;
3864
3865 /* Make a copy of all dependencies on dep_insn, and add to insn.
3866 This is so that all of the dependencies will apply to the
3867 group. */
3868
3869 for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
3870 add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
3871
3872 prev_dep_insn = dep_insn;
3873 dep_insn = PREV_INSN (dep_insn);
3874 }
3875 }
3876}
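
/* Worked example (annotation added in this edit): given the sequence

     insn 1:  (clobber (reg 65))
     insn 2:  (clobber (reg 65))
     insn 3:  (set (reg 65) ...)

   insns 1 and 2 get no dependence on each other; insn 3 becomes output
   dependent on both.  After insn 3 is processed, reg_last_clobbers[65]
   is emptied and reg_last_sets[65] holds insn 3 alone, so any later
   write to reg 65 need only depend on insn 3.  */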
3877
3878/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
3879 for every dependency. */
3880
3881static void
3882sched_analyze (head, tail)
3883 rtx head, tail;
3884{
3885 register rtx insn;
3886 register rtx u;
3887 rtx loop_notes = 0;
3888
3889 for (insn = head;; insn = NEXT_INSN (insn))
3890 {
3891 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
3892 {
3893 /* Make each JUMP_INSN a scheduling barrier for memory references. */
3894 if (GET_CODE (insn) == JUMP_INSN)
3895 last_pending_memory_flush
3896 = alloc_INSN_LIST (insn, last_pending_memory_flush);
3897 sched_analyze_insn (PATTERN (insn), insn, loop_notes);
3898 loop_notes = 0;
3899 }
3900 else if (GET_CODE (insn) == CALL_INSN)
3901 {
3902 rtx x;
3903 register int i;
3904
3905 CANT_MOVE (insn) = 1;
3906
3907 /* Any instruction using a hard register which may get clobbered
3908 by a call needs to be marked as dependent on this call.
3909 This prevents a use of a hard return reg from being moved
3910 past a void call (i.e. it does not explicitly set the hard
3911 return reg). */
3912
3913 /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
3914 all registers, not just hard registers, may be clobbered by this
3915 call. */
3916
3917 /* Insn, being a CALL_INSN, magically depends on
3918 `last_function_call' already. */
3919
3920 if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
3921 && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
3922 {
3923 int max_reg = max_reg_num ();
3924 for (i = 0; i < max_reg; i++)
3925 {
3926 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
3927 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
0adb548c 3928 free_list (&reg_last_uses[i], &unused_insn_list);
8c660648 3929
3930 for (u = reg_last_sets[i]; u; u = XEXP (u, 1))
3931 add_dependence (insn, XEXP (u, 0), 0);
3932
3933 for (u = reg_last_clobbers[i]; u; u = XEXP (u, 1))
3934 add_dependence (insn, XEXP (u, 0), 0);
3935 }
3936 reg_pending_sets_all = 1;
3937
 3938 /* Add a pair of fake REG_NOTEs which we will later
3939 convert back into a NOTE_INSN_SETJMP note. See
3940 reemit_notes for why we use a pair of NOTEs. */
3941 REG_NOTES (insn) = alloc_EXPR_LIST (REG_DEAD,
3942 GEN_INT (0),
3943 REG_NOTES (insn));
3944 REG_NOTES (insn) = alloc_EXPR_LIST (REG_DEAD,
3945 GEN_INT (NOTE_INSN_SETJMP),
3946 REG_NOTES (insn));
3947 }
3948 else
3949 {
3950 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3951 if (call_used_regs[i] || global_regs[i])
3952 {
3953 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
3954 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
8c660648 3955
3956 for (u = reg_last_sets[i]; u; u = XEXP (u, 1))
3957 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3958
c1cb76e9 3959 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3960 }
3961 }
3962
3963 /* For each insn which shouldn't cross a call, add a dependence
3964 between that insn and this call insn. */
3965 x = LOG_LINKS (sched_before_next_call);
3966 while (x)
3967 {
3968 add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
3969 x = XEXP (x, 1);
3970 }
3971 LOG_LINKS (sched_before_next_call) = 0;
3972
3973 sched_analyze_insn (PATTERN (insn), insn, loop_notes);
3974 loop_notes = 0;
3975
3976 /* In the absence of interprocedural alias analysis, we must flush
3977 all pending reads and writes, and start new dependencies starting
3978 from here. But only flush writes for constant calls (which may
3979 be passed a pointer to something we haven't written yet). */
3980 flush_pending_lists (insn, CONST_CALL_P (insn));
3981
3982 /* Depend this function call (actually, the user of this
3983 function call) on all hard register clobberage. */
3984
3985 /* last_function_call is now a list of insns */
 3986 free_list (&last_function_call, &unused_insn_list);
3987 last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3988 }
3989
3990 /* See comments on reemit_notes as to why we do this. */
3991 /* ??? Actually, the reemit_notes just say what is done, not why. */
3992
3993 else if (GET_CODE (insn) == NOTE
3994 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_START
3995 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END))
3996 {
3997 loop_notes = alloc_EXPR_LIST (REG_DEAD, NOTE_RANGE_INFO (insn),
3998 loop_notes);
3999 loop_notes = alloc_EXPR_LIST (REG_DEAD,
4000 GEN_INT (NOTE_LINE_NUMBER (insn)),
4001 loop_notes);
4002 }
4003 else if (GET_CODE (insn) == NOTE
4004 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
4005 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
4006 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
4007 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
4008 || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
4009 && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
4010 {
4011 loop_notes = alloc_EXPR_LIST (REG_DEAD,
4012 GEN_INT (NOTE_BLOCK_NUMBER (insn)),
4013 loop_notes);
4014 loop_notes = alloc_EXPR_LIST (REG_DEAD,
4015 GEN_INT (NOTE_LINE_NUMBER (insn)),
4016 loop_notes);
4017 CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
4018 }
4019
4020 if (insn == tail)
4021 return;
4022 }
4023 abort ();
4024}
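
/* Illustrative sketch (annotation added in this edit): how a caller
   would drive the dependence analysis over one block.
   get_block_head_tail is defined later in this file; the wrapper itself
   is hypothetical -- the real driver lives elsewhere in this file.  */
#if 0
static void
example_analyze_block (bb)
     int bb;
{
  rtx head, tail;

  get_block_head_tail (bb, &head, &tail);

  /* Skip a block containing no real insns, as rm_line_notes does.  */
  if (head == tail && GET_RTX_CLASS (GET_CODE (head)) != 'i')
    return;

  sched_analyze (head, tail);
}
#endif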
4025\f
4026/* Called when we see a set of a register. If death is true, then we are
4027 scanning backwards. Mark that register as unborn. If nobody says
4028 otherwise, that is how things will remain. If death is false, then we
4029 are scanning forwards. Mark that register as being born. */
4030
4031static void
5835e573 4032sched_note_set (x, death)
4033 rtx x;
4034 int death;
4035{
4036 register int regno;
4037 register rtx reg = SET_DEST (x);
4038 int subreg_p = 0;
4039
4040 if (reg == 0)
4041 return;
4042
4043 if (GET_CODE (reg) == PARALLEL
4044 && GET_MODE (reg) == BLKmode)
4045 {
4046 register int i;
4047 for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
4048 sched_note_set (XVECEXP (reg, 0, i), death);
4049 return;
4050 }
4051
4052 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
4053 || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
4054 {
4055 /* Must treat modification of just one hardware register of a multi-reg
4056 value or just a byte field of a register exactly the same way that
4057 mark_set_1 in flow.c does, i.e. anything except a paradoxical subreg
4058 does not kill the entire register. */
4059 if (GET_CODE (reg) != SUBREG
4060 || REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg))
4061 subreg_p = 1;
4062
4063 reg = SUBREG_REG (reg);
4064 }
4065
4066 if (GET_CODE (reg) != REG)
4067 return;
4068
4069 /* Global registers are always live, so the code below does not apply
4070 to them. */
4071
4072 regno = REGNO (reg);
4073 if (regno >= FIRST_PSEUDO_REGISTER || !global_regs[regno])
4074 {
4075 if (death)
4076 {
4077 /* If we only set part of the register, then this set does not
4078 kill it. */
4079 if (subreg_p)
4080 return;
4081
4082 /* Try killing this register. */
4083 if (regno < FIRST_PSEUDO_REGISTER)
4084 {
4085 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
4086 while (--j >= 0)
4087 {
4088 CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
4089 }
4090 }
4091 else
4092 {
4093 /* Recompute REG_BASIC_BLOCK as we update all the other
4094 dataflow information. */
4095 if (sched_reg_basic_block[regno] == REG_BLOCK_UNKNOWN)
4096 sched_reg_basic_block[regno] = current_block_num;
4097 else if (sched_reg_basic_block[regno] != current_block_num)
4098 sched_reg_basic_block[regno] = REG_BLOCK_GLOBAL;
4099
4100 CLEAR_REGNO_REG_SET (bb_live_regs, regno);
4101 }
4102 }
4103 else
4104 {
4105 /* Make the register live again. */
4106 if (regno < FIRST_PSEUDO_REGISTER)
4107 {
4108 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
4109 while (--j >= 0)
4110 {
4111 SET_REGNO_REG_SET (bb_live_regs, regno + j);
4112 }
4113 }
4114 else
4115 {
4116 SET_REGNO_REG_SET (bb_live_regs, regno);
4117 }
4118 }
4119 }
4120}
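
/* Worked example (annotation added in this edit): scanning backward
   (death != 0), a full SET of pseudo 80 clears it in bb_live_regs --
   above the set the register is not yet born.  Scanning forward
   (death == 0), the same SET marks it live again.  A STRICT_LOW_PART,
   ZERO_EXTRACT, or non-paradoxical SUBREG destination sets subreg_p, so
   on the backward scan a partial write kills nothing and the register
   stays live.  */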
4121\f
4122/* Macros and functions for keeping the priority queue sorted, and
4123 dealing with queueing and dequeueing of instructions. */
4124
4125#define SCHED_SORT(READY, N_READY) \
4126do { if ((N_READY) == 2) \
4127 swap_sort (READY, N_READY); \
4128 else if ((N_READY) > 2) \
4129 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
4130while (0)
4131
4132/* Returns a positive value if x is preferred; returns a negative value if
4133 y is preferred. Should never return 0, since that will make the sort
4134 unstable. */
4135
4136static int
4137rank_for_schedule (x, y)
4138 const GENERIC_PTR x;
4139 const GENERIC_PTR y;
8c660648 4140{
4141 rtx tmp = *(rtx *)y;
4142 rtx tmp2 = *(rtx *)x;
8c660648 4143 rtx link;
2db45993 4144 int tmp_class, tmp2_class, depend_count1, depend_count2;
4145 int val, priority_val, spec_val, prob_val, weight_val;
4146
4147
4148 /* prefer insn with higher priority */
4149 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
4150 if (priority_val)
4151 return priority_val;
4152
 4153 /* prefer an insn with a smaller contribution to register pressure */
4154 if (!reload_completed &&
4155 (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
4156 return (weight_val);
4157
 4158 /* some comparisons make sense in interblock scheduling only */
4159 if (INSN_BB (tmp) != INSN_BB (tmp2))
4160 {
 4161 /* prefer an inblock motion over an interblock motion */
4162 if ((INSN_BB (tmp2) == target_bb) && (INSN_BB (tmp) != target_bb))
4163 return 1;
4164 if ((INSN_BB (tmp) == target_bb) && (INSN_BB (tmp2) != target_bb))
4165 return -1;
4166
 4167 /* prefer a useful motion over a speculative one */
4168 if ((spec_val = IS_SPECULATIVE_INSN (tmp) - IS_SPECULATIVE_INSN (tmp2)))
4169 return (spec_val);
4170
4171 /* prefer a more probable (speculative) insn */
4172 prob_val = INSN_PROBABILITY (tmp2) - INSN_PROBABILITY (tmp);
4173 if (prob_val)
4174 return (prob_val);
4175 }
4176
4177 /* compare insns based on their relation to the last-scheduled-insn */
4178 if (last_scheduled_insn)
4179 {
4180 /* Classify the instructions into three classes:
 4181 1) Data dependent on last scheduled insn.
4182 2) Anti/Output dependent on last scheduled insn.
4183 3) Independent of last scheduled insn, or has latency of one.
4184 Choose the insn from the highest numbered class if different. */
4185 link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn));
4186 if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1)
4187 tmp_class = 3;
4188 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
4189 tmp_class = 1;
4190 else
4191 tmp_class = 2;
4192
4193 link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn));
4194 if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1)
4195 tmp2_class = 3;
4196 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
4197 tmp2_class = 1;
4198 else
4199 tmp2_class = 2;
4200
4201 if ((val = tmp2_class - tmp_class))
4202 return val;
4203 }
4204
4205 /* Prefer the insn which has more later insns that depend on it.
4206 This gives the scheduler more freedom when scheduling later
4207 instructions at the expense of added register pressure. */
4208 depend_count1 = 0;
4209 for (link = INSN_DEPEND (tmp); link; link = XEXP (link, 1))
4210 depend_count1++;
4211
4212 depend_count2 = 0;
4213 for (link = INSN_DEPEND (tmp2); link; link = XEXP (link, 1))
4214 depend_count2++;
4215
4216 val = depend_count2 - depend_count1;
4217 if (val)
4218 return val;
4219
4220 /* If insns are equally good, sort by INSN_LUID (original insn order),
4221 so that we make the sort stable. This minimizes instruction movement,
4222 thus minimizing sched's effect on debugging and cross-jumping. */
4223 return INSN_LUID (tmp) - INSN_LUID (tmp2);
4224}
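
/* Usage note (annotation added in this edit): rank_for_schedule is a
   qsort comparator over an array of rtx.  Since a positive result means
   "x is preferred" and qsort sorts ascending, the preferred insn ends
   up at the highest index, matching the scheduler's habit of taking
   insns from the end of the ready list.  A minimal, hypothetical use:  */
#if 0
static rtx
example_pick_best (ready, n_ready)
     rtx *ready;
     int n_ready;
{
  SCHED_SORT (ready, n_ready);
  return ready[n_ready - 1];	/* best candidate to schedule next */
}
#endif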
4225
4226/* Resort the array A in which only element at index N may be out of order. */
4227
cbb13457 4228HAIFA_INLINE static void
4229swap_sort (a, n)
4230 rtx *a;
4231 int n;
4232{
4233 rtx insn = a[n - 1];
4234 int i = n - 2;
4235
4236 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
4237 {
4238 a[i + 1] = a[i];
4239 i -= 1;
4240 }
4241 a[i + 1] = insn;
4242}
4243
4244static int max_priority;
4245
4246/* Add INSN to the insn queue so that it can be executed at least
4247 N_CYCLES after the currently executing insn. Preserve insns
4248 chain for debugging purposes. */
4249
cbb13457 4250HAIFA_INLINE static void
4251queue_insn (insn, n_cycles)
4252 rtx insn;
4253 int n_cycles;
4254{
4255 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
ebb7b10b 4256 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
4257 insn_queue[next_q] = link;
4258 q_size += 1;
4259
4260 if (sched_verbose >= 2)
4261 {
4262 fprintf (dump, ";;\t\tReady-->Q: insn %d: ", INSN_UID (insn));
4263
4264 if (INSN_BB (insn) != target_bb)
4265 fprintf (dump, "(b%d) ", INSN_BLOCK (insn));
4266
4267 fprintf (dump, "queued for %d cycles.\n", n_cycles);
4268 }
4269
4270}
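
/* Illustrative sketch (annotation added in this edit): insn_queue is a
   circular buffer indexed by cycle, assuming the NEXT_Q/NEXT_Q_AFTER
   macros defined earlier in this file.  A hypothetical drain of the
   slot for the current cycle might look like this; the real version
   lives in the queue-to-ready logic of schedule_block.  */
#if 0
static int
example_queue_to_ready (ready, n_ready)
     rtx *ready;
     int n_ready;
{
  rtx link;

  q_ptr = NEXT_Q (q_ptr);	/* advance the queue one cycle */
  for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
    {
      ready[n_ready++] = XEXP (link, 0);
      q_size -= 1;
    }
  insn_queue[q_ptr] = 0;
  return n_ready;
}
#endif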
4271
4272/* Return nonzero if PAT is the pattern of an insn which makes a
4273 register live. */
4274
cbb13457 4275HAIFA_INLINE static int
4276birthing_insn_p (pat)
4277 rtx pat;
4278{
4279 int j;
4280
4281 if (reload_completed == 1)
4282 return 0;
4283
4284 if (GET_CODE (pat) == SET
4285 && (GET_CODE (SET_DEST (pat)) == REG
4286 || (GET_CODE (SET_DEST (pat)) == PARALLEL
4287 && GET_MODE (SET_DEST (pat)) == BLKmode)))
4288 {
4289 rtx dest = SET_DEST (pat);
c0222c21 4290 int i;
4291
4292 /* It would be more accurate to use refers_to_regno_p or
4293 reg_mentioned_p to determine when the dest is not live before this
4294 insn. */
4295 if (GET_CODE (dest) == REG)
4296 {
4297 i = REGNO (dest);
4298 if (REGNO_REG_SET_P (bb_live_regs, i))
4299 return (REG_N_SETS (i) == 1);
4300 }
4301 else
4302 {
4303 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
4304 {
4305 int regno = REGNO (SET_DEST (XVECEXP (dest, 0, i)));
4306 if (REGNO_REG_SET_P (bb_live_regs, regno))
4307 return (REG_N_SETS (regno) == 1);
4308 }
4309 }
4310 return 0;
4311 }
4312 if (GET_CODE (pat) == PARALLEL)
4313 {
4314 for (j = 0; j < XVECLEN (pat, 0); j++)
4315 if (birthing_insn_p (XVECEXP (pat, 0, j)))
4316 return 1;
4317 }
4318 return 0;
4319}
4320
4321/* PREV is an insn that is ready to execute. Adjust its priority if that
4322 will help shorten register lifetimes. */
4323
cbb13457 4324HAIFA_INLINE static void
4325adjust_priority (prev)
4326 rtx prev;
4327{
4328 /* Trying to shorten register lives after reload has completed
4329 is useless and wrong. It gives inaccurate schedules. */
4330 if (reload_completed == 0)
4331 {
4332 rtx note;
4333 int n_deaths = 0;
4334
4335 /* ??? This code has no effect, because REG_DEAD notes are removed
4336 before we ever get here. */
4337 for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
4338 if (REG_NOTE_KIND (note) == REG_DEAD)
4339 n_deaths += 1;
4340
4341 /* Defer scheduling insns which kill registers, since that
4342 shortens register lives. Prefer scheduling insns which
4343 make registers live for the same reason. */
4344 switch (n_deaths)
4345 {
4346 default:
4347 INSN_PRIORITY (prev) >>= 3;
4348 break;
4349 case 3:
4350 INSN_PRIORITY (prev) >>= 2;
4351 break;
4352 case 2:
4353 case 1:
4354 INSN_PRIORITY (prev) >>= 1;
4355 break;
4356 case 0:
4357 if (birthing_insn_p (PATTERN (prev)))
4358 {
4359 int max = max_priority;
4360
4361 if (max > INSN_PRIORITY (prev))
4362 INSN_PRIORITY (prev) = max;
4363 }
4364 break;
4365 }
4366 }
4367
 4368 /* That said, a target might have its own reasons for adjusting
4369 priority after reload. */
8c660648 4370#ifdef ADJUST_PRIORITY
197043f5 4371 ADJUST_PRIORITY (prev);
8c660648 4372#endif
4373}
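
/* Worked example (annotation added in this edit): before reload, an
   insn carrying four or more REG_DEAD notes has its priority divided by
   8 (>>= 3), exactly three notes divide it by 4, one or two notes halve
   it, and an insn with no deaths that births a register is raised to
   max_priority.  With INSN_PRIORITY == 24: four deaths -> 3, three
   deaths -> 6, one death -> 12.  */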
4374
4375/* Clock at which the previous instruction was issued. */
4376static int last_clock_var;
4377
4378/* INSN is the "currently executing insn". Launch each insn which was
4379 waiting on INSN. READY is a vector of insns which are ready to fire.
4380 N_READY is the number of elements in READY. CLOCK is the current
4381 cycle. */
4382
4383static int
4384schedule_insn (insn, ready, n_ready, clock)
4385 rtx insn;
4386 rtx *ready;
4387 int n_ready;
4388 int clock;
4389{
4390 rtx link;
4391 int unit;
4392
4393 unit = insn_unit (insn);
4394
4395 if (sched_verbose >= 2)
4396 {
4397 fprintf (dump, ";;\t\t--> scheduling insn <<<%d>>> on unit ", INSN_UID (insn));
4398 insn_print_units (insn);
4399 fprintf (dump, "\n");
4400 }
4401
4402 if (sched_verbose && unit == -1)
4403 visualize_no_unit (insn);
4404
4405 if (MAX_BLOCKAGE > 1 || issue_rate > 1 || sched_verbose)
4406 schedule_unit (unit, insn, clock);
4407
4408 if (INSN_DEPEND (insn) == 0)
4409 return n_ready;
4410
4411 /* This is used by the function adjust_priority above. */
4412 if (n_ready > 0)
4413 max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
4414 else
4415 max_priority = INSN_PRIORITY (insn);
4416
4417 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
4418 {
4419 rtx next = XEXP (link, 0);
4420 int cost = insn_cost (insn, link, next);
4421
4422 INSN_TICK (next) = MAX (INSN_TICK (next), clock + cost);
4423
4424 if ((INSN_DEP_COUNT (next) -= 1) == 0)
4425 {
4426 int effective_cost = INSN_TICK (next) - clock;
4427
4428 /* For speculative insns, before inserting to ready/queue,
4429 check live, exception-free, and issue-delay */
4430 if (INSN_BB (next) != target_bb
4431 && (!IS_VALID (INSN_BB (next))
4432 || CANT_MOVE (next)
4433 || (IS_SPECULATIVE_INSN (next)
4434 && (insn_issue_delay (next) > 3
5835e573 4435 || !check_live (next, INSN_BB (next))
4436 || !is_exception_free (next, INSN_BB (next), target_bb)))))
4437 continue;
4438
4439 if (sched_verbose >= 2)
4440 {
4441 fprintf (dump, ";;\t\tdependences resolved: insn %d ", INSN_UID (next));
4442
4443 if (current_nr_blocks > 1 && INSN_BB (next) != target_bb)
4444 fprintf (dump, "/b%d ", INSN_BLOCK (next));
4445
197043f5 4446 if (effective_cost < 1)
4447 fprintf (dump, "into ready\n");
4448 else
4449 fprintf (dump, "into queue with cost=%d\n", effective_cost);
4450 }
4451
4452 /* Adjust the priority of NEXT and either put it on the ready
4453 list or queue it. */
4454 adjust_priority (next);
197043f5 4455 if (effective_cost < 1)
4456 ready[n_ready++] = next;
4457 else
4458 queue_insn (next, effective_cost);
4459 }
4460 }
4461
4462 /* Annotate the instruction with issue information -- TImode
4463 indicates that the instruction is expected not to be able
4464 to issue on the same cycle as the previous insn. A machine
4465 may use this information to decide how the instruction should
4466 be aligned. */
4467 if (reload_completed && issue_rate > 1)
4468 {
4469 PUT_MODE (insn, clock > last_clock_var ? TImode : VOIDmode);
4470 last_clock_var = clock;
4471 }
4472
4473 return n_ready;
4474}
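
/* Worked example (annotation added in this edit): suppose insn I issues
   at clock 5 and insn_cost (I, link, N) is 2 for a dependent insn N.
   Then INSN_TICK (N) becomes at least 7.  If I resolved N's last
   dependence, effective_cost = 7 - 5 = 2, so N is queued for two cycles
   instead of entering the ready list immediately.  */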
4475
4476
4477/* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
4478 dead_notes list. */
4479
4480static void
4481create_reg_dead_note (reg, insn)
4482 rtx reg, insn;
4483{
4484 rtx link;
4485
4486 /* The number of registers killed after scheduling must be the same as the
4487 number of registers killed before scheduling. The number of REG_DEAD
4488 notes may not be conserved, i.e. two SImode hard register REG_DEAD notes
4489 might become one DImode hard register REG_DEAD note, but the number of
4490 registers killed will be conserved.
4491
4492 We carefully remove REG_DEAD notes from the dead_notes list, so that
4493 there will be none left at the end. If we run out early, then there
4494 is a bug somewhere in flow, combine and/or sched. */
4495
4496 if (dead_notes == 0)
4497 {
4498 if (current_nr_blocks <= 1)
4499 abort ();
4500 else
ebb7b10b 4501 link = alloc_EXPR_LIST (REG_DEAD, NULL_RTX, NULL_RTX);
4502 }
4503 else
4504 {
4505 /* Number of regs killed by REG. */
4506 int regs_killed = (REGNO (reg) >= FIRST_PSEUDO_REGISTER ? 1
4507 : HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)));
4508 /* Number of regs killed by REG_DEAD notes taken off the list. */
4509 int reg_note_regs;
4510
4511 link = dead_notes;
4512 reg_note_regs = (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
4513 : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
4514 GET_MODE (XEXP (link, 0))));
4515 while (reg_note_regs < regs_killed)
4516 {
4517 link = XEXP (link, 1);
4518
4519 /* LINK might be zero if we killed more registers after scheduling
4520 than before, and the last hard register we kill is actually
4521 multiple hard regs.
4522
4523 This is normal for interblock scheduling, so deal with it in
4524 that case, else abort. */
4525 if (link == NULL_RTX && current_nr_blocks <= 1)
4526 abort ();
4527 else if (link == NULL_RTX)
4528 link = alloc_EXPR_LIST (REG_DEAD, gen_rtx_REG (word_mode, 0),
4529 NULL_RTX);
04029ca2 4530
4531 reg_note_regs += (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
4532 : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
4533 GET_MODE (XEXP (link, 0))));
4534 }
4535 dead_notes = XEXP (link, 1);
4536
4537 /* If we took too many regs kills off, put the extra ones back. */
4538 while (reg_note_regs > regs_killed)
4539 {
4540 rtx temp_reg, temp_link;
4541
38a448ca 4542 temp_reg = gen_rtx_REG (word_mode, 0);
ebb7b10b 4543 temp_link = alloc_EXPR_LIST (REG_DEAD, temp_reg, dead_notes);
4544 dead_notes = temp_link;
4545 reg_note_regs--;
4546 }
4547 }
4548
4549 XEXP (link, 0) = reg;
4550 XEXP (link, 1) = REG_NOTES (insn);
4551 REG_NOTES (insn) = link;
4552}
4553
4554/* Subroutine on attach_deaths_insn--handles the recursive search
4555 through INSN. If SET_P is true, then x is being modified by the insn. */
4556
4557static void
4558attach_deaths (x, insn, set_p)
4559 rtx x;
4560 rtx insn;
4561 int set_p;
4562{
4563 register int i;
4564 register int j;
4565 register enum rtx_code code;
4566 register char *fmt;
4567
4568 if (x == 0)
4569 return;
4570
4571 code = GET_CODE (x);
4572
4573 switch (code)
4574 {
4575 case CONST_INT:
4576 case CONST_DOUBLE:
4577 case LABEL_REF:
4578 case SYMBOL_REF:
4579 case CONST:
4580 case CODE_LABEL:
4581 case PC:
4582 case CC0:
4583 /* Get rid of the easy cases first. */
4584 return;
4585
4586 case REG:
4587 {
4588 /* If the register dies in this insn, queue that note, and mark
4589 this register as needing to die. */
4590 /* This code is very similar to mark_used_1 (if set_p is false)
4591 and mark_set_1 (if set_p is true) in flow.c. */
4592
4593 register int regno;
4594 int some_needed;
4595 int all_needed;
4596
4597 if (set_p)
4598 return;
4599
4600 regno = REGNO (x);
4601 all_needed = some_needed = REGNO_REG_SET_P (old_live_regs, regno);
4602 if (regno < FIRST_PSEUDO_REGISTER)
4603 {
4604 int n;
4605
4606 n = HARD_REGNO_NREGS (regno, GET_MODE (x));
4607 while (--n > 0)
4608 {
4609 int needed = (REGNO_REG_SET_P (old_live_regs, regno + n));
4610 some_needed |= needed;
4611 all_needed &= needed;
4612 }
4613 }
4614
4615 /* If it wasn't live before we started, then add a REG_DEAD note.
4616 We must check the previous lifetime info not the current info,
4617 because we may have to execute this code several times, e.g.
4618 once for a clobber (which doesn't add a note) and later
4619 for a use (which does add a note).
4620
4621 Always make the register live. We must do this even if it was
4622 live before, because this may be an insn which sets and uses
4623 the same register, in which case the register has already been
4624 killed, so we must make it live again.
4625
4626 Global registers are always live, and should never have a REG_DEAD
4627 note added for them, so none of the code below applies to them. */
4628
4629 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
4630 {
4631 /* Never add REG_DEAD notes for STACK_POINTER_REGNUM
4632 since it's always considered to be live. Similarly
4633 for FRAME_POINTER_REGNUM if a frame pointer is needed
4634 and for ARG_POINTER_REGNUM if it is fixed. */
4635 if (! (regno == FRAME_POINTER_REGNUM
4636 && (! reload_completed || frame_pointer_needed))
8c660648 4637#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4638 && ! (regno == HARD_FRAME_POINTER_REGNUM
4639 && (! reload_completed || frame_pointer_needed))
4640#endif
4641#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
4642 && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
4643#endif
4644 && regno != STACK_POINTER_REGNUM)
4645 {
d6df9efb 4646 if (! all_needed && ! dead_or_set_p (insn, x))
4647 {
4648 /* Check for the case where the register dying partially
4649 overlaps the register set by this insn. */
4650 if (regno < FIRST_PSEUDO_REGISTER
4651 && HARD_REGNO_NREGS (regno, GET_MODE (x)) > 1)
4652 {
4653 int n = HARD_REGNO_NREGS (regno, GET_MODE (x));
4654 while (--n >= 0)
4655 some_needed |= dead_or_set_regno_p (insn, regno + n);
4656 }
4657
4658 /* If none of the words in X is needed, make a REG_DEAD
4659 note. Otherwise, we must make partial REG_DEAD
4660 notes. */
4661 if (! some_needed)
4662 create_reg_dead_note (x, insn);
4663 else
4664 {
4665 int i;
4666
4667 /* Don't make a REG_DEAD note for a part of a
4668 register that is set in the insn. */
4669 for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
4670 i >= 0; i--)
4671 if (! REGNO_REG_SET_P (old_live_regs, regno+i)
4672 && ! dead_or_set_regno_p (insn, regno + i))
4673 create_reg_dead_note (gen_rtx_REG (reg_raw_mode[regno + i],
4674 regno + i),
4675 insn);
4676 }
4677 }
4678 }
4679
4680 if (regno < FIRST_PSEUDO_REGISTER)
4681 {
4682 int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
4683 while (--j >= 0)
4684 {
4685 SET_REGNO_REG_SET (bb_live_regs, regno + j);
4686 }
4687 }
4688 else
4689 {
4690 /* Recompute REG_BASIC_BLOCK as we update all the other
4691 dataflow information. */
4692 if (sched_reg_basic_block[regno] == REG_BLOCK_UNKNOWN)
4693 sched_reg_basic_block[regno] = current_block_num;
4694 else if (sched_reg_basic_block[regno] != current_block_num)
4695 sched_reg_basic_block[regno] = REG_BLOCK_GLOBAL;
4696
4697 SET_REGNO_REG_SET (bb_live_regs, regno);
4698 }
4699 }
4700 return;
4701 }
4702
4703 case MEM:
4704 /* Handle tail-recursive case. */
4705 attach_deaths (XEXP (x, 0), insn, 0);
4706 return;
4707
4708 case SUBREG:
4709 attach_deaths (SUBREG_REG (x), insn,
4710 set_p && ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
4711 <= UNITS_PER_WORD)
4712 || (GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
4713 == GET_MODE_SIZE (GET_MODE ((x))))));
4714 return;
4715
8c660648 4716 case STRICT_LOW_PART:
d6df9efb 4717 attach_deaths (XEXP (x, 0), insn, 0);
4718 return;
4719
4720 case ZERO_EXTRACT:
4721 case SIGN_EXTRACT:
d6df9efb 4722 attach_deaths (XEXP (x, 0), insn, 0);
4723 attach_deaths (XEXP (x, 1), insn, 0);
4724 attach_deaths (XEXP (x, 2), insn, 0);
4725 return;
4726
4727 case PARALLEL:
4728 if (set_p
4729 && GET_MODE (x) == BLKmode)
4730 {
4731 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4732 attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
4733 return;
4734 }
4735
4736 /* fallthrough */
4737 default:
4738 /* Other cases: walk the insn. */
4739 fmt = GET_RTX_FORMAT (code);
4740 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4741 {
4742 if (fmt[i] == 'e')
4743 attach_deaths (XEXP (x, i), insn, 0);
4744 else if (fmt[i] == 'E')
4745 for (j = 0; j < XVECLEN (x, i); j++)
4746 attach_deaths (XVECEXP (x, i, j), insn, 0);
4747 }
4748 }
4749}
4750
4751/* After INSN has executed, add register death notes for each register
4752 that is dead after INSN. */
4753
4754static void
4755attach_deaths_insn (insn)
4756 rtx insn;
4757{
4758 rtx x = PATTERN (insn);
4759 register RTX_CODE code = GET_CODE (x);
4760 rtx link;
4761
4762 if (code == SET)
4763 {
4764 attach_deaths (SET_SRC (x), insn, 0);
4765
4766 /* A register might die here even if it is the destination, e.g.
4767 it is the target of a volatile read and is otherwise unused.
4768 Hence we must always call attach_deaths for the SET_DEST. */
4769 attach_deaths (SET_DEST (x), insn, 1);
4770 }
4771 else if (code == PARALLEL)
4772 {
4773 register int i;
4774 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4775 {
4776 code = GET_CODE (XVECEXP (x, 0, i));
4777 if (code == SET)
4778 {
4779 attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
4780
4781 attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
4782 }
4783 /* Flow does not add REG_DEAD notes to registers that die in
4784 clobbers, so we can't either. */
4785 else if (code != CLOBBER)
4786 attach_deaths (XVECEXP (x, 0, i), insn, 0);
4787 }
4788 }
4789 /* If this is a CLOBBER, only add REG_DEAD notes to registers inside a
4790 MEM being clobbered, just like flow. */
4791 else if (code == CLOBBER && GET_CODE (XEXP (x, 0)) == MEM)
4792 attach_deaths (XEXP (XEXP (x, 0), 0), insn, 0);
4793 /* Otherwise don't add a death note to things being clobbered. */
4794 else if (code != CLOBBER)
4795 attach_deaths (x, insn, 0);
4796
4797 /* Make death notes for things used in the called function. */
4798 if (GET_CODE (insn) == CALL_INSN)
4799 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
4800 attach_deaths (XEXP (XEXP (link, 0), 0), insn,
4801 GET_CODE (XEXP (link, 0)) == CLOBBER);
4802}
4803
 4804/* functions for handling of notes */
4805
4806/* Delete notes beginning with INSN and put them in the chain
4807 of notes ended by NOTE_LIST.
4808 Returns the insn following the notes. */
4809
4810static rtx
4811unlink_other_notes (insn, tail)
4812 rtx insn, tail;
4813{
4814 rtx prev = PREV_INSN (insn);
4815
4816 while (insn != tail && GET_CODE (insn) == NOTE)
4817 {
4818 rtx next = NEXT_INSN (insn);
4819 /* Delete the note from its current position. */
4820 if (prev)
4821 NEXT_INSN (prev) = next;
4822 if (next)
4823 PREV_INSN (next) = prev;
4824
4825 /* Don't save away NOTE_INSN_SETJMPs, because they must remain
4826 immediately after the call they follow. We use a fake
4827 (REG_DEAD (const_int -1)) note to remember them.
4828 Likewise with NOTE_INSN_{LOOP,EHREGION}_{BEG, END}. */
4829 if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_SETJMP
4830 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG
4831 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_END
4832 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_RANGE_START
4833 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_RANGE_END
4834 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
4835 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
4836 {
4837 /* Insert the note at the end of the notes list. */
4838 PREV_INSN (insn) = note_list;
4839 if (note_list)
4840 NEXT_INSN (note_list) = insn;
4841 note_list = insn;
4842 }
4843
4844 insn = next;
4845 }
4846 return insn;
4847}
4848
4849/* Delete line notes beginning with INSN. Record line-number notes so
4850 they can be reused. Returns the insn following the notes. */
4851
4852static rtx
4853unlink_line_notes (insn, tail)
4854 rtx insn, tail;
4855{
4856 rtx prev = PREV_INSN (insn);
4857
4858 while (insn != tail && GET_CODE (insn) == NOTE)
4859 {
4860 rtx next = NEXT_INSN (insn);
4861
4862 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
4863 {
4864 /* Delete the note from its current position. */
4865 if (prev)
4866 NEXT_INSN (prev) = next;
4867 if (next)
4868 PREV_INSN (next) = prev;
4869
4870 /* Record line-number notes so they can be reused. */
4871 LINE_NOTE (insn) = insn;
4872 }
4873 else
4874 prev = insn;
4875
4876 insn = next;
4877 }
4878 return insn;
4879}
4880
4881/* Return the head and tail pointers of BB. */
4882
cbb13457 4883HAIFA_INLINE static void
4884get_block_head_tail (bb, headp, tailp)
4885 int bb;
4886 rtx *headp;
4887 rtx *tailp;
4888{
4889
4890 rtx head;
4891 rtx tail;
4892 int b;
4893
4894 b = BB_TO_BLOCK (bb);
4895
4896 /* HEAD and TAIL delimit the basic block being scheduled. */
4897 head = BLOCK_HEAD (b);
4898 tail = BLOCK_END (b);
4899
4900 /* Don't include any notes or labels at the beginning of the
4901 basic block, or notes at the ends of basic blocks. */
4902 while (head != tail)
4903 {
4904 if (GET_CODE (head) == NOTE)
4905 head = NEXT_INSN (head);
4906 else if (GET_CODE (tail) == NOTE)
4907 tail = PREV_INSN (tail);
4908 else if (GET_CODE (head) == CODE_LABEL)
4909 head = NEXT_INSN (head);
4910 else
4911 break;
4912 }
4913
4914 *headp = head;
4915 *tailp = tail;
4916}
4917
4918/* Delete line notes from bb. Save them so they can be later restored
4919 (in restore_line_notes ()). */
4920
4921static void
4922rm_line_notes (bb)
4923 int bb;
4924{
4925 rtx next_tail;
4926 rtx tail;
4927 rtx head;
4928 rtx insn;
4929
4930 get_block_head_tail (bb, &head, &tail);
4931
4932 if (head == tail
4933 && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
4934 return;
4935
4936 next_tail = NEXT_INSN (tail);
4937 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4938 {
4939 rtx prev;
4940
4941 /* Farm out notes, and maybe save them in NOTE_LIST.
4942 This is needed to keep the debugger from
4943 getting completely deranged. */
4944 if (GET_CODE (insn) == NOTE)
4945 {
4946 prev = insn;
4947 insn = unlink_line_notes (insn, next_tail);
4948
4949 if (prev == tail)
4950 abort ();
4951 if (prev == head)
4952 abort ();
4953 if (insn == next_tail)
4954 abort ();
4955 }
4956 }
4957}
4958
4959/* Save line number notes for each insn in bb. */
4960
4961static void
4962save_line_notes (bb)
4963 int bb;
4964{
4965 rtx head, tail;
4966 rtx next_tail;
4967
4968 /* We must use the true line number for the first insn in the block
4969 that was computed and saved at the start of this pass. We can't
4970 use the current line number, because scheduling of the previous
4971 block may have changed the current line number. */
4972
4973 rtx line = line_note_head[BB_TO_BLOCK (bb)];
4974 rtx insn;
4975
4976 get_block_head_tail (bb, &head, &tail);
4977 next_tail = NEXT_INSN (tail);
4978
3b413743 4979 for (insn = BLOCK_HEAD (BB_TO_BLOCK (bb));
4980 insn != next_tail;
4981 insn = NEXT_INSN (insn))
4982 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
4983 line = insn;
4984 else
4985 LINE_NOTE (insn) = line;
4986}
4987
4988
4989/* After bb was scheduled, insert line notes into the insns list. */
4990
4991static void
4992restore_line_notes (bb)
4993 int bb;
4994{
4995 rtx line, note, prev, new;
4996 int added_notes = 0;
4997 int b;
4998 rtx head, next_tail, insn;
4999
5000 b = BB_TO_BLOCK (bb);
5001
5002 head = BLOCK_HEAD (b);
5003 next_tail = NEXT_INSN (BLOCK_END (b));
5004
5005 /* Determine the current line-number. We want to know the current
5006 line number of the first insn of the block here, in case it is
5007 different from the true line number that was saved earlier. If
5008 different, then we need a line number note before the first insn
5009 of this block. If it happens to be the same, then we don't want to
5010 emit another line number note here. */
5011 for (line = head; line; line = PREV_INSN (line))
5012 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
5013 break;
5014
5015 /* Walk the insns keeping track of the current line-number and inserting
5016 the line-number notes as needed. */
5017 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5018 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
5019 line = insn;
5020 /* This used to emit line number notes before every non-deleted note.
5021 However, this confuses a debugger, because line notes not separated
5022 by real instructions all end up at the same address. I can find no
5023 use for line number notes before other notes, so none are emitted. */
5024 else if (GET_CODE (insn) != NOTE
5025 && (note = LINE_NOTE (insn)) != 0
5026 && note != line
5027 && (line == 0
5028 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
5029 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
5030 {
5031 line = note;
5032 prev = PREV_INSN (insn);
5033 if (LINE_NOTE (note))
5034 {
5035 /* Re-use the original line-number note. */
5036 LINE_NOTE (note) = 0;
5037 PREV_INSN (note) = prev;
5038 NEXT_INSN (prev) = note;
5039 PREV_INSN (insn) = note;
5040 NEXT_INSN (note) = insn;
5041 }
5042 else
5043 {
5044 added_notes++;
5045 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
5046 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
5047 RTX_INTEGRATED_P (new) = RTX_INTEGRATED_P (note);
5048 }
5049 }
5050 if (sched_verbose && added_notes)
5051 fprintf (dump, ";; added %d line-number notes\n", added_notes);
5052}
5053
5054/* After scheduling the function, delete redundant line notes from the
5055 insns list. */
5056
5057static void
5058rm_redundant_line_notes ()
5059{
5060 rtx line = 0;
5061 rtx insn = get_insns ();
5062 int active_insn = 0;
5063 int notes = 0;
5064
5065 /* Walk the insns deleting redundant line-number notes. Many of these
5066 are already present. The remainder tend to occur at basic
5067 block boundaries. */
5068 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
5069 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
5070 {
5071 /* If there are no active insns following, INSN is redundant. */
5072 if (active_insn == 0)
5073 {
5074 notes++;
5075 NOTE_SOURCE_FILE (insn) = 0;
5076 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
5077 }
5078 /* If the line number is unchanged, LINE is redundant. */
5079 else if (line
5080 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
5081 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
5082 {
5083 notes++;
5084 NOTE_SOURCE_FILE (line) = 0;
5085 NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
5086 line = insn;
5087 }
5088 else
5089 line = insn;
5090 active_insn = 0;
5091 }
5092 else if (!((GET_CODE (insn) == NOTE
5093 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
5094 || (GET_CODE (insn) == INSN
5095 && (GET_CODE (PATTERN (insn)) == USE
5096 || GET_CODE (PATTERN (insn)) == CLOBBER))))
5097 active_insn++;
5098
5099 if (sched_verbose && notes)
5100 fprintf (dump, ";; deleted %d line-number notes\n", notes);
5101}
5102
5103/* Delete notes between head and tail and put them in the chain
5104 of notes ended by NOTE_LIST. */
5105
5106static void
5107rm_other_notes (head, tail)
5108 rtx head;
5109 rtx tail;
5110{
5111 rtx next_tail;
5112 rtx insn;
5113
5114 if (head == tail
5115 && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
5116 return;
5117
5118 next_tail = NEXT_INSN (tail);
5119 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5120 {
5121 rtx prev;
5122
5123 /* Farm out notes, and maybe save them in NOTE_LIST.
5124 This is needed to keep the debugger from
5125 getting completely deranged. */
5126 if (GET_CODE (insn) == NOTE)
5127 {
5128 prev = insn;
5129
5130 insn = unlink_other_notes (insn, next_tail);
5131
5132 if (prev == tail)
5133 abort ();
5134 if (prev == head)
5135 abort ();
5136 if (insn == next_tail)
5137 abort ();
5138 }
5139 }
5140}
5141
5142/* Constructor for `sometimes' data structure. */
5143
5144static int
5145new_sometimes_live (regs_sometimes_live, regno, sometimes_max)
5146 struct sometimes *regs_sometimes_live;
5147 int regno;
5148 int sometimes_max;
5149{
5150 register struct sometimes *p;
5151
5152 /* There should never be a register greater than max_regno here. If there
5153 is, it means that a define_split has created a new pseudo reg. This
5154 is not allowed, since there will not be flow info available for any
5155 new register, so catch the error here. */
5156 if (regno >= max_regno)
5157 abort ();
5158
5159 p = &regs_sometimes_live[sometimes_max];
5160 p->regno = regno;
5161 p->live_length = 0;
5162 p->calls_crossed = 0;
5163 sometimes_max++;
5164 return sometimes_max;
5165}
5166
5167/* Count lengths of all regs we are currently tracking,
5168 and find new registers no longer live. */
5169
5170static void
5171finish_sometimes_live (regs_sometimes_live, sometimes_max)
5172 struct sometimes *regs_sometimes_live;
5173 int sometimes_max;
5174{
5175 int i;
5176
5177 for (i = 0; i < sometimes_max; i++)
5178 {
5179 register struct sometimes *p = &regs_sometimes_live[i];
5180 int regno = p->regno;
5181
5182 sched_reg_live_length[regno] += p->live_length;
5183 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
5184 }
5185}
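
/* Illustrative sketch (annotation added in this edit, hypothetical
   values): tracking one pseudo with the `sometimes' machinery and then
   folding the result back into the sched_reg_* arrays.  */
#if 0
static void
example_track_one_reg (regno)
     int regno;
{
  struct sometimes regs_sometimes_live[1];
  int sometimes_max = 0;

  sometimes_max = new_sometimes_live (regs_sometimes_live, regno,
				      sometimes_max);

  /* The backward scan would bump these once per insn the register
     stays live across, and once per call crossed.  */
  regs_sometimes_live[0].live_length += 1;

  finish_sometimes_live (regs_sometimes_live, sometimes_max);
}
#endif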
5186
5187/* functions for computation of registers live/usage info */
5188
e881bb1b 5189/* It is assumed that prior to scheduling BASIC_BLOCK (b)->global_live_at_start
5190 contains the registers that are alive at the entry to b.
5191
5192 Two passes follow: The first pass is performed before the scheduling
5193 of a region. It scans each block of the region forward, computing
5194 the set of registers alive at the end of the basic block and
5195 discard REG_DEAD notes (done by find_pre_sched_live ()).
5196
5197 The second path is invoked after scheduling all region blocks.
5198 It scans each block of the region backward, a block being traversed
5199 only after its succesors in the region. When the set of registers
5200 live at the end of a basic block may be changed by the scheduling
5201 (this may happen for multiple blocks region), it is computed as
5202 the union of the registers live at the start of its succesors.
5203 The last-use information is updated by inserting REG_DEAD notes.
5204 (done by find_post_sched_live ()) */
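/* For example, in a region where block b1 has successors b2 and b3, the
   second pass effectively computes

	live-at-end (b1) = live-at-start (b2) U live-at-start (b3)

   and only then walks b1's insns backward to restore REG_DEAD notes.
   (Illustrative; the actual union is the OUT_EDGES loop in
   find_post_sched_live () below.)  */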
5205
5206/* Scan all the insns to be scheduled, removing register death notes.
5207 Register death notes end up in DEAD_NOTES.
5208 Recreate the register life information for the end of this basic
5209 block. */
5210
5211static void
5212find_pre_sched_live (bb)
5213 int bb;
5214{
5215 rtx insn, next_tail, head, tail;
5216 int b = BB_TO_BLOCK (bb);
5217
5218 get_block_head_tail (bb, &head, &tail);
5219  COPY_REG_SET (bb_live_regs, BASIC_BLOCK (b)->global_live_at_start);
5220 next_tail = NEXT_INSN (tail);
5221
5222 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5223 {
5224 rtx prev, next, link;
5225 int reg_weight = 0;
5226
5227 /* Handle register life information. */
5228 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
5229 {
5230 /* See if the register gets born here. */
5231 /* We must check for registers being born before we check for
5232 registers dying. It is possible for a register to be born and
5233 die in the same insn, e.g. reading from a volatile memory
5234 location into an otherwise unused register. Such a register
5235 must be marked as dead after this insn. */
5236 if (GET_CODE (PATTERN (insn)) == SET
5237 || GET_CODE (PATTERN (insn)) == CLOBBER)
5238 {
5239	    sched_note_set (PATTERN (insn), 0);
5240 reg_weight++;
5241 }
5242
5243 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
5244 {
5245 int j;
5246 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
5247 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
5248 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
5249 {
5250		  sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
5251 reg_weight++;
5252 }
5253
5254 /* ??? This code is obsolete and should be deleted. It
5255 is harmless though, so we will leave it in for now. */
5256 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
5257 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
5258	      sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
5259 }
5260
5261	  /* Each call clobbers (makes live) all call-clobbered regs
5262	     that are not global or fixed.  Note that the function-value
5263	     reg is a call-clobbered reg.  */
5264 if (GET_CODE (insn) == CALL_INSN)
5265 {
5266 int j;
5267 for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
5268 if (call_used_regs[j] && !global_regs[j]
5269 && ! fixed_regs[j])
5270 {
5271 SET_REGNO_REG_SET (bb_live_regs, j);
5272 }
5273 }
5274
5275 /* Need to know what registers this insn kills. */
5276 for (prev = 0, link = REG_NOTES (insn); link; link = next)
5277 {
5278 next = XEXP (link, 1);
5279 if ((REG_NOTE_KIND (link) == REG_DEAD
5280 || REG_NOTE_KIND (link) == REG_UNUSED)
5281 /* Verify that the REG_NOTE has a valid value. */
5282 && GET_CODE (XEXP (link, 0)) == REG)
5283 {
5284 register int regno = REGNO (XEXP (link, 0));
5285
5286 reg_weight--;
5287
5288 /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
5289 alone. */
5290 if (REG_NOTE_KIND (link) == REG_DEAD)
5291 {
5292 if (prev)
5293 XEXP (prev, 1) = next;
5294 else
5295 REG_NOTES (insn) = next;
5296 XEXP (link, 1) = dead_notes;
5297 dead_notes = link;
5298 }
5299 else
5300 prev = link;
5301
5302 if (regno < FIRST_PSEUDO_REGISTER)
5303 {
5304 int j = HARD_REGNO_NREGS (regno,
5305 GET_MODE (XEXP (link, 0)));
5306 while (--j >= 0)
5307 {
5308 CLEAR_REGNO_REG_SET (bb_live_regs, regno+j);
5309 }
5310 }
5311 else
5312 {
5313 CLEAR_REGNO_REG_SET (bb_live_regs, regno);
5314 }
5315 }
5316 else
5317 prev = link;
5318 }
5319 }
5320
5321 INSN_REG_WEIGHT (insn) = reg_weight;
5322 }
5323}
5324
5325/* Update register life and usage information for block bb
5326 after scheduling. Put register dead notes back in the code. */
5327
5328static void
5329find_post_sched_live (bb)
5330 int bb;
5331{
5332 int sometimes_max;
5333 int j, i;
5334 int b;
5335 rtx insn;
5336 rtx head, tail, prev_head, next_tail;
5337
5338 register struct sometimes *regs_sometimes_live;
5339
5340 b = BB_TO_BLOCK (bb);
5341
5342 /* compute live regs at the end of bb as a function of its successors. */
5343 if (current_nr_blocks > 1)
5344 {
5345 int e;
5346 int first_edge;
5347
5348 first_edge = e = OUT_EDGES (b);
5349 CLEAR_REG_SET (bb_live_regs);
5350
5351 if (e)
5352 do
5353 {
5354 int b_succ;
5355
5356 b_succ = TO_BLOCK (e);
5357 IOR_REG_SET (bb_live_regs,
5358 BASIC_BLOCK (b_succ)->global_live_at_start);
5359 e = NEXT_OUT (e);
5360 }
5361 while (e != first_edge);
5362 }
5363
5364 get_block_head_tail (bb, &head, &tail);
5365 next_tail = NEXT_INSN (tail);
5366 prev_head = PREV_INSN (head);
5367
5368 EXECUTE_IF_SET_IN_REG_SET (bb_live_regs, FIRST_PSEUDO_REGISTER, i,
5369 {
5370 sched_reg_basic_block[i] = REG_BLOCK_GLOBAL;
5371 });
5372
5373  /* If the block is empty, the same regs are alive at its end and its
5374     start.  Since this is not guaranteed after interblock scheduling,
5375     make sure they are truly identical.  */
5376 if (NEXT_INSN (prev_head) == tail
5377 && (GET_RTX_CLASS (GET_CODE (tail)) != 'i'))
5378 {
5379 if (current_nr_blocks > 1)
5380    COPY_REG_SET (BASIC_BLOCK (b)->global_live_at_start, bb_live_regs);
5381
5382 return;
5383 }
5384
5385 b = BB_TO_BLOCK (bb);
5386 current_block_num = b;
5387
5388 /* Keep track of register lives. */
5389 old_live_regs = ALLOCA_REG_SET ();
5390 regs_sometimes_live
5391 = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
5392 sometimes_max = 0;
5393
5394  /* Initialize "sometimes" data, starting with registers live at end.  */
5395 sometimes_max = 0;
5396 COPY_REG_SET (old_live_regs, bb_live_regs);
5397 EXECUTE_IF_SET_IN_REG_SET (bb_live_regs, 0, j,
5398 {
5399 sometimes_max
5400 = new_sometimes_live (regs_sometimes_live,
5401 j, sometimes_max);
5402 });
5403
5404 /* scan insns back, computing regs live info */
5405 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
5406 {
5407 /* First we kill registers set by this insn, and then we
5408 make registers used by this insn live. This is the opposite
5409 order used above because we are traversing the instructions
5410 backwards. */
5411
5412 /* Strictly speaking, we should scan REG_UNUSED notes and make
5413 every register mentioned there live, however, we will just
5414 kill them again immediately below, so there doesn't seem to
5415 be any reason why we bother to do this. */
5416
5417 /* See if this is the last notice we must take of a register. */
5418 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
5419 continue;
5420
5421 if (GET_CODE (PATTERN (insn)) == SET
5422 || GET_CODE (PATTERN (insn)) == CLOBBER)
5423	sched_note_set (PATTERN (insn), 1);
5424 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
5425 {
5426 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
5427 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
5428 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
5429	      sched_note_set (XVECEXP (PATTERN (insn), 0, j), 1);
5430 }
5431
5432 /* This code keeps life analysis information up to date. */
5433 if (GET_CODE (insn) == CALL_INSN)
5434 {
5435 register struct sometimes *p;
5436
5437 /* A call kills all call used registers that are not
5438 global or fixed, except for those mentioned in the call
5439 pattern which will be made live again later. */
5440 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5441 if (call_used_regs[i] && ! global_regs[i]
5442 && ! fixed_regs[i])
5443 {
5444 CLEAR_REGNO_REG_SET (bb_live_regs, i);
5445 }
5446
5447	  /* Regs live at the time of a call instruction must not
5448	     go in a register clobbered by calls.  Record this for
5449	     all regs now live.  Note that regs which are born or
5450	     die in a call do not cross a call, so this must be done
5451	     after the killings (above) and before the births
5452	     (below).  */
5453 p = regs_sometimes_live;
5454 for (i = 0; i < sometimes_max; i++, p++)
5455 if (REGNO_REG_SET_P (bb_live_regs, p->regno))
5456 p->calls_crossed += 1;
5457 }
5458
5459 /* Make every register used live, and add REG_DEAD notes for
5460 registers which were not live before we started. */
5461 attach_deaths_insn (insn);
5462
5463 /* Find registers now made live by that instruction. */
5464 EXECUTE_IF_AND_COMPL_IN_REG_SET (bb_live_regs, old_live_regs, 0, j,
5465 {
5466 sometimes_max
5467 = new_sometimes_live (regs_sometimes_live,
5468 j, sometimes_max);
5469 });
5470 IOR_REG_SET (old_live_regs, bb_live_regs);
5471
5472 /* Count lengths of all regs we are worrying about now,
5473 and handle registers no longer live. */
5474
5475 for (i = 0; i < sometimes_max; i++)
5476 {
5477 register struct sometimes *p = &regs_sometimes_live[i];
5478 int regno = p->regno;
5479
5480 p->live_length += 1;
5481
5482 if (!REGNO_REG_SET_P (bb_live_regs, regno))
5483 {
5484 /* This is the end of one of this register's lifetime
5485 segments. Save the lifetime info collected so far,
5486 and clear its bit in the old_live_regs entry. */
5487 sched_reg_live_length[regno] += p->live_length;
5488 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
5489 CLEAR_REGNO_REG_SET (old_live_regs, p->regno);
5490
5491 /* Delete the reg_sometimes_live entry for this reg by
5492 copying the last entry over top of it. */
5493 *p = regs_sometimes_live[--sometimes_max];
5494 /* ...and decrement i so that this newly copied entry
5495 will be processed. */
5496 i--;
5497 }
5498 }
5499 }
5500
5501 finish_sometimes_live (regs_sometimes_live, sometimes_max);
5502
5503  /* In interblock scheduling, global_live_at_start may have changed.  */
5504  if (current_nr_blocks > 1)
5505    COPY_REG_SET (BASIC_BLOCK (b)->global_live_at_start, bb_live_regs);
5506
5507
5508  FREE_REG_SET (old_live_regs);
5509} /* find_post_sched_live */
5510
5511/* After scheduling the subroutine, restore information about uses of
5512 registers. */
5513
5514static void
5515update_reg_usage ()
5516{
5517 int regno;
5518
5519 if (n_basic_blocks > 0)
5520 EXECUTE_IF_SET_IN_REG_SET (bb_live_regs, FIRST_PSEUDO_REGISTER, regno,
5521 {
5522 sched_reg_basic_block[regno]
5523 = REG_BLOCK_GLOBAL;
5524 });
5525
5526 for (regno = 0; regno < max_regno; regno++)
5527 if (sched_reg_live_length[regno])
5528 {
5529 if (sched_verbose)
5530 {
5531 if (REG_LIVE_LENGTH (regno) > sched_reg_live_length[regno])
5532 fprintf (dump,
5533 ";; register %d life shortened from %d to %d\n",
5534 regno, REG_LIVE_LENGTH (regno),
5535 sched_reg_live_length[regno]);
5536 /* Negative values are special; don't overwrite the current
5537 reg_live_length value if it is negative. */
5538 else if (REG_LIVE_LENGTH (regno) < sched_reg_live_length[regno]
5539 && REG_LIVE_LENGTH (regno) >= 0)
5540 fprintf (dump,
5541 ";; register %d life extended from %d to %d\n",
5542 regno, REG_LIVE_LENGTH (regno),
5543 sched_reg_live_length[regno]);
5544
5545 if (!REG_N_CALLS_CROSSED (regno)
5546 && sched_reg_n_calls_crossed[regno])
5547 fprintf (dump,
5548 ";; register %d now crosses calls\n", regno);
5549 else if (REG_N_CALLS_CROSSED (regno)
5550 && !sched_reg_n_calls_crossed[regno]
5551 && REG_BASIC_BLOCK (regno) != REG_BLOCK_GLOBAL)
5552 fprintf (dump,
5553 ";; register %d no longer crosses calls\n", regno);
5554
5555 if (REG_BASIC_BLOCK (regno) != sched_reg_basic_block[regno]
5556 && sched_reg_basic_block[regno] != REG_BLOCK_UNKNOWN
5557 && REG_BASIC_BLOCK(regno) != REG_BLOCK_UNKNOWN)
5558 fprintf (dump,
5559 ";; register %d changed basic block from %d to %d\n",
5560 regno, REG_BASIC_BLOCK(regno),
5561 sched_reg_basic_block[regno]);
5562
5563 }
5564 /* Negative values are special; don't overwrite the current
5565 reg_live_length value if it is negative. */
5566 if (REG_LIVE_LENGTH (regno) >= 0)
5567 REG_LIVE_LENGTH (regno) = sched_reg_live_length[regno];
5568
5569 if (sched_reg_basic_block[regno] != REG_BLOCK_UNKNOWN
5570 && REG_BASIC_BLOCK(regno) != REG_BLOCK_UNKNOWN)
5571 REG_BASIC_BLOCK(regno) = sched_reg_basic_block[regno];
5572
5573 /* We can't change the value of reg_n_calls_crossed to zero for
5574 pseudos which are live in more than one block.
5575
5576 This is because combine might have made an optimization which
5577     invalidated global_live_at_start and reg_n_calls_crossed,
5578 but it does not update them. If we update reg_n_calls_crossed
5579 here, the two variables are now inconsistent, and this might
5580 confuse the caller-save code into saving a register that doesn't
5581 need to be saved. This is only a problem when we zero calls
5582 crossed for a pseudo live in multiple basic blocks.
5583
5584 Alternatively, we could try to correctly update basic block live
5585 at start here in sched, but that seems complicated.
5586
5587     Note: it is possible that a global register became local, as a result
5588 of interblock motion, but will remain marked as a global register. */
5589 if (sched_reg_n_calls_crossed[regno]
5590 || REG_BASIC_BLOCK (regno) != REG_BLOCK_GLOBAL)
5591 REG_N_CALLS_CROSSED (regno) = sched_reg_n_calls_crossed[regno];
5592
5593 }
5594}
5595
5596/* Scheduling clock, modified in schedule_block() and queue_to_ready () */
5597static int clock_var;
5598
5599/* Move insns that became ready to fire from queue to ready list. */
5600
5601static int
5602queue_to_ready (ready, n_ready)
5603 rtx ready[];
5604 int n_ready;
5605{
5606 rtx insn;
5607 rtx link;
5608
5609 q_ptr = NEXT_Q (q_ptr);
5610
5611 /* Add all pending insns that can be scheduled without stalls to the
5612 ready list. */
5613 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
5614 {
5615
5616 insn = XEXP (link, 0);
5617 q_size -= 1;
5618
5619 if (sched_verbose >= 2)
5620 fprintf (dump, ";;\t\tQ-->Ready: insn %d: ", INSN_UID (insn));
5621
5622 if (sched_verbose >= 2 && INSN_BB (insn) != target_bb)
5623 fprintf (dump, "(b%d) ", INSN_BLOCK (insn));
5624
5625 ready[n_ready++] = insn;
5626 if (sched_verbose >= 2)
5627 fprintf (dump, "moving to ready without stalls\n");
5628 }
5629 insn_queue[q_ptr] = 0;
5630
5631 /* If there are no ready insns, stall until one is ready and add all
5632 of the pending insns at that point to the ready list. */
5633 if (n_ready == 0)
5634 {
5635 register int stalls;
5636
5637 for (stalls = 1; stalls < INSN_QUEUE_SIZE; stalls++)
5638 {
5639 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5640 {
5641 for (; link; link = XEXP (link, 1))
5642 {
5643 insn = XEXP (link, 0);
5644 q_size -= 1;
5645
5646 if (sched_verbose >= 2)
5647 fprintf (dump, ";;\t\tQ-->Ready: insn %d: ", INSN_UID (insn));
5648
5649 if (sched_verbose >= 2 && INSN_BB (insn) != target_bb)
5650 fprintf (dump, "(b%d) ", INSN_BLOCK (insn));
5651
5652 ready[n_ready++] = insn;
5653 if (sched_verbose >= 2)
5654 fprintf (dump, "moving to ready with %d stalls\n", stalls);
5655 }
5656 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
5657
5658 if (n_ready)
5659 break;
5660 }
5661 }
5662
5663 if (sched_verbose && stalls)
5664 visualize_stall_cycles (BB_TO_BLOCK (target_bb), stalls);
5665 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
5666 clock_var += stalls;
5667 }
5668 return n_ready;
5669}
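/* A sketch of the circular-queue arithmetic assumed above.  Q_PTR
   advances one slot per clock tick; an insn queued with cost N is linked
   into the slot N ticks ahead (by queue_insn elsewhere in this file), so
   it reappears exactly when its stall has elapsed.  With INSN_QUEUE_SIZE
   a power of two, the wrap-around is a masked add; the NEXT_Q and
   NEXT_Q_AFTER macros defined earlier are presumably of this shape
   (illustrative only):

	#define NEXT_Q(X)	   (((X) + 1) & (INSN_QUEUE_SIZE - 1))
	#define NEXT_Q_AFTER(X, C) (((X) + (C)) & (INSN_QUEUE_SIZE - 1))
*/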
5670
5671/* Print the ready list for debugging purposes. Callable from debugger. */
5672
5673static void
5674debug_ready_list (ready, n_ready)
5675 rtx ready[];
5676 int n_ready;
5677{
5678 int i;
5679
5680 for (i = 0; i < n_ready; i++)
5681 {
5682 fprintf (dump, " %d", INSN_UID (ready[i]));
5683 if (current_nr_blocks > 1 && INSN_BB (ready[i]) != target_bb)
5684 fprintf (dump, "/b%d", INSN_BLOCK (ready[i]));
5685 }
5686 fprintf (dump, "\n");
5687}
5688
5689/* Print names of units on which insn can/should execute, for debugging. */
5690
5691static void
5692insn_print_units (insn)
5693 rtx insn;
5694{
5695 int i;
5696 int unit = insn_unit (insn);
5697
5698 if (unit == -1)
5699 fprintf (dump, "none");
5700 else if (unit >= 0)
5701 fprintf (dump, "%s", function_units[unit].name);
5702 else
5703 {
5704 fprintf (dump, "[");
5705 for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
5706 if (unit & 1)
5707 {
5708 fprintf (dump, "%s", function_units[i].name);
5709 if (unit != 1)
5710 fprintf (dump, " ");
5711 }
5712 fprintf (dump, "]");
5713 }
5714}
5715
5716/* MAX_VISUAL_LINES is the maximum number of lines in the visualization table
5717   of a basic block.  If more lines are needed, the table is split in two.
5718   n_visual_lines is the number of lines printed so far for a block.
5719   visual_tbl contains the block visualization info.
5720   vis_no_unit holds insns in a cycle that are not mapped to any unit.  */
5721#define MAX_VISUAL_LINES 100
5722#define INSN_LEN 30
5723int n_visual_lines;
5724char *visual_tbl;
5725int n_vis_no_unit;
5726rtx vis_no_unit[10];
5727
5728/* Finds units that are in use in this function.  Required only
5729   for visualization.  */
5730
5731static void
5732init_target_units ()
5733{
5734 rtx insn;
5735 int unit;
5736
5737 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
5738 {
5739 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
5740 continue;
5741
5742 unit = insn_unit (insn);
5743
5744 if (unit < 0)
5745 target_units |= ~unit;
5746 else
5747 target_units |= (1 << unit);
5748 }
5749}
5750
5751/* Return the length of the visualization table */
5752
5753static int
5754get_visual_tbl_length ()
5755{
5756 int unit, i;
5757 int n, n1;
5758 char *s;
5759
5760 /* compute length of one field in line */
5761 s = (char *) alloca (INSN_LEN + 5);
5762 sprintf (s, " %33s", "uname");
5763 n1 = strlen (s);
5764
5765 /* compute length of one line */
5766 n = strlen (";; ");
5767 n += n1;
5768 for (unit = 0; unit < FUNCTION_UNITS_SIZE; unit++)
5769 if (function_units[unit].bitmask & target_units)
5770 for (i = 0; i < function_units[unit].multiplicity; i++)
5771 n += n1;
5772 n += n1;
5773 n += strlen ("\n") + 2;
5774
5775 /* compute length of visualization string */
5776 return (MAX_VISUAL_LINES * n);
5777}
5778
5779/* Init block visualization debugging info */
5780
5781static void
5782init_block_visualization ()
5783{
5784 strcpy (visual_tbl, "");
5785 n_visual_lines = 0;
5786 n_vis_no_unit = 0;
5787}
5788
5789#define BUF_LEN 256
5790
5791static char *
5792safe_concat (buf, cur, str)
5793 char *buf;
5794 char *cur;
5795 char *str;
5796{
5797 char *end = buf + BUF_LEN - 2; /* leave room for null */
5798 int c;
5799
5800 if (cur > end)
5801 {
5802 *end = '\0';
5803 return end;
5804 }
5805
5806 while (cur < end && (c = *str++) != '\0')
5807 *cur++ = c;
5808
5809 *cur = '\0';
5810 return cur;
5811}
5812
5813/* This recognizes rtxes I classified as expressions.  These always */
5814/* represent some action on values, or on results of other expressions, */
5815/* that may be stored in objects representing values.  */
5816
5817static void
5818print_exp (buf, x, verbose)
5819     char *buf;
5820     rtx x;
5821     int verbose;
5822{
5823  char tmp[BUF_LEN];
5824  char *st[4];
5825  char *cur = buf;
5826  char *fun = (char *)0;
5827  char *sep;
5828  rtx op[4];
5829  int i;
5830
5831  for (i = 0; i < 4; i++)
5832    {
5833      st[i] = (char *)0;
5834      op[i] = NULL_RTX;
5835    }
5836
5837  switch (GET_CODE (x))
5838    {
5839    case PLUS:
5840      op[0] = XEXP (x, 0);
5841      if (GET_CODE (XEXP (x, 1)) == CONST_INT
5842	  && INTVAL (XEXP (x, 1)) < 0)
5843	{
5844	  st[1] = "-";
5845	  op[1] = GEN_INT (-INTVAL (XEXP (x, 1)));
5846	}
5847      else
5848	{
5849	  st[1] = "+";
5850	  op[1] = XEXP (x, 1);
5851	}
5852      break;
5853    case LO_SUM:
5854      op[0] = XEXP (x, 0);
5855      st[1] = "+low(";
5856      op[1] = XEXP (x, 1);
5857      st[2] = ")";
5858      break;
5859    case MINUS:
5860      op[0] = XEXP (x, 0);
5861      st[1] = "-";
5862      op[1] = XEXP (x, 1);
5863      break;
5864    case COMPARE:
5865      fun = "cmp";
5866      op[0] = XEXP (x, 0);
5867      op[1] = XEXP (x, 1);
5868      break;
5869    case NEG:
5870      st[0] = "-";
5871      op[0] = XEXP (x, 0);
5872      break;
5873    case MULT:
5874      op[0] = XEXP (x, 0);
5875      st[1] = "*";
5876      op[1] = XEXP (x, 1);
5877      break;
5878    case DIV:
5879      op[0] = XEXP (x, 0);
5880      st[1] = "/";
5881      op[1] = XEXP (x, 1);
5882      break;
5883    case UDIV:
5884      fun = "udiv";
5885      op[0] = XEXP (x, 0);
5886      op[1] = XEXP (x, 1);
5887      break;
5888    case MOD:
5889      op[0] = XEXP (x, 0);
5890      st[1] = "%";
5891      op[1] = XEXP (x, 1);
5892      break;
5893    case UMOD:
5894      fun = "umod";
5895      op[0] = XEXP (x, 0);
5896      op[1] = XEXP (x, 1);
5897      break;
5898    case SMIN:
5899      fun = "smin";
5900      op[0] = XEXP (x, 0);
5901      op[1] = XEXP (x, 1);
5902      break;
5903    case SMAX:
5904      fun = "smax";
5905      op[0] = XEXP (x, 0);
5906      op[1] = XEXP (x, 1);
5907      break;
5908    case UMIN:
5909      fun = "umin";
5910      op[0] = XEXP (x, 0);
5911      op[1] = XEXP (x, 1);
5912      break;
5913    case UMAX:
5914      fun = "umax";
5915      op[0] = XEXP (x, 0);
5916      op[1] = XEXP (x, 1);
5917      break;
5918    case NOT:
5919      st[0] = "!";
5920      op[0] = XEXP (x, 0);
5921      break;
5922    case AND:
5923      op[0] = XEXP (x, 0);
5924      st[1] = "&";
5925      op[1] = XEXP (x, 1);
5926      break;
5927    case IOR:
5928      op[0] = XEXP (x, 0);
5929      st[1] = "|";
5930      op[1] = XEXP (x, 1);
5931      break;
5932    case XOR:
5933      op[0] = XEXP (x, 0);
5934      st[1] = "^";
5935      op[1] = XEXP (x, 1);
5936      break;
5937    case ASHIFT:
5938      op[0] = XEXP (x, 0);
5939      st[1] = "<<";
5940      op[1] = XEXP (x, 1);
5941      break;
5942    case LSHIFTRT:
5943      op[0] = XEXP (x, 0);
5944      st[1] = " 0>>";
5945      op[1] = XEXP (x, 1);
5946      break;
5947    case ASHIFTRT:
5948      op[0] = XEXP (x, 0);
5949      st[1] = ">>";
5950      op[1] = XEXP (x, 1);
5951      break;
5952    case ROTATE:
5953      op[0] = XEXP (x, 0);
5954      st[1] = "<-<";
5955      op[1] = XEXP (x, 1);
5956      break;
5957    case ROTATERT:
5958      op[0] = XEXP (x, 0);
5959      st[1] = ">->";
5960      op[1] = XEXP (x, 1);
5961      break;
5962    case ABS:
5963      fun = "abs";
5964      op[0] = XEXP (x, 0);
5965      break;
5966    case SQRT:
5967      fun = "sqrt";
5968      op[0] = XEXP (x, 0);
5969      break;
5970    case FFS:
5971      fun = "ffs";
5972      op[0] = XEXP (x, 0);
5973      break;
5974    case EQ:
5975      op[0] = XEXP (x, 0);
5976      st[1] = "==";
5977      op[1] = XEXP (x, 1);
5978      break;
5979    case NE:
5980      op[0] = XEXP (x, 0);
5981      st[1] = "!=";
5982      op[1] = XEXP (x, 1);
5983      break;
5984    case GT:
5985      op[0] = XEXP (x, 0);
5986      st[1] = ">";
5987      op[1] = XEXP (x, 1);
5988      break;
5989    case GTU:
5990      fun = "gtu";
5991      op[0] = XEXP (x, 0);
5992      op[1] = XEXP (x, 1);
5993      break;
5994    case LT:
5995      op[0] = XEXP (x, 0);
5996      st[1] = "<";
5997      op[1] = XEXP (x, 1);
5998      break;
5999    case LTU:
6000      fun = "ltu";
6001      op[0] = XEXP (x, 0);
6002      op[1] = XEXP (x, 1);
6003      break;
6004    case GE:
6005      op[0] = XEXP (x, 0);
6006      st[1] = ">=";
6007      op[1] = XEXP (x, 1);
6008      break;
6009    case GEU:
6010      fun = "geu";
6011      op[0] = XEXP (x, 0);
6012      op[1] = XEXP (x, 1);
6013      break;
6014    case LE:
6015      op[0] = XEXP (x, 0);
6016      st[1] = "<=";
6017      op[1] = XEXP (x, 1);
6018      break;
6019    case LEU:
6020      fun = "leu";
6021      op[0] = XEXP (x, 0);
6022      op[1] = XEXP (x, 1);
6023      break;
6024    case SIGN_EXTRACT:
6025      fun = (verbose) ? "sign_extract" : "sxt";
6026      op[0] = XEXP (x, 0);
6027      op[1] = XEXP (x, 1);
6028      op[2] = XEXP (x, 2);
6029      break;
6030    case ZERO_EXTRACT:
6031      fun = (verbose) ? "zero_extract" : "zxt";
6032      op[0] = XEXP (x, 0);
6033      op[1] = XEXP (x, 1);
6034      op[2] = XEXP (x, 2);
6035      break;
6036    case SIGN_EXTEND:
6037      fun = (verbose) ? "sign_extend" : "sxn";
6038      op[0] = XEXP (x, 0);
6039      break;
6040    case ZERO_EXTEND:
6041      fun = (verbose) ? "zero_extend" : "zxn";
6042      op[0] = XEXP (x, 0);
6043      break;
6044    case FLOAT_EXTEND:
6045      fun = (verbose) ? "float_extend" : "fxn";
6046      op[0] = XEXP (x, 0);
6047      break;
6048    case TRUNCATE:
6049      fun = (verbose) ? "trunc" : "trn";
6050      op[0] = XEXP (x, 0);
6051      break;
6052    case FLOAT_TRUNCATE:
6053      fun = (verbose) ? "float_trunc" : "ftr";
6054      op[0] = XEXP (x, 0);
6055      break;
6056    case FLOAT:
6057      fun = (verbose) ? "float" : "flt";
6058      op[0] = XEXP (x, 0);
6059      break;
6060    case UNSIGNED_FLOAT:
6061      fun = (verbose) ? "uns_float" : "ufl";
6062      op[0] = XEXP (x, 0);
6063      break;
6064    case FIX:
6065      fun = "fix";
6066      op[0] = XEXP (x, 0);
6067      break;
6068    case UNSIGNED_FIX:
6069      fun = (verbose) ? "uns_fix" : "ufx";
6070      op[0] = XEXP (x, 0);
6071      break;
6072    case PRE_DEC:
6073      st[0] = "--";
6074      op[0] = XEXP (x, 0);
6075      break;
6076    case PRE_INC:
6077      st[0] = "++";
6078      op[0] = XEXP (x, 0);
6079      break;
6080    case POST_DEC:
6081      op[0] = XEXP (x, 0);
6082      st[1] = "--";
6083      break;
6084    case POST_INC:
6085      op[0] = XEXP (x, 0);
6086      st[1] = "++";
6087      break;
6088    case CALL:
6089      st[0] = "call ";
6090      op[0] = XEXP (x, 0);
6091      if (verbose)
6092	{
6093	  st[1] = " argc:";
6094	  op[1] = XEXP (x, 1);
6095	}
6096      break;
6097    case IF_THEN_ELSE:
6098      st[0] = "{(";
6099      op[0] = XEXP (x, 0);
6100      st[1] = ")?";
6101      op[1] = XEXP (x, 1);
6102      st[2] = ":";
6103      op[2] = XEXP (x, 2);
6104      st[3] = "}";
6105      break;
6106    case TRAP_IF:
6107      fun = "trap_if";
6108      op[0] = TRAP_CONDITION (x);
6109      break;
6110    case UNSPEC:
6111    case UNSPEC_VOLATILE:
6112      {
6113	cur = safe_concat (buf, cur, "unspec");
6114	if (GET_CODE (x) == UNSPEC_VOLATILE)
6115	  cur = safe_concat (buf, cur, "/v");
6116	cur = safe_concat (buf, cur, "[");
6117	sep = "";
6118	for (i = 0; i < XVECLEN (x, 0); i++)
6119	  {
6120	    print_pattern (tmp, XVECEXP (x, 0, i), verbose);
6121	    cur = safe_concat (buf, cur, sep);
6122	    cur = safe_concat (buf, cur, tmp);
6123	    sep = ",";
6124	  }
6125	cur = safe_concat (buf, cur, "] ");
6126	sprintf (tmp, "%d", XINT (x, 1));
6127	cur = safe_concat (buf, cur, tmp);
6128      }
6129      break;
6130    default:
6131      /* if (verbose) debug_rtx (x); */
6132      st[0] = GET_RTX_NAME (GET_CODE (x));
6133      break;
6134    }
6135
6136  /* Print this as a function? */
6137  if (fun)
6138    {
6139      cur = safe_concat (buf, cur, fun);
6140      cur = safe_concat (buf, cur, "(");
6141    }
6142
6143  for (i = 0; i < 4; i++)
6144    {
6145      if (st[i])
6146	cur = safe_concat (buf, cur, st[i]);
6147
6148      if (op[i])
6149	{
6150	  if (fun && i != 0)
6151	    cur = safe_concat (buf, cur, ",");
6152
6153	  print_value (tmp, op[i], verbose);
6154	  cur = safe_concat (buf, cur, tmp);
6155	}
6156    }
6157
6158  if (fun)
6159    cur = safe_concat (buf, cur, ")");
6160} /* print_exp */
6161
6162/* Prints rtxes I classified as values.  They're constants, */
6163/* registers, labels, symbols and memory accesses.  */
6164
6165static void
6166print_value (buf, x, verbose)
6167     char *buf;
6168     rtx x;
6169     int verbose;
6170{
6171  char t[BUF_LEN];
6172  char *cur = buf;
6173
6174  switch (GET_CODE (x))
6175    {
6176    case CONST_INT:
6177      sprintf (t, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6178      cur = safe_concat (buf, cur, t);
6179      break;
6180    case CONST_DOUBLE:
6181      sprintf (t, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6182      cur = safe_concat (buf, cur, t);
6183      break;
6184    case CONST_STRING:
6185      cur = safe_concat (buf, cur, "\"");
6186      cur = safe_concat (buf, cur, XSTR (x, 0));
6187      cur = safe_concat (buf, cur, "\"");
6188      break;
6189    case SYMBOL_REF:
6190      cur = safe_concat (buf, cur, "`");
6191      cur = safe_concat (buf, cur, XSTR (x, 0));
6192      cur = safe_concat (buf, cur, "'");
6193      break;
6194    case LABEL_REF:
6195      sprintf (t, "L%d", INSN_UID (XEXP (x, 0)));
6196      cur = safe_concat (buf, cur, t);
6197      break;
6198    case CONST:
6199      print_value (t, XEXP (x, 0), verbose);
6200      cur = safe_concat (buf, cur, "const(");
6201      cur = safe_concat (buf, cur, t);
6202      cur = safe_concat (buf, cur, ")");
6203      break;
6204    case HIGH:
6205      print_value (t, XEXP (x, 0), verbose);
6206      cur = safe_concat (buf, cur, "high(");
6207      cur = safe_concat (buf, cur, t);
6208      cur = safe_concat (buf, cur, ")");
6209      break;
6210    case REG:
6211      if (REGNO (x) < FIRST_PSEUDO_REGISTER)
6212	{
6213	  int c = reg_names[ REGNO (x) ][0];
6214	  if (c >= '0' && c <= '9')
6215	    cur = safe_concat (buf, cur, "%");
6216
6217	  cur = safe_concat (buf, cur, reg_names[ REGNO (x) ]);
6218	}
6219      else
6220	{
6221	  sprintf (t, "r%d", REGNO (x));
6222	  cur = safe_concat (buf, cur, t);
6223	}
6224      break;
6225    case SUBREG:
6226      print_value (t, SUBREG_REG (x), verbose);
6227      cur = safe_concat (buf, cur, t);
6228      sprintf (t, "#%d", SUBREG_WORD (x));
6229      cur = safe_concat (buf, cur, t);
6230      break;
6231    case SCRATCH:
6232      cur = safe_concat (buf, cur, "scratch");
6233      break;
6234    case CC0:
6235      cur = safe_concat (buf, cur, "cc0");
6236      break;
6237    case PC:
6238      cur = safe_concat (buf, cur, "pc");
6239      break;
6240    case MEM:
6241      print_value (t, XEXP (x, 0), verbose);
6242      cur = safe_concat (buf, cur, "[");
6243      cur = safe_concat (buf, cur, t);
6244      cur = safe_concat (buf, cur, "]");
6245      break;
6246    default:
6247      print_exp (t, x, verbose);
6248      cur = safe_concat (buf, cur, t);
6249      break;
6250    }
6251} /* print_value */
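/* For example, (mem (plus (reg 65) (const_int 4))) prints as
   "[r65+0x4]" on a target where register 65 is a pseudo: the MEM case
   brackets its address, the PLUS is handed off to print_exp, and the
   CONST_INT comes out in hex.  (Register numbers are illustrative.)  */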
6252
6253/* The next level of insn detail: recognizing the insn's pattern.  */
6254
6255static void
6256print_pattern (buf, x, verbose)
6257 char *buf;
6258 rtx x;
6259 int verbose;
6260{
6261 char t1[BUF_LEN], t2[BUF_LEN], t3[BUF_LEN];
6262
6263 switch (GET_CODE (x))
6264 {
6265 case SET:
6266 print_value (t1, SET_DEST (x), verbose);
6267 print_value (t2, SET_SRC (x), verbose);
6268 sprintf (buf, "%s=%s", t1, t2);
6269 break;
6270 case RETURN:
6271 sprintf (buf, "return");
6272 break;
6273 case CALL:
6274 print_exp (buf, x, verbose);
6275 break;
6276 case CLOBBER:
6277 print_value (t1, XEXP (x, 0), verbose);
6278 sprintf (buf, "clobber %s", t1);
6279 break;
6280 case USE:
6281 print_value (t1, XEXP (x, 0), verbose);
6282 sprintf (buf, "use %s", t1);
6283 break;
6284 case PARALLEL:
6285 {
6286 int i;
6287
6288 sprintf (t1, "{");
6289 for (i = 0; i < XVECLEN (x, 0); i++)
6290 {
6291 print_pattern (t2, XVECEXP (x, 0, i), verbose);
6292 sprintf (t3, "%s%s;", t1, t2);
6293 strcpy (t1, t3);
6294 }
6295 sprintf (buf, "%s}", t1);
6296 }
6297 break;
6298 case SEQUENCE:
6299 {
6300 int i;
6301
6302 sprintf (t1, "%%{");
6303 for (i = 0; i < XVECLEN (x, 0); i++)
6304 {
6305 print_insn (t2, XVECEXP (x, 0, i), verbose);
6306 sprintf (t3, "%s%s;", t1, t2);
6307 strcpy (t1, t3);
6308 }
6309 sprintf (buf, "%s%%}", t1);
6310 }
6311 break;
6312 case ASM_INPUT:
6313      sprintf (buf, "asm {%s}", XSTR (x, 0));
6314 break;
6315 case ADDR_VEC:
6316 break;
6317 case ADDR_DIFF_VEC:
6318 print_value (buf, XEXP (x, 0), verbose);
6319 break;
6320 case TRAP_IF:
6321 print_value (t1, TRAP_CONDITION (x), verbose);
6322 sprintf (buf, "trap_if %s", t1);
6323 break;
6324 case UNSPEC:
6325 {
6326 int i;
6327
6328 sprintf (t1, "unspec{");
6329 for (i = 0; i < XVECLEN (x, 0); i++)
6330 {
6331 print_pattern (t2, XVECEXP (x, 0, i), verbose);
6332 sprintf (t3, "%s%s;", t1, t2);
6333 strcpy (t1, t3);
6334 }
6335 sprintf (buf, "%s}", t1);
6336 }
6337 break;
6338 case UNSPEC_VOLATILE:
6339 {
6340 int i;
6341
6342 sprintf (t1, "unspec/v{");
6343 for (i = 0; i < XVECLEN (x, 0); i++)
6344 {
6345 print_pattern (t2, XVECEXP (x, 0, i), verbose);
6346 sprintf (t3, "%s%s;", t1, t2);
6347 strcpy (t1, t3);
6348 }
6349 sprintf (buf, "%s}", t1);
6350 }
6351 break;
6352 default:
6353 print_value (buf, x, verbose);
6354 }
6355} /* print_pattern */
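/* For example, the pattern (set (reg 66) (plus (reg 65) (const_int 1)))
   prints as "r66=r65+0x1", and a PARALLEL of that set with a clobber of
   (reg 67) prints as "{r66=r65+0x1;clobber r67;}".  (Illustrative
   register numbers.)  */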
6356
6357/* This is the main function in the rtl visualization mechanism.  It
6358   accepts an rtx and tries to recognize it as an insn, then prints it
6359   properly in human-readable form, resembling assembler mnemonics.  */
6360/* For every insn it prints its UID and the BB the insn belongs */
6361/* to.  (Probably the last "option" should be extended somehow, since */
6362/* it depends now on sched.c inner variables...)  */
6363
6364static void
6365print_insn (buf, x, verbose)
6366 char *buf;
6367 rtx x;
6368 int verbose;
6369{
6370 char t[BUF_LEN];
6371 rtx insn = x;
6372
6373 switch (GET_CODE (x))
6374 {
6375 case INSN:
6376 print_pattern (t, PATTERN (x), verbose);
6377 if (verbose)
6378 sprintf (buf, "b%d: i% 4d: %s", INSN_BB (x),
6379 INSN_UID (x), t);
6380 else
6381 sprintf (buf, "%-4d %s", INSN_UID (x), t);
6382 break;
6383 case JUMP_INSN:
6384 print_pattern (t, PATTERN (x), verbose);
6385 if (verbose)
6386 sprintf (buf, "b%d: i% 4d: jump %s", INSN_BB (x),
6387 INSN_UID (x), t);
6388 else
6389 sprintf (buf, "%-4d %s", INSN_UID (x), t);
6390 break;
6391 case CALL_INSN:
6392 x = PATTERN (insn);
6393 if (GET_CODE (x) == PARALLEL)
6394 {
6395 x = XVECEXP (x, 0, 0);
6396 print_pattern (t, x, verbose);
6397 }
6398 else
6399 strcpy (t, "call <...>");
6400 if (verbose)
6401 sprintf (buf, "b%d: i% 4d: %s", INSN_BB (insn),
6402 INSN_UID (insn), t);
6403 else
6404 sprintf (buf, "%-4d %s", INSN_UID (insn), t);
6405 break;
6406 case CODE_LABEL:
6407 sprintf (buf, "L%d:", INSN_UID (x));
6408 break;
6409 case BARRIER:
6410 sprintf (buf, "i% 4d: barrier", INSN_UID (x));
6411 break;
6412 case NOTE:
6413 if (NOTE_LINE_NUMBER (x) > 0)
6414 sprintf (buf, "%4d note \"%s\" %d", INSN_UID (x),
6415 NOTE_SOURCE_FILE (x), NOTE_LINE_NUMBER (x));
6416 else
6417 sprintf (buf, "%4d %s", INSN_UID (x),
6418 GET_NOTE_INSN_NAME (NOTE_LINE_NUMBER (x)));
6419 break;
6420 default:
6421 if (verbose)
6422 {
6423 sprintf (buf, "Not an INSN at all\n");
6424 debug_rtx (x);
6425 }
6426 else
6427 sprintf (buf, "i%-4d <What?>", INSN_UID (x));
6428 }
6429} /* print_insn */
6430
6431/* Print visualization debugging info */
6432
6433static void
6434print_block_visualization (b, s)
6435 int b;
6436 char *s;
6437{
6438 int unit, i;
6439
6440 /* print header */
6441 fprintf (dump, "\n;; ==================== scheduling visualization for block %d %s \n", b, s);
6442
6443 /* Print names of units */
6444  fprintf (dump, ";; %-8s", "clock");
6445 for (unit = 0; unit < FUNCTION_UNITS_SIZE; unit++)
6446 if (function_units[unit].bitmask & target_units)
6447 for (i = 0; i < function_units[unit].multiplicity; i++)
6448 fprintf (dump, " %-33s", function_units[unit].name);
6449 fprintf (dump, " %-8s\n", "no-unit");
6450
6451 fprintf (dump, ";; %-8s", "=====");
6452 for (unit = 0; unit < FUNCTION_UNITS_SIZE; unit++)
6453 if (function_units[unit].bitmask & target_units)
6454 for (i = 0; i < function_units[unit].multiplicity; i++)
6455 fprintf (dump, " %-33s", "==============================");
6456 fprintf (dump, " %-8s\n", "=======");
6457
6458 /* Print insns in each cycle */
6459 fprintf (dump, "%s\n", visual_tbl);
6460}
6461
6462/* Print insns in the 'no_unit' column of visualization */
6463
6464static void
6465visualize_no_unit (insn)
6466 rtx insn;
6467{
6468 vis_no_unit[n_vis_no_unit] = insn;
6469 n_vis_no_unit++;
6470}
6471
6472/* Print insns scheduled in clock, for visualization. */
6473
6474static void
6475visualize_scheduled_insns (b, clock)
6476 int b, clock;
6477{
6478 int i, unit;
6479
6480 /* if no more room, split table into two */
6481 if (n_visual_lines >= MAX_VISUAL_LINES)
6482 {
6483 print_block_visualization (b, "(incomplete)");
6484 init_block_visualization ();
6485 }
6486
6487 n_visual_lines++;
6488
6489 sprintf (visual_tbl + strlen (visual_tbl), ";; %-8d", clock);
6490 for (unit = 0; unit < FUNCTION_UNITS_SIZE; unit++)
6491 if (function_units[unit].bitmask & target_units)
6492 for (i = 0; i < function_units[unit].multiplicity; i++)
6493 {
6494 int instance = unit + i * FUNCTION_UNITS_SIZE;
6495 rtx insn = unit_last_insn[instance];
6496
6497 /* print insns that still keep the unit busy */
6498 if (insn &&
6499 actual_hazard_this_instance (unit, instance, insn, clock, 0))
6500 {
6501 char str[BUF_LEN];
6502 print_insn (str, insn, 0);
6503 str[INSN_LEN] = '\0';
6504 sprintf (visual_tbl + strlen (visual_tbl), " %-33s", str);
6505 }
6506 else
6507 sprintf (visual_tbl + strlen (visual_tbl), " %-33s", "------------------------------");
6508 }
6509
6510 /* print insns that are not assigned to any unit */
6511 for (i = 0; i < n_vis_no_unit; i++)
6512 sprintf (visual_tbl + strlen (visual_tbl), " %-8d",
6513 INSN_UID (vis_no_unit[i]));
6514 n_vis_no_unit = 0;
6515
6516 sprintf (visual_tbl + strlen (visual_tbl), "\n");
6517}
6518
6519/* Print stalled cycles */
6520
6521static void
6522visualize_stall_cycles (b, stalls)
6523 int b, stalls;
6524{
6525 int i;
6526
6527 /* if no more room, split table into two */
6528 if (n_visual_lines >= MAX_VISUAL_LINES)
6529 {
6530 print_block_visualization (b, "(incomplete)");
6531 init_block_visualization ();
6532 }
6533
6534 n_visual_lines++;
6535
6536 sprintf (visual_tbl + strlen (visual_tbl), ";; ");
6537 for (i = 0; i < stalls; i++)
6538 sprintf (visual_tbl + strlen (visual_tbl), ".");
6539 sprintf (visual_tbl + strlen (visual_tbl), "\n");
6540}
6541
6542/* move_insn1: Remove INSN from insn chain, and link it after LAST insn */
6543
6544static rtx
6545move_insn1 (insn, last)
6546 rtx insn, last;
6547{
6548 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
6549 PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
6550
6551 NEXT_INSN (insn) = NEXT_INSN (last);
6552 PREV_INSN (NEXT_INSN (last)) = insn;
6553
6554 NEXT_INSN (last) = insn;
6555 PREV_INSN (insn) = last;
6556
6557 return insn;
6558}
6559
6560/* Search INSN for fake REG_DEAD note pairs for NOTE_INSN_SETJMP,
6561   NOTE_INSN_{LOOP,EHREGION}_{BEG,END}; and convert them back into
6562   NOTEs.  The REG_DEAD note following the first one contains the saved
6563   value for NOTE_BLOCK_NUMBER, which is useful for
6564   NOTE_INSN_EH_REGION_{BEG,END} NOTEs.  LAST is the last instruction
6565   output by the instruction scheduler.  Return the new value of LAST.  */
6566
6567static rtx
6568reemit_notes (insn, last)
6569 rtx insn;
6570 rtx last;
6571{
6572 rtx note, retval;
6573
6574 retval = last;
6575 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
6576 {
6577 if (REG_NOTE_KIND (note) == REG_DEAD
6578 && GET_CODE (XEXP (note, 0)) == CONST_INT)
6579 {
6580 int note_type = INTVAL (XEXP (note, 0));
6581 if (note_type == NOTE_INSN_SETJMP)
6582	    {
6583	      retval = emit_note_after (NOTE_INSN_SETJMP, insn);
6584 CONST_CALL_P (retval) = CONST_CALL_P (note);
6585 remove_note (insn, note);
6586 note = XEXP (note, 1);
6587 }
6588 else if (note_type == NOTE_INSN_RANGE_START
6589 || note_type == NOTE_INSN_RANGE_END)
6590 {
6591 last = emit_note_before (note_type, last);
6592 remove_note (insn, note);
6593 note = XEXP (note, 1);
6594 NOTE_RANGE_INFO (last) = XEXP (note, 0);
6595 }
6596 else
6597 {
6598	      last = emit_note_before (note_type, last);
6599 remove_note (insn, note);
6600 note = XEXP (note, 1);
6601 NOTE_BLOCK_NUMBER (last) = INTVAL (XEXP (note, 0));
6602 }
6603 remove_note (insn, note);
6604 }
6605 }
6606 return retval;
6607}
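/* For example, a NOTE_INSN_EH_REGION_BEG removed by the scheduler is
   carried on INSN as the fake note pair

	(REG_DEAD (const_int NOTE_INSN_EH_REGION_BEG))
	(REG_DEAD (const_int <saved NOTE_BLOCK_NUMBER>))

   and the loop above re-expands it into a real note before LAST,
   restoring the saved block number.  (Illustrative; the pairs are
   created where the notes are removed, elsewhere in this file.)  */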
6608
6609/* Move INSN, and all insns which should be issued before it,
6610   due to SCHED_GROUP_P flag.  Reemit notes if needed.
6611
6612   Return the last insn emitted by the scheduler, which is the
6613   return value from the first call to reemit_notes.  */
6614
6615static rtx
6616move_insn (insn, last)
6617 rtx insn, last;
6618{
6619  rtx retval = NULL;
6620
6621  /* If INSN has SCHED_GROUP_P set, then issue it and any other
6622     insns with SCHED_GROUP_P set first.  */
6623 while (SCHED_GROUP_P (insn))
6624 {
6625 rtx prev = PREV_INSN (insn);
6626
6627 /* Move a SCHED_GROUP_P insn. */
6628      move_insn1 (insn, last);
6629 /* If this is the first call to reemit_notes, then record
6630 its return value. */
6631 if (retval == NULL_RTX)
6632 retval = reemit_notes (insn, insn);
6633 else
6634 reemit_notes (insn, insn);
6635 insn = prev;
6636 }
6637
6638  /* Now move the first non-SCHED_GROUP_P insn.  */
6639  move_insn1 (insn, last);
6640
6641 /* If this is the first call to reemit_notes, then record
6642 its return value. */
6643 if (retval == NULL_RTX)
6644 retval = reemit_notes (insn, insn);
6645 else
6646 reemit_notes (insn, insn);
6647
6648 return retval;
6649}
6650
6651/* Return an insn which represents a SCHED_GROUP, which is
6652 the last insn in the group. */
6653
6654static rtx
6655group_leader (insn)
6656 rtx insn;
6657{
6658 rtx prev;
6659
6660 do
6661 {
6662 prev = insn;
6663 insn = next_nonnote_insn (insn);
6664 }
6665 while (insn && SCHED_GROUP_P (insn) && (GET_CODE (insn) != CODE_LABEL));
6666
6667 return prev;
6668}
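/* For example, on a target where a compare and the insn using its
   result are kept together by setting SCHED_GROUP_P on the second insn,
   group_leader applied to the compare returns the user: a group is
   represented by its last member.  (Which insns form groups is
   target-dependent; illustrative only.)  */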
6669
6670/* Use forward list scheduling to rearrange insns of block BB in region RGN,
6671 possibly bringing insns from subsequent blocks in the same region.
6672 Return number of insns scheduled. */
6673
6674static int
5835e573 6675schedule_block (bb, rgn_n_insns)
8c660648 6676 int bb;
8c660648
JL
6677 int rgn_n_insns;
6678{
6679 /* Local variables. */
6680 rtx insn, last;
6681 rtx *ready;
6682 int n_ready = 0;
6683 int can_issue_more;
6684
6685 /* flow block of this bb */
6686 int b = BB_TO_BLOCK (bb);
6687
6688 /* target_n_insns == number of insns in b before scheduling starts.
6689 sched_target_n_insns == how many of b's insns were scheduled.
6690 sched_n_insns == how many insns were scheduled in b */
6691 int target_n_insns = 0;
6692 int sched_target_n_insns = 0;
6693 int sched_n_insns = 0;
6694
6695#define NEED_NOTHING 0
6696#define NEED_HEAD 1
6697#define NEED_TAIL 2
6698 int new_needs;
6699
6700 /* head/tail info for this block */
6701 rtx prev_head;
6702 rtx next_tail;
6703 rtx head;
6704 rtx tail;
6705 int bb_src;
6706
6707  /* We used to have code to avoid getting parameters moved from hard
6708     argument registers into pseudos.
6709
6710 However, it was removed when it proved to be of marginal benefit
6711 and caused problems because schedule_block and compute_forward_dependences
6712 had different notions of what the "head" insn was. */
6713 get_block_head_tail (bb, &head, &tail);
6714
6715 /* Interblock scheduling could have moved the original head insn from this
6716     block into a preceding block.  This may also cause schedule_block and
6717 compute_forward_dependences to have different notions of what the
6718 "head" insn was.
6719
6720 If the interblock movement happened to make this block start with
6721 some notes (LOOP, EH or SETJMP) before the first real insn, then
6722 HEAD will have various special notes attached to it which must be
6723 removed so that we don't end up with extra copies of the notes. */
6724 if (GET_RTX_CLASS (GET_CODE (head)) == 'i')
6725 {
6726 rtx note;
6727
6728 for (note = REG_NOTES (head); note; note = XEXP (note, 1))
6729 if (REG_NOTE_KIND (note) == REG_DEAD
6730 && GET_CODE (XEXP (note, 0)) == CONST_INT)
6731 remove_note (head, note);
6732 }
6733
6734 next_tail = NEXT_INSN (tail);
6735 prev_head = PREV_INSN (head);
6736
6737 /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
6738 to schedule this block. */
6739 if (head == tail
6740 && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
6741 return (sched_n_insns);
6742
6743 /* debug info */
6744 if (sched_verbose)
6745 {
6746 fprintf (dump, ";; ======================================================\n");
6747 fprintf (dump,
6748 ";; -- basic block %d from %d to %d -- %s reload\n",
6749	       b, INSN_UID (BLOCK_HEAD (b)), INSN_UID (BLOCK_END (b)),
6750 (reload_completed ? "after" : "before"));
6751 fprintf (dump, ";; ======================================================\n");
6752 fprintf (dump, "\n");
6753
6754 visual_tbl = (char *) alloca (get_visual_tbl_length ());
6755 init_block_visualization ();
6756 }
6757
6758 /* remove remaining note insns from the block, save them in
6759 note_list. These notes are restored at the end of
6760 schedule_block (). */
6761 note_list = 0;
6762 rm_other_notes (head, tail);
6763
6764 target_bb = bb;
6765
6766 /* prepare current target block info */
6767 if (current_nr_blocks > 1)
6768 {
6769 candidate_table = (candidate *) alloca (current_nr_blocks * sizeof (candidate));
6770
6771 bblst_last = 0;
6772      /* ??? It is not clear why bblst_size is computed this way.  The original
6773	 number was clearly too small as it resulted in compiler failures.
6774	 Multiplying the original number by 2 (to account for update_bbs
6775	 members) seems to be a reasonable solution.  */
6776 /* ??? Or perhaps there is a bug somewhere else in this file? */
6777 bblst_size = (current_nr_blocks - bb) * rgn_nr_edges * 2;
6778 bblst_table = (int *) alloca (bblst_size * sizeof (int));
6779
6780 bitlst_table_last = 0;
6781 bitlst_table_size = rgn_nr_edges;
6782 bitlst_table = (int *) alloca (rgn_nr_edges * sizeof (int));
6783
6784 compute_trg_info (bb);
6785 }
6786
6787 clear_units ();
6788
6789 /* Allocate the ready list */
6790 ready = (rtx *) alloca ((rgn_n_insns + 1) * sizeof (rtx));
6791
6792 /* Print debugging information. */
6793 if (sched_verbose >= 5)
6794 debug_dependencies ();
6795
6796
6797 /* Initialize ready list with all 'ready' insns in target block.
6798 Count number of insns in the target block being scheduled. */
6799 n_ready = 0;
6800 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6801 {
6802 rtx next;
6803
6804 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
6805 continue;
6806 next = NEXT_INSN (insn);
6807
6808 if (INSN_DEP_COUNT (insn) == 0
6809 && (SCHED_GROUP_P (next) == 0 || GET_RTX_CLASS (GET_CODE (next)) != 'i'))
6810 ready[n_ready++] = insn;
6811 if (!(SCHED_GROUP_P (insn)))
6812 target_n_insns++;
6813 }
6814
6815 /* Add to ready list all 'ready' insns in valid source blocks.
6816 For speculative insns, check-live, exception-free, and
6817 issue-delay. */
6818 for (bb_src = bb + 1; bb_src < current_nr_blocks; bb_src++)
6819 if (IS_VALID (bb_src))
6820 {
6821 rtx src_head;
6822 rtx src_next_tail;
6823 rtx tail, head;
6824
6825 get_block_head_tail (bb_src, &head, &tail);
6826 src_next_tail = NEXT_INSN (tail);
6827 src_head = head;
6828
6829 if (head == tail
6830 && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
6831 continue;
6832
6833 for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
6834 {
6835 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
6836 continue;
6837
6838 if (!CANT_MOVE (insn)
6839 && (!IS_SPECULATIVE_INSN (insn)
6840 || (insn_issue_delay (insn) <= 3
6841		  && check_live (insn, bb_src)
6842 && is_exception_free (insn, bb_src, target_bb))))
6843
6844 {
6845 rtx next;
6846
6847 next = NEXT_INSN (insn);
6848 if (INSN_DEP_COUNT (insn) == 0
6849 && (SCHED_GROUP_P (next) == 0
6850 || GET_RTX_CLASS (GET_CODE (next)) != 'i'))
6851 ready[n_ready++] = insn;
6852 }
6853 }
6854 }
6855
6856#ifdef MD_SCHED_INIT
6857 MD_SCHED_INIT (dump, sched_verbose);
6858#endif
6859
6860 /* no insns scheduled in this block yet */
6861 last_scheduled_insn = 0;
6862
6863 /* Q_SIZE is the total number of insns in the queue. */
6864 q_ptr = 0;
6865 q_size = 0;
6866  last_clock_var = 0;
6867 bzero ((char *) insn_queue, sizeof (insn_queue));
6868
6869 /* Start just before the beginning of time. */
6870 clock_var = -1;
6871
6872 /* We start inserting insns after PREV_HEAD. */
6873 last = prev_head;
6874
6875 /* Initialize INSN_QUEUE, LIST and NEW_NEEDS. */
3b413743 6876 new_needs = (NEXT_INSN (prev_head) == BLOCK_HEAD (b)
8c660648 6877 ? NEED_HEAD : NEED_NOTHING);
3b413743 6878 if (PREV_INSN (next_tail) == BLOCK_END (b))
8c660648
JL
6879 new_needs |= NEED_TAIL;
6880
6881 /* loop until all the insns in BB are scheduled. */
6882 while (sched_target_n_insns < target_n_insns)
6883 {
6884 int b1;
6885
6886 clock_var++;
6887
6888 /* Add to the ready list all pending insns that can be issued now.
6889 If there are no ready insns, increment clock until one
6890 is ready and add all pending insns at that point to the ready
6891 list. */
6892 n_ready = queue_to_ready (ready, n_ready);
6893
6894 if (n_ready == 0)
6895 abort ();
6896
6897 if (sched_verbose >= 2)
6898 {
6899 fprintf (dump, ";;\t\tReady list after queue_to_ready: ");
6900 debug_ready_list (ready, n_ready);
6901 }
6902
6903      /* Sort the ready list based on priority.  */
6904      SCHED_SORT (ready, n_ready);
6905
6906 /* Allow the target to reorder the list, typically for
6907 better instruction bundling. */
e4da5f6d 6908#ifdef MD_SCHED_REORDER
6909 MD_SCHED_REORDER (dump, sched_verbose, ready, n_ready, clock_var,
6910 can_issue_more);
6911#else
6912 can_issue_more = issue_rate;
e4da5f6d 6913#endif
6914
6915 if (sched_verbose)
6916 {
6917	  fprintf (dump, "\n;;\tReady list (t =%3d): ", clock_var);
6918 debug_ready_list (ready, n_ready);
6919 }
6920
6921 /* Issue insns from ready list. */
6922 while (n_ready != 0 && can_issue_more)
8c660648 6923 {
6924 /* Select and remove the insn from the ready list. */
6925 rtx insn = ready[--n_ready];
6926 int cost = actual_hazard (insn_unit (insn), insn, clock_var, 0);
6927
6928	  if (cost >= 1)
6929 {
6930 queue_insn (insn, cost);
6931	      continue;
6932	    }
6933
6934 /* An interblock motion? */
6935 if (INSN_BB (insn) != target_bb)
6936 {
6937 rtx temp;
6938
6939 if (IS_SPECULATIVE_INSN (insn))
6940 {
6941 if (!check_live (insn, INSN_BB (insn)))
6942 continue;
6943 update_live (insn, INSN_BB (insn));
8c660648 6944
197043f5
RH
6945 /* For speculative load, mark insns fed by it. */
6946 if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn))
6947 set_spec_fed (insn);
8c660648 6948
197043f5
RH
6949 nr_spec++;
6950 }
6951 nr_inter++;
6952
6953 temp = insn;
6954 while (SCHED_GROUP_P (temp))
6955 temp = PREV_INSN (temp);
6956
6957 /* Update source block boundaries. */
6958 b1 = INSN_BLOCK (temp);
6959 if (temp == BLOCK_HEAD (b1)
6960 && insn == BLOCK_END (b1))
6961 {
6962 /* We moved all the insns in the basic block.
6963 Emit a note after the last insn and update the
6964 begin/end boundaries to point to the note. */
6965 emit_note_after (NOTE_INSN_DELETED, insn);
6966 BLOCK_END (b1) = NEXT_INSN (insn);
6967 BLOCK_HEAD (b1) = NEXT_INSN (insn);
6968		    }
6969		  else if (insn == BLOCK_END (b1))
8c660648 6970 {
6971 /* We took insns from the end of the basic block,
6972 so update the end of block boundary so that it
6973 points to the first insn we did not move. */
6974 BLOCK_END (b1) = PREV_INSN (temp);
6975		    }
6976 else if (temp == BLOCK_HEAD (b1))
6977 {
6978 /* We took insns from the start of the basic block,
6979 so update the start of block boundary so that
6980 it points to the first insn we did not move. */
6981 BLOCK_HEAD (b1) = NEXT_INSN (insn);
6982 }
6983 }
6984 else
6985 {
6986 /* In block motion. */
6987 sched_target_n_insns++;
6988 }
6989
6990 last_scheduled_insn = insn;
6991 last = move_insn (insn, last);
6992 sched_n_insns++;
6993
6994#ifdef MD_SCHED_VARIABLE_ISSUE
6995	  MD_SCHED_VARIABLE_ISSUE (dump, sched_verbose, insn,
6996				   can_issue_more);
6997#else
6998	  can_issue_more--;
6999#endif
7000
7001	  n_ready = schedule_insn (insn, ready, n_ready, clock_var);
7002
7003 /* Close this block after scheduling its jump. */
7004 if (GET_CODE (last_scheduled_insn) == JUMP_INSN)
7005 break;
7006 }
7007
7008      /* Debug info.  */
7009      if (sched_verbose)
7010	visualize_scheduled_insns (b, clock_var);
7011 }
7012
7013 /* debug info */
7014 if (sched_verbose)
7015 {
7016 fprintf (dump, ";;\tReady list (final): ");
7017 debug_ready_list (ready, n_ready);
7018 print_block_visualization (b, "");
7019 }
7020
7021 /* Sanity check -- queue must be empty now. Meaningless if region has
7022     multiple bbs.  */
7023  if (current_nr_blocks > 1)
7024 if (!flag_schedule_interblock && q_size != 0)
7025 abort ();
7026
7027 /* update head/tail boundaries. */
7028 head = NEXT_INSN (prev_head);
7029 tail = last;
7030
7031 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
7032 previously found among the insns. Insert them at the beginning
7033 of the insns. */
7034 if (note_list != 0)
7035 {
7036 rtx note_head = note_list;
7037
7038 while (PREV_INSN (note_head))
7039 {
7040 note_head = PREV_INSN (note_head);
7041 }
7042
7043 PREV_INSN (note_head) = PREV_INSN (head);
7044 NEXT_INSN (PREV_INSN (head)) = note_head;
7045 PREV_INSN (head) = note_list;
7046 NEXT_INSN (note_list) = head;
7047 head = note_head;
7048 }
7049
7050 /* update target block boundaries. */
7051 if (new_needs & NEED_HEAD)
7052    BLOCK_HEAD (b) = head;
7053
7054 if (new_needs & NEED_TAIL)
7055    BLOCK_END (b) = tail;
7056
7057 /* debugging */
7058 if (sched_verbose)
7059 {
7060 fprintf (dump, ";; total time = %d\n;; new basic block head = %d\n",
7061	       clock_var, INSN_UID (BLOCK_HEAD (b)));
7062      fprintf (dump, ";; new basic block end = %d\n\n",
7063	       INSN_UID (BLOCK_END (b)));
7064 }
7065
7066 return (sched_n_insns);
7067} /* schedule_block () */
7068\f
7069
7070/* Print the bit-set of registers, S.  Callable from the debugger.  */
7071
7072extern void
7073debug_reg_vector (s)
7074 regset s;
7075{
7076 int regno;
7077
7078 EXECUTE_IF_SET_IN_REG_SET (s, 0, regno,
7079 {
7080 fprintf (dump, " %d", regno);
7081 });
7082
7083 fprintf (dump, "\n");
7084}
7085
7086/* Use the backward dependences from LOG_LINKS to build
7087 forward dependences in INSN_DEPEND. */
7088
7089static void
7090compute_block_forward_dependences (bb)
7091 int bb;
7092{
7093 rtx insn, link;
7094 rtx tail, head;
7095 rtx next_tail;
7096 enum reg_note dep_type;
7097
7098 get_block_head_tail (bb, &head, &tail);
7099 next_tail = NEXT_INSN (tail);
7100 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7101 {
7102 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
7103 continue;
7104
7105 insn = group_leader (insn);
7106
7107 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
7108 {
7109 rtx x = group_leader (XEXP (link, 0));
7110 rtx new_link;
7111
7112 if (x != XEXP (link, 0))
7113 continue;
7114
7115	  /* Ignore dependences upon deleted insns.  */
7116 if (GET_CODE (x) == NOTE || INSN_DELETED_P (x))
7117 continue;
7118 if (find_insn_list (insn, INSN_DEPEND (x)))
7119 continue;
7120
7121	  new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));
7122
7123 dep_type = REG_NOTE_KIND (link);
7124 PUT_REG_NOTE_KIND (new_link, dep_type);
7125
7126 INSN_DEPEND (x) = new_link;
7127 INSN_DEP_COUNT (insn) += 1;
7128 }
7129 }
7130}
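/* For example, if insn 14 has insn 10 in its LOG_LINKS (14 must follow
   10), the loop above adds insn 14 to INSN_DEPEND (insn 10) and bumps
   INSN_DEP_COUNT (insn 14); schedule_block () treats an insn as ready
   only once that count has dropped to zero.  (Insn UIDs are
   illustrative.)  */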
7131
7132/* Initialize variables for region data dependence analysis.
7133 n_bbs is the number of region blocks */
7134
7135__inline static void
7136init_rgn_data_dependences (n_bbs)
7137 int n_bbs;
7138{
7139 int bb;
7140
7141 /* variables for which one copy exists for each block */
7142 bzero ((char *) bb_pending_read_insns, n_bbs * sizeof (rtx));
7143 bzero ((char *) bb_pending_read_mems, n_bbs * sizeof (rtx));
7144 bzero ((char *) bb_pending_write_insns, n_bbs * sizeof (rtx));
7145 bzero ((char *) bb_pending_write_mems, n_bbs * sizeof (rtx));
7147 bzero ((char *) bb_pending_lists_length, n_bbs * sizeof (int));
7147 bzero ((char *) bb_last_pending_memory_flush, n_bbs * sizeof (rtx));
7148 bzero ((char *) bb_last_function_call, n_bbs * sizeof (rtx));
7149 bzero ((char *) bb_sched_before_next_call, n_bbs * sizeof (rtx));
7150
7151 /* Create an insn here so that we can hang dependencies off of it later. */
7152 for (bb = 0; bb < n_bbs; bb++)
7153 {
7154 bb_sched_before_next_call[bb] =
38a448ca
RH
7155 gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
7156 NULL_RTX, 0, NULL_RTX, NULL_RTX);
8c660648
JL
7157 LOG_LINKS (bb_sched_before_next_call[bb]) = 0;
7158 }
7159}
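/* Editorial note with a standalone, hypothetical example: the bzero
   calls above are the traditional BSD idiom; memset spells the same
   zero-initialization.  Like this file's bzero of rtx arrays, treating
   all-bits-zero as a null pointer is assumed here.  */
#include <stdio.h>
#include <string.h>

int
main (void)
{
  void *pending[8];

  memset (pending, 0, sizeof pending);  /* == bzero ((char *) pending, ...) */
  printf ("%s\n", pending[3] == NULL ? "cleared" : "not cleared");
  return 0;
}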
7160
7161/* Add dependences so that branches are scheduled to run last in their block */
7162
7163static void
7164add_branch_dependences (head, tail)
7165 rtx head, tail;
7166{
7167
7168 rtx insn, last;
7169
7170 /* For all branches, calls, uses, and cc0 setters, force them to remain
7171 in order at the end of the block by adding dependencies and giving
7172 the last a high priority. There may be notes present, and prev_head
7173 may also be a note.
7174
7175 Branches must obviously remain at the end. Calls should remain at the
7176 end since moving them results in worse register allocation. Uses remain
7177 at the end to ensure proper register allocation. cc0 setters remain
7178 at the end because they can't be moved away from their cc0 user. */
7179 insn = tail;
7180 last = 0;
7181 while (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
7182 || (GET_CODE (insn) == INSN
7183 && (GET_CODE (PATTERN (insn)) == USE
7184#ifdef HAVE_cc0
7185 || sets_cc0_p (PATTERN (insn))
7186#endif
7187 ))
7188 || GET_CODE (insn) == NOTE)
7189 {
7190 if (GET_CODE (insn) != NOTE)
7191 {
7192 if (last != 0
7193 && !find_insn_list (insn, LOG_LINKS (last)))
7194 {
7195 add_dependence (last, insn, REG_DEP_ANTI);
7196 INSN_REF_COUNT (insn)++;
7197 }
7198
7199 CANT_MOVE (insn) = 1;
7200
7201 last = insn;
326ee7a3
JL
7202 /* Skip over insns that are part of a group.
7203 Make each insn explicitly depend on the previous insn.
7204 This ensures that only the group header will ever enter
7205 the ready queue (and, when scheduled, will automatically
7206 schedule the SCHED_GROUP_P block). */
8c660648 7207 while (SCHED_GROUP_P (insn))
326ee7a3
JL
7208 {
7209 rtx temp = prev_nonnote_insn (insn);
7210 add_dependence (insn, temp, REG_DEP_ANTI);
7211 insn = temp;
7212 }
8c660648
JL
7213 }
7214
7215 /* Don't overrun the bounds of the basic block. */
7216 if (insn == head)
7217 break;
7218
7219 insn = PREV_INSN (insn);
7220 }
7221
7222 /* make sure these insns are scheduled last in their block */
7223 insn = last;
7224 if (insn != 0)
7225 while (insn != head)
7226 {
7227 insn = prev_nonnote_insn (insn);
7228
7229 if (INSN_REF_COUNT (insn) != 0)
7230 continue;
7231
7232 if (!find_insn_list (last, LOG_LINKS (insn)))
7233 add_dependence (last, insn, REG_DEP_ANTI);
7234 INSN_REF_COUNT (insn) = 1;
7235
7236 /* Skip over insns that are part of a group. */
7237 while (SCHED_GROUP_P (insn))
7238 insn = prev_nonnote_insn (insn);
7239 }
7240}
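/* Editorial sketch -- standalone and hypothetical, not part of this
   file: the pinning idea above.  Walking back from the block tail over
   the insns that must stay last, each newly found insn becomes the
   predecessor that the previously found one must follow, so the whole
   chain can only issue in its original order.  (The real pass also
   marks each chain member CANT_MOVE and bumps INSN_REF_COUNT.)  */
#include <stdio.h>

#define N 5

int
main (void)
{
  int depends_on[N];            /* -1: unconstrained; else must follow k */
  int insn, last = -1;

  for (insn = 0; insn < N; insn++)
    depends_on[insn] = -1;

  /* Suppose insns 3 and 4 are a USE and a branch at the block tail.  */
  for (insn = N - 1; insn >= 3; insn--)
    {
      if (last != -1)
        depends_on[last] = insn;   /* anti-dependence: LAST follows INSN */
      last = insn;
    }

  for (insn = 0; insn < N; insn++)
    if (depends_on[insn] != -1)
      printf ("insn %d must issue after insn %d\n", insn, depends_on[insn]);
  return 0;
}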
7241
7242/* Compute backward dependences inside BB. In a multi-block region:
7243 (1) a bb is analyzed after its predecessors, and (2) the lists in
7244 effect at the end of bb (after analyzing for bb) are inherited by
7245 bb's successors.
7246
7247 Specifically for reg-reg data dependences, the block insns are
7248 scanned by sched_analyze () top-to-bottom. Two lists are
7249 maintained by sched_analyze (): reg_last_sets[] for register DEFs,
7250 and reg_last_uses[] for register USEs.
7251
7252 When analysis is completed for bb, we update for its successors:
7253 ; - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
7254 ; - USES[succ] = Union (USES [succ], USES [bb])
7255
7256 The mechanism for computing mem-mem data dependence is very
7257 similar, and the result is interblock dependences in the region. */
7258
7259static void
7260compute_block_backward_dependences (bb)
7261 int bb;
7262{
7263 int b;
7264 rtx x;
7265 rtx head, tail;
7266 int max_reg = max_reg_num ();
7267
7268 b = BB_TO_BLOCK (bb);
7269
7270 if (current_nr_blocks == 1)
7271 {
7272 reg_last_uses = (rtx *) alloca (max_reg * sizeof (rtx));
7273 reg_last_sets = (rtx *) alloca (max_reg * sizeof (rtx));
28c95eff 7274 reg_last_clobbers = (rtx *) alloca (max_reg * sizeof (rtx));
8c660648
JL
7275
7276 bzero ((char *) reg_last_uses, max_reg * sizeof (rtx));
7277 bzero ((char *) reg_last_sets, max_reg * sizeof (rtx));
28c95eff 7278 bzero ((char *) reg_last_clobbers, max_reg * sizeof (rtx));
8c660648
JL
7279
7280 pending_read_insns = 0;
7281 pending_read_mems = 0;
7282 pending_write_insns = 0;
7283 pending_write_mems = 0;
7284 pending_lists_length = 0;
7285 last_function_call = 0;
7286 last_pending_memory_flush = 0;
7287 sched_before_next_call
38a448ca
RH
7288 = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
7289 NULL_RTX, 0, NULL_RTX, NULL_RTX);
8c660648
JL
7290 LOG_LINKS (sched_before_next_call) = 0;
7291 }
7292 else
7293 {
7294 reg_last_uses = bb_reg_last_uses[bb];
7295 reg_last_sets = bb_reg_last_sets[bb];
28c95eff 7296 reg_last_clobbers = bb_reg_last_clobbers[bb];
8c660648
JL
7297
7298 pending_read_insns = bb_pending_read_insns[bb];
7299 pending_read_mems = bb_pending_read_mems[bb];
7300 pending_write_insns = bb_pending_write_insns[bb];
7301 pending_write_mems = bb_pending_write_mems[bb];
7302 pending_lists_length = bb_pending_lists_length[bb];
7303 last_function_call = bb_last_function_call[bb];
7304 last_pending_memory_flush = bb_last_pending_memory_flush[bb];
7305
7306 sched_before_next_call = bb_sched_before_next_call[bb];
7307 }
7308
7309 /* do the analysis for this block */
7310 get_block_head_tail (bb, &head, &tail);
7311 sched_analyze (head, tail);
7312 add_branch_dependences (head, tail);
7313
7314 if (current_nr_blocks > 1)
7315 {
7316 int e, first_edge;
7317 int b_succ, bb_succ;
7318 int reg;
7319 rtx link_insn, link_mem;
7320 rtx u;
7321
7322 /* these lists should point to the right place, for correct freeing later. */
7323 bb_pending_read_insns[bb] = pending_read_insns;
7324 bb_pending_read_mems[bb] = pending_read_mems;
7325 bb_pending_write_insns[bb] = pending_write_insns;
7326 bb_pending_write_mems[bb] = pending_write_mems;
7327
7328 /* bb's structures are inherited by its successors */
7329 first_edge = e = OUT_EDGES (b);
7330 if (e > 0)
7331 do
7332 {
7333 b_succ = TO_BLOCK (e);
7334 bb_succ = BLOCK_TO_BB (b_succ);
7335
7336 /* only bbs "below" bb, in the same region, are interesting */
7337 if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
7338 || bb_succ <= bb)
7339 {
7340 e = NEXT_OUT (e);
7341 continue;
7342 }
7343
7344 for (reg = 0; reg < max_reg; reg++)
7345 {
7346
7347 /* reg-last-uses lists are inherited by bb_succ */
7348 for (u = reg_last_uses[reg]; u; u = XEXP (u, 1))
7349 {
7350 if (find_insn_list (XEXP (u, 0), (bb_reg_last_uses[bb_succ])[reg]))
7351 continue;
7352
7353 (bb_reg_last_uses[bb_succ])[reg]
ebb7b10b
RH
7354 = alloc_INSN_LIST (XEXP (u, 0),
7355 (bb_reg_last_uses[bb_succ])[reg]);
8c660648
JL
7356 }
7357
7358 /* reg-last-defs lists are inherited by bb_succ */
7359 for (u = reg_last_sets[reg]; u; u = XEXP (u, 1))
7360 {
7361 if (find_insn_list (XEXP (u, 0), (bb_reg_last_sets[bb_succ])[reg]))
7362 continue;
7363
7364 (bb_reg_last_sets[bb_succ])[reg]
ebb7b10b
RH
7365 = alloc_INSN_LIST (XEXP (u, 0),
7366 (bb_reg_last_sets[bb_succ])[reg]);
8c660648 7367 }
28c95eff
RH
7368
7369 for (u = reg_last_clobbers[reg]; u; u = XEXP (u, 1))
7370 {
7371 if (find_insn_list (XEXP (u, 0), (bb_reg_last_clobbers[bb_succ])[reg]))
7372 continue;
7373
7374 (bb_reg_last_clobbers[bb_succ])[reg]
7375 = alloc_INSN_LIST (XEXP (u, 0),
7376 (bb_reg_last_clobbers[bb_succ])[reg]);
7377 }
8c660648
JL
7378 }
7379
7380 /* mem read/write lists are inherited by bb_succ */
7381 link_insn = pending_read_insns;
7382 link_mem = pending_read_mems;
7383 while (link_insn)
7384 {
7385 if (!(find_insn_mem_list (XEXP (link_insn, 0), XEXP (link_mem, 0),
7386 bb_pending_read_insns[bb_succ],
7387 bb_pending_read_mems[bb_succ])))
7388 add_insn_mem_dependence (&bb_pending_read_insns[bb_succ],
7389 &bb_pending_read_mems[bb_succ],
7390 XEXP (link_insn, 0), XEXP (link_mem, 0));
7391 link_insn = XEXP (link_insn, 1);
7392 link_mem = XEXP (link_mem, 1);
7393 }
7394
7395 link_insn = pending_write_insns;
7396 link_mem = pending_write_mems;
7397 while (link_insn)
7398 {
7399 if (!(find_insn_mem_list (XEXP (link_insn, 0), XEXP (link_mem, 0),
7400 bb_pending_write_insns[bb_succ],
7401 bb_pending_write_mems[bb_succ])))
7402 add_insn_mem_dependence (&bb_pending_write_insns[bb_succ],
7403 &bb_pending_write_mems[bb_succ],
7404 XEXP (link_insn, 0), XEXP (link_mem, 0));
7405
7406 link_insn = XEXP (link_insn, 1);
7407 link_mem = XEXP (link_mem, 1);
7408 }
7409
7410 /* last_function_call is inherited by bb_succ */
7411 for (u = last_function_call; u; u = XEXP (u, 1))
7412 {
7413 if (find_insn_list (XEXP (u, 0), bb_last_function_call[bb_succ]))
7414 continue;
7415
7416 bb_last_function_call[bb_succ]
ebb7b10b
RH
7417 = alloc_INSN_LIST (XEXP (u, 0),
7418 bb_last_function_call[bb_succ]);
8c660648
JL
7419 }
7420
7421 /* last_pending_memory_flush is inherited by bb_succ */
7422 for (u = last_pending_memory_flush; u; u = XEXP (u, 1))
7423 {
7424 if (find_insn_list (XEXP (u, 0), bb_last_pending_memory_flush[bb_succ]))
7425 continue;
7426
7427 bb_last_pending_memory_flush[bb_succ]
ebb7b10b
RH
7428 = alloc_INSN_LIST (XEXP (u, 0),
7429 bb_last_pending_memory_flush[bb_succ]);
8c660648
JL
7430 }
7431
7432 /* sched_before_next_call is inherited by bb_succ */
7433 x = LOG_LINKS (sched_before_next_call);
7434 for (; x; x = XEXP (x, 1))
7435 add_dependence (bb_sched_before_next_call[bb_succ],
7436 XEXP (x, 0), REG_DEP_ANTI);
7437
7438 e = NEXT_OUT (e);
7439 }
7440 while (e != first_edge);
7441 }
ebb7b10b 7442
7eea6443
JL
7443 /* Free up the INSN_LISTs
7444
7445 Note this loop is executed max_reg * nr_regions times. Its first
7446 implementation accounted for over 90% of the calls to free_list.
7447 The list was empty for the vast majority of those calls. On the PA,
7448 not calling free_list in those cases improves -O2 compile times by
7449 3-5% on average. */
ebb7b10b
RH
7450 for (b = 0; b < max_reg; ++b)
7451 {
28c95eff
RH
7452 if (reg_last_clobbers[b])
7453 free_list (&reg_last_clobbers[b], &unused_insn_list);
7eea6443
JL
7454 if (reg_last_sets[b])
7455 free_list (&reg_last_sets[b], &unused_insn_list);
7456 if (reg_last_uses[b])
7457 free_list (&reg_last_uses[b], &unused_insn_list);
ebb7b10b
RH
7458 }
7459
7460 /* Assert that we won't need bb_reg_last_* for this block anymore. */
7461 if (current_nr_blocks > 1)
7462 {
7463 bb_reg_last_uses[bb] = (rtx *) NULL_RTX;
7464 bb_reg_last_sets[bb] = (rtx *) NULL_RTX;
28c95eff 7465 bb_reg_last_clobbers[bb] = (rtx *) NULL_RTX;
ebb7b10b 7466 }
8c660648
JL
7467}
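/* Editorial sketch -- standalone and hypothetical, not part of this
   file: the union step from the comment before
   compute_block_backward_dependences, DEFS[succ] = Union (DEFS[succ],
   DEFS[bb]), done the way the loops above do it: probe the successor's
   list first, then cons a new cell, so no insn is entered twice.  */
#include <stdio.h>
#include <stdlib.h>

struct cell { int insn; struct cell *next; };

static int
member (const struct cell *list, int insn)
{
  for (; list; list = list->next)
    if (list->insn == insn)
      return 1;
  return 0;
}

static void
union_into (struct cell **dst, const struct cell *src)
{
  for (; src; src = src->next)
    if (! member (*dst, src->insn))
      {
        struct cell *c = (struct cell *) malloc (sizeof *c);

        c->insn = src->insn;
        c->next = *dst;                 /* cons, like alloc_INSN_LIST */
        *dst = c;
      }
}

int
main (void)
{
  struct cell b2 = { 2, NULL }, b1 = { 1, &b2 };   /* DEFS[bb]   = {1, 2} */
  struct cell s2 = { 2, NULL };                    /* DEFS[succ] = {2}    */
  struct cell *succ = &s2;
  const struct cell *c;

  union_into (&succ, &b1);
  for (c = succ; c; c = c->next)
    printf (" %d", c->insn);            /* prints: 1 2 */
  printf ("\n");
  return 0;
}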
7468
7469/* Print dependences for debugging, callable from debugger */
7470
7471void
7472debug_dependencies ()
7473{
7474 int bb;
7475
7476 fprintf (dump, ";; --------------- forward dependences: ------------ \n");
7477 for (bb = 0; bb < current_nr_blocks; bb++)
7478 {
7479 if (1)
7480 {
7481 rtx head, tail;
7482 rtx next_tail;
7483 rtx insn;
7484
7485 get_block_head_tail (bb, &head, &tail);
7486 next_tail = NEXT_INSN (tail);
7487 fprintf (dump, "\n;; --- Region Dependences --- b %d bb %d \n",
7488 BB_TO_BLOCK (bb), bb);
7489
7490 fprintf (dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
7491 "insn", "code", "bb", "dep", "prio", "cost", "blockage", "units");
7492 fprintf (dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
7493 "----", "----", "--", "---", "----", "----", "--------", "-----");
7494 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7495 {
7496 rtx link;
7497 int unit, range;
7498
7499 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
7500 {
7501 int n;
7502 fprintf (dump, ";; %6d ", INSN_UID (insn));
7503 if (GET_CODE (insn) == NOTE)
ebc25a17
MM
7504 {
7505 n = NOTE_LINE_NUMBER (insn);
7506 if (n < 0)
7507 fprintf (dump, "%s\n", GET_NOTE_INSN_NAME (n));
7508 else
7509 fprintf (dump, "line %d, file %s\n", n,
7510 NOTE_SOURCE_FILE (insn));
7511 }
7512 else
4f64eaca 7513 fprintf (dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
8c660648
JL
7514 continue;
7515 }
7516
7517 unit = insn_unit (insn);
7518 range = (unit < 0
7519 || function_units[unit].blockage_range_function == 0) ? 0 :
7520 function_units[unit].blockage_range_function (insn);
7521 fprintf (dump,
7522 ";; %s%5d%6d%6d%6d%6d%6d %3d -%3d ",
7523 (SCHED_GROUP_P (insn) ? "+" : " "),
7524 INSN_UID (insn),
7525 INSN_CODE (insn),
7526 INSN_BB (insn),
7527 INSN_DEP_COUNT (insn),
7528 INSN_PRIORITY (insn),
7529 insn_cost (insn, 0, 0),
7530 (int) MIN_BLOCKAGE_COST (range),
7531 (int) MAX_BLOCKAGE_COST (range));
7532 insn_print_units (insn);
7533 fprintf (dump, "\t: ");
7534 for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
7535 fprintf (dump, "%d ", INSN_UID (XEXP (link, 0)));
7536 fprintf (dump, "\n");
7537 }
7538 }
7539 }
7540 fprintf (dump, "\n");
7541}
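/* Editorial note (illustrative only; the numbers below are invented):
   with the format strings above, one dependence line of the dump comes
   out roughly as

       ;;      42   117     0     2     5     1     0 -  0  alu  : 45 47

   where a '+' in the first column would mark a SCHED_GROUP_P insn, the
   numeric columns are UID, code, bb, dep count, priority, cost, and the
   blockage range, and the trailing UIDs are the insns that depend on
   this one.  */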
7542
7543/* Set_priorities: compute priority of each insn in the block */
7544
7545static int
7546set_priorities (bb)
7547 int bb;
7548{
7549 rtx insn;
7550 int n_insn;
7551
7552 rtx tail;
7553 rtx prev_head;
7554 rtx head;
7555
7556 get_block_head_tail (bb, &head, &tail);
7557 prev_head = PREV_INSN (head);
7558
7559 if (head == tail
7560 && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
7561 return 0;
7562
7563 n_insn = 0;
7564 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
7565 {
7566
7567 if (GET_CODE (insn) == NOTE)
7568 continue;
7569
7570 if (!(SCHED_GROUP_P (insn)))
7571 n_insn++;
7572 (void) priority (insn);
7573 }
7574
7575 return n_insn;
7576}
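/* Editorial sketch -- standalone and hypothetical, not part of this
   file: the priority () computed above is, in essence, a longest-path
   walk over the forward dependence graph; an insn's priority is its own
   cost plus the largest priority among the insns that depend on it,
   memoized so each insn is visited once.  */
#include <stdio.h>

#define N 4

static int cost[N] = { 1, 1, 2, 1 };
static int succ[N][N];          /* succ[i][j]: insn j depends on insn i */
static int prio[N];             /* 0 means "not computed yet" */

static int
priority (int i)
{
  int j, best = 0;

  if (prio[i])
    return prio[i];
  for (j = 0; j < N; j++)
    if (succ[i][j] && priority (j) > best)
      best = priority (j);
  return prio[i] = cost[i] + best;
}

int
main (void)
{
  int i;

  succ[0][2] = 1;               /* insn 2 depends on insn 0 */
  succ[2][3] = 1;               /* insn 3 depends on insn 2 */
  for (i = 0; i < N; i++)
    printf ("insn %d: priority %d\n", i, priority (i));
  return 0;
}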
7577
7578/* Make each element of VECTOR point at an rtx-vector,
7579 taking the space for all those rtx-vectors from SPACE.
7580 SPACE is of type (rtx *), but it is really as long as NELTS rtx-vectors.
7581 BYTES_PER_ELT is the number of bytes in one rtx-vector.
7582 (this is the same as init_regset_vector () in flow.c) */
7583
7584static void
7585init_rtx_vector (vector, space, nelts, bytes_per_elt)
7586 rtx **vector;
7587 rtx *space;
7588 int nelts;
7589 int bytes_per_elt;
7590{
7591 register int i;
7592 register rtx *p = space;
7593
7594 for (i = 0; i < nelts; i++)
7595 {
7596 vector[i] = p;
7597 p += bytes_per_elt / sizeof (*p);
7598 }
7599}
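/* Editorial sketch -- standalone, not part of this file: the carving
   pattern init_rtx_vector implements.  One flat allocation is sliced
   into row pointers, so NELTS rows cost a single allocation.  */
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  int nelts = 3, row_len = 4, i;
  int *space = (int *) calloc (nelts * row_len, sizeof (int));
  int **vector = (int **) malloc (nelts * sizeof (int *));
  int *p = space;

  for (i = 0; i < nelts; i++)
    {
      vector[i] = p;            /* row i points into the flat block */
      p += row_len;
    }

  vector[2][1] = 7;
  printf ("%d\n", space[2 * row_len + 1]);   /* prints: 7 */
  free (vector);
  free (space);
  return 0;
}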
7600
7601/* Schedule a region. A region is either an inner loop, a loop-free
7602 subroutine, or a single basic block. Each bb in the region is
7603 scheduled after its flow predecessors. */
7604
7605static void
7606schedule_region (rgn)
7607 int rgn;
7608{
7609 int bb;
7610 int rgn_n_insns = 0;
7611 int sched_rgn_n_insns = 0;
7612
7613 /* set variables for the current region */
7614 current_nr_blocks = RGN_NR_BLOCKS (rgn);
7615 current_blocks = RGN_BLOCKS (rgn);
7616
7617 reg_pending_sets = ALLOCA_REG_SET ();
28c95eff 7618 reg_pending_clobbers = ALLOCA_REG_SET ();
8c660648
JL
7619 reg_pending_sets_all = 0;
7620
7621 /* initializations for region data dependence analysis */
7622 if (current_nr_blocks > 1)
7623 {
7624 rtx *space;
7625 int maxreg = max_reg_num ();
7626
7627 bb_reg_last_uses = (rtx **) alloca (current_nr_blocks * sizeof (rtx *));
7628 space = (rtx *) alloca (current_nr_blocks * maxreg * sizeof (rtx));
7629 bzero ((char *) space, current_nr_blocks * maxreg * sizeof (rtx));
28c95eff
RH
7630 init_rtx_vector (bb_reg_last_uses, space, current_nr_blocks,
7631 maxreg * sizeof (rtx *));
8c660648
JL
7632
7633 bb_reg_last_sets = (rtx **) alloca (current_nr_blocks * sizeof (rtx *));
7634 space = (rtx *) alloca (current_nr_blocks * maxreg * sizeof (rtx));
7635 bzero ((char *) space, current_nr_blocks * maxreg * sizeof (rtx));
28c95eff
RH
7636 init_rtx_vector (bb_reg_last_sets, space, current_nr_blocks,
7637 maxreg * sizeof (rtx *));
7638
7639 bb_reg_last_clobbers =
7640 (rtx **) alloca (current_nr_blocks * sizeof (rtx *));
7641 space = (rtx *) alloca (current_nr_blocks * maxreg * sizeof (rtx));
7642 bzero ((char *) space, current_nr_blocks * maxreg * sizeof (rtx));
7643 init_rtx_vector (bb_reg_last_clobbers, space, current_nr_blocks,
7644 maxreg * sizeof (rtx *));
8c660648
JL
7645
7646 bb_pending_read_insns = (rtx *) alloca (current_nr_blocks * sizeof (rtx));
7647 bb_pending_read_mems = (rtx *) alloca (current_nr_blocks * sizeof (rtx));
28c95eff
RH
7648 bb_pending_write_insns =
7649 (rtx *) alloca (current_nr_blocks * sizeof (rtx));
8c660648 7650 bb_pending_write_mems = (rtx *) alloca (current_nr_blocks * sizeof (rtx));
28c95eff
RH
7651 bb_pending_lists_length =
7652 (int *) alloca (current_nr_blocks * sizeof (int));
7653 bb_last_pending_memory_flush =
7654 (rtx *) alloca (current_nr_blocks * sizeof (rtx));
8c660648 7655 bb_last_function_call = (rtx *) alloca (current_nr_blocks * sizeof (rtx));
28c95eff
RH
7656 bb_sched_before_next_call =
7657 (rtx *) alloca (current_nr_blocks * sizeof (rtx));
8c660648
JL
7658
7659 init_rgn_data_dependences (current_nr_blocks);
7660 }
7661
7662 /* compute LOG_LINKS */
7663 for (bb = 0; bb < current_nr_blocks; bb++)
7664 compute_block_backward_dependences (bb);
7665
7666 /* compute INSN_DEPEND */
7667 for (bb = current_nr_blocks - 1; bb >= 0; bb--)
7668 compute_block_forward_dependences (bb);
7669
7670 /* Delete line notes, compute live-regs at block end, and set priorities. */
7671 dead_notes = 0;
7672 for (bb = 0; bb < current_nr_blocks; bb++)
7673 {
7674 if (reload_completed == 0)
7675 find_pre_sched_live (bb);
7676
7677 if (write_symbols != NO_DEBUG)
7678 {
7679 save_line_notes (bb);
7680 rm_line_notes (bb);
7681 }
7682
7683 rgn_n_insns += set_priorities (bb);
7684 }
7685
7686 /* compute interblock info: probabilities, split-edges, dominators, etc. */
7687 if (current_nr_blocks > 1)
7688 {
7689 int i;
7690
7691 prob = (float *) alloca ((current_nr_blocks) * sizeof (float));
7692
7693 bbset_size = current_nr_blocks / HOST_BITS_PER_WIDE_INT + 1;
7694 dom = (bbset *) alloca (current_nr_blocks * sizeof (bbset));
7695 for (i = 0; i < current_nr_blocks; i++)
7696 {
7697 dom[i] = (bbset) alloca (bbset_size * sizeof (HOST_WIDE_INT));
7698 bzero ((char *) dom[i], bbset_size * sizeof (HOST_WIDE_INT));
7699 }
7700
7701 /* edge to bit */
7702 rgn_nr_edges = 0;
7703 edge_to_bit = (int *) alloca (nr_edges * sizeof (int));
7704 for (i = 1; i < nr_edges; i++)
7705 if (CONTAINING_RGN (FROM_BLOCK (i)) == rgn)
7706 EDGE_TO_BIT (i) = rgn_nr_edges++;
7707 rgn_edges = (int *) alloca (rgn_nr_edges * sizeof (int));
7708
7709 rgn_nr_edges = 0;
7710 for (i = 1; i < nr_edges; i++)
7711 if (CONTAINING_RGN (FROM_BLOCK (i)) == (rgn))
7712 rgn_edges[rgn_nr_edges++] = i;
7713
7714 /* split edges */
7715 edgeset_size = rgn_nr_edges / HOST_BITS_PER_WIDE_INT + 1;
7716 pot_split = (edgeset *) alloca (current_nr_blocks * sizeof (edgeset));
7717 ancestor_edges = (edgeset *) alloca (current_nr_blocks * sizeof (edgeset));
7718 for (i = 0; i < current_nr_blocks; i++)
7719 {
7720 pot_split[i] =
7721 (edgeset) alloca (edgeset_size * sizeof (HOST_WIDE_INT));
7722 bzero ((char *) pot_split[i],
7723 edgeset_size * sizeof (HOST_WIDE_INT));
7724 ancestor_edges[i] =
7725 (edgeset) alloca (edgeset_size * sizeof (HOST_WIDE_INT));
7726 bzero ((char *) ancestor_edges[i],
7727 edgeset_size * sizeof (HOST_WIDE_INT));
7728 }
7729
7730 /* compute probabilities, dominators, split_edges */
7731 for (bb = 0; bb < current_nr_blocks; bb++)
7732 compute_dom_prob_ps (bb);
7733 }
7734
7735 /* now we can schedule all blocks */
7736 for (bb = 0; bb < current_nr_blocks; bb++)
7737 {
5835e573 7738 sched_rgn_n_insns += schedule_block (bb, rgn_n_insns);
8c660648
JL
7739
7740#ifdef USE_C_ALLOCA
7741 alloca (0);
7742#endif
7743 }
7744
cc132865
JL
7745 /* sanity check: verify that all region insns were scheduled */
7746 if (sched_rgn_n_insns != rgn_n_insns)
7747 abort ();
8c660648
JL
7748
7749 /* update register life and usage information */
7750 if (reload_completed == 0)
7751 {
7752 for (bb = current_nr_blocks - 1; bb >= 0; bb--)
7753 find_post_sched_live (bb);
7754
7755 if (current_nr_blocks <= 1)
7756 /* Sanity check. There should be no REG_DEAD notes leftover at the end.
7757 In practice, this can occur as the result of bugs in flow, combine.c,
7758 and/or sched.c. The values of the REG_DEAD notes remaining are
7759 meaningless, because dead_notes is just used as a free list. */
7760 if (dead_notes != 0)
7761 abort ();
7762 }
7763
7764 /* restore line notes. */
7765 if (write_symbols != NO_DEBUG)
7766 {
7767 for (bb = 0; bb < current_nr_blocks; bb++)
7768 restore_line_notes (bb);
7769 }
7770
7771 /* Done with this region */
7772 free_pending_lists ();
f187056f
JL
7773
7774 FREE_REG_SET (reg_pending_sets);
28c95eff 7775 FREE_REG_SET (reg_pending_clobbers);
8c660648
JL
7776}
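/* Editorial sketch -- standalone and hypothetical, not part of this
   file: the bbset/edgeset objects above are word arrays of bits, and
   the nbits / WORD_BITS + 1 sizing (HOST_BITS_PER_WIDE_INT in the real
   code) always leaves room for a final partial word.  */
#include <stdio.h>

#define WORD_BITS (8 * (int) sizeof (unsigned long))

int
main (void)
{
  int nbits = 70;
  int nwords = nbits / WORD_BITS + 1;
  unsigned long set[8] = { 0 };         /* ample for this example */

  set[69 / WORD_BITS] |= 1UL << (69 % WORD_BITS);   /* set bit 69 */
  printf ("words needed: %d; bit 69 is %s\n", nwords,
          (set[69 / WORD_BITS] >> (69 % WORD_BITS)) & 1 ? "set" : "clear");
  return 0;
}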
7777
8c660648
JL
7778/* Subroutine of update_flow_info. Determines whether any new REG_NOTEs are
7779 needed for the hard register mentioned in the note. This can happen
7780 if the reference to the hard register in the original insn was split into
7781 several smaller hard register references in the split insns. */
7782
7783static void
5835e573
KG
7784split_hard_reg_notes (note, first, last)
7785 rtx note, first, last;
8c660648
JL
7786{
7787 rtx reg, temp, link;
7788 int n_regs, i, new_reg;
7789 rtx insn;
7790
7791 /* Assume that this is a REG_DEAD note. */
7792 if (REG_NOTE_KIND (note) != REG_DEAD)
7793 abort ();
7794
7795 reg = XEXP (note, 0);
7796
7797 n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
7798
7799 for (i = 0; i < n_regs; i++)
7800 {
7801 new_reg = REGNO (reg) + i;
7802
7803 /* Check for references to new_reg in the split insns. */
7804 for (insn = last;; insn = PREV_INSN (insn))
7805 {
7806 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7807 && (temp = regno_use_in (new_reg, PATTERN (insn))))
7808 {
7809 /* Create a new reg dead note here. */
ebb7b10b 7810 link = alloc_EXPR_LIST (REG_DEAD, temp, REG_NOTES (insn));
8c660648
JL
7811 REG_NOTES (insn) = link;
7812
7813 /* If killed multiple registers here, then add in the excess. */
7814 i += HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) - 1;
7815
7816 break;
7817 }
7818 /* It isn't mentioned anywhere, so no new reg note is needed for
7819 this register. */
7820 if (insn == first)
7821 break;
7822 }
7823 }
7824}
7825
7826/* Subroutine of update_flow_info. Determines whether a SET or CLOBBER in an
7827 insn created by splitting needs a REG_DEAD or REG_UNUSED note added. */
7828
7829static void
7830new_insn_dead_notes (pat, insn, last, orig_insn)
7831 rtx pat, insn, last, orig_insn;
7832{
7833 rtx dest, tem, set;
7834
7835 /* PAT is either a CLOBBER or a SET here. */
7836 dest = XEXP (pat, 0);
7837
7838 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
7839 || GET_CODE (dest) == STRICT_LOW_PART
7840 || GET_CODE (dest) == SIGN_EXTRACT)
7841 dest = XEXP (dest, 0);
7842
7843 if (GET_CODE (dest) == REG)
7844 {
93da030f
R
7845 /* If the original insn already used this register, we may not add new
7846 notes for it. One example for a split that needs this test is
7847 when a multi-word memory access with register-indirect addressing
7848 is split into multiple memory accesses with auto-increment and
7849 one adjusting add instruction for the address register. */
7850 if (reg_referenced_p (dest, PATTERN (orig_insn)))
7851 return;
8c660648
JL
7852 for (tem = last; tem != insn; tem = PREV_INSN (tem))
7853 {
7854 if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
7855 && reg_overlap_mentioned_p (dest, PATTERN (tem))
7856 && (set = single_set (tem)))
7857 {
7858 rtx tem_dest = SET_DEST (set);
7859
7860 while (GET_CODE (tem_dest) == ZERO_EXTRACT
7861 || GET_CODE (tem_dest) == SUBREG
7862 || GET_CODE (tem_dest) == STRICT_LOW_PART
7863 || GET_CODE (tem_dest) == SIGN_EXTRACT)
7864 tem_dest = XEXP (tem_dest, 0);
7865
7866 if (!rtx_equal_p (tem_dest, dest))
7867 {
7868 /* Use the same scheme as combine.c, don't put both REG_DEAD
7869 and REG_UNUSED notes on the same insn. */
7870 if (!find_regno_note (tem, REG_UNUSED, REGNO (dest))
7871 && !find_regno_note (tem, REG_DEAD, REGNO (dest)))
7872 {
ebb7b10b
RH
7873 rtx note = alloc_EXPR_LIST (REG_DEAD, dest,
7874 REG_NOTES (tem));
8c660648
JL
7875 REG_NOTES (tem) = note;
7876 }
7877 /* The reg only dies in one insn, the last one that uses
7878 it. */
7879 break;
7880 }
7881 else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
7882 /* We found an instruction that both uses the register,
7883 and sets it, so no new REG_NOTE is needed for this set. */
7884 break;
7885 }
7886 }
7887 /* If this is a set, it must die somewhere, unless it is the dest of
7888 the original insn, and hence is live after the original insn. Abort
7889 if it isn't supposed to be live after the original insn.
7890
7891 If this is a clobber, then just add a REG_UNUSED note. */
7892 if (tem == insn)
7893 {
7894 int live_after_orig_insn = 0;
7895 rtx pattern = PATTERN (orig_insn);
7896 int i;
7897
7898 if (GET_CODE (pat) == CLOBBER)
7899 {
ebb7b10b 7900 rtx note = alloc_EXPR_LIST (REG_UNUSED, dest, REG_NOTES (insn));
8c660648
JL
7901 REG_NOTES (insn) = note;
7902 return;
7903 }
7904
7905 /* The original insn could have multiple sets, so search the
7906 insn for all sets. */
7907 if (GET_CODE (pattern) == SET)
7908 {
7909 if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
7910 live_after_orig_insn = 1;
7911 }
7912 else if (GET_CODE (pattern) == PARALLEL)
7913 {
7914 for (i = 0; i < XVECLEN (pattern, 0); i++)
7915 if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
7916 && reg_overlap_mentioned_p (dest,
7917 SET_DEST (XVECEXP (pattern,
7918 0, i))))
7919 live_after_orig_insn = 1;
7920 }
7921
7922 if (!live_after_orig_insn)
7923 abort ();
7924 }
7925 }
7926}
7927
7928/* Subroutine of update_flow_info. Update the value of reg_n_sets for all
7929 registers modified by X. INC is -1 if the containing insn is being deleted,
7930 and is 1 if the containing insn is a newly generated insn. */
7931
7932static void
7933update_n_sets (x, inc)
7934 rtx x;
7935 int inc;
7936{
7937 rtx dest = SET_DEST (x);
7938
7939 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
7940 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
7941 dest = SUBREG_REG (dest);
7942
7943 if (GET_CODE (dest) == REG)
7944 {
7945 int regno = REGNO (dest);
7946
7947 if (regno < FIRST_PSEUDO_REGISTER)
7948 {
7949 register int i;
7950 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
7951
7952 for (i = regno; i < endregno; i++)
7953 REG_N_SETS (i) += inc;
7954 }
7955 else
7956 REG_N_SETS (regno) += inc;
7957 }
7958}
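/* Editorial sketch -- standalone and hypothetical, not part of this
   file: the hard-register branch above updates a whole range of
   counters, one per machine register covered by a multi-word value,
   which is what the HARD_REGNO_NREGS loop expresses.  */
#include <stdio.h>

#define FIRST_PSEUDO 8

static int n_sets[16];

static void
count_sets (int regno, int nregs, int inc)
{
  if (regno < FIRST_PSEUDO)
    {
      int i;

      for (i = regno; i < regno + nregs; i++)
        n_sets[i] += inc;       /* every hard reg the value occupies */
    }
  else
    n_sets[regno] += inc;       /* a pseudo is a single counter */
}

int
main (void)
{
  count_sets (2, 2, 1);         /* a two-word value in hard regs 2 and 3 */
  printf ("%d %d\n", n_sets[2], n_sets[3]);   /* prints: 1 1 */
  return 0;
}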
7959
7960/* Updates all flow-analysis related quantities (including REG_NOTES) for
7961 the insns from FIRST to LAST inclusive that were created by splitting
7962 ORIG_INSN. NOTES are the original REG_NOTES. */
7963
ca545bb5 7964void
8c660648
JL
7965update_flow_info (notes, first, last, orig_insn)
7966 rtx notes;
7967 rtx first, last;
7968 rtx orig_insn;
7969{
7970 rtx insn, note;
7971 rtx next;
7972 rtx orig_dest, temp;
7973 rtx set;
7974
7975 /* Get and save the destination set by the original insn. */
7976
7977 orig_dest = single_set (orig_insn);
7978 if (orig_dest)
7979 orig_dest = SET_DEST (orig_dest);
7980
7981 /* Move REG_NOTES from the original insn to where they now belong. */
7982
7983 for (note = notes; note; note = next)
7984 {
7985 next = XEXP (note, 1);
7986 switch (REG_NOTE_KIND (note))
7987 {
7988 case REG_DEAD:
7989 case REG_UNUSED:
7990 /* Move these notes from the original insn to the last new insn where
7991 the register is now set. */
7992
7993 for (insn = last;; insn = PREV_INSN (insn))
7994 {
7995 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7996 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
7997 {
7998 /* If this note refers to a multiple word hard register, it
7999 may have been split into several smaller hard register
8000 references, so handle it specially. */
8001 temp = XEXP (note, 0);
8002 if (REG_NOTE_KIND (note) == REG_DEAD
8003 && GET_CODE (temp) == REG
8004 && REGNO (temp) < FIRST_PSEUDO_REGISTER
8005 && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) > 1)
5835e573 8006 split_hard_reg_notes (note, first, last);
8c660648
JL
8007 else
8008 {
8009 XEXP (note, 1) = REG_NOTES (insn);
8010 REG_NOTES (insn) = note;
8011 }
8012
8013 /* Sometimes need to convert REG_UNUSED notes to REG_DEAD
8014 notes. */
8015 /* ??? This won't handle multiple word registers correctly,
8016 but should be good enough for now. */
8017 if (REG_NOTE_KIND (note) == REG_UNUSED
272299b9 8018 && GET_CODE (XEXP (note, 0)) != SCRATCH
8c660648
JL
8019 && !dead_or_set_p (insn, XEXP (note, 0)))
8020 PUT_REG_NOTE_KIND (note, REG_DEAD);
8021
8022 /* The reg only dies in one insn, the last one that uses
8023 it. */
8024 break;
8025 }
8026 /* It must die somewhere, fail if we couldn't find where it died.
8027
8028 If this is a REG_UNUSED note, then it must be a temporary
8029 register that was not needed by this instantiation of the
8030 pattern, so we can safely ignore it. */
8031 if (insn == first)
a1ef0af4 8032 {
8c660648
JL
8033 if (REG_NOTE_KIND (note) != REG_UNUSED)
8034 abort ();
8035
8036 break;
8037 }
8038 }
8039 break;
8040
8041 case REG_WAS_0:
fcdc0d6e
R
8042 /* If the insn that set the register to 0 was deleted, this
8043 note cannot be relied on any longer. The destination might
8044 even have been moved to memory.
8045 This was observed for SH4 with execute/920501-6.c compilation,
8046 -O2 -fomit-frame-pointer -finline-functions . */
8047 if (GET_CODE (XEXP (note, 0)) == NOTE
8048 || INSN_DELETED_P (XEXP (note, 0)))
8049 break;
8c660648
JL
8050 /* This note applies to the dest of the original insn. Find the
8051 first new insn that now has the same dest, and move the note
8052 there. */
8053
8054 if (!orig_dest)
8055 abort ();
8056
8057 for (insn = first;; insn = NEXT_INSN (insn))
8058 {
8059 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
8060 && (temp = single_set (insn))
8061 && rtx_equal_p (SET_DEST (temp), orig_dest))
8062 {
8063 XEXP (note, 1) = REG_NOTES (insn);
8064 REG_NOTES (insn) = note;
8065 /* The reg is only zero before one insn, the first that
8066 uses it. */
8067 break;
8068 }
8069 /* If this note refers to a multiple word hard
8070 register, it may have been split into several smaller
8071 hard register references. We could split the notes,
8072 but simply dropping them is good enough. */
8073 if (GET_CODE (orig_dest) == REG
8074 && REGNO (orig_dest) < FIRST_PSEUDO_REGISTER
8075 && HARD_REGNO_NREGS (REGNO (orig_dest),
8076 GET_MODE (orig_dest)) > 1)
8077 break;
8078 /* It must be set somewhere, fail if we couldn't find where it
8079 was set. */
8080 if (insn == last)
8081 abort ();
8082 }
8083 break;
8084
8085 case REG_EQUAL:
8086 case REG_EQUIV:
8087 /* A REG_EQUIV or REG_EQUAL note on an insn with more than one
8088 set is meaningless. Just drop the note. */
8089 if (!orig_dest)
8090 break;
8091
8092 case REG_NO_CONFLICT:
8093 /* These notes apply to the dest of the original insn. Find the last
8094 new insn that now has the same dest, and move the note there. */
8095
8096 if (!orig_dest)
8097 abort ();
8098
8099 for (insn = last;; insn = PREV_INSN (insn))
8100 {
8101 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
8102 && (temp = single_set (insn))
8103 && rtx_equal_p (SET_DEST (temp), orig_dest))
8104 {
8105 XEXP (note, 1) = REG_NOTES (insn);
8106 REG_NOTES (insn) = note;
8107 /* Only put this note on one of the new insns. */
8108 break;
8109 }
8110
8111 /* The original dest must still be set someplace. Abort if we
8112 couldn't find it. */
8113 if (insn == first)
8114 {
8115 /* However, if this note refers to a multiple word hard
8116 register, it may have been split into several smaller
8117 hard register references. We could split the notes,
8118 but simply dropping them is good enough. */
8119 if (GET_CODE (orig_dest) == REG
8120 && REGNO (orig_dest) < FIRST_PSEUDO_REGISTER
8121 && HARD_REGNO_NREGS (REGNO (orig_dest),
8122 GET_MODE (orig_dest)) > 1)
8123 break;
8124 /* Likewise for multi-word memory references. */
8125 if (GET_CODE (orig_dest) == MEM
9ae4ec46 8126 && SIZE_FOR_MODE (orig_dest) > UNITS_PER_WORD)
8c660648
JL
8127 break;
8128 abort ();
8129 }
8130 }
8131 break;
8132
8133 case REG_LIBCALL:
8134 /* Move a REG_LIBCALL note to the first insn created, and update
8135 the corresponding REG_RETVAL note. */
8136 XEXP (note, 1) = REG_NOTES (first);
8137 REG_NOTES (first) = note;
8138
8139 insn = XEXP (note, 0);
8140 note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
8141 if (note)
8142 XEXP (note, 0) = first;
8143 break;
8144
8145 case REG_EXEC_COUNT:
8146 /* Move a REG_EXEC_COUNT note to the first insn created. */
8147 XEXP (note, 1) = REG_NOTES (first);
8148 REG_NOTES (first) = note;
8149 break;
8150
8151 case REG_RETVAL:
8152 /* Move a REG_RETVAL note to the last insn created, and update
8153 the corresponding REG_LIBCALL note. */
8154 XEXP (note, 1) = REG_NOTES (last);
8155 REG_NOTES (last) = note;
8156
8157 insn = XEXP (note, 0);
8158 note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
8159 if (note)
8160 XEXP (note, 0) = last;
8161 break;
8162
8163 case REG_NONNEG:
8164 case REG_BR_PROB:
8165 /* This should be moved to whichever instruction is a JUMP_INSN. */
8166
8167 for (insn = last;; insn = PREV_INSN (insn))
8168 {
8169 if (GET_CODE (insn) == JUMP_INSN)
8170 {
8171 XEXP (note, 1) = REG_NOTES (insn);
8172 REG_NOTES (insn) = note;
8173 /* Only put this note on one of the new insns. */
8174 break;
8175 }
8176 /* Fail if we couldn't find a JUMP_INSN. */
8177 if (insn == first)
8178 abort ();
8179 }
8180 break;
8181
8182 case REG_INC:
8183 /* reload sometimes leaves obsolete REG_INC notes around. */
8184 if (reload_completed)
8185 break;
8186 /* This should be moved to whichever instruction now has the
8187 increment operation. */
8188 abort ();
8189
8190 case REG_LABEL:
8191 /* Should be moved to the new insn(s) which use the label. */
8192 for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
8193 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
8194 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
ebb7b10b
RH
8195 {
8196 REG_NOTES (insn) = alloc_EXPR_LIST (REG_LABEL,
38a448ca
RH
8197 XEXP (note, 0),
8198 REG_NOTES (insn));
ebb7b10b 8199 }
8c660648
JL
8200 break;
8201
8202 case REG_CC_SETTER:
8203 case REG_CC_USER:
8204 /* These two notes will never appear until after reorg, so we don't
8205 have to handle them here. */
8206 default:
8207 abort ();
8208 }
8209 }
8210
8211 /* Each new insn created, except the last, has a new set. If the destination
8212 is a register, then this reg is now live across several insns, whereas
8213 previously the dest reg was born and died within the same insn. To
8214 reflect this, we now need a REG_DEAD note on the insn where this
8215 dest reg dies.
8216
8217 Similarly, the new insns may have clobbers that need REG_UNUSED notes. */
8218
8219 for (insn = first; insn != last; insn = NEXT_INSN (insn))
8220 {
8221 rtx pat;
8222 int i;
8223
8224 pat = PATTERN (insn);
8225 if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
8226 new_insn_dead_notes (pat, insn, last, orig_insn);
8227 else if (GET_CODE (pat) == PARALLEL)
8228 {
8229 for (i = 0; i < XVECLEN (pat, 0); i++)
8230 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
8231 || GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
8232 new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
8233 }
8234 }
8235
8236 /* If any insn, except the last, uses the register set by the last insn,
8237 then we need a new REG_DEAD note on that insn. In this case, there
8238 would not have been a REG_DEAD note for this register in the original
8239 insn because it was used and set within one insn. */
8240
8241 set = single_set (last);
8242 if (set)
8243 {
8244 rtx dest = SET_DEST (set);
8245
8246 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
8247 || GET_CODE (dest) == STRICT_LOW_PART
8248 || GET_CODE (dest) == SIGN_EXTRACT)
8249 dest = XEXP (dest, 0);
8250
8251 if (GET_CODE (dest) == REG
8252 /* Global registers are always live, so the code below does not
8253 apply to them. */
8254 && (REGNO (dest) >= FIRST_PSEUDO_REGISTER
8255 || ! global_regs[REGNO (dest)]))
8256 {
8257 rtx stop_insn = PREV_INSN (first);
8258
8259 /* If the last insn uses the register that it is setting, then
8260 we don't want to put a REG_DEAD note there. Search backwards
8261 to find the first insn that sets but does not use DEST. */
8262
8263 insn = last;
8264 if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
8265 {
8266 for (insn = PREV_INSN (insn); insn != first;
8267 insn = PREV_INSN (insn))
8268 {
8269 if ((set = single_set (insn))
8270 && reg_mentioned_p (dest, SET_DEST (set))
8271 && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
8272 break;
8273 }
8274 }
8275
8276 /* Now find the first insn that uses but does not set DEST. */
8277
8278 for (insn = PREV_INSN (insn); insn != stop_insn;
8279 insn = PREV_INSN (insn))
8280 {
8281 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
8282 && reg_mentioned_p (dest, PATTERN (insn))
8283 && (set = single_set (insn)))
8284 {
8285 rtx insn_dest = SET_DEST (set);
8286
8287 while (GET_CODE (insn_dest) == ZERO_EXTRACT
8288 || GET_CODE (insn_dest) == SUBREG
8289 || GET_CODE (insn_dest) == STRICT_LOW_PART
8290 || GET_CODE (insn_dest) == SIGN_EXTRACT)
8291 insn_dest = XEXP (insn_dest, 0);
8292
8293 if (insn_dest != dest)
8294 {
ebb7b10b 8295 note = alloc_EXPR_LIST (REG_DEAD, dest, REG_NOTES (insn));
8c660648
JL
8296 REG_NOTES (insn) = note;
8297 /* The reg only dies in one insn, the last one
8298 that uses it. */
8299 break;
8300 }
8301 }
8302 }
8303 }
8304 }
8305
8306 /* If the original dest is modifying a multiple register target, and the
8307 original instruction was split such that the original dest is now set
8308 by two or more SUBREG sets, then the split insns no longer kill the
8309 destination of the original insn.
8310
8311 In this case, if there exists an instruction in the same basic block,
8312 before the split insn, which uses the original dest, and this use is
8313 killed by the original insn, then we must remove the REG_DEAD note on
8314 this insn, because it is now superfluous.
8315
8316 This does not apply when a hard register gets split, because the code
8317 knows how to handle overlapping hard registers properly. */
8318 if (orig_dest && GET_CODE (orig_dest) == REG)
8319 {
8320 int found_orig_dest = 0;
8321 int found_split_dest = 0;
8322
8323 for (insn = first;; insn = NEXT_INSN (insn))
8324 {
acceac1a
R
8325 rtx pat;
8326 int i;
8327
8328 /* I'm not sure if this can happen, but let's be safe. */
8329 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
8330 continue;
8331
8332 pat = PATTERN (insn);
8333 i = GET_CODE (pat) == PARALLEL ? XVECLEN (pat, 0) : 0;
8334 set = pat;
8335
8336 for (;;)
8c660648 8337 {
acceac1a 8338 if (GET_CODE (set) == SET)
8c660648 8339 {
acceac1a
R
8340 if (GET_CODE (SET_DEST (set)) == REG
8341 && REGNO (SET_DEST (set)) == REGNO (orig_dest))
8342 {
8343 found_orig_dest = 1;
8344 break;
8345 }
8346 else if (GET_CODE (SET_DEST (set)) == SUBREG
8347 && SUBREG_REG (SET_DEST (set)) == orig_dest)
8348 {
8349 found_split_dest = 1;
8350 break;
8351 }
8c660648 8352 }
acceac1a
R
8353 if (--i < 0)
8354 break;
8355 set = XVECEXP (pat, 0, i);
8c660648
JL
8356 }
8357
8358 if (insn == last)
8359 break;
8360 }
8361
8362 if (found_split_dest)
8363 {
8364 /* Search backwards from FIRST, looking for the first insn that uses
8365 the original dest. Stop if we pass a CODE_LABEL or a JUMP_INSN.
8366 If we find an insn, and it has a REG_DEAD note, then delete the
8367 note. */
8368
8369 for (insn = first; insn; insn = PREV_INSN (insn))
8370 {
8371 if (GET_CODE (insn) == CODE_LABEL
8372 || GET_CODE (insn) == JUMP_INSN)
8373 break;
8374 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
8375 && reg_mentioned_p (orig_dest, insn))
8376 {
8377 note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
8378 if (note)
8379 remove_note (insn, note);
8380 }
8381 }
8382 }
8383 else if (!found_orig_dest)
8384 {
faff4ab8
JW
8385 int i, regno;
8386
8387 /* Should never reach here for a pseudo reg. */
8388 if (REGNO (orig_dest) >= FIRST_PSEUDO_REGISTER)
8389 abort ();
8390
8391 /* This can happen for a hard register, if the splitter
8392 does not bother to emit instructions which would be no-ops.
8393 We try to verify that this is the case by checking to see if
8394 the original instruction uses all of the registers that it
8395 set. This case is OK, because deleting a no-op can not affect
8396 REG_DEAD notes on other insns. If this is not the case, then
8397 abort. */
8398
8399 regno = REGNO (orig_dest);
8400 for (i = HARD_REGNO_NREGS (regno, GET_MODE (orig_dest)) - 1;
8401 i >= 0; i--)
8402 if (! refers_to_regno_p (regno + i, regno + i + 1, orig_insn,
8403 NULL_PTR))
8404 break;
8405 if (i >= 0)
8406 abort ();
8c660648
JL
8407 }
8408 }
8409
8410 /* Update reg_n_sets. This is necessary to prevent local alloc from
8411 converting REG_EQUAL notes to REG_EQUIV when splitting has modified
8412 a reg from set once to set multiple times. */
8413
8414 {
8415 rtx x = PATTERN (orig_insn);
8416 RTX_CODE code = GET_CODE (x);
8417
8418 if (code == SET || code == CLOBBER)
8419 update_n_sets (x, -1);
8420 else if (code == PARALLEL)
8421 {
8422 int i;
8423 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8424 {
8425 code = GET_CODE (XVECEXP (x, 0, i));
8426 if (code == SET || code == CLOBBER)
8427 update_n_sets (XVECEXP (x, 0, i), -1);
8428 }
8429 }
8430
8431 for (insn = first;; insn = NEXT_INSN (insn))
8432 {
8433 x = PATTERN (insn);
8434 code = GET_CODE (x);
8435
8436 if (code == SET || code == CLOBBER)
8437 update_n_sets (x, 1);
8438 else if (code == PARALLEL)
8439 {
8440 int i;
8441 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8442 {
8443 code = GET_CODE (XVECEXP (x, 0, i));
8444 if (code == SET || code == CLOBBER)
8445 update_n_sets (XVECEXP (x, 0, i), 1);
8446 }
8447 }
8448
8449 if (insn == last)
8450 break;
8451 }
8452 }
8453}
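/* Editorial sketch -- standalone and hypothetical, not part of this
   file: the note-moving idiom used throughout update_flow_info,
   XEXP (note, 1) = REG_NOTES (insn); REG_NOTES (insn) = note;
   is a push of an existing cell onto another insn's singly linked
   note list.  */
#include <stdio.h>

struct note { const char *kind; struct note *next; };

static void
move_note (struct note *note, struct note **to_insn_notes)
{
  note->next = *to_insn_notes;  /* XEXP (note, 1) = REG_NOTES (insn) */
  *to_insn_notes = note;        /* REG_NOTES (insn) = note */
}

int
main (void)
{
  struct note dead = { "REG_DEAD", NULL };
  struct note *insn_notes = NULL;

  move_note (&dead, &insn_notes);
  printf ("%s\n", insn_notes->kind);    /* prints: REG_DEAD */
  return 0;
}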
8454
8c660648
JL
8455/* The one entry point in this file. DUMP_FILE is the dump file for
8456 this pass. */
8457
8458void
8459schedule_insns (dump_file)
8460 FILE *dump_file;
8461{
8462
8463 int max_uid;
8464 int b;
8c660648
JL
8465 rtx insn;
8466 int rgn;
8467
8468 int luid;
8469
8470 /* disable speculative loads in their presence if cc0 defined */
8471#ifdef HAVE_cc0
8472 flag_schedule_speculative_load = 0;
8473#endif
8474
8475 /* Taking care of this degenerate case makes the rest of
8476 this code simpler. */
8477 if (n_basic_blocks == 0)
8478 return;
8479
8480 /* set dump and sched_verbose for the desired debugging output. If no
8481 dump-file was specified, but -fsched-verbose-N (any N), print to stderr.
8482 For -fsched-verbose-N, N>=10, print everything to stderr. */
8483 sched_verbose = sched_verbose_param;
8484 if (sched_verbose_param == 0 && dump_file)
8485 sched_verbose = 1;
8486 dump = ((sched_verbose_param >= 10 || !dump_file) ? stderr : dump_file);
8487
8488 nr_inter = 0;
8489 nr_spec = 0;
8490
8491 /* Initialize the unused_*_lists. We can't use the ones left over from
8492 the previous function, because gcc has freed that memory. We can use
8493 the ones left over from the first sched pass in the second pass however,
8494 so only clear them on the first sched pass. The first pass is before
8495 reload if flag_schedule_insns is set, otherwise it is afterwards. */
8496
8497 if (reload_completed == 0 || !flag_schedule_insns)
8498 {
8499 unused_insn_list = 0;
8500 unused_expr_list = 0;
8501 }
8502
8503 /* initialize issue_rate */
62d65906 8504 issue_rate = ISSUE_RATE;
8c660648
JL
8505
8506 /* do the splitting first for all blocks */
8507 for (b = 0; b < n_basic_blocks; b++)
ca545bb5 8508 split_block_insns (b, 1);
8c660648
JL
8509
8510 max_uid = (get_max_uid () + 1);
8511
7c74b010 8512 cant_move = (char *) xmalloc (max_uid * sizeof (char));
8c660648
JL
8513 bzero ((char *) cant_move, max_uid * sizeof (char));
8514
7c74b010 8515 fed_by_spec_load = (char *) xmalloc (max_uid * sizeof (char));
8c660648
JL
8516 bzero ((char *) fed_by_spec_load, max_uid * sizeof (char));
8517
7c74b010 8518 is_load_insn = (char *) xmalloc (max_uid * sizeof (char));
8c660648
JL
8519 bzero ((char *) is_load_insn, max_uid * sizeof (char));
8520
7c74b010
JW
8521 insn_orig_block = (int *) xmalloc (max_uid * sizeof (int));
8522 insn_luid = (int *) xmalloc (max_uid * sizeof (int));
8c660648
JL
8523
8524 luid = 0;
8525 for (b = 0; b < n_basic_blocks; b++)
3b413743 8526 for (insn = BLOCK_HEAD (b);; insn = NEXT_INSN (insn))
8c660648
JL
8527 {
8528 INSN_BLOCK (insn) = b;
8529 INSN_LUID (insn) = luid++;
8530
3b413743 8531 if (insn == BLOCK_END (b))
8c660648
JL
8532 break;
8533 }
8534
8535 /* after reload, remove inter-block dependences computed before reload. */
8536 if (reload_completed)
8537 {
8538 int b;
8539 rtx insn;
8540
8541 for (b = 0; b < n_basic_blocks; b++)
3b413743 8542 for (insn = BLOCK_HEAD (b);; insn = NEXT_INSN (insn))
8c660648 8543 {
c995fea1 8544 rtx link, prev;
8c660648
JL
8545
8546 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
8547 {
c995fea1
RH
8548 prev = NULL_RTX;
8549 link = LOG_LINKS (insn);
8550 while (link)
8c660648
JL
8551 {
8552 rtx x = XEXP (link, 0);
8553
8554 if (INSN_BLOCK (x) != b)
c995fea1
RH
8555 {
8556 remove_dependence (insn, x);
8557 link = prev ? XEXP (prev, 1) : LOG_LINKS (insn);
8558 }
8559 else
8560 prev = link, link = XEXP (prev, 1);
8c660648
JL
8561 }
8562 }
8563
3b413743 8564 if (insn == BLOCK_END (b))
8c660648
JL
8565 break;
8566 }
8567 }
8568
8569 nr_regions = 0;
8570 rgn_table = (region *) alloca ((n_basic_blocks) * sizeof (region));
8571 rgn_bb_table = (int *) alloca ((n_basic_blocks) * sizeof (int));
8572 block_to_bb = (int *) alloca ((n_basic_blocks) * sizeof (int));
8573 containing_rgn = (int *) alloca ((n_basic_blocks) * sizeof (int));
8574
8575 /* compute regions for scheduling */
8576 if (reload_completed
8577 || n_basic_blocks == 1
8578 || !flag_schedule_interblock)
8579 {
8580 find_single_block_region ();
8581 }
8582 else
8583 {
8c660648 8584 /* verify that a 'good' control flow graph can be built */
168cbdf9 8585 if (is_cfg_nonregular ())
8c660648
JL
8586 {
8587 find_single_block_region ();
8588 }
8589 else
8590 {
a2e68776
JL
8591 int_list_ptr *s_preds, *s_succs;
8592 int *num_preds, *num_succs;
8593 sbitmap *dom, *pdom;
8594
8595 s_preds = (int_list_ptr *) alloca (n_basic_blocks
8596 * sizeof (int_list_ptr));
8597 s_succs = (int_list_ptr *) alloca (n_basic_blocks
8598 * sizeof (int_list_ptr));
8599 num_preds = (int *) alloca (n_basic_blocks * sizeof (int));
8600 num_succs = (int *) alloca (n_basic_blocks * sizeof (int));
8601 dom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
8602 pdom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
8603
8604 /* The scheduler runs after flow; therefore, we can't blindly call
8605 back into find_basic_blocks since doing so could invalidate the
e881bb1b 8606 info in global_live_at_start.
a2e68776
JL
8607
8608 Consider a block consisting entirely of dead stores; after life
8609 analysis it would be a block of NOTE_INSN_DELETED notes. If
8610 we call find_basic_blocks again, then the block would be removed
8611 entirely and invalidate our the register live information.
8612
8613 We could (should?) recompute register live information. Doing
8614 so may even be beneficial. */
8615
5d27de7d 8616 compute_preds_succs (s_preds, s_succs, num_preds, num_succs);
a2e68776
JL
8617
8618 /* Compute the dominators and post dominators. We don't currently use
8619 post dominators, but we should for speculative motion analysis. */
8620 compute_dominators (dom, pdom, s_preds, s_succs);
8621
168cbdf9
JL
8622 /* build_control_flow will return nonzero if it detects unreachable
8623 blocks or any other irregularity with the cfg which prevents
8624 cross block scheduling. */
a2e68776 8625 if (build_control_flow (s_preds, s_succs, num_preds, num_succs) != 0)
168cbdf9
JL
8626 find_single_block_region ();
8627 else
a2e68776 8628 find_rgns (s_preds, s_succs, num_preds, num_succs, dom);
8c660648
JL
8629
8630 if (sched_verbose >= 3)
a2e68776 8631 debug_regions ();
8c660648 8632
a2e68776
JL
8633 /* For now. This will move as more and more of haifa is converted
8634 to using the cfg code in flow.c */
8635 free_bb_mem ();
8636 free (dom);
8637 free (pdom);
8c660648
JL
8638 }
8639 }
8640
8641 /* Allocate data for this pass. See comments, above,
7c74b010
JW
8642 for what these vectors do.
8643
8644 We use xmalloc instead of alloca, because max_uid can be very large
8645 when there is a lot of function inlining. If we used alloca, we could
8646 exceed stack limits on some hosts for some inputs. */
8647 insn_priority = (int *) xmalloc (max_uid * sizeof (int));
8648 insn_reg_weight = (int *) xmalloc (max_uid * sizeof (int));
8649 insn_tick = (int *) xmalloc (max_uid * sizeof (int));
8650 insn_costs = (short *) xmalloc (max_uid * sizeof (short));
8651 insn_units = (short *) xmalloc (max_uid * sizeof (short));
8652 insn_blockage = (unsigned int *) xmalloc (max_uid * sizeof (unsigned int));
8653 insn_ref_count = (int *) xmalloc (max_uid * sizeof (int));
8c660648
JL
8654
8655 /* Allocate for forward dependencies */
7c74b010
JW
8656 insn_dep_count = (int *) xmalloc (max_uid * sizeof (int));
8657 insn_depend = (rtx *) xmalloc (max_uid * sizeof (rtx));
8c660648
JL
8658
8659 if (reload_completed == 0)
8660 {
8661 int i;
8662
8663 sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
8664 sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
8665 sched_reg_basic_block = (int *) alloca (max_regno * sizeof (int));
8666 bb_live_regs = ALLOCA_REG_SET ();
8667 bzero ((char *) sched_reg_n_calls_crossed, max_regno * sizeof (int));
8668 bzero ((char *) sched_reg_live_length, max_regno * sizeof (int));
8669
8670 for (i = 0; i < max_regno; i++)
8671 sched_reg_basic_block[i] = REG_BLOCK_UNKNOWN;
8672 }
8673 else
8674 {
8675 sched_reg_n_calls_crossed = 0;
8676 sched_reg_live_length = 0;
8677 bb_live_regs = 0;
8678 }
8679 init_alias_analysis ();
8680
8681 if (write_symbols != NO_DEBUG)
8682 {
8683 rtx line;
8684
7c74b010 8685 line_note = (rtx *) xmalloc (max_uid * sizeof (rtx));
8c660648
JL
8686 bzero ((char *) line_note, max_uid * sizeof (rtx));
8687 line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
8688 bzero ((char *) line_note_head, n_basic_blocks * sizeof (rtx));
8689
8690 /* Save-line-note-head:
8691 Determine the line-number at the start of each basic block.
8692 This must be computed and saved now, because after a basic block's
8693 predecessor has been scheduled, it is impossible to accurately
8694 determine the correct line number for the first insn of the block. */
8695
8696 for (b = 0; b < n_basic_blocks; b++)
3b413743 8697 for (line = BLOCK_HEAD (b); line; line = PREV_INSN (line))
8c660648
JL
8698 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
8699 {
8700 line_note_head[b] = line;
8701 break;
8702 }
8703 }
8704
8705 bzero ((char *) insn_priority, max_uid * sizeof (int));
8706 bzero ((char *) insn_reg_weight, max_uid * sizeof (int));
8707 bzero ((char *) insn_tick, max_uid * sizeof (int));
8708 bzero ((char *) insn_costs, max_uid * sizeof (short));
8709 bzero ((char *) insn_units, max_uid * sizeof (short));
8710 bzero ((char *) insn_blockage, max_uid * sizeof (unsigned int));
8711 bzero ((char *) insn_ref_count, max_uid * sizeof (int));
8712
8713 /* Initialize for forward dependencies */
8714 bzero ((char *) insn_depend, max_uid * sizeof (rtx));
8715 bzero ((char *) insn_dep_count, max_uid * sizeof (int));
8716
8717 /* Find units used in this function, for visualization */
8718 if (sched_verbose)
8719 init_target_units ();
8720
8721 /* ??? Add a NOTE after the last insn of the last basic block. It is not
8722 known why this is done. */
8723
3b413743 8724 insn = BLOCK_END (n_basic_blocks - 1);
8c660648
JL
8725 if (NEXT_INSN (insn) == 0
8726 || (GET_CODE (insn) != NOTE
8727 && GET_CODE (insn) != CODE_LABEL
3b413743
RH
8728 /* Don't emit a NOTE if it would end up between an unconditional
8729 jump and a BARRIER. */
8c660648
JL
8730 && !(GET_CODE (insn) == JUMP_INSN
8731 && GET_CODE (NEXT_INSN (insn)) == BARRIER)))
3b413743 8732 emit_note_after (NOTE_INSN_DELETED, BLOCK_END (n_basic_blocks - 1));
8c660648
JL
8733
8734 /* Schedule every region in the subroutine */
8735 for (rgn = 0; rgn < nr_regions; rgn++)
8736 {
8737 schedule_region (rgn);
8738
8739#ifdef USE_C_ALLOCA
8740 alloca (0);
8741#endif
8742 }
8743
8744 /* Reposition the prologue and epilogue notes in case we moved the
8745 prologue/epilogue insns. */
8746 if (reload_completed)
8747 reposition_prologue_and_epilogue_notes (get_insns ());
8748
8749 /* delete redundant line notes. */
8750 if (write_symbols != NO_DEBUG)
8751 rm_redundant_line_notes ();
8752
8753 /* Update information about uses of registers in the subroutine. */
8754 if (reload_completed == 0)
8755 update_reg_usage ();
8756
8757 if (sched_verbose)
8758 {
8759 if (reload_completed == 0 && flag_schedule_interblock)
8760 {
8761 fprintf (dump, "\n;; Procedure interblock/speculative motions == %d/%d \n",
8762 nr_inter, nr_spec);
8763 }
8764 else
8765 {
8766 if (nr_inter > 0)
8767 abort ();
8768 }
8769 fprintf (dump, "\n\n");
8770 }
f187056f 8771
7c74b010
JW
8772 free (cant_move);
8773 free (fed_by_spec_load);
8774 free (is_load_insn);
8775 free (insn_orig_block);
8776 free (insn_luid);
8777
8778 free (insn_priority);
8779 free (insn_reg_weight);
8780 free (insn_tick);
8781 free (insn_costs);
8782 free (insn_units);
8783 free (insn_blockage);
8784 free (insn_ref_count);
8785
8786 free (insn_dep_count);
8787 free (insn_depend);
8788
8789 if (write_symbols != NO_DEBUG)
8790 free (line_note);
8791
f187056f
JL
8792 if (bb_live_regs)
8793 FREE_REG_SET (bb_live_regs);
168cbdf9
JL
8794
8795 if (edge_table)
8796 {
8797 free (edge_table);
8798 edge_table = NULL;
8799 }
8800
8801 if (in_edges)
8802 {
8803 free (in_edges);
8804 in_edges = NULL;
8805 }
8806 if (out_edges)
8807 {
8808 free (out_edges);
8809 out_edges = NULL;
8810 }
8c660648
JL
8811}
8812#endif /* INSN_SCHEDULING */