/* Perform instruction reorganizations for delay slot filling.
   Copyright (C) 1990, 1991 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@nyu.edu).
   Hacked by Michael Tiemann (tiemann@cygnus.com).

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

#include "insn-attr.h"

#ifdef DELAY_SLOTS

/* Instruction reorganization pass.

   This pass runs after register allocation and final jump
   optimization.  It should be the last pass to run before peephole.
   It serves primarily to fill delay slots of insns, typically branch
   and call insns.  Other insns typically involve more complicated
   interactions of data dependencies and resource constraints, and
   are better handled by scheduling before register allocation (by the
   function `schedule_insns').

   The Branch Penalty is the number of extra cycles that are needed to
   execute a branch insn.  On an ideal machine, branches take a single
   cycle, and the Branch Penalty is 0.  Several RISC machines approach
   branch delays differently:

   The MIPS and AMD 29000 have a single branch delay slot.  Most insns
   (except other branches) can be used to fill this slot.  When the
   slot is filled, two insns execute in two cycles, reducing the
   branch penalty to zero.

   The Motorola 88000 conditionally exposes its branch delay slot,
   so code is shorter when it is turned off, but will run faster
   when useful insns are scheduled there.

   The IBM ROMP has two forms of branch and call insns, both with and
   without a delay slot.  Much like the 88k, insns not using the delay
   slot can be shorter (2 bytes vs. 4 bytes), but will run slower.

   The SPARC always has a branch delay slot, but its effects can be
   annulled when the branch is not taken.  This means that failing to
   find other sources of insns, we can hoist an insn from the branch
   target that would only be safe to execute knowing that the branch
   is taken.

   Three techniques for filling delay slots have been implemented so far:

   (1) `fill_simple_delay_slots' is the simplest, most efficient way
   to fill delay slots.  This pass first looks for insns which come
   from before the branch and which are safe to execute after the
   branch.  Then it searches after the insn requiring delay slots or,
   in the case of a branch, for insns that are after the point at
   which the branch merges into the fallthrough code, if such a point
   exists.  When such insns are found, the branch penalty decreases
   and no code expansion takes place.

   (2) `fill_eager_delay_slots' is more complicated: it is used for
   scheduling conditional jumps, or for scheduling jumps which cannot
   be filled using (1).  A machine need not have annulled jumps to use
   this strategy, but it helps (by keeping more options open).
   `fill_eager_delay_slots' tries to guess the direction the branch
   will go; if it guesses right 100% of the time, it can reduce the
   branch penalty as much as `fill_simple_delay_slots' does.  If it
   guesses wrong 100% of the time, it might as well schedule nops (or
   on the m88k, unexpose the branch slot).  When
   `fill_eager_delay_slots' takes insns from the fall-through path of
   the jump, usually there is no code expansion; when it takes insns
   from the branch target, there is code expansion if it is not the
   only way to reach that target.

   (3) `relax_delay_slots' uses a set of rules to simplify code that
   has been reorganized by (1) and (2).  It finds cases where a
   conditional test can be eliminated, jumps can be threaded, extra
   insns can be eliminated, etc.  It is the job of (1) and (2) to do a
   good job of scheduling locally; `relax_delay_slots' takes care of
   making the various individual schedules work well together.  It is
   especially tuned to handle the control flow interactions of branch
   insns.  It does nothing for insns with delay slots that do not
   branch.

   On machines that use CC0, we are very conservative.  We will not make
   a copy of an insn involving CC0 since we want to maintain a 1-1
   correspondence between the insn that sets and uses CC0.  The insns are
   allowed to be separated by placing an insn that sets CC0 (but not an insn
   that uses CC0; we could do this, but it doesn't seem worthwhile) in a
   delay slot.  In that case, we point each insn at the other with REG_CC_USER
   and REG_CC_SETTER notes.  Note that these restrictions affect very few
   machines because most RISC machines with delay slots will not use CC0
   (the RT is the only known exception at this point).

   Not yet implemented:

   The Acorn RISC Machine can conditionally execute most insns, so
   it is profitable to move single insns into a position to execute
   based on the condition code of the previous insn.

   The HP-PA can conditionally nullify insns, providing a similar
   effect to the ARM, differing mostly in which insn is "in charge".  */
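/* To make the above concrete, here is an illustration (not part of the
   original commentary) of what slot filling does on a MIPS-style machine
   with a single delay slot.  The load does not feed the branch condition,
   so a pass like `fill_simple_delay_slots' may hoist it from before the
   branch into the slot:

       before reorg:                 after filling:
           lw   $4,0($5)                 beq  $6,$0,L1
           beq  $6,$0,L1                 lw   $4,0($5)   ; delay slot
           nop            ; empty slot
       L1: ...                       L1: ...

   Register numbers and mnemonics here are illustrative only.  */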
#include <stdio.h>
#include "config.h"
#include "rtl.h"
#include "insn-config.h"
#include "conditions.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "regs.h"
#include "insn-flags.h"
#include "recog.h"
#include "flags.h"
#include "output.h"
#include "obstack.h"

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

extern int xmalloc ();
extern void free ();

#ifndef ANNUL_IFTRUE_SLOTS
#define eligible_for_annul_true(INSN, SLOTS, TRIAL) 0
#endif
#ifndef ANNUL_IFFALSE_SLOTS
#define eligible_for_annul_false(INSN, SLOTS, TRIAL) 0
#endif

/* Insns which have delay slots that have not yet been filled.  */

static struct obstack unfilled_slots_obstack;
static rtx *unfilled_firstobj;

/* Define macros to refer to the first and last slot containing unfilled
   insns.  These are used because the list may move and its address
   should be recomputed at each use.  */

#define unfilled_slots_base \
  ((rtx *) obstack_base (&unfilled_slots_obstack))

#define unfilled_slots_next \
  ((rtx *) obstack_next_free (&unfilled_slots_obstack))
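/* Usage sketch (an illustration, not original text): because the obstack
   may be relocated as it grows, code always recomputes the base, e.g.

       rtx *uslot;
       for (uslot = unfilled_slots_base; uslot < unfilled_slots_next; uslot++)
         if (*uslot)
           ... fill the delay slots of *uslot ...
*/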
/* This structure is used to indicate which hardware resources are set or
   needed by insns so far.  */

struct resources
{
  char memory;                  /* Insn sets or needs a memory location.  */
  char volatil;                 /* Insn sets or needs a volatile memory loc.  */
  char cc;                      /* Insn sets or needs the condition codes.  */
  HARD_REG_SET regs;            /* Which registers are set or needed.  */
};

/* Macro to clear all resources.  */
#define CLEAR_RESOURCE(RES)     \
 do { (RES)->memory = (RES)->volatil = (RES)->cc = 0;   \
      CLEAR_HARD_REG_SET ((RES)->regs); } while (0)
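/* Usage sketch (illustration only, using functions defined later in this
   file): compute everything an insn refers to, then test for conflicts:

       struct resources needed;
       CLEAR_RESOURCE (&needed);
       mark_referenced_resources (insn, &needed, 1);

   The do { ... } while (0) wrapper makes the macro behave as a single
   statement, so it composes safely with if/else.  */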
/* Indicates what resources are required at function end.  */
static struct resources end_of_function_needs;

/* Points to the label before the end of the function.  */
static rtx end_of_function_label;

/* This structure is used to record liveness information at the targets or
   fallthrough insns of branches.  We will most likely need the information
   at targets again, so save it in a hash table rather than recomputing it
   each time.  */

struct target_info
{
  int uid;                      /* INSN_UID of target.  */
  struct target_info *next;     /* Next info for same hash bucket.  */
  HARD_REG_SET live_regs;       /* Registers live at target.  */
  int block;                    /* Basic block number containing target.  */
  int bb_tick;                  /* Generation count of basic block info.  */
};

#define TARGET_HASH_PRIME 257

/* Define the hash table itself.  */
static struct target_info **target_hash_table;
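/* Lookup sketch (illustration only; the actual lookup lives in code later
   in this file): entries chain per bucket, hashed by INSN_UID:

       struct target_info *tinfo;
       for (tinfo = target_hash_table[INSN_UID (target) % TARGET_HASH_PRIME];
            tinfo; tinfo = tinfo->next)
         if (tinfo->uid == INSN_UID (target))
           break;
*/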
/* For each basic block, we maintain a generation number of its basic
   block info, which is updated each time we move an insn from the
   target of a jump.  This is the generation number indexed by block
   number.  */

static int *bb_ticks;

/* Mapping between INSN_UID's and position in the code since INSN_UID's do
   not always monotonically increase.  */
static int *uid_to_ruid;

/* Highest valid index in `uid_to_ruid'.  */
static int max_uid;
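/* Illustration (not original text): once `uid_to_ruid' is filled in, a
   branch can be classified as backward with a simple comparison, e.g.

       uid_to_ruid[INSN_UID (jump_insn)] > uid_to_ruid[INSN_UID (label)]

   which is how `mostly_true_jump' below biases its prediction.  */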
/* Forward references: */

static int redundant_insn_p ();
static void update_block ();
\f
/* Given X, some rtl, and RES, a pointer to a `struct resources', mark
   which resources are referenced by the insn.  If INCLUDE_CALLED_ROUTINE
   is TRUE, resources used by the called routine will be included for
   CALL_INSNs.  */

static void
mark_referenced_resources (x, res, include_called_routine)
     register rtx x;
     register struct resources *res;
     register int include_called_routine;
{
  register enum rtx_code code = GET_CODE (x);
  register int i, j;
  register char *format_ptr;

  /* Handle leaf items for which we set resource flags.  Also, special-case
     CALL, SET and CLOBBER operators.  */
  switch (code)
    {
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case PC:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case SUBREG:
      if (GET_CODE (SUBREG_REG (x)) != REG)
        mark_referenced_resources (SUBREG_REG (x), res, 0);
      else
        {
          int regno = REGNO (SUBREG_REG (x)) + SUBREG_WORD (x);
          int last_regno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
          for (i = regno; i < last_regno; i++)
            SET_HARD_REG_BIT (res->regs, i);
        }
      return;

    case REG:
      for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
        SET_HARD_REG_BIT (res->regs, REGNO (x) + i);
      return;

    case MEM:
      /* If this memory shouldn't change, it really isn't referencing
         memory.  */
      if (! RTX_UNCHANGING_P (x))
        res->memory = 1;
      res->volatil = MEM_VOLATILE_P (x);

      /* Mark registers used to access memory.  */
      mark_referenced_resources (XEXP (x, 0), res, 0);
      return;

    case CC0:
      res->cc = 1;
      return;

    case CALL:
      /* The first operand will be a (MEM (xxx)) but doesn't really reference
         memory.  The second operand may be referenced, though.  */
      mark_referenced_resources (XEXP (XEXP (x, 0), 0), res, 0);
      mark_referenced_resources (XEXP (x, 1), res, 0);
      return;

    case SET:
      /* Usually, the first operand of SET is set, not referenced.  But
         registers used to access memory are referenced.  SET_DEST is
         also referenced if it is a ZERO_EXTRACT or SIGN_EXTRACT.  */

      mark_referenced_resources (SET_SRC (x), res, 0);

      x = SET_DEST (x);
      if (GET_CODE (x) == SIGN_EXTRACT || GET_CODE (x) == ZERO_EXTRACT)
        mark_referenced_resources (x, res, 0);
      else if (GET_CODE (x) == SUBREG)
        x = SUBREG_REG (x);
      if (GET_CODE (x) == MEM)
        mark_referenced_resources (XEXP (x, 0), res, 0);
      return;

    case CLOBBER:
      return;

    case CALL_INSN:
      if (include_called_routine)
        {
          /* A CALL references memory, the frame pointer if it exists, the
             stack pointer, and any registers given in USE insns immediately
             in front of the CALL.

             However, we may have moved some of the parameter loading insns
             into the delay slot of this CALL.  If so, the USE's for them
             don't count and should be skipped.  */
          rtx insn = PREV_INSN (x);
          rtx sequence = 0;
          int seq_size = 0;
          int i;

          /* If we are part of a delay slot sequence, point at the SEQUENCE.  */
          if (NEXT_INSN (insn) != x)
            {
              sequence = PATTERN (NEXT_INSN (insn));
              seq_size = XVECLEN (sequence, 0);
              if (GET_CODE (sequence) != SEQUENCE)
                abort ();
            }

          res->memory = 1;
          SET_HARD_REG_BIT (res->regs, STACK_POINTER_REGNUM);
          if (frame_pointer_needed)
            SET_HARD_REG_BIT (res->regs, FRAME_POINTER_REGNUM);

          /* Skip any labels between the CALL_INSN and possible USE insns.  */
          while (GET_CODE (insn) == CODE_LABEL)
            insn = PREV_INSN (insn);

          for ( ; (insn && GET_CODE (insn) == INSN
                   && GET_CODE (PATTERN (insn)) == USE);
               insn = PREV_INSN (insn))
            {
              for (i = 1; i < seq_size; i++)
                {
                  rtx slot_pat = PATTERN (XVECEXP (sequence, 0, i));
                  if (GET_CODE (slot_pat) == SET
                      && rtx_equal_p (SET_DEST (slot_pat),
                                      XEXP (PATTERN (insn), 0)))
                    break;
                }
              if (i >= seq_size)
                mark_referenced_resources (XEXP (PATTERN (insn), 0), res, 0);
            }
        }

      /* ... fall through to other INSN processing ...  */

    case INSN:
    case JUMP_INSN:
      /* No special processing, just speed up.  */
      mark_referenced_resources (PATTERN (x), res, include_called_routine);
      return;
    }

  /* Process each sub-expression and flag what it needs.  */
  format_ptr = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    switch (*format_ptr++)
      {
      case 'e':
        mark_referenced_resources (XEXP (x, i), res, include_called_routine);
        break;

      case 'E':
        for (j = 0; j < XVECLEN (x, i); j++)
          mark_referenced_resources (XVECEXP (x, i, j), res,
                                     include_called_routine);
        break;
      }
}
\f
/* Given an insn, INSN, and a pointer to a `struct resources', RES, indicate
   which resources are modified by the insn.  If INCLUDE_CALLED_ROUTINE
   is TRUE, also mark resources potentially set by the called routine.

   We never mark the insn as modifying the condition code unless it explicitly
   SETs CC0 even though this is not totally correct.  The reason for this is
   that we require a SET of CC0 to immediately precede the reference to CC0.
   So if some other insn sets CC0 as a side-effect, we know it cannot affect
   our computation and thus may be placed in a delay slot.  */

static void
mark_set_resources (insn, res, include_called_routine)
     register rtx insn;
     register struct resources *res;
     int include_called_routine;
{
  register int i;

  switch (GET_CODE (insn))
    {
    case NOTE:
    case BARRIER:
    case CODE_LABEL:
      /* These don't set any resources.  */
      return;

    case CALL_INSN:
      /* Called routine modifies the condition code, memory, any registers
         that aren't saved across calls, and anything explicitly CLOBBERed
         immediately after the CALL_INSN.  */

      if (include_called_routine)
        {
          rtx next = NEXT_INSN (insn);

          res->cc = res->memory = 1;
          for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
            if (call_used_regs[i])
              SET_HARD_REG_BIT (res->regs, i);

          /* Skip any possible labels between the CALL_INSN and CLOBBERs.  */
          while (GET_CODE (next) == CODE_LABEL)
            next = NEXT_INSN (next);

          for (; (next && GET_CODE (next) == INSN
                  && GET_CODE (PATTERN (next)) == CLOBBER);
               next = NEXT_INSN (next))
            mark_referenced_resources (XEXP (PATTERN (next), 0), res, 0);
        }

      /* ... and also what its RTL says it modifies, if anything.  */

    case JUMP_INSN:
    case INSN:
      {
        register rtx body = PATTERN (insn);
        register rtx note;

        /* An insn consisting of just a CLOBBER (or USE) is
           just for flow and doesn't actually do anything, so we don't check
           for it.

           If the source of a SET is a CALL, this is actually done by
           the called routine.  So only include it if we are to include the
           effects of the called routine.  */

        if (GET_CODE (body) == SET
            && (include_called_routine || GET_CODE (SET_SRC (body)) != CALL))
          mark_referenced_resources (SET_DEST (body), res, 0);
        else if (GET_CODE (body) == PARALLEL)
          {
            for (i = 0; i < XVECLEN (body, 0); i++)
              if ((GET_CODE (XVECEXP (body, 0, i)) == SET
                   && (include_called_routine
                       || GET_CODE (SET_SRC (XVECEXP (body, 0, i))) != CALL))
                  || GET_CODE (XVECEXP (body, 0, i)) == CLOBBER)
                mark_referenced_resources (SET_DEST (XVECEXP (body, 0, i)),
                                           res, 0);
          }
        else if (GET_CODE (body) == SEQUENCE)
          for (i = 0; i < XVECLEN (body, 0); i++)
            if (! (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
                   && INSN_FROM_TARGET_P (XVECEXP (body, 0, i))))
              mark_set_resources (XVECEXP (body, 0, i), res,
                                  include_called_routine);

#ifdef AUTO_INC_DEC
        /* If any registers are incremented or decremented in an address,
           they are set here.  */
        for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
          if (REG_NOTE_KIND (note) == REG_INC)
            mark_referenced_resources (XEXP (note, 0), res, 0);
#endif

#ifdef PUSH_ROUNDING
        /* An insn that has a PRE_DEC on SP will not have a REG_INC note.
           Until we fix this correctly, consider all insns as modifying
           SP on such machines.  So far, we don't have delay slot scheduling
           on any machines with PUSH_ROUNDING.  */
        SET_HARD_REG_BIT (res->regs, STACK_POINTER_REGNUM);
#endif
        return;
      }

    default:
      abort ();
    }
}
\f
/* Return TRUE if this insn should stop the search for insns to fill delay
   slots.  LABELS_P indicates that labels should terminate the search.
   In all cases, jumps terminate the search.  */

static int
stop_search_p (insn, labels_p)
     rtx insn;
     int labels_p;
{
  if (insn == 0)
    return 1;

  switch (GET_CODE (insn))
    {
    case NOTE:
    case CALL_INSN:
      return 0;

    case CODE_LABEL:
      return labels_p;

    case JUMP_INSN:
    case BARRIER:
      return 1;

    case INSN:
      /* OK unless it contains a delay slot or is an `asm' insn of some type.
         We don't know anything about these.  */
      return (GET_CODE (PATTERN (insn)) == SEQUENCE
              || GET_CODE (PATTERN (insn)) == ASM_INPUT
              || asm_noperands (PATTERN (insn)) >= 0);

    default:
      abort ();
    }
}
\f
/* Return TRUE if any resources are marked in both RES1 and RES2 or if either
   resource set contains a volatile memory reference.  Otherwise, return
   FALSE.  */

static int
resource_conflicts_p (res1, res2)
     struct resources *res1, *res2;
{
  if ((res1->cc && res2->cc) || (res1->memory && res2->memory)
      || res1->volatil || res2->volatil)
    return 1;

#ifdef HARD_REG_SET
  return (res1->regs & res2->regs) != HARD_CONST (0);
#else
  {
    int i;

    for (i = 0; i < HARD_REG_SET_LONGS; i++)
      if ((res1->regs[i] & res2->regs[i]) != 0)
        return 1;
    return 0;
  }
#endif
}
/* Return TRUE if any resource marked in RES, a `struct resources', is
   referenced by INSN.  If INCLUDE_CALLED_ROUTINE is set, return TRUE if the
   called routine is using those resources.

   We compute this by computing all the resources referenced by INSN and
   seeing if this conflicts with RES.  It might be faster to directly check
   ourselves, and this is the way it used to work, but it means duplicating
   a large block of complex code.  */

static int
insn_references_resource_p (insn, res, include_called_routine)
     register rtx insn;
     register struct resources *res;
     int include_called_routine;
{
  struct resources insn_res;

  CLEAR_RESOURCE (&insn_res);
  mark_referenced_resources (insn, &insn_res, include_called_routine);
  return resource_conflicts_p (&insn_res, res);
}

/* Return TRUE if INSN modifies resources that are marked in RES.
   INCLUDE_CALLED_ROUTINE is set if the actions of that routine should be
   included.  CC0 is only modified if it is explicitly set; see comments
   in front of mark_set_resources for details.  */

static int
insn_sets_resource_p (insn, res, include_called_routine)
     register rtx insn;
     register struct resources *res;
     int include_called_routine;
{
  struct resources insn_sets;

  CLEAR_RESOURCE (&insn_sets);
  mark_set_resources (insn, &insn_sets, include_called_routine);
  return resource_conflicts_p (&insn_sets, res);
}
\f
/* Find a label at the end of the function or before a RETURN.  If there is
   none, make one.  */

static rtx
find_end_label ()
{
  rtx insn;

  /* If we found one previously, return it.  */
  if (end_of_function_label)
    return end_of_function_label;

  /* Otherwise, see if there is a label at the end of the function.  If there
     is, it must be that RETURN insns aren't needed, so that is our return
     label and we don't have to do anything else.  */

  insn = get_last_insn ();
  while (GET_CODE (insn) == NOTE
         || (GET_CODE (insn) == INSN
             && (GET_CODE (PATTERN (insn)) == USE
                 || GET_CODE (PATTERN (insn)) == CLOBBER)))
    insn = PREV_INSN (insn);

  if (GET_CODE (insn) == CODE_LABEL)
    end_of_function_label = insn;
  else
    {
      /* Otherwise, make a new label and emit a RETURN and BARRIER,
         if needed.  */
      end_of_function_label = gen_label_rtx ();
      LABEL_NUSES (end_of_function_label) = 0;
      emit_label (end_of_function_label);
#ifdef HAVE_return
      if (HAVE_return)
        {
          emit_jump_insn (gen_return ());
          emit_barrier ();
        }
#endif
    }

  /* Show one additional use for this label so it won't go away until
     we are done.  */
  ++LABEL_NUSES (end_of_function_label);

  return end_of_function_label;
}
\f
/* Put INSN and LIST together in a SEQUENCE rtx of LENGTH, and replace
   the pattern of INSN with the SEQUENCE.

   Chain the insns so that NEXT_INSN of each insn in the sequence points to
   the next and NEXT_INSN of the last insn in the sequence points to
   the first insn after the sequence.  Similarly for PREV_INSN.  This makes
   it easier to scan all insns.

   Returns the SEQUENCE that replaces INSN.  */

static rtx
emit_delay_sequence (insn, list, length, avail)
     rtx insn;
     rtx list;
     int length;
     int avail;
{
  register int i = 1;
  register rtx li;
  int had_barrier = 0;

  /* Allocate the rtvec to hold the insns and the SEQUENCE.  */
  rtvec seqv = rtvec_alloc (length + 1);
  rtx seq = gen_rtx (SEQUENCE, VOIDmode, seqv);
  rtx seq_insn = make_insn_raw (seq);
  rtx first = get_insns ();
  rtx last = get_last_insn ();

  /* Make a copy of the insn having delay slots.  */
  rtx delay_insn = copy_rtx (insn);

  /* If INSN is followed by a BARRIER, delete the BARRIER since it will only
     confuse further processing.  Update LAST in case it was the last insn.
     We will put the BARRIER back in later.  */
  if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == BARRIER)
    {
      delete_insn (NEXT_INSN (insn));
      last = get_last_insn ();
      had_barrier = 1;
    }

  /* Splice our SEQUENCE into the insn stream where INSN used to be.  */
  NEXT_INSN (seq_insn) = NEXT_INSN (insn);
  PREV_INSN (seq_insn) = PREV_INSN (insn);

  if (insn == last)
    set_new_first_and_last_insn (first, seq_insn);
  else
    PREV_INSN (NEXT_INSN (seq_insn)) = seq_insn;

  if (insn == first)
    set_new_first_and_last_insn (seq_insn, last);
  else
    NEXT_INSN (PREV_INSN (seq_insn)) = seq_insn;

  /* Build our SEQUENCE and rebuild the insn chain.  */
  XVECEXP (seq, 0, 0) = delay_insn;
  INSN_DELETED_P (delay_insn) = 0;
  PREV_INSN (delay_insn) = PREV_INSN (seq_insn);

  for (li = list; li; li = XEXP (li, 1), i++)
    {
      rtx tem = XEXP (li, 0);
      rtx note;

      /* Show that this copy of the insn isn't deleted.  */
      INSN_DELETED_P (tem) = 0;

      XVECEXP (seq, 0, i) = tem;
      PREV_INSN (tem) = XVECEXP (seq, 0, i - 1);
      NEXT_INSN (XVECEXP (seq, 0, i - 1)) = tem;

      /* Remove any REG_DEAD notes because we can't rely on them now
         that the insn has been moved.  */
      for (note = REG_NOTES (tem); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD)
          XEXP (note, 0) = const0_rtx;
    }

  NEXT_INSN (XVECEXP (seq, 0, length)) = NEXT_INSN (seq_insn);

  /* If there used to be a BARRIER, put it back.  */
  if (had_barrier)
    emit_barrier_after (seq_insn);

  if (i != length + 1)
    abort ();

  return seq_insn;
}
/* Add INSN to DELAY_LIST and return the head of the new list.  The list must
   be in the order in which the insns are to be executed.  */

static rtx
add_to_delay_list (insn, delay_list)
     rtx insn;
     rtx delay_list;
{
  /* If we have an empty list, just make a new list element.  */
  if (delay_list == 0)
    return gen_rtx (INSN_LIST, VOIDmode, insn, 0);

  /* Otherwise this must be an INSN_LIST.  Add INSN to the end of the
     list.  */
  XEXP (delay_list, 1) = add_to_delay_list (insn, XEXP (delay_list, 1));

  return delay_list;
}
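/* Illustration (not original text): building a two-insn delay list in
   execution order appends at the tail, so

       rtx list = 0;
       list = add_to_delay_list (insn_a, list);
       list = add_to_delay_list (insn_b, list);

   yields (insn_list insn_a (insn_list insn_b nil)), with insn_a executed
   first.  */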
#ifdef HAVE_cc0
/* INSN uses CC0 and is being moved into a delay slot.  Set up REG_CC_SETTER
   and REG_CC_USER notes so we can find it.  */

static void
link_cc0_insns (insn)
     rtx insn;
{
  rtx user = next_nonnote_insn (insn);

  if (GET_CODE (user) == INSN && GET_CODE (PATTERN (user)) == SEQUENCE)
    user = XVECEXP (PATTERN (user), 0, 0);

  REG_NOTES (user) = gen_rtx (INSN_LIST, REG_CC_SETTER, insn,
                              REG_NOTES (user));
  REG_NOTES (insn) = gen_rtx (INSN_LIST, REG_CC_USER, user, REG_NOTES (insn));
}
#endif
\f
/* Delete INSN from the delay slot of the insn that it is in.  This may
   produce an insn without anything in its delay slots.  */

static void
delete_from_delay_slot (insn)
     rtx insn;
{
  rtx trial, seq_insn, seq, prev;
  rtx delay_list = 0;
  int i;

  /* We first must find the insn containing the SEQUENCE with INSN in its
     delay slot.  Do this by finding an insn, TRIAL, where
     PREV_INSN (NEXT_INSN (TRIAL)) != TRIAL.  */

  for (trial = insn;
       PREV_INSN (NEXT_INSN (trial)) == trial;
       trial = NEXT_INSN (trial))
    ;

  seq_insn = PREV_INSN (NEXT_INSN (trial));
  seq = PATTERN (seq_insn);

  /* Create a delay list consisting of all the insns other than the one
     we are deleting (unless we were the only one).  */
  if (XVECLEN (seq, 0) > 2)
    for (i = 1; i < XVECLEN (seq, 0); i++)
      if (XVECEXP (seq, 0, i) != insn)
        delay_list = add_to_delay_list (XVECEXP (seq, 0, i), delay_list);

  /* Delete the old SEQUENCE, re-emit the insn that used to have the delay
     list, and rebuild the delay list if non-empty.  */
  prev = PREV_INSN (seq_insn);
  trial = XVECEXP (seq, 0, 0);
  delete_insn (seq_insn);
  add_insn_after (trial, prev);

  if (GET_CODE (trial) == JUMP_INSN
      && (simplejump_p (trial) || GET_CODE (PATTERN (trial)) == RETURN))
    emit_barrier_after (trial);

  /* If there are any delay insns, re-emit them.  Otherwise clear the
     annul flag.  */
  if (delay_list)
    trial = emit_delay_sequence (trial, delay_list, XVECLEN (seq, 0) - 1, 0);
  else
    INSN_ANNULLED_BRANCH_P (trial) = 0;

  INSN_FROM_TARGET_P (insn) = 0;

  /* Show we need to fill this insn again.  */
  obstack_ptr_grow (&unfilled_slots_obstack, trial);
}
\f
/* Delete INSN, a JUMP_INSN.  If it is a conditional jump, we must track down
   the insn that sets CC0 for it and delete it too.  */

static void
delete_scheduled_jump (insn)
     rtx insn;
{
  /* Delete the insn that sets cc0 for us.  On machines without cc0, we could
     delete the insn that sets the condition code, but it is hard to find it.
     Since this case is rare anyway, don't bother trying; there would likely
     be other insns that became dead anyway, which we wouldn't know to
     delete.  */

#ifdef HAVE_cc0
  if (reg_mentioned_p (cc0_rtx, insn))
    {
      rtx note = find_reg_note (insn, REG_CC_SETTER, 0);

      /* If a reg-note was found, it points to an insn to set CC0.  This
         insn is in the delay list of some other insn.  So delete it from
         the delay list it was in.  */
      if (note)
        {
          if (! FIND_REG_INC_NOTE (XEXP (note, 0), 0)
              && sets_cc0_p (PATTERN (XEXP (note, 0))) == 1)
            delete_from_delay_slot (XEXP (note, 0));
        }
      else
        {
          /* The insn setting CC0 is our previous insn, but it may be in
             a delay slot.  It will be the last insn in the delay slot, if
             it is.  */
          rtx trial = previous_insn (insn);
          if (GET_CODE (trial) == NOTE)
            trial = prev_nonnote_insn (trial);
          if (sets_cc0_p (PATTERN (trial)) != 1
              || FIND_REG_INC_NOTE (trial, 0))
            return;
          if (PREV_INSN (NEXT_INSN (trial)) == trial)
            delete_insn (trial);
          else
            delete_from_delay_slot (trial);
        }
    }
#endif

  delete_insn (insn);
}
\f
/* Counters for delay-slot filling.  */

#define NUM_REORG_FUNCTIONS 2
#define MAX_DELAY_HISTOGRAM 3
#define MAX_REORG_PASSES 2

static int num_insns_needing_delays[NUM_REORG_FUNCTIONS][MAX_REORG_PASSES];

static int num_filled_delays[NUM_REORG_FUNCTIONS][MAX_DELAY_HISTOGRAM+1][MAX_REORG_PASSES];

static int reorg_pass_number;

static void
note_delay_statistics (slots_filled, index)
     int slots_filled, index;
{
  num_insns_needing_delays[index][reorg_pass_number]++;
  if (slots_filled > MAX_DELAY_HISTOGRAM)
    slots_filled = MAX_DELAY_HISTOGRAM;
  num_filled_delays[index][slots_filled][reorg_pass_number]++;
}
\f
#if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)

/* Optimize the following cases:

   1.  When a conditional branch skips over only one instruction,
       use an annulling branch and put that insn in the delay slot.
       Use either a branch that annuls when the condition is true or
       invert the test with a branch that annuls when the condition is
       false.  This saves insns, since otherwise we must copy an insn
       from the L1 target.

        (orig)           (skip)          (otherwise)
        Bcc.n L1         Bcc',a L1       Bcc,a L1'
        insn             insn            insn2
      L1:              L1:             L1:
        insn2            insn2           insn2
        insn3            insn3         L1':
                                         insn3

   2.  When a conditional branch skips over only one instruction,
       and after that, it unconditionally branches somewhere else,
       perform the similar optimization.  This saves executing the
       second branch in the case where the inverted condition is true.

        Bcc.n L1         Bcc',a L2
        insn             insn
      L1:              L1:
        Bra L2           Bra L2

   INSN is a JUMP_INSN.

   This should be expanded to skip over N insns, where N is the number
   of delay slots required.  */

static rtx
optimize_skip (insn)
     register rtx insn;
{
  register rtx trial = next_nonnote_insn (insn);
  rtx next_trial = next_active_insn (trial);
  rtx delay_list = 0;
  rtx target_label;

  if (trial == 0
      || GET_CODE (trial) != INSN
      || GET_CODE (PATTERN (trial)) == SEQUENCE
      || recog_memoized (trial) < 0
      || (! eligible_for_annul_false (insn, 0, trial)
          && ! eligible_for_annul_true (insn, 0, trial)))
    return 0;

  /* There are two cases where we are just executing one insn (we assume
     here that a branch requires only one insn; this should be generalized
     at some point):  Where the branch goes around a single insn or where
     we have one insn followed by a branch to the same label we branch to.
     In both of these cases, inverting the jump and annulling the delay
     slot give the same effect in fewer insns.  */
  if ((next_trial == next_active_insn (JUMP_LABEL (insn)))
      || (next_trial != 0
          && GET_CODE (next_trial) == JUMP_INSN
          && JUMP_LABEL (insn) == JUMP_LABEL (next_trial)
          && (simplejump_p (next_trial)
              || GET_CODE (PATTERN (next_trial)) == RETURN)))
    {
      if (eligible_for_annul_false (insn, 0, trial))
        {
          if (invert_jump (insn, JUMP_LABEL (insn)))
            INSN_FROM_TARGET_P (trial) = 1;
          else if (! eligible_for_annul_true (insn, 0, trial))
            return 0;
        }

      delay_list = add_to_delay_list (trial, 0);
      next_trial = next_active_insn (trial);
      update_block (trial, trial);
      delete_insn (trial);

      /* Also, if we are targeting an unconditional
         branch, thread our jump to the target of that branch.  Don't
         change this into a RETURN here, because it may not accept what
         we have in the delay slot.  We'll fix this up later.  */
      if (next_trial && GET_CODE (next_trial) == JUMP_INSN
          && (simplejump_p (next_trial)
              || GET_CODE (PATTERN (next_trial)) == RETURN))
        {
          target_label = JUMP_LABEL (next_trial);
          if (target_label == 0)
            target_label = find_end_label ();
          redirect_jump (insn, target_label);
        }

      INSN_ANNULLED_BRANCH_P (insn) = 1;
    }

  return delay_list;
}
#endif
\f
/* Return truth value of the statement that this branch
   is mostly taken.  If we think that the branch is extremely likely
   to be taken, we return 2.  If the branch is slightly more likely to be
   taken, return 1.  Otherwise, return 0.

   CONDITION, if non-zero, is the condition that JUMP_INSN is testing.  */

static int
mostly_true_jump (jump_insn, condition)
     rtx jump_insn, condition;
{
  rtx target_label = JUMP_LABEL (jump_insn);
  rtx insn;

  /* If this is a conditional return insn, assume it won't return.  */
  if (target_label == 0)
    return 0;

  /* If TARGET_LABEL has no jumps between it and the end of the function,
     this is essentially a conditional return, so predict it as false.  */
  for (insn = NEXT_INSN (target_label);
       insn && GET_CODE (insn) != JUMP_INSN;
       insn = NEXT_INSN (insn))
    ;

  if (insn == 0)
    return 0;

  /* If this is the test of a loop, it is very likely true.  We scan backwards
     from the target label.  If we find a NOTE_INSN_LOOP_BEG before the next
     real insn, we assume the branch is to the top of the loop.  */
  for (insn = PREV_INSN (target_label);
       insn && GET_CODE (insn) == NOTE;
       insn = PREV_INSN (insn))
    if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
      return 2;

  /* If we couldn't figure out what this jump was, assume it won't be
     taken.  This should be rare.  */
  if (condition == 0)
    return 0;

  /* EQ tests are usually false and NE tests are usually true.  Also,
     most quantities are positive, so we can make the appropriate guesses
     about signed comparisons against zero.  */
  switch (GET_CODE (condition))
    {
    case CONST_INT:
      /* Unconditional branch.  */
      return 1;
    case EQ:
      return 0;
    case NE:
      return 1;
    case LE:
    case LT:
      if (XEXP (condition, 1) == const0_rtx)
        return 0;
      break;
    case GE:
    case GT:
      if (XEXP (condition, 1) == const0_rtx)
        return 1;
      break;
    }

  /* Predict backward branches as usually taken and forward branches as
     usually not taken.  If we don't know whether this is forward or
     backward, assume the branch will be taken, since most are.  */
  return (INSN_UID (jump_insn) > max_uid || INSN_UID (target_label) > max_uid
          || (uid_to_ruid[INSN_UID (jump_insn)]
              > uid_to_ruid[INSN_UID (target_label)]));
}
/* Return the condition under which INSN will branch to TARGET.  If TARGET
   is zero, return the condition under which INSN will return.  If INSN is
   an unconditional branch, return const_true_rtx.  If INSN isn't a simple
   type of jump, or it doesn't go to TARGET, return 0.  */

static rtx
get_branch_condition (insn, target)
     rtx insn;
     rtx target;
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (GET_CODE (pat) == RETURN)
    return target == 0 ? const_true_rtx : 0;

  else if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
    return 0;

  src = SET_SRC (pat);
  if (GET_CODE (src) == LABEL_REF && XEXP (src, 0) == target)
    return const_true_rtx;

  else if (GET_CODE (src) == IF_THEN_ELSE
           && ((target == 0 && GET_CODE (XEXP (src, 1)) == RETURN)
               || (GET_CODE (XEXP (src, 1)) == LABEL_REF
                   && XEXP (XEXP (src, 1), 0) == target))
           && XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);

  else if (GET_CODE (src) == IF_THEN_ELSE
           && ((target == 0 && GET_CODE (XEXP (src, 2)) == RETURN)
               || (GET_CODE (XEXP (src, 2)) == LABEL_REF
                   && XEXP (XEXP (src, 2), 0) == target))
           && XEXP (src, 1) == pc_rtx)
    return gen_rtx (reverse_condition (GET_CODE (XEXP (src, 0))),
                    GET_MODE (XEXP (src, 0)),
                    XEXP (XEXP (src, 0), 0), XEXP (XEXP (src, 0), 1));

  /* Otherwise this is not a jump we recognize; return 0 so the function's
     value is well-defined in that case too.  */
  return 0;
}
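/* Illustration (not original text): for a conditional branch whose pattern
   is

       (set (pc) (if_then_else (eq (reg:SI 1) (const_int 0))
                               (label_ref L) (pc)))

   get_branch_condition (insn, L) returns (eq (reg:SI 1) (const_int 0)).
   If the arms are swapped, the reversed condition (ne ...) is built
   instead.  */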
/* Return non-zero if CONDITION is more strict than the condition of
   INSN, i.e., if INSN will always branch if CONDITION is true.  */

static int
condition_dominates_p (condition, insn)
     rtx condition;
     rtx insn;
{
  rtx other_condition = get_branch_condition (insn, JUMP_LABEL (insn));
  enum rtx_code code = GET_CODE (condition);
  enum rtx_code other_code;

  if (rtx_equal_p (condition, other_condition)
      || other_condition == const_true_rtx)
    return 1;

  else if (condition == const_true_rtx || other_condition == 0)
    return 0;

  other_code = GET_CODE (other_condition);
  if (GET_RTX_LENGTH (code) != 2 || GET_RTX_LENGTH (other_code) != 2
      || ! rtx_equal_p (XEXP (condition, 0), XEXP (other_condition, 0))
      || ! rtx_equal_p (XEXP (condition, 1), XEXP (other_condition, 1)))
    return 0;

  return comparison_dominates_p (code, other_code);
}
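/* Example (illustration, not original text): with identical operands,
   (eq a b) dominates (le a b), since a == b implies a <= b, so

       condition_dominates_p (eq_condition, insn_branching_on_le)

   is non-zero; comparison_dominates_p encodes this implication table.  */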
\f
/* INSN branches to an insn whose pattern SEQ is a SEQUENCE.  Given that
   the condition tested by INSN is CONDITION and the resources shown in
   OTHER_NEEDED are needed after INSN, see whether INSN can take all the insns
   from SEQ's delay list, in addition to whatever insns it may execute
   (in DELAY_LIST).  SETS and NEEDED denote resources already set and
   needed while searching for delay slot insns.  Return the concatenated
   delay list if possible, otherwise, return 0.

   SLOTS_TO_FILL is the total number of slots required by INSN, and
   PSLOTS_FILLED points to the number filled so far (also the number of
   insns in DELAY_LIST).  It is updated with the number that have been
   filled from the SEQUENCE, if any.

   PANNUL_P points to a non-zero value if we already know that we need
   to annul INSN.  If this routine determines that annulling is needed,
   it may set that value non-zero.

   PNEW_THREAD points to a location that is to receive the place at which
   execution should continue.  */

static rtx
steal_delay_list_from_target (insn, condition, seq, delay_list,
                              sets, needed, other_needed,
                              slots_to_fill, pslots_filled, pannul_p,
                              pnew_thread)
     rtx insn, condition;
     rtx seq;
     rtx delay_list;
     struct resources *sets, *needed, *other_needed;
     int slots_to_fill;
     int *pslots_filled;
     int *pannul_p;
     rtx *pnew_thread;
{
  rtx temp;
  int slots_remaining = slots_to_fill - *pslots_filled;
  int total_slots_filled = *pslots_filled;
  rtx new_delay_list = 0;
  int must_annul = *pannul_p;
  int i;

  /* We can't do anything if there are more delay slots in SEQ than we
     can handle, or if we don't know that it will be a taken branch.

     We know that it will be a taken branch if it is either an unconditional
     branch or a conditional branch with a stricter branch condition.  */

  if (XVECLEN (seq, 0) - 1 > slots_remaining
      || ! condition_dominates_p (condition, XVECEXP (seq, 0, 0)))
    return delay_list;

  for (i = 1; i < XVECLEN (seq, 0); i++)
    {
      rtx trial = XVECEXP (seq, 0, i);

      if (insn_references_resource_p (trial, sets, 0)
          || insn_sets_resource_p (trial, needed, 0)
          || insn_sets_resource_p (trial, sets, 0)
#ifdef HAVE_cc0
          /* If TRIAL sets CC0, we can't copy it, so we can't steal this
             delay list.  */
          || find_reg_note (trial, REG_CC_USER, 0)
#endif
          /* If TRIAL is from the fallthrough code of an annulled branch insn
             in SEQ, we cannot use it.  */
          || (INSN_ANNULLED_BRANCH_P (XVECEXP (seq, 0, 0))
              && ! INSN_FROM_TARGET_P (trial)))
        return delay_list;

      /* If this insn was already done (usually in a previous delay slot),
         pretend we put it in our delay slot.  */
      if (redundant_insn_p (trial, insn, new_delay_list))
        continue;

      if (! must_annul
          && ((condition == const_true_rtx
               || (! insn_sets_resource_p (trial, other_needed, 0)
                   && ! may_trap_p (PATTERN (trial)))))
          ? eligible_for_delay (insn, total_slots_filled, trial)
          : (must_annul = 1,
             eligible_for_annul_false (insn, total_slots_filled, trial)))
        {
          temp = copy_rtx (trial);
          INSN_FROM_TARGET_P (temp) = 1;
          new_delay_list = add_to_delay_list (temp, new_delay_list);
          total_slots_filled++;

          if (--slots_remaining == 0)
            break;
        }
      else
        return delay_list;
    }

  /* Show the place to which we will be branching.  */
  *pnew_thread = next_active_insn (JUMP_LABEL (XVECEXP (seq, 0, 0)));

  /* Add any new insns to the delay list and update the count of the
     number of slots filled.  */
  *pslots_filled = total_slots_filled;
  *pannul_p = must_annul;

  if (delay_list == 0)
    return new_delay_list;

  for (temp = new_delay_list; temp; temp = XEXP (temp, 1))
    delay_list = add_to_delay_list (XEXP (temp, 0), delay_list);

  return delay_list;
}
\f
/* Similar to steal_delay_list_from_target except that SEQ is on the
   fallthrough path of INSN.  Here we only do something if the delay insn
   of SEQ is an unconditional branch.  In that case we steal its delay slot
   for INSN since unconditional branches are much easier to fill.  */

static rtx
steal_delay_list_from_fallthrough (insn, condition, seq,
                                   delay_list, sets, needed, other_needed,
                                   slots_to_fill, pslots_filled, pannul_p)
     rtx insn, condition;
     rtx seq;
     rtx delay_list;
     struct resources *sets, *needed, *other_needed;
     int slots_to_fill;
     int *pslots_filled;
     int *pannul_p;
{
  int i;

  /* We can't do anything if SEQ's delay insn isn't an
     unconditional branch.  */

  if (! simplejump_p (XVECEXP (seq, 0, 0))
      && GET_CODE (PATTERN (XVECEXP (seq, 0, 0))) != RETURN)
    return delay_list;

  for (i = 1; i < XVECLEN (seq, 0); i++)
    {
      rtx trial = XVECEXP (seq, 0, i);

      /* If TRIAL sets CC0, stealing it will move it too far from the use
         of CC0.  */
      if (insn_references_resource_p (trial, sets, 0)
          || insn_sets_resource_p (trial, needed, 0)
          || insn_sets_resource_p (trial, sets, 0)
#ifdef HAVE_cc0
          || sets_cc0_p (PATTERN (trial))
#endif
          )
        break;

      /* If this insn was already done, we don't need it.  */
      if (redundant_insn_p (trial, insn, delay_list))
        {
          delete_from_delay_slot (trial);
          continue;
        }

      if (! *pannul_p
          && ((condition == const_true_rtx
               || (! insn_sets_resource_p (trial, other_needed, 0)
                   && ! may_trap_p (PATTERN (trial)))))
          ? eligible_for_delay (insn, *pslots_filled, trial)
          : (*pannul_p = 1,
             eligible_for_annul_true (insn, *pslots_filled, trial)))
        {
          delete_from_delay_slot (trial);
          delay_list = add_to_delay_list (trial, delay_list);

          if (++(*pslots_filled) == slots_to_fill)
            break;
        }
      else
        break;
    }

  return delay_list;
}
\f
/* Try merging insns starting at THREAD which match exactly the insns in
   INSN's delay list.

   If all insns were matched and the insn was previously annulling, the
   annul bit will be cleared.

   For each insn that is merged, if the branch is or will be non-annulling,
   we delete the merged insn.  */

static void
try_merge_delay_insns (insn, thread)
     rtx insn, thread;
{
  rtx trial, next_trial;
  rtx delay_insn = XVECEXP (PATTERN (insn), 0, 0);
  int annul_p = INSN_ANNULLED_BRANCH_P (delay_insn);
  int slot_number = 1;
  int num_slots = XVECLEN (PATTERN (insn), 0);
  rtx next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
  struct resources set, needed;
  rtx merged_insns = 0;
  int i;

  CLEAR_RESOURCE (&needed);
  CLEAR_RESOURCE (&set);

  /* If this is not an annulling branch, take into account anything needed in
     NEXT_TO_MATCH.  This prevents two increments from being incorrectly
     folded into one.  If we are annulling, this would be the correct
     thing to do.  (The alternative, looking at things set in NEXT_TO_MATCH,
     will essentially disable this optimization.  This method is somewhat of
     a kludge, but I don't see a better way.)  */
  if (! annul_p)
    mark_referenced_resources (next_to_match, &needed, 1);

  for (trial = thread; !stop_search_p (trial, 1); trial = next_trial)
    {
      rtx pat = PATTERN (trial);

      next_trial = next_nonnote_insn (trial);

      /* TRIAL must be a CALL_INSN or INSN.  Skip USE and CLOBBER.  */
      if (GET_CODE (trial) == INSN
          && (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER))
        continue;

      if (GET_CODE (next_to_match) == GET_CODE (trial)
#ifdef HAVE_cc0
          /* We can't share an insn that sets cc0.  */
          && ! sets_cc0_p (pat)
#endif
          && ! insn_references_resource_p (trial, &set, 1)
          && ! insn_sets_resource_p (trial, &set, 1)
          && ! insn_sets_resource_p (trial, &needed, 1)
          && (trial = try_split (pat, trial, 0)) != 0
          && rtx_equal_p (PATTERN (next_to_match), PATTERN (trial))
          /* Have to test this condition if annul condition is different
             from (and less restrictive than) non-annulling one.  */
          && eligible_for_delay (delay_insn, slot_number - 1, trial))
        {
          next_trial = next_nonnote_insn (trial);

          if (! annul_p)
            {
              update_block (trial, trial);
              delete_insn (trial);
              INSN_FROM_TARGET_P (next_to_match) = 0;
            }
          else
            merged_insns = gen_rtx (INSN_LIST, VOIDmode, trial, merged_insns);

          if (++slot_number == num_slots)
            break;

          next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
          if (! annul_p)
            mark_referenced_resources (next_to_match, &needed, 1);
        }

      mark_set_resources (trial, &set, 1);
      mark_referenced_resources (trial, &needed, 1);
    }

  /* See if we stopped on a filled insn.  If we did, try to see if its
     delay slots match.  */
  if (slot_number != num_slots
      && trial && GET_CODE (trial) == INSN
      && GET_CODE (PATTERN (trial)) == SEQUENCE
      && ! INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (trial), 0, 0)))
    {
      rtx pat = PATTERN (trial);

      for (i = 1; i < XVECLEN (pat, 0); i++)
        {
          rtx dtrial = XVECEXP (pat, 0, i);

          if (! insn_references_resource_p (dtrial, &set, 1)
              && ! insn_sets_resource_p (dtrial, &set, 1)
              && ! insn_sets_resource_p (dtrial, &needed, 1)
#ifdef HAVE_cc0
              && ! sets_cc0_p (PATTERN (dtrial))
#endif
              && rtx_equal_p (PATTERN (next_to_match), PATTERN (dtrial))
              && eligible_for_delay (delay_insn, slot_number - 1, dtrial))
            {
              if (! annul_p)
                {
                  update_block (dtrial, trial);
                  delete_from_delay_slot (dtrial);
                  INSN_FROM_TARGET_P (next_to_match) = 0;
                }
              else
                merged_insns = gen_rtx (INSN_LIST, SImode, dtrial,
                                        merged_insns);

              if (++slot_number == num_slots)
                break;

              next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
            }
        }
    }

  /* If all insns in the delay slot have been matched and we were previously
     annulling the branch, we need not do so any more.  In that case delete
     all the merged insns.  Also clear the INSN_FROM_TARGET_P bit of each
     insn in the delay list so that we know that it isn't only being used at
     the target.  */
  if (next_to_match == 0 && annul_p)
    {
      for (; merged_insns; merged_insns = XEXP (merged_insns, 1))
        {
          if (GET_MODE (merged_insns) == SImode)
            {
              update_block (XEXP (merged_insns, 0), trial);
              delete_from_delay_slot (XEXP (merged_insns, 0));
            }
          else
            {
              update_block (XEXP (merged_insns, 0), XEXP (merged_insns, 0));
              delete_insn (XEXP (merged_insns, 0));
            }
        }

      INSN_ANNULLED_BRANCH_P (delay_insn) = 0;

      for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i)) = 0;
    }
}
1462 | \f | |
1463 | /* See if INSN is redundant with an insn in front of TARGET. Often this | |
1464 | is called when INSN is a candidate for a delay slot of TARGET. | |
1465 | DELAY_LIST are insns that will be placed in delay slots of TARGET in front | |
1466 | of INSN. Often INSN will be redundant with an insn in a delay slot of | |
1467 | some previous insn. This happens when we have a series of branches to the | |
1468 | same label; in that case the first insn at the target might want to go | |
1469 | into each of the delay slots. | |
1470 | ||
1471 | If we are not careful, this routine can take up a significant fraction | |
1472 | of the total compilation time (4%), but only wins rarely. Hence we | |
1473 | speed this routine up by making two passes. The first pass goes back | |
1474 | until it hits a label and sees whether it finds an insn with an identical | |
1475 | pattern. Only in this (relatively rare) event does it check for | |
1476 | data conflicts. | |
1477 | ||
1478 | We do not split insns we encounter. This could cause us not to find a | |
1479 | redundant insn, but the cost of splitting seems greater than the possible | |
1480 | gain in rare cases. */ | |
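/* Illustrative example (hypothetical insns, not from any real target):
   given a series of branches to the same label,

	beq L1		; delay slot already filled with "add %r2,%r2,1"
	...
	beq L1		; INSN = "add %r2,%r2,1" is a candidate here
	...
   L1:	add %r2,%r2,1

   the candidate matches the copy already sitting in the first branch's
   delay slot, so it is redundant and need not be placed again.  */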
1481 | ||
1482 | static int | |
1483 | redundant_insn_p (insn, target, delay_list) | |
1484 | rtx insn; | |
1485 | rtx target; | |
1486 | rtx delay_list; | |
1487 | { | |
1488 | rtx target_main = target; | |
1489 | rtx ipat = PATTERN (insn); | |
1490 | rtx trial, pat; | |
1491 | struct resources needed, set; | |
1492 | int i; | |
1493 | ||
1494 | /* Scan backwards looking for a match. */ | |
1495 | for (trial = PREV_INSN (target); trial; trial = PREV_INSN (trial)) | |
1496 | { | |
1497 | if (GET_CODE (trial) == CODE_LABEL) | |
1498 | return 0; | |
1499 | ||
1500 | if (GET_CODE (trial) != INSN && GET_CODE (trial) != CALL_INSN | |
1501 | && GET_CODE (trial) != JUMP_INSN) | |
1502 | continue; | |
1503 | ||
1504 | pat = PATTERN (trial); | |
1505 | if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) | |
1506 | continue; | |
1507 | ||
1508 | if (GET_CODE (pat) == SEQUENCE) | |
1509 | { | |
1510 | /* Stop for a CALL and its delay slots because it is difficult to track | |
1511 | its resource needs correctly. */ | |
1512 | if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL_INSN) | |
1513 | return 0; | |
1514 | ||
1515 | for (i = XVECLEN (pat, 0) - 1; i > 0; i--) | |
1516 | if (GET_CODE (XVECEXP (pat, 0, i)) == GET_CODE (insn) | |
1517 | && rtx_equal_p (PATTERN (XVECEXP (pat, 0, i)), ipat)) | |
1518 | break; | |
1519 | ||
1520 | /* If we found a match, exit this loop early. */ | |
1521 | if (i > 0) | |
1522 | break; | |
1523 | } | |
1524 | ||
1525 | else if (GET_CODE (trial) == GET_CODE (insn) && rtx_equal_p (pat, ipat)) | |
1526 | break; | |
1527 | } | |
1528 | ||
1529 | /* If we didn't find an insn that matches, return 0. */ | |
1530 | if (trial == 0) | |
1531 | return 0; | |
1532 | ||
1533 | /* See what resources this insn sets and needs. If they overlap, or | |
1534 | if this insn references CC0, it can't be redundant. */ | |
1535 | ||
1536 | CLEAR_RESOURCE (&needed); | |
1537 | CLEAR_RESOURCE (&set); | |
1538 | mark_set_resources (insn, &set, 1); | |
1539 | mark_referenced_resources (insn, &needed, 1); | |
1540 | ||
1541 | /* If TARGET is a SEQUENCE, get the main insn. */ | |
1542 | if (GET_CODE (target) == INSN && GET_CODE (PATTERN (target)) == SEQUENCE) | |
1543 | target_main = XVECEXP (PATTERN (target), 0, 0); | |
1544 | ||
1545 | if (resource_conflicts_p (&needed, &set) | |
1546 | #ifdef HAVE_cc0 | |
1547 | || reg_mentioned_p (cc0_rtx, ipat) | |
1548 | #endif | |
1549 | /* The insn requiring the delay may not set anything needed or set by | |
1550 | INSN. */ | |
1551 | || insn_sets_resource_p (target_main, &needed, 1) | |
1552 | || insn_sets_resource_p (target_main, &set, 1)) | |
1553 | return 0; | |
1554 | ||
1555 | /* Insns we pass may not set either NEEDED or SET, so merge them for | |
1556 | simpler tests. */ | |
1557 | needed.memory |= set.memory; | |
1558 | IOR_HARD_REG_SET (needed.regs, set.regs); | |
1559 | ||
1560 | /* This insn isn't redundant if it conflicts with an insn that either is | |
1561 | or will be in a delay slot of TARGET. */ | |
1562 | ||
1563 | while (delay_list) | |
1564 | { | |
1565 | if (insn_sets_resource_p (XEXP (delay_list, 0), &needed, 1)) | |
1566 | return 0; | |
1567 | delay_list = XEXP (delay_list, 1); | |
1568 | } | |
1569 | ||
1570 | if (GET_CODE (target) == INSN && GET_CODE (PATTERN (target)) == SEQUENCE) | |
1571 | for (i = 1; i < XVECLEN (PATTERN (target), 0); i++) | |
1572 | if (insn_sets_resource_p (XVECEXP (PATTERN (target), 0, i), &needed, 1)) | |
1573 | return 0; | |
1574 | ||
1575 | /* Scan backwards until we reach a label or an insn that uses something | |
1576 | INSN sets or sets something INSN uses or sets. */ | |
1577 | ||
1578 | for (trial = PREV_INSN (target); | |
1579 | trial && GET_CODE (trial) != CODE_LABEL; | |
1580 | trial = PREV_INSN (trial)) | |
1581 | { | |
1582 | if (GET_CODE (trial) != INSN && GET_CODE (trial) != CALL_INSN | |
1583 | && GET_CODE (trial) != JUMP_INSN) | |
1584 | continue; | |
1585 | ||
1586 | pat = PATTERN (trial); | |
1587 | if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) | |
1588 | continue; | |
1589 | ||
1590 | if (GET_CODE (pat) == SEQUENCE) | |
1591 | { | |
1592 | /* If this is a CALL_INSN and its delay slots, it is hard to track | |
1593 | the resource needs properly, so give up. */ | |
1594 | if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL_INSN) | |
1595 | return 0; | |
1596 | ||
1597 | /* See if any of the insns in the delay slot match, updating | |
1598 | resource requirements as we go. */ | |
1599 | for (i = XVECLEN (pat, 0) - 1; i > 0; i--) | |
1600 | { | |
1601 | rtx candidate = XVECEXP (pat, 0, i); | |
1602 | ||
1603 | /* If an insn will be annulled if the branch is false, it isn't | |
1604 | considered as a possible duplicate insn. */ | |
1605 | if (rtx_equal_p (PATTERN (candidate), ipat) | |
1606 | && ! (INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0)) | |
1607 | && INSN_FROM_TARGET_P (candidate))) | |
1608 | { | |
1609 | /* Show that this insn will be used in the sequel. */ | |
1610 | INSN_FROM_TARGET_P (candidate) = 0; | |
1611 | return 1; | |
1612 | } | |
1613 | ||
1614 | /* Unless this is an annulled insn from the target of a branch, | |
1615 | we must stop if it sets anything needed or set by INSN. */ | |
1616 | if ((! INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0)) | |
1617 | || ! INSN_FROM_TARGET_P (candidate)) | |
1618 | && insn_sets_resource_p (candidate, &needed, 1)) | |
1619 | return 0; | |
1620 | } | |
1621 | ||
1623 | /* If the insn requiring the delay slot conflicts with INSN, we | |
1624 | must stop. */ | |
1625 | if (insn_sets_resource_p (XVECEXP (pat, 0, 0), &needed, 1)) | |
1626 | return 0; | |
1627 | } | |
1628 | else | |
1629 | { | |
1630 | /* See if TRIAL is the same as INSN. */ | |
1631 | pat = PATTERN (trial); | |
1632 | if (rtx_equal_p (pat, ipat)) | |
1633 | return 1; | |
1634 | ||
1635 | /* Can't go any further if TRIAL conflicts with INSN. */ | |
1636 | if (insn_sets_resource_p (trial, &needed, 1)) | |
1637 | return 0; | |
1638 | } | |
1639 | } | |
1640 | ||
1641 | return 0; | |
1642 | } | |
1643 | \f | |
1644 | /* Return 1 if THREAD can only be executed in one way. If LABEL is non-zero, | |
1645 | it is the target of the branch insn being scanned. If ALLOW_FALLTHROUGH | |
1646 | is non-zero, we are allowed to fall into this thread; otherwise, we are | |
1647 | not. | |
1648 | ||
1649 | If LABEL is used more than once or we pass a label other than LABEL before | |
1650 | finding an active insn, we do not own this thread. */ | |
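/* For instance (hypothetical insn stream): if THREAD starts at L2 and
   L2 is reached only by the branch being scanned (LABEL_NUSES == 1),
   we own the thread and may delete insns from it.  If some other jump
   also targets L2, deleting an insn from the thread would corrupt that
   other path, so we do not own it.  */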
1651 | ||
1652 | static int | |
1653 | own_thread_p (thread, label, allow_fallthrough) | |
1654 | rtx thread; | |
1655 | rtx label; | |
1656 | int allow_fallthrough; | |
1657 | { | |
1658 | rtx active_insn; | |
1659 | rtx insn; | |
1660 | ||
1661 | /* We don't own the function end. */ | |
1662 | if (thread == 0) | |
1663 | return 0; | |
1664 | ||
1665 | /* Get the first active insn, or THREAD, if it is an active insn. */ | |
1666 | active_insn = next_active_insn (PREV_INSN (thread)); | |
1667 | ||
1668 | for (insn = thread; insn != active_insn; insn = NEXT_INSN (insn)) | |
1669 | if (GET_CODE (insn) == CODE_LABEL | |
1670 | && (insn != label || LABEL_NUSES (insn) != 1)) | |
1671 | return 0; | |
1672 | ||
1673 | if (allow_fallthrough) | |
1674 | return 1; | |
1675 | ||
1676 | /* Ensure that we reach a BARRIER before any insn or label. */ | |
1677 | for (insn = prev_nonnote_insn (thread); | |
1678 | insn == 0 || GET_CODE (insn) != BARRIER; | |
1679 | insn = prev_nonnote_insn (insn)) | |
1680 | if (insn == 0 | |
1681 | || GET_CODE (insn) == CODE_LABEL | |
1682 | || (GET_CODE (insn) == INSN | |
1683 | && GET_CODE (PATTERN (insn)) != USE | |
1684 | && GET_CODE (PATTERN (insn)) != CLOBBER)) | |
1685 | return 0; | |
1686 | ||
1687 | return 1; | |
1688 | } | |
1689 | \f | |
1690 | /* Find the number of the basic block that starts closest to INSN. Return -1 | |
1691 | if we couldn't find such a basic block. */ | |
1692 | ||
1693 | static int | |
1694 | find_basic_block (insn) | |
1695 | rtx insn; | |
1696 | { | |
1697 | int i; | |
1698 | ||
1699 | /* Scan backwards to the previous BARRIER. Then see if we can find a | |
1700 | label that starts a basic block. Return the basic block number. */ | |
1701 | ||
1702 | for (insn = prev_nonnote_insn (insn); | |
1703 | insn && GET_CODE (insn) != BARRIER; | |
1704 | insn = prev_nonnote_insn (insn)) | |
1705 | ; | |
1706 | ||
1707 | /* The start of the function is basic block zero. */ | |
1708 | if (insn == 0) | |
1709 | return 0; | |
1710 | ||
1711 | /* See if any of the upcoming CODE_LABELs start a basic block. If we reach | |
1712 | anything other than a CODE_LABEL or note, we can't find this code. */ | |
1713 | for (insn = next_nonnote_insn (insn); | |
1714 | insn && GET_CODE (insn) == CODE_LABEL; | |
1715 | insn = next_nonnote_insn (insn)) | |
1716 | { | |
1717 | for (i = 0; i < n_basic_blocks; i++) | |
1718 | if (insn == basic_block_head[i]) | |
1719 | return i; | |
1720 | } | |
1721 | ||
1722 | return -1; | |
1723 | } | |
1724 | \f | |
1725 | /* Used for communication between the following two routines; it contains | |
1726 | the block number that INSN was in. */ | |
1727 | ||
1728 | static int current_block_number; | |
1729 | ||
1730 | /* Called via note_stores from update_block. It marks the | |
1731 | registers set in this insn as live at the start of the block whose | |
1732 | number is in current_block_number. */ | |
1733 | ||
1734 | static void | |
1735 | update_block_from_store (dest, x) | |
1736 | rtx dest; | |
1737 | rtx x; | |
1738 | { | |
1739 | int first_regno, last_regno; | |
1740 | int offset = 0; | |
1741 | int i; | |
1742 | ||
1743 | if (GET_CODE (x) != SET | |
1744 | || (GET_CODE (dest) != REG && (GET_CODE (dest) != SUBREG | |
1745 | || GET_CODE (SUBREG_REG (dest)) != REG))) | |
1746 | return; | |
1747 | ||
1748 | if (GET_CODE (dest) == SUBREG) | |
1749 | first_regno = REGNO (SUBREG_REG (dest)) + SUBREG_WORD (dest); | |
1750 | else | |
1751 | first_regno = REGNO (dest); | |
1752 | ||
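  /* Worked example of the regset arithmetic below (illustrative numbers
     only): with HOST_BITS_PER_INT == 32, hard register 37 lives in word
     37 / 32 == 1 at bit 37 % 32 == 5, so we OR (1 << 5) into
     basic_block_live_at_start[current_block_number][1].  */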
1753 | last_regno = first_regno + HARD_REGNO_NREGS (first_regno, GET_MODE (dest)); | |
1754 | for (i = first_regno; i < last_regno; i++) | |
1755 | basic_block_live_at_start[current_block_number][i / HOST_BITS_PER_INT] | |
1756 | |= (1 << (i % HOST_BITS_PER_INT)); | |
1757 | } | |
1758 | ||
1759 | /* Called when INSN is being moved from a location near the target of a jump. | |
1760 | If INSN is the first active insn at the start of its basic block, we can | |
1761 | just mark the registers set in INSN as live at the start of the basic block | |
1762 | that starts immediately before INSN. | |
1763 | ||
1764 | Otherwise, we leave a marker of the form (use (INSN)) immediately in front | |
1765 | of WHERE for mark_target_live_regs. These markers will be deleted when | |
1766 | reorg finishes. */ | |
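/* For example, an insn moved out of the middle of its block leaves a
   marker of the shape "(use (insn ...))" in front of WHERE; the USE
   handling in mark_target_live_regs then scans the wrapped insn as if
   it were still in place.  (Illustrative description only.)  */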
1767 | ||
1768 | static void | |
1769 | update_block (insn, where) | |
1770 | rtx insn; | |
1771 | rtx where; | |
1772 | { | |
1773 | /* Ignore if this was in a delay slot and it came from the target of | |
1774 | a branch. */ | |
1775 | if (INSN_FROM_TARGET_P (insn)) | |
1776 | return; | |
1777 | ||
1778 | current_block_number = find_basic_block (insn); | |
1779 | if (current_block_number == -1) | |
1780 | return; | |
1781 | ||
1782 | if (insn == next_active_insn (basic_block_head[current_block_number])) | |
1783 | note_stores (PATTERN (insn), update_block_from_store); | |
1784 | else | |
1785 | emit_insn_before (gen_rtx (USE, VOIDmode, insn), where); | |
1786 | ||
1787 | /* INSN might be making a value live in a block where it didn't use to | |
1788 | be. So recompute liveness information for this block. */ | |
1789 | bb_ticks[current_block_number]++; | |
1790 | } | |
1791 | \f | |
1792 | /* Marks registers possibly live at the current place being scanned by | |
1793 | mark_target_live_regs. Used only by the next two functions. */ | |
1794 | ||
1795 | static HARD_REG_SET current_live_regs; | |
1796 | ||
1797 | /* Marks registers for which we have seen a REG_DEAD note but no assignment. | |
1798 | Also only used by the next two functions. */ | |
1799 | ||
1800 | static HARD_REG_SET pending_dead_regs; | |
1801 | ||
1802 | /* Utility function called from mark_target_live_regs via note_stores. | |
1803 | It deadens any CLOBBERed registers and livens any SET registers. */ | |
1804 | ||
1805 | static void | |
1806 | update_live_status (dest, x) | |
1807 | rtx dest; | |
1808 | rtx x; | |
1809 | { | |
1810 | int first_regno, last_regno; | |
1811 | int i; | |
1812 | ||
1813 | if (GET_CODE (dest) != REG | |
1814 | && (GET_CODE (dest) != SUBREG || GET_CODE (SUBREG_REG (dest)) != REG)) | |
1815 | return; | |
1816 | ||
1817 | if (GET_CODE (dest) == SUBREG) | |
1818 | first_regno = REGNO (SUBREG_REG (dest)) + SUBREG_WORD (dest); | |
1819 | else | |
1820 | first_regno = REGNO (dest); | |
1821 | ||
1822 | last_regno = first_regno + HARD_REGNO_NREGS (first_regno, GET_MODE (dest)); | |
1823 | ||
1824 | if (GET_CODE (x) == CLOBBER) | |
1825 | for (i = first_regno; i < last_regno; i++) | |
1826 | CLEAR_HARD_REG_BIT (current_live_regs, i); | |
1827 | else | |
1828 | for (i = first_regno; i < last_regno; i++) | |
1829 | { | |
1830 | SET_HARD_REG_BIT (current_live_regs, i); | |
1831 | CLEAR_HARD_REG_BIT (pending_dead_regs, i); | |
1832 | } | |
1833 | } | |
1834 | ||
1835 | /* Similar to next_insn, but ignores insns in the delay slots of | |
1836 | an annulled branch. */ | |
1837 | ||
1838 | static rtx | |
1839 | next_insn_no_annul (insn) | |
1840 | rtx insn; | |
1841 | { | |
1842 | if (insn) | |
1843 | { | |
1844 | /* If INSN is an annulled branch, skip any insns from the target | |
1845 | of the branch. */ | |
1846 | if (INSN_ANNULLED_BRANCH_P (insn) | |
1847 | && NEXT_INSN (PREV_INSN (insn)) != insn) | |
1848 | while (INSN_FROM_TARGET_P (NEXT_INSN (insn))) | |
1849 | insn = NEXT_INSN (insn); | |
1850 | ||
1851 | insn = NEXT_INSN (insn); | |
1852 | if (insn && GET_CODE (insn) == INSN | |
1853 | && GET_CODE (PATTERN (insn)) == SEQUENCE) | |
1854 | insn = XVECEXP (PATTERN (insn), 0, 0); | |
1855 | } | |
1856 | ||
1857 | return insn; | |
1858 | } | |
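/* Sketch of the case handled above (hypothetical sequence): for an
   annulled branch whose filled form is
	(sequence [jump_insn; insn marked INSN_FROM_TARGET_P; insn])
   the marked insn executes only when the branch is taken, so a forward
   walk of the fallthrough path must skip it.  */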
1859 | \f | |
1860 | /* Set the resources that are live at TARGET. | |
1861 | ||
1862 | If TARGET is zero, we refer to the end of the current function and can | |
1863 | return our precomputed value. | |
1864 | ||
1865 | Otherwise, we try to find out what is live by consulting the basic block | |
1866 | information. This is tricky, because we must consider the actions of | |
1867 | reload and jump optimization, which occur after the basic block information | |
1868 | has been computed. | |
1869 | ||
1870 | Accordingly, we proceed as follows: | |
1871 | ||
1872 | We find the previous BARRIER and look at all immediately following labels | |
1873 | (with no intervening active insns) to see if any of them start a basic | |
1874 | block. If we hit the start of the function first, we use block 0. | |
1875 | ||
1876 | Once we have found a basic block and a corresponding first insn, we can | |
1877 | accurately compute the live status from basic_block_live_at_start and | |
1878 | reg_renumber. (By starting at a label following a BARRIER, we are immune | |
1879 | to actions taken by reload and jump.) Then we scan all insns between | |
1880 | that point and our target. For each CLOBBER (or for call-clobbered regs | |
1881 | when we pass a CALL_INSN), mark the appropriate registers as dead. For | |
1882 | a SET, mark them as live. | |
1883 | ||
1884 | We have to be careful when using REG_DEAD notes because they are not | |
1885 | updated by such things as find_equiv_reg. So keep track of registers | |
1886 | marked as dead that haven't been assigned to, and mark them dead at the | |
1887 | next CODE_LABEL since reload and jump won't propagate values across labels. | |
1888 | ||
1889 | If we cannot find the start of a basic block (should be a very rare | |
1890 | case, if it can happen at all), mark everything as potentially live. | |
1891 | ||
1892 | Next, scan forward from TARGET looking for things set or clobbered | |
1893 | before they are used. These are not live. | |
1894 | ||
1895 | Because we can be called many times on the same target, save our results | |
1896 | in a hash table indexed by INSN_UID. */ | |
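/* A minimal sketch of the hash-chain lookup described above, assuming
   only the target_hash_table and TARGET_HASH_PRIME declarations from
   earlier in this file.  Simplified: the real code below also
   revalidates the cached block number and bb_tick before trusting a
   hit.  */
#if 0
static struct target_info *
lookup_target_info (uid)
     int uid;
{
  struct target_info *tinfo;

  /* Walk the chain for this hash bucket looking for UID.  */
  for (tinfo = target_hash_table[uid % TARGET_HASH_PRIME];
       tinfo; tinfo = tinfo->next)
    if (tinfo->uid == uid)
      return tinfo;

  return 0;
}
#endif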
1897 | ||
1898 | static void | |
1899 | mark_target_live_regs (target, res) | |
1900 | rtx target; | |
1901 | struct resources *res; | |
1902 | { | |
1903 | int b = -1; | |
1904 | int i; | |
1905 | struct target_info *tinfo; | |
1906 | rtx insn, next; | |
1907 | rtx jump_insn = 0; | |
1908 | HARD_REG_SET scratch; | |
1909 | struct resources set, needed; | |
1910 | int jump_count = 0; | |
1911 | ||
1912 | /* Handle end of function. */ | |
1913 | if (target == 0) | |
1914 | { | |
1915 | *res = end_of_function_needs; | |
1916 | return; | |
1917 | } | |
1918 | ||
1919 | /* We have to assume memory is needed, but the CC isn't. */ | |
1920 | res->memory = 1; | |
1921 | res->volatil = 0; | |
1922 | res->cc = 0; | |
1923 | ||
1924 | /* See if we have computed this value already. */ | |
1925 | for (tinfo = target_hash_table[INSN_UID (target) % TARGET_HASH_PRIME]; | |
1926 | tinfo; tinfo = tinfo->next) | |
1927 | if (tinfo->uid == INSN_UID (target)) | |
1928 | break; | |
1929 | ||
1930 | /* Start by getting the basic block number. If we have saved information, | |
1931 | we can get it from there unless the insn at the start of the basic block | |
1932 | has been deleted. */ | |
1933 | if (tinfo && tinfo->block != -1 | |
1934 | && ! INSN_DELETED_P (basic_block_head[tinfo->block])) | |
1935 | b = tinfo->block; | |
1936 | ||
1937 | if (b == -1) | |
1938 | b = find_basic_block (target); | |
1939 | ||
1940 | if (tinfo) | |
1941 | { | |
1942 | /* If the information is up-to-date, use it. Otherwise, we will | |
1943 | update it below. */ | |
1944 | if (b == tinfo->block && b != -1 && tinfo->bb_tick == bb_ticks[b]) | |
1945 | { | |
1946 | COPY_HARD_REG_SET (res->regs, tinfo->live_regs); | |
1947 | return; | |
1948 | } | |
1949 | } | |
1950 | else | |
1951 | { | |
1952 | /* Allocate a place to put our results and chain it into the | |
1953 | hash table. */ | |
1954 | tinfo = (struct target_info *) oballoc (sizeof (struct target_info)); | |
1955 | tinfo->uid = INSN_UID (target); | |
1956 | tinfo->block = b; | |
1957 | tinfo->next = target_hash_table[INSN_UID (target) % TARGET_HASH_PRIME]; | |
1958 | target_hash_table[INSN_UID (target) % TARGET_HASH_PRIME] = tinfo; | |
1959 | } | |
1960 | ||
1961 | CLEAR_HARD_REG_SET (pending_dead_regs); | |
1962 | ||
1963 | /* If we found a basic block, get the live registers from it and update | |
1964 | them with anything set or killed between its start and the insn before | |
1965 | TARGET. Otherwise, we must assume everything is live. */ | |
1966 | if (b != -1) | |
1967 | { | |
1968 | regset regs_live = basic_block_live_at_start[b]; | |
1969 | int offset, bit, j; | |
1970 | int regno; | |
1971 | rtx start_insn, stop_insn; | |
1972 | ||
1973 | /* Compute hard regs live at start of block -- this is the real hard regs | |
1974 | marked live, plus live pseudo regs that have been renumbered to | |
1975 | hard regs. */ | |
1976 | ||
1977 | #ifdef HARD_REG_SET | |
1978 | current_live_regs = *regs_live; | |
1979 | #else | |
1980 | COPY_HARD_REG_SET (current_live_regs, regs_live); | |
1981 | #endif | |
1982 | ||
1983 | for (offset = 0, i = 0; offset < regset_size; offset++) | |
1984 | { | |
1985 | if (regs_live[offset] == 0) | |
1986 | i += HOST_BITS_PER_INT; | |
1987 | else | |
1988 | for (bit = 1; bit && i < max_regno; bit <<= 1, i++) | |
1989 | if ((regs_live[offset] & bit) | |
1990 | && (regno = reg_renumber[i]) >= 0) | |
1991 | for (j = regno; | |
1992 | j < regno + HARD_REGNO_NREGS (regno, | |
1993 | PSEUDO_REGNO_MODE (i)); | |
1994 | j++) | |
1995 | SET_HARD_REG_BIT (current_live_regs, j); | |
1996 | } | |
1997 | ||
1998 | /* Get starting and ending insn, handling the case where each might | |
1999 | be a SEQUENCE. */ | |
2000 | start_insn = (b == 0 ? get_insns () : basic_block_head[b]); | |
2001 | stop_insn = target; | |
2002 | ||
2003 | if (GET_CODE (start_insn) == INSN | |
2004 | && GET_CODE (PATTERN (start_insn)) == SEQUENCE) | |
2005 | start_insn = XVECEXP (PATTERN (start_insn), 0, 0); | |
2006 | ||
2007 | if (GET_CODE (stop_insn) == INSN | |
2008 | && GET_CODE (PATTERN (stop_insn)) == SEQUENCE) | |
2009 | stop_insn = next_insn (PREV_INSN (stop_insn)); | |
2010 | ||
2011 | for (insn = start_insn; insn != stop_insn; | |
2012 | insn = next_insn_no_annul (insn)) | |
2013 | { | |
2014 | rtx link; | |
2015 | rtx real_insn = insn; | |
2016 | ||
2017 | /* If this insn is from the target of a branch, it isn't going to | |
2018 | be used in the sequel. If it is used in both cases, this | |
2019 | test will not be true. */ | |
2020 | if (INSN_FROM_TARGET_P (insn)) | |
2021 | continue; | |
2022 | ||
2023 | /* If this insn is a USE made by update_block, we care about the | |
2024 | underlying insn. */ | |
2025 | if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE | |
2026 | && (GET_CODE (XEXP (PATTERN (insn), 0)) == INSN | |
2027 | || GET_CODE (XEXP (PATTERN (insn), 0)) == CALL_INSN | |
2028 | || GET_CODE (XEXP (PATTERN (insn), 0)) == JUMP_INSN)) | |
2029 | real_insn = XEXP (PATTERN (insn), 0); | |
2030 | ||
2031 | if (GET_CODE (real_insn) == CALL_INSN) | |
2032 | { | |
2033 | /* CALL clobbers all call-used regs that aren't fixed except | |
2034 | sp, ap, and fp. Do this before setting the result of the | |
2035 | call live. */ | |
2036 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | |
2037 | if (call_used_regs[i] && ! fixed_regs[i] | |
2038 | && i != STACK_POINTER_REGNUM && i != FRAME_POINTER_REGNUM | |
2039 | && i != ARG_POINTER_REGNUM) | |
2040 | CLEAR_HARD_REG_BIT (current_live_regs, i); | |
2041 | } | |
2042 | ||
2043 | /* Mark anything killed in an insn to be deadened at the next | |
2044 | label. Ignore USE insns; the only REG_DEAD notes will be for | |
2045 | parameters. But they might be early. A CALL_INSN will usually | |
2046 | clobber registers used for parameters. It isn't worth bothering | |
2047 | with the unlikely case when it won't. */ | |
2048 | if ((GET_CODE (real_insn) == INSN | |
2049 | && GET_CODE (PATTERN (real_insn)) != USE) | |
2050 | || GET_CODE (real_insn) == JUMP_INSN | |
2051 | || GET_CODE (real_insn) == CALL_INSN) | |
2052 | { | |
2053 | for (link = REG_NOTES (real_insn); link; link = XEXP (link, 1)) | |
2054 | if (REG_NOTE_KIND (link) == REG_DEAD | |
2055 | && GET_CODE (XEXP (link, 0)) == REG | |
2056 | && REGNO (XEXP (link, 0)) < FIRST_PSEUDO_REGISTER) | |
2057 | { | |
2058 | int first_regno = REGNO (XEXP (link, 0)); | |
2059 | int last_regno | |
2060 | = (first_regno | |
2061 | + HARD_REGNO_NREGS (first_regno, | |
2062 | GET_MODE (XEXP (link, 0)))); | |
2063 | ||
2064 | for (i = first_regno; i < last_regno; i++) | |
2065 | SET_HARD_REG_BIT (pending_dead_regs, i); | |
2066 | } | |
2067 | ||
2068 | note_stores (PATTERN (real_insn), update_live_status); | |
2069 | ||
2070 | /* If any registers were unused after this insn, kill them. | |
2071 | These notes will always be accurate. */ | |
2072 | for (link = REG_NOTES (real_insn); link; link = XEXP (link, 1)) | |
2073 | if (REG_NOTE_KIND (link) == REG_UNUSED | |
2074 | && GET_CODE (XEXP (link, 0)) == REG | |
2075 | && REGNO (XEXP (link, 0)) < FIRST_PSEUDO_REGISTER) | |
2076 | { | |
2077 | int first_regno = REGNO (XEXP (link, 0)); | |
2078 | int last_regno | |
2079 | = (first_regno | |
2080 | + HARD_REGNO_NREGS (first_regno, | |
2081 | GET_MODE (XEXP (link, 0)))); | |
2082 | ||
2083 | for (i = first_regno; i < last_regno; i++) | |
2084 | CLEAR_HARD_REG_BIT (current_live_regs, i); | |
2085 | } | |
2086 | } | |
2087 | ||
2088 | if (GET_CODE (real_insn) == CODE_LABEL) | |
2089 | { | |
2090 | /* A label clobbers the pending dead registers since neither | |
2091 | reload nor jump will propagate a value across a label. */ | |
2092 | AND_COMPL_HARD_REG_SET (current_live_regs, pending_dead_regs); | |
2093 | CLEAR_HARD_REG_SET (pending_dead_regs); | |
2094 | } | |
2095 | } | |
2096 | ||
2097 | COPY_HARD_REG_SET (res->regs, current_live_regs); | |
2098 | tinfo->block = b; | |
2099 | tinfo->bb_tick = bb_ticks[b]; | |
2100 | } | |
2101 | else | |
2102 | /* We didn't find the start of a basic block. Assume everything | |
2103 | in use. This should happen only extremely rarely. */ | |
2104 | SET_HARD_REG_SET (res->regs); | |
2105 | ||
2106 | /* Now step forward from TARGET looking for registers that are set before | |
2107 | they are used. These are dead. If we pass a label, any pending dead | |
2108 | registers that weren't yet used can be made dead. Stop when we pass a | |
2109 | conditional JUMP_INSN; follow the first few unconditional branches. */ | |
2110 | ||
2111 | CLEAR_RESOURCE (&set); | |
2112 | CLEAR_RESOURCE (&needed); | |
2113 | ||
2114 | for (insn = target; insn; insn = next) | |
2115 | { | |
2116 | rtx main_insn = insn; | |
2117 | ||
2118 | next = NEXT_INSN (insn); | |
2119 | switch (GET_CODE (insn)) | |
2120 | { | |
2121 | case CODE_LABEL: | |
2122 | AND_COMPL_HARD_REG_SET (pending_dead_regs, needed.regs); | |
2123 | AND_COMPL_HARD_REG_SET (res->regs, pending_dead_regs); | |
2124 | CLEAR_HARD_REG_SET (pending_dead_regs); | |
2125 | continue; | |
2126 | ||
2127 | case BARRIER: | |
2128 | case NOTE: | |
2129 | continue; | |
2130 | ||
2131 | case INSN: | |
2132 | if (GET_CODE (PATTERN (insn)) == USE | |
2133 | || GET_CODE (PATTERN (insn)) == CLOBBER) | |
2134 | continue; | |
2135 | if (GET_CODE (PATTERN (insn)) == SEQUENCE) | |
2136 | main_insn = XVECEXP (PATTERN (insn), 0, 0); | |
2137 | } | |
2138 | ||
2139 | if (GET_CODE (main_insn) == JUMP_INSN) | |
2140 | { | |
2141 | if (jump_count++ < 10 | |
2142 | && (simplejump_p (main_insn) | |
2143 | || GET_CODE (PATTERN (main_insn)) == RETURN)) | |
2144 | { | |
2145 | next = next_active_insn (JUMP_LABEL (main_insn)); | |
2146 | if (jump_insn == 0) | |
2147 | jump_insn = insn; | |
2148 | } | |
2149 | else | |
2150 | break; | |
2151 | } | |
2152 | ||
2153 | mark_referenced_resources (insn, &needed, 1); | |
2154 | mark_set_resources (insn, &set, 1); | |
2155 | ||
2156 | COPY_HARD_REG_SET (scratch, set.regs); | |
2157 | AND_COMPL_HARD_REG_SET (scratch, needed.regs); | |
2158 | AND_COMPL_HARD_REG_SET (res->regs, scratch); | |
2159 | } | |
2160 | ||
2161 | /* If we hit an unconditional branch, we have another way of finding out | |
2162 | what is live: we can see what is live at the branch target and include | |
2163 | anything used but not set before the branch. The only things that are | |
2164 | live are those that are live using the above test and the test below. */ | |
2165 | if (jump_insn) | |
2166 | { | |
2167 | rtx jump_target = (GET_CODE (jump_insn) == INSN | |
2168 | ? JUMP_LABEL (XVECEXP (PATTERN (jump_insn), 0, 0)) | |
2169 | : JUMP_LABEL (jump_insn)); | |
2170 | struct resources new_resources; | |
2171 | rtx stop_insn = next_active_insn (jump_insn); | |
2172 | ||
2173 | mark_target_live_regs (next_active_insn (jump_target), &new_resources); | |
2174 | CLEAR_RESOURCE (&set); | |
2175 | CLEAR_RESOURCE (&needed); | |
2176 | ||
2177 | /* Include JUMP_INSN in the needed registers. */ | |
2178 | for (insn = target; insn != stop_insn; insn = next_active_insn (insn)) | |
2179 | { | |
2180 | mark_referenced_resources (insn, &needed, 1); | |
2181 | ||
2182 | COPY_HARD_REG_SET (scratch, needed.regs); | |
2183 | AND_COMPL_HARD_REG_SET (scratch, set.regs); | |
2184 | IOR_HARD_REG_SET (new_resources.regs, scratch); | |
2185 | ||
2186 | mark_set_resources (insn, &set, 1); | |
2187 | } | |
2188 | ||
2189 | AND_HARD_REG_SET (res->regs, new_resources.regs); | |
2190 | } | |
2191 | ||
2192 | COPY_HARD_REG_SET (tinfo->live_regs, res->regs); | |
2193 | } | |
2194 | \f | |
2195 | /* Scan a function looking for insns that need a delay slot and find insns to | |
2196 | put into the delay slot. | |
2197 | ||
2198 | NON_JUMPS_P is non-zero if we are to only try to fill non-jump insns (such | |
2199 | as calls). We do these first since we don't want jump insns (that are | |
2200 | easier to fill) to get the only insns that could be used for non-jump insns. | |
2201 | When it is zero, only try to fill JUMP_INSNs. | |
2202 | ||
2203 | When slots are filled in this manner, the insns (including the | |
2204 | delay_insn) are put together in a SEQUENCE rtx. In this fashion, | |
2205 | it is possible to tell whether a delay slot has really been filled | |
2206 | or not. `final' knows how to deal with this, by communicating | |
2207 | through FINAL_SEQUENCE. */ | |
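/* For example (illustrative RTL shape): a call whose single delay slot
   has been filled is rewritten as

	(insn (sequence [(call_insn ...)
			 (insn ...)]))		; the delay-slot insn

   while an unfilled call keeps its ordinary CALL_INSN form.  */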
2208 | ||
2209 | static void | |
2210 | fill_simple_delay_slots (first, non_jumps_p) | |
2211 | rtx first; int non_jumps_p; | |
2212 | { | |
2213 | register rtx insn, pat, trial, next_trial; | |
2214 | register int i; | |
2215 | int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base; | |
2216 | struct resources needed, set; | |
2217 | register int slots_to_fill, slots_filled; | |
2218 | rtx delay_list; | |
2219 | ||
2220 | for (i = 0; i < num_unfilled_slots; i++) | |
2221 | { | |
2222 | /* Get the next insn to fill. If it has already had any slots assigned, | |
2223 | we can't do anything with it. Maybe we'll improve this later. */ | |
2224 | ||
2225 | insn = unfilled_slots_base[i]; | |
2226 | if (insn == 0 | |
2227 | || INSN_DELETED_P (insn) | |
2228 | || (GET_CODE (insn) == INSN | |
2229 | && GET_CODE (PATTERN (insn)) == SEQUENCE) | |
2230 | || (GET_CODE (insn) == JUMP_INSN && non_jumps_p) | |
2231 | || (GET_CODE (insn) != JUMP_INSN && ! non_jumps_p)) | |
2232 | continue; | |
2233 | ||
2234 | slots_to_fill = num_delay_slots (insn); | |
2235 | if (slots_to_fill == 0) | |
2236 | abort (); | |
2237 | ||
2238 | /* This insn needs, or can use, some delay slots. SLOTS_TO_FILL | |
2239 | says how many. After initialization, scan backwards from the | |
2240 | insn to search for a potential delay-slot candidate. Stop | |
2241 | searching when a label or jump is hit. | |
2242 | ||
2243 | For each candidate, if it is to go into the delay slot (moved | |
2244 | forward in execution sequence), it must not need or set any resources | |
2245 | that were set by later insns and must not set any resources that | |
2246 | are needed for those insns. | |
2247 | ||
2248 | The delay slot insn itself sets resources unless it is a call | |
2249 | (in which case the called routine, not the insn itself, is doing | |
2250 | the setting). */ | |
2251 | ||
2252 | slots_filled = 0; | |
2253 | delay_list = 0; | |
2254 | CLEAR_RESOURCE (&needed); | |
2255 | CLEAR_RESOURCE (&set); | |
2256 | mark_set_resources (insn, &set, 0); | |
2257 | mark_referenced_resources (insn, &needed, 0); | |
2258 | ||
2259 | for (trial = prev_nonnote_insn (insn); ! stop_search_p (trial, 1); | |
2260 | trial = next_trial) | |
2261 | { | |
2262 | next_trial = prev_nonnote_insn (trial); | |
2263 | ||
2264 | /* This must be an INSN or CALL_INSN. */ | |
2265 | pat = PATTERN (trial); | |
2266 | ||
2267 | /* USE and CLOBBER at this level are just for flow; ignore them. */ | |
2268 | if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) | |
2269 | continue; | |
2270 | ||
2271 | /* Check for resource conflict first, to avoid unnecessary | |
2272 | splitting. */ | |
2273 | if (! insn_references_resource_p (trial, &set, 1) | |
2274 | && ! insn_sets_resource_p (trial, &set, 1) | |
2275 | && ! insn_sets_resource_p (trial, &needed, 1) | |
2276 | #ifdef HAVE_cc0 | |
2277 | /* Can't separate set of cc0 from its use. */ | |
2278 | && ! (reg_mentioned_p (cc0_rtx, pat) | |
2279 | && ! sets_cc0_p (pat)) | |
2280 | #endif | |
2281 | ) | |
2282 | { | |
2283 | trial = try_split (pat, trial, 1); | |
2284 | next_trial = prev_nonnote_insn (trial); | |
2285 | if (eligible_for_delay (insn, slots_filled, trial)) | |
2286 | { | |
2287 | delay_list = add_to_delay_list (trial, delay_list); | |
2288 | update_block (trial, trial); | |
2289 | delete_insn (trial); | |
2290 | if (slots_to_fill == ++slots_filled) | |
2291 | break; | |
2292 | continue; | |
2293 | } | |
2294 | } | |
2295 | ||
2296 | mark_set_resources (trial, &set, 1); | |
2297 | mark_referenced_resources (trial, &needed, 1); | |
2298 | } | |
2299 | ||
2300 | if (slots_filled == slots_to_fill) | |
2301 | /* happy. */ ; | |
2302 | ||
2303 | /* If all needed slots haven't been filled, we come here. */ | |
2304 | ||
2305 | /* Try to optimize case of jumping around a single insn. */ | |
2306 | #if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS) | |
2307 | else if (delay_list == 0 | |
2308 | && GET_CODE (insn) == JUMP_INSN && condjump_p (insn)) | |
2309 | { | |
2310 | delay_list = optimize_skip (insn); | |
2311 | if (delay_list) | |
2312 | slots_filled += 1; | |
2313 | } | |
2314 | #endif | |
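/* The single-insn skip handled by optimize_skip, sketched for a
   hypothetical target whose annulling form cancels the delay insn when
   the branch is taken:

	beq  L1			beq,a L1
	add  %r1,%r1,1	  =>	 add  %r1,%r1,1	; runs only on fallthrough
   L1:			   L1:

   The insn being jumped around moves into the annulled delay slot, and
   no separate slot filler is needed.  */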
2315 | ||
2316 | /* @@ This would be a good place to optimize: | |
2317 | ||
2318 | call _foo call _foo | |
2319 | nop add %o7,.-L1,%o7 | |
2320 | b,a L1 | |
2321 | nop | |
2322 | ||
2323 | Someday... */ | |
2324 | ||
2325 | /* Try to get insns from beyond the insn needing the delay slot. | |
2326 | These insns can neither set nor reference resources set in insns being | |
2327 | skipped, cannot set resources in the insn being skipped, and, if this | |
2328 | is a CALL_INSN (or a CALL_INSN is passed), cannot trap (because the | |
2329 | call might not return). | |
2330 | ||
2331 | If this is a conditional jump, see if it merges back to us early | |
2332 | enough for us to pick up insns from the merge point. Don't do | |
2333 | this if there is another branch to our label unless we pass all of | |
2334 | them. | |
2335 | ||
2336 | Another similar merge is if we jump to the same place that a | |
2337 | later unconditional jump branches to. In that case, we don't | |
2338 | care about the number of uses of our label. */ | |
2339 | ||
2340 | else if (GET_CODE (insn) != JUMP_INSN | |
2341 | || (condjump_p (insn) && ! simplejump_p (insn) | |
2342 | && JUMP_LABEL (insn) != 0)) | |
2343 | { | |
2344 | rtx target = 0; | |
2345 | int maybe_never = 0; | |
2346 | int passed_label = 0; | |
2347 | int target_uses; | |
2348 | struct resources needed_at_jump; | |
2349 | ||
2350 | CLEAR_RESOURCE (&needed); | |
2351 | CLEAR_RESOURCE (&set); | |
2352 | ||
2353 | if (GET_CODE (insn) == CALL_INSN) | |
2354 | { | |
2355 | mark_set_resources (insn, &set, 1); | |
2356 | mark_referenced_resources (insn, &needed, 1); | |
2357 | maybe_never = 1; | |
2358 | } | |
2359 | else if (GET_CODE (insn) == JUMP_INSN) | |
2360 | { | |
2361 | /* Get our target and show how many more uses we want to | |
2362 | see before we hit the label. */ | |
2363 | target = JUMP_LABEL (insn); | |
2364 | target_uses = LABEL_NUSES (target) - 1; | |
2365 | } | |
2366 | ||
2367 | for (trial = next_nonnote_insn (insn); trial; trial = next_trial) | |
2368 | { | |
2369 | rtx pat, trial_delay; | |
2370 | ||
2371 | next_trial = next_nonnote_insn (trial); | |
2372 | ||
2373 | if (GET_CODE (trial) == CODE_LABEL) | |
2374 | { | |
2375 | passed_label = 1; | |
2376 | ||
2377 | /* If this is our target, see if we have seen all its uses. | |
2378 | If so, indicate we have passed our target and ignore it. | |
2379 | All other labels cause us to stop our search. */ | |
2380 | if (trial == target && target_uses == 0) | |
2381 | { | |
2382 | target = 0; | |
2383 | continue; | |
2384 | } | |
2385 | else | |
2386 | break; | |
2387 | } | |
2388 | else if (GET_CODE (trial) == BARRIER) | |
2389 | break; | |
2390 | ||
2391 | /* We must have an INSN, JUMP_INSN, or CALL_INSN. */ | |
2392 | pat = PATTERN (trial); | |
2393 | ||
2394 | /* Stand-alone USE and CLOBBER are just for flow. */ | |
2395 | if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) | |
2396 | continue; | |
2397 | ||
2398 | /* If this already has filled delay slots, get the insn needing | |
2399 | the delay slots. */ | |
2400 | if (GET_CODE (pat) == SEQUENCE) | |
2401 | trial_delay = XVECEXP (pat, 0, 0); | |
2402 | else | |
2403 | trial_delay = trial; | |
2404 | ||
2405 | /* If this is a jump insn to our target, indicate that we have | |
2406 | seen another jump to it. If we aren't handling a conditional | |
2407 | jump, stop our search. Otherwise, compute the needs at its | |
2408 | target and add them to NEEDED. */ | |
2409 | if (GET_CODE (trial_delay) == JUMP_INSN) | |
2410 | { | |
2411 | if (target == 0) | |
2412 | break; | |
2413 | else if (JUMP_LABEL (trial_delay) == target) | |
2414 | target_uses--; | |
2415 | else | |
2416 | { | |
2417 | mark_target_live_regs | |
2418 | (next_active_insn (JUMP_LABEL (trial_delay)), | |
2419 | &needed_at_jump); | |
2420 | needed.memory |= needed_at_jump.memory; | |
2421 | IOR_HARD_REG_SET (needed.regs, needed_at_jump.regs); | |
2422 | } | |
2423 | } | |
2424 | ||
2425 | /* See if we have a resource problem before we try to | |
2426 | split. */ | |
2427 | if (target == 0 | |
2428 | && GET_CODE (pat) != SEQUENCE | |
2429 | && ! insn_references_resource_p (trial, &set, 1) | |
2430 | && ! insn_sets_resource_p (trial, &set, 1) | |
2431 | && ! insn_sets_resource_p (trial, &needed, 1) | |
2432 | #ifdef HAVE_cc0 | |
2433 | && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat)) | |
2434 | #endif | |
2435 | && ! (maybe_never && may_trap_p (pat)) | |
2436 | && (trial = try_split (pat, trial, 0)) | |
2437 | && eligible_for_delay (insn, slots_filled, trial)) | |
2438 | { | |
2439 | next_trial = next_nonnote_insn (trial); | |
2440 | delay_list = add_to_delay_list (trial, delay_list); | |
2441 | ||
2442 | #ifdef HAVE_cc0 | |
2443 | if (reg_mentioned_p (cc0_rtx, pat)) | |
2444 | link_cc0_insns (trial); | |
2445 | #endif | |
2446 | ||
2447 | if (passed_label) | |
2448 | update_block (trial, trial); | |
2449 | delete_insn (trial); | |
2450 | if (slots_to_fill == ++slots_filled) | |
2451 | break; | |
2452 | continue; | |
2453 | } | |
2454 | ||
2455 | mark_set_resources (trial, &set, 1); | |
2456 | mark_referenced_resources (trial, &needed, 1); | |
2457 | ||
2458 | /* Ensure we don't put insns between the setting of cc and the | |
2459 | comparison by moving a setting of cc into an earlier delay | |
2460 | slot since these insns could clobber the condition code. */ | |
2461 | set.cc = 1; | |
2462 | ||
2463 | /* If this is a call or jump, we might not get here. */ | |
2464 | if (GET_CODE (trial) == CALL_INSN | |
2465 | || GET_CODE (trial) == JUMP_INSN) | |
2466 | maybe_never = 1; | |
2467 | } | |
2468 | ||
2469 | /* If there are slots left to fill and our search was stopped by an | |
2470 | unconditional branch, try the insn at the branch target. We can | |
2471 | redirect the branch if it works. */ | |
2472 | if (slots_to_fill != slots_filled | |
2473 | && trial | |
2474 | && GET_CODE (trial) == JUMP_INSN | |
2475 | && simplejump_p (trial) | |
2476 | && (target == 0 || JUMP_LABEL (trial) == target) | |
2477 | && (next_trial = next_active_insn (JUMP_LABEL (trial))) != 0 | |
2478 | && ! (GET_CODE (next_trial) == INSN | |
2479 | && GET_CODE (PATTERN (next_trial)) == SEQUENCE) | |
2480 | && ! insn_references_resource_p (next_trial, &set, 1) | |
2481 | && ! insn_sets_resource_p (next_trial, &set, 1) | |
2482 | && ! insn_sets_resource_p (next_trial, &needed, 1) | |
2483 | #ifdef HAVE_cc0 | |
2484 | && ! (reg_mentioned_p (cc0_rtx, PATTERN (next_trial)) | |
2485 | && ! sets_cc0_p (PATTERN (next_trial))) | |
2486 | #endif | |
2487 | && ! (maybe_never && may_trap_p (PATTERN (next_trial))) | |
2488 | && (next_trial = try_split (PATTERN (next_trial), next_trial, 0)) | |
2489 | && eligible_for_delay (insn, slots_filled, next_trial)) | |
2490 | { | |
2491 | rtx new_label = next_active_insn (next_trial); | |
2492 | ||
2493 | if (new_label != 0) | |
2494 | new_label = get_label_before (new_label); | |
2495 | ||
2496 | delay_list | |
2497 | = add_to_delay_list (copy_rtx (next_trial), delay_list); | |
2498 | slots_filled++; | |
2499 | redirect_jump (trial, new_label); | |
2500 | ||
2501 | /* If we merged because we both jumped to the same place, | |
2502 | redirect the original insn also. */ | |
2503 | if (target) | |
2504 | redirect_jump (insn, new_label); | |
2505 | } | |
2506 | } | |
2507 | ||
2508 | if (delay_list) | |
2509 | unfilled_slots_base[i] | |
2510 | = emit_delay_sequence (insn, delay_list, | |
2511 | slots_filled, slots_to_fill); | |
2512 | ||
2513 | if (slots_to_fill == slots_filled) | |
2514 | unfilled_slots_base[i] = 0; | |
2515 | ||
2516 | note_delay_statistics (slots_filled, 0); | |
2517 | } | |
2518 | ||
2519 | #ifdef DELAY_SLOTS_FOR_EPILOGUE | |
2520 | /* See if the epilogue needs any delay slots. Try to fill them if so. | |
2521 | The only thing we can do is scan backwards from the end of the | |
2522 | function. If we did this in a previous pass, it is incorrect to do it | |
2523 | again. */ | |
2524 | if (current_function_epilogue_delay_list) | |
2525 | return; | |
2526 | ||
2527 | slots_to_fill = DELAY_SLOTS_FOR_EPILOGUE; | |
2528 | if (slots_to_fill == 0) | |
2529 | return; | |
2530 | ||
2531 | slots_filled = 0; | |
2532 | CLEAR_RESOURCE (&needed); | |
2533 | CLEAR_RESOURCE (&set); | |
2534 | ||
2535 | for (trial = get_last_insn (); ! stop_search_p (trial, 1); | |
2536 | trial = PREV_INSN (trial)) | |
2537 | { | |
2538 | if (GET_CODE (trial) == NOTE) | |
2539 | continue; | |
2540 | pat = PATTERN (trial); | |
2541 | if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) | |
2542 | continue; | |
2543 | ||
2544 | if (! insn_references_resource_p (trial, &set, 1) | |
2545 | && ! insn_sets_resource_p (trial, &needed, 1) | |
2546 | #ifdef HAVE_cc0 | |
2547 | /* Don't want to mess with cc0 here. */ | |
2548 | && ! reg_mentioned_p (cc0_rtx, pat) | |
2549 | #endif | |
2550 | ) | |
2551 | { | |
2552 | trial = try_split (pat, trial, 1); | |
2553 | if (ELIGIBLE_FOR_EPILOGUE_DELAY (trial, slots_filled)) | |
2554 | { | |
2555 | current_function_epilogue_delay_list | |
2556 | = add_to_delay_list (trial, | |
2557 | current_function_epilogue_delay_list); | |
2558 | mark_referenced_resources (trial, &end_of_function_needs, 1); | |
2559 | update_block (trial, trial); | |
2560 | delete_insn (trial); | |
2561 | ||
2562 | /* Clear deleted bit so final.c will output the insn. */ | |
2563 | INSN_DELETED_P (trial) = 0; | |
2564 | ||
2565 | if (slots_to_fill == ++slots_filled) | |
2566 | break; | |
2567 | continue; | |
2568 | } | |
2569 | } | |
2570 | ||
2571 | mark_set_resources (trial, &set, 1); | |
2572 | mark_referenced_resources (trial, &needed, 1); | |
2573 | } | |
2574 | ||
2575 | note_delay_statistics (slots_filled, 0); | |
2576 | #endif | |
2577 | } | |
2578 | \f | |
2579 | /* Try to find insns to place in delay slots. | |
2580 | ||
2581 | INSN is the jump needing SLOTS_TO_FILL delay slots. It tests CONDITION | |
2582 | or is an unconditional branch if CONDITION is const_true_rtx. | |
2583 | *PSLOTS_FILLED is updated with the number of slots that we have filled. | |
2584 | ||
2585 | THREAD is a flow-of-control, either the insns to be executed if the | |
2586 | branch is true or if the branch is false, THREAD_IF_TRUE says which. | |
2587 | ||
2588 | OPPOSITE_THREAD is the thread in the opposite direction. It is used | |
2589 | to see if any potential delay slot insns set things needed there. | |
2590 | ||
2591 | LIKELY is non-zero if it is extremely likely that the branch will be | |
2592 | taken and THREAD_IF_TRUE is set. This is used for the branch at the | |
2593 | end of a loop back up to the top. | |
2594 | ||
2595 | OWN_THREAD and OWN_OPPOSITE_THREAD are true if we are the only user of the | |
2596 | thread. I.e., it is the fallthrough code of our jump or the target of the | |
2597 | jump when we are the only jump going there. | |
2598 | ||
2599 | If OWN_THREAD is false, it must be the "true" thread of a jump. In that | |
2600 | case, we can only take insns from the head of the thread for our delay | |
2601 | slot. We then adjust the jump to point after the insns we have taken. */ | |
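/* Illustrative picture (hypothetical): for a conditional branch
   "beq L1", the true thread starts at L1 and the opposite thread is the
   fallthrough insn after the branch; when filling from the fallthrough
   side the roles are reversed.  THREAD_IF_TRUE distinguishes the two
   calls made for each jump.  */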
2602 | ||
2603 | static rtx | |
2604 | fill_slots_from_thread (insn, condition, thread, opposite_thread, likely, | |
2605 | thread_if_true, own_thread, own_opposite_thread, | |
2606 | slots_to_fill, pslots_filled) | |
2607 | rtx insn; | |
2608 | rtx condition; | |
2609 | rtx thread, opposite_thread; | |
2610 | int likely; | |
2611 | int thread_if_true; | |
2612 | int own_thread, own_opposite_thread; | |
2613 | int slots_to_fill, *pslots_filled; | |
2614 | { | |
2615 | rtx new_thread = thread; | |
2616 | rtx delay_list = 0; | |
2617 | struct resources opposite_needed, set, needed; | |
2618 | rtx trial; | |
2619 | int lose = 0; | |
2620 | int must_annul = 0; | |
2621 | ||
2622 | /* Validate our arguments. */ | |
2623 | if ((condition == const_true_rtx && ! thread_if_true) | |
2624 | || (! own_thread && ! thread_if_true)) | |
2625 | abort (); | |
2626 | ||
2627 | /* If our thread is the end of the subroutine, we can't get any delay | |
2628 | insns from that. */ | |
2629 | if (thread == 0) | |
2630 | return 0; | |
2631 | ||
2632 | /* If this is an unconditional branch, nothing is needed at the | |
2633 | opposite thread. Otherwise, compute what is needed there. */ | |
2634 | if (condition == const_true_rtx) | |
2635 | CLEAR_RESOURCE (&opposite_needed); | |
2636 | else | |
2637 | mark_target_live_regs (opposite_thread, &opposite_needed); | |
2638 | ||
2639 | /* Scan insns at THREAD. We are looking for an insn that can be removed | |
2640 | from THREAD (it neither sets nor references resources that were set | |
2641 | ahead of it and it doesn't set anything needed by the insns ahead of | |
2642 | it) and that either can be placed in an annulling insn or isn't | |
2643 | needed at OPPOSITE_THREAD. */ | |
2644 | ||
2645 | CLEAR_RESOURCE (&needed); | |
2646 | CLEAR_RESOURCE (&set); | |
2647 | ||
2648 | /* If we do not own this thread, we must stop as soon as we find | |
2649 | something that we can't put in a delay slot, since all we can do | |
2650 | is branch into THREAD at a later point. Therefore, labels stop | |
2651 | the search if this is not the `true' thread. */ | |
2652 | ||
2653 | for (trial = thread; | |
2654 | ! stop_search_p (trial, ! thread_if_true) && (! lose || own_thread); | |
2655 | trial = next_nonnote_insn (trial)) | |
2656 | { | |
2657 | rtx pat; | |
2658 | ||
2659 | /* If we have passed a label, we no longer own this thread. */ | |
2660 | if (GET_CODE (trial) == CODE_LABEL) | |
2661 | { | |
2662 | own_thread = 0; | |
2663 | continue; | |
2664 | } | |
2665 | ||
2666 | pat = PATTERN (trial); | |
2667 | if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) | |
2668 | continue; | |
2669 | ||
2670 | /* If TRIAL conflicts with the insns ahead of it, we lose. Also, | |
2671 | don't separate or copy insns that set and use CC0. */ | |
2672 | if (! insn_references_resource_p (trial, &set, 1) | |
2673 | && ! insn_sets_resource_p (trial, &set, 1) | |
2674 | && ! insn_sets_resource_p (trial, &needed, 1) | |
2675 | #ifdef HAVE_cc0 | |
2676 | && ! (reg_mentioned_p (cc0_rtx, pat) | |
2677 | && (! own_thread || ! sets_cc0_p (pat))) | |
2678 | #endif | |
2679 | ) | |
2680 | { | |
2681 | /* If TRIAL is redundant with some insn before INSN, we don't | |
2682 | actually need to add it to the delay list; we can merely pretend | |
2683 | we did. */ | |
2684 | if (redundant_insn_p (trial, insn, delay_list)) | |
2685 | { | |
2686 | if (own_thread) | |
2687 | { | |
2688 | update_block (trial, trial); | |
2689 | delete_insn (trial); | |
2690 | } | |
2691 | else | |
2692 | new_thread = next_active_insn (trial); | |
2693 | ||
2694 | continue; | |
2695 | } | |
2696 | ||
2697 | /* There are two ways we can win: If TRIAL doesn't set anything | |
2698 | needed at the opposite thread and can't trap, or if it can | |
2699 | go into an annulled delay slot. */ | |
2700 | if (condition == const_true_rtx | |
2701 | || (! insn_sets_resource_p (trial, &opposite_needed, 1) | |
2702 | && ! may_trap_p (pat))) | |
2703 | { | |
2704 | trial = try_split (pat, trial, 0); | |
2705 | pat = PATTERN (trial); | |
2706 | if (eligible_for_delay (insn, *pslots_filled, trial)) | |
2707 | goto winner; | |
2708 | } | |
2709 | else if (0 | |
2710 | #ifdef ANNUL_IFTRUE_SLOTS | |
2711 | || ! thread_if_true | |
2712 | #endif | |
2713 | #ifdef ANNUL_IFFALSE_SLOTS | |
2714 | || thread_if_true | |
2715 | #endif | |
2716 | ) | |
2717 | { | |
2718 | trial = try_split (pat, trial, 0); | |
2719 | pat = PATTERN (trial); | |
2720 | if ((thread_if_true | |
2721 | ? eligible_for_annul_false (insn, *pslots_filled, trial) | |
2722 | : eligible_for_annul_true (insn, *pslots_filled, trial))) | |
2723 | { | |
2724 | rtx temp; | |
2725 | ||
2726 | must_annul = 1; | |
2727 | winner: | |
2728 | ||
2729 | #ifdef HAVE_cc0 | |
2730 | if (reg_mentioned_p (cc0_rtx, pat)) | |
2731 | link_cc0_insns (trial); | |
2732 | #endif | |
2733 | ||
2734 | /* If we own this thread, delete the insn. If this is the | |
2735 | destination of a branch, show that a basic block status | |
2736 | may have been updated. In any case, mark the new | |
2737 | starting point of this thread. */ | |
2738 | if (own_thread) | |
2739 | { | |
2740 | update_block (trial, trial); | |
2741 | delete_insn (trial); | |
2742 | } | |
2743 | else | |
2744 | new_thread = next_active_insn (trial); | |
2745 | ||
2746 | temp = own_thread ? trial : copy_rtx (trial); | |
2747 | if (thread_if_true) | |
2748 | INSN_FROM_TARGET_P (temp) = 1; | |
2749 | ||
2750 | delay_list = add_to_delay_list (temp, delay_list); | |
2751 | ||
2752 | if (slots_to_fill == ++(*pslots_filled)) | |
2753 | { | |
2754 | /* Even though we have filled all the slots, we | |
2755 | may be branching to a location that has a | |
2756 | redundant insn. Skip any if so. */ | |
2757 | while (new_thread && ! own_thread | |
2758 | && ! insn_sets_resource_p (new_thread, &set, 1) | |
2759 | && ! insn_sets_resource_p (new_thread, &needed, 1) | |
2760 | && ! insn_references_resource_p (new_thread, | |
2761 | &set, 1) | |
2762 | && redundant_insn_p (new_thread, insn, | |
2763 | delay_list)) | |
2764 | new_thread = next_active_insn (new_thread); | |
2765 | break; | |
2766 | } | |
2767 | ||
2768 | continue; | |
2769 | } | |
2770 | } | |
2771 | } | |
2772 | ||
2773 | /* This insn can't go into a delay slot. */ | |
2774 | lose = 1; | |
2775 | mark_set_resources (trial, &set, 1); | |
2776 | mark_referenced_resources (trial, &needed, 1); | |
2777 | ||
2778 | /* Ensure we don't put insns between the setting of cc and the comparison | |
2779 | by moving a setting of cc into an earlier delay slot since these insns | |
2780 | could clobber the condition code. */ | |
2781 | set.cc = 1; | |
2782 | ||
2783 | /* If this insn is a register-register copy and the next insn has | |
2784 | a use of our destination, change it to use our source. That way, | |
2785 | it will become a candidate for our delay slot the next time | |
2786 | through this loop. This case occurs commonly in loops that | |
2787 | scan a list. | |
2788 | ||
2789 | We could check for more complex cases than those tested below, | |
2790 | but it doesn't seem worth it. It might also be a good idea to try | |
2791 | to swap the two insns. That might do better. */ | |
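      /* E.g. (hypothetical registers):

		r1 = r2			; TRIAL: reg-reg copy, blocked now
		r3 = r1 + 4	  =>	r3 = r2 + 4

	 After the replacement the copy no longer feeds the next insn, so
	 it can become a delay-slot candidate on a later iteration.  */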
2792 | ||
2793 | if (GET_CODE (trial) == INSN && GET_CODE (pat) == SET | |
2794 | && GET_CODE (SET_SRC (pat)) == REG | |
2795 | && GET_CODE (SET_DEST (pat)) == REG) | |
2796 | { | |
2797 | rtx next = next_nonnote_insn (trial); | |
2798 | int our_dest = REGNO (SET_DEST (pat)); | |
2799 | ||
2800 | if (next && GET_CODE (next) == INSN | |
2801 | && GET_CODE (PATTERN (next)) == SET | |
2802 | && GET_CODE (SET_DEST (PATTERN (next))) == REG | |
2803 | && REGNO (SET_DEST (PATTERN (next))) != our_dest | |
2804 | && refers_to_regno_p (our_dest, our_dest + 1, | |
2805 | SET_SRC (PATTERN (next)), 0)) | |
2806 | validate_replace_rtx (SET_DEST (pat), SET_SRC (pat), next); | |
2807 | } | |
2808 | } | |
2809 | ||
2810 | /* If we stopped on a branch insn that has delay slots, see if we can | |
2811 | steal some of the insns in those slots. */ | |
2812 | if (trial && GET_CODE (trial) == INSN | |
2813 | && GET_CODE (PATTERN (trial)) == SEQUENCE | |
2814 | && GET_CODE (XVECEXP (PATTERN (trial), 0, 0)) == JUMP_INSN) | |
2815 | { | |
2816 | /* If this is the `true' thread, we will want to follow the jump, | |
2817 | so we can only do this if we have taken everything up to here. */ | |
2818 | if (thread_if_true && trial == new_thread) | |
2819 | delay_list | |
2820 | = steal_delay_list_from_target (insn, condition, PATTERN (trial), | |
2821 | delay_list, &set, &needed, | |
2822 | &opposite_needed, slots_to_fill, | |
2823 | pslots_filled, &must_annul, | |
2824 | &new_thread); | |
2825 | else if (! thread_if_true) | |
2826 | delay_list | |
2827 | = steal_delay_list_from_fallthrough (insn, condition, | |
2828 | PATTERN (trial), | |
2829 | delay_list, &set, &needed, | |
2830 | &opposite_needed, slots_to_fill, | |
2831 | pslots_filled, &must_annul); | |
2832 | } | |
2833 | ||
2834 | /* If we haven't found anything for this delay slot and it is very | |
2835 | likely that the branch will be taken, see if the insn at our target | |
2836 | increments or decrements a register. If so, try to place the opposite | |
2837 | arithmetic insn after the jump insn and put the arithmetic insn in the | |
2838 | delay slot. If we can't do this, return. */ | |
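  /* Worked example (hypothetical): if the insn at the target is
     "r1 = r1 + 4", we emit the compensating "r1 = r1 - 4" right after
     the jump and put "r1 = r1 + 4" in the delay slot.  The taken path
     then sees the increment exactly once, while on the fallthrough
     path the subtract undoes the slot's increment.  */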
  if (delay_list == 0 && likely && new_thread && GET_CODE (new_thread) == INSN)
    {
      rtx pat = PATTERN (new_thread);
      rtx dest;
      rtx src;

      trial = try_split (pat, new_thread, 0);
      pat = PATTERN (trial);

      if (GET_CODE (trial) != INSN || GET_CODE (pat) != SET
          || ! eligible_for_delay (insn, 0, trial))
        return 0;

      dest = SET_DEST (pat), src = SET_SRC (pat);
      if ((GET_CODE (src) == PLUS || GET_CODE (src) == MINUS)
          && rtx_equal_p (XEXP (src, 0), dest))
        {
          rtx other = XEXP (src, 1);
          rtx new_arith;
          rtx ninsn;

          /* If this is a constant adjustment, use the same code with
             the negated constant.  Otherwise, reverse the sense of the
             arithmetic.  */
          if (GET_CODE (other) == CONST_INT)
            new_arith = gen_rtx (GET_CODE (src), GET_MODE (src), dest,
                                 negate_rtx (GET_MODE (src), other));
          else
            new_arith = gen_rtx (GET_CODE (src) == PLUS ? MINUS : PLUS,
                                 GET_MODE (src), dest, other);

          ninsn = emit_insn_after (gen_rtx (SET, VOIDmode, dest, new_arith),
                                   insn);

          if (recog_memoized (ninsn) < 0
              || (insn_extract (ninsn),
                  ! constrain_operands (INSN_CODE (ninsn), 1)))
            {
              delete_insn (ninsn);
              return 0;
            }

          if (own_thread)
            {
              update_block (trial, trial);
              delete_insn (trial);
            }
          else
            new_thread = next_active_insn (trial);

          ninsn = own_thread ? trial : copy_rtx (trial);
          if (thread_if_true)
            INSN_FROM_TARGET_P (ninsn) = 1;

          delay_list = add_to_delay_list (ninsn, 0);
          (*pslots_filled)++;
        }
    }

  if (delay_list && must_annul)
    INSN_ANNULLED_BRANCH_P (insn) = 1;

  /* If we are to branch into the middle of this thread, find an appropriate
     label or make a new one if none, and redirect INSN to it.  If we hit the
     end of the function, use the end-of-function label.  */
  if (new_thread != thread)
    {
      rtx label;

      if (! thread_if_true)
        abort ();

      if (new_thread && GET_CODE (new_thread) == JUMP_INSN
          && (simplejump_p (new_thread)
              || GET_CODE (PATTERN (new_thread)) == RETURN))
        new_thread = follow_jumps (JUMP_LABEL (new_thread), 1);

      if (new_thread == 0)
        label = find_end_label ();
      else if (GET_CODE (new_thread) == CODE_LABEL)
        label = new_thread;
      else
        label = get_label_before (new_thread);

      redirect_jump (insn, label);
    }

  return delay_list;
}
\f
/* Make another attempt to find insns to place in delay slots.

   We previously looked for insns located in front of the delay insn
   and, for non-jump delay insns, located behind the delay insn.

   Here only try to schedule jump insns and try to move insns from either
   the target or the following insns into the delay slot.  If annulling is
   supported, we will be likely to do this.  Otherwise, we can do this only
   if safe.  */
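/* For instance, on a machine with annulling branches (SPARC-style), an insn
   can be hoisted from the branch target into the slot and the branch marked
   annulling, so that the insn takes effect only when the branch is taken.
   A schematic sketch, not from any real machine description:

        bne  ...,L1        ==>     bne,a  ...,L1'
        nop                        ld     [r1],r2   ; annulled on fall-through
     L1: ld [r1],r2             L1': ...                                      */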

static void
fill_eager_delay_slots (first)
     rtx first;
{
  register rtx insn;
  register int i;
  int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;

  for (i = 0; i < num_unfilled_slots; i++)
    {
      rtx condition;
      rtx target_label, insn_at_target, fallthrough_insn;
      rtx delay_list = 0;
      int own_target;
      int own_fallthrough;
      int prediction, slots_to_fill, slots_filled;

      insn = unfilled_slots_base[i];
      if (insn == 0
          || INSN_DELETED_P (insn)
          || GET_CODE (insn) != JUMP_INSN
          || ! condjump_p (insn))
        continue;

      slots_to_fill = num_delay_slots (insn);
      if (slots_to_fill == 0)
        abort ();

      slots_filled = 0;
      target_label = JUMP_LABEL (insn);
      condition = get_branch_condition (insn, target_label);

      if (condition == 0)
        continue;

      /* Get the next active fallthrough and target insns and see if we own
         them.  Then see whether the branch is likely true.  We don't need
         to do a lot of this for unconditional branches.  */

      insn_at_target = next_active_insn (target_label);
      own_target = own_thread_p (target_label, target_label, 0);

      if (condition == const_true_rtx)
        {
          own_fallthrough = 0;
          fallthrough_insn = 0;
          prediction = 2;
        }
      else
        {
          fallthrough_insn = next_active_insn (insn);
          own_fallthrough = own_thread_p (NEXT_INSN (insn), 0, 1);
          prediction = mostly_true_jump (insn, condition);
        }

      /* If this insn is expected to branch, first try to get insns from our
         target, then our fallthrough insns.  If it is not expected to
         branch, try the other order.  */
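      /* As an illustrative reading only: a backward branch closing a loop is
         normally predicted taken, so its target (the loop top) is mined for
         insns first, while a forward branch is more often tried in the
         fall-through-first order.  */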

      if (prediction)
        {
          delay_list
            = fill_slots_from_thread (insn, condition, insn_at_target,
                                      fallthrough_insn, prediction == 2, 1,
                                      own_target, own_fallthrough,
                                      slots_to_fill, &slots_filled);

          if (delay_list == 0 && own_fallthrough)
            {
              /* Even though we didn't find anything for delay slots,
                 we might have found a redundant insn which we deleted
                 from the thread that was filled.  So we have to recompute
                 the next insn at the target.  */
              target_label = JUMP_LABEL (insn);
              insn_at_target = next_active_insn (target_label);

              delay_list
                = fill_slots_from_thread (insn, condition, fallthrough_insn,
                                          insn_at_target, 0, 0,
                                          own_fallthrough, own_target,
                                          slots_to_fill, &slots_filled);
            }
        }
      else
        {
          if (own_fallthrough)
            delay_list
              = fill_slots_from_thread (insn, condition, fallthrough_insn,
                                        insn_at_target, 0, 0,
                                        own_fallthrough, own_target,
                                        slots_to_fill, &slots_filled);

          if (delay_list == 0)
            delay_list
              = fill_slots_from_thread (insn, condition, insn_at_target,
                                        next_active_insn (insn), 0, 1,
                                        own_target, own_fallthrough,
                                        slots_to_fill, &slots_filled);
        }

      if (delay_list)
        unfilled_slots_base[i]
          = emit_delay_sequence (insn, delay_list,
                                 slots_filled, slots_to_fill);

      if (slots_to_fill == slots_filled)
        unfilled_slots_base[i] = 0;

      note_delay_statistics (slots_filled, 1);
    }
}
\f
/* Once we have tried two ways to fill a delay slot, make a pass over the
   code to try to improve the results and to do such things as more jump
   threading.  */

static void
relax_delay_slots (first)
     rtx first;
{
  register rtx insn, next, pat;
  register rtx trial, delay_insn, target_label;

  /* Look at every JUMP_INSN and see if we can improve it.  */
  for (insn = first; insn; insn = next)
    {
      rtx other;

      next = next_active_insn (insn);

      /* If this is a jump insn, see if it now jumps to a jump, jumps to
         the next insn, or jumps to a label that is not the last of a
         group of consecutive labels.  */
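      /* E.g., a chain such as

            jmp L1;  ...  L1: jmp L2;  ...  L2:

         is redirected straight to L2, and a jump to the immediately
         following active insn is simply deleted (schematic example).  */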
      if (GET_CODE (insn) == JUMP_INSN
          && (target_label = JUMP_LABEL (insn)) != 0)
        {
          target_label = follow_jumps (target_label, 1);
          target_label = prev_label (next_active_insn (target_label));

          if (next_active_insn (target_label) == next)
            {
              delete_jump (insn);
              continue;
            }

          if (target_label != JUMP_LABEL (insn))
            redirect_jump (insn, target_label);

          /* See if this jump branches around an unconditional jump.
             If so, invert this jump and point it to the target of the
             second jump.  */
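          /* Schematically (pseudo-code, not RTL):

                if (cc) goto L1;                if (! cc) goto L2;
                goto L2;                ==>     ...
             L1: ...                         L1: ...                    */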
          if (next && GET_CODE (next) == JUMP_INSN
              && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN)
              && next_active_insn (target_label) == next_active_insn (next)
              && no_labels_between_p (insn, next))
            {
              rtx label = JUMP_LABEL (next);

              /* Be careful how we do this to avoid deleting code or
                 labels that are momentarily dead.  See similar optimization
                 in jump.c.

                 We also need to ensure we properly handle the case when
                 invert_jump fails.  */

              ++LABEL_NUSES (target_label);
              if (label)
                ++LABEL_NUSES (label);

              if (invert_jump (insn, label))
                {
                  delete_insn (next);
                  next = insn;
                }

              if (label)
                --LABEL_NUSES (label);

              if (--LABEL_NUSES (target_label) == 0)
                delete_insn (target_label);

              continue;
            }
        }

      /* If this is an unconditional jump and the previous insn is a
         conditional jump, try reversing the condition of the previous
         insn and swapping our targets.  The next pass might be able to
         fill the slots.

         Don't do this if we expect the conditional branch to be true, because
         we would then be making the more common case longer.  */
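      /* Schematically (pseudo-code, not RTL):

            if (cc) goto A;                 if (! cc) goto B;
            goto B;                 ==>     goto A;

         When CC is usually false, the common path then takes a single
         branch instead of falling through to a second jump.  */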

      if (GET_CODE (insn) == JUMP_INSN
          && (simplejump_p (insn) || GET_CODE (PATTERN (insn)) == RETURN)
          && (other = prev_active_insn (insn)) != 0
          && condjump_p (other)
          && no_labels_between_p (other, insn)
          && ! mostly_true_jump (other,
                                 get_branch_condition (other,
                                                       JUMP_LABEL (other))))
        {
          rtx other_target = JUMP_LABEL (other);

          /* Increment the count of OTHER_TARGET, so it doesn't get deleted
             as we move the label.  */
          if (other_target)
            ++LABEL_NUSES (other_target);

          if (invert_jump (other, target_label))
            redirect_jump (insn, other_target);

          if (other_target)
            --LABEL_NUSES (other_target);
        }

      /* Now look only at cases where we have filled a delay slot.  */
      if (GET_CODE (insn) != INSN
          || GET_CODE (PATTERN (insn)) != SEQUENCE)
        continue;

      pat = PATTERN (insn);
      delay_insn = XVECEXP (pat, 0, 0);

      /* See if the first insn in the delay slot is redundant with some
         previous insn.  Remove it from the delay slot if so; then set up
         to reprocess this insn.  */
      if (redundant_insn_p (XVECEXP (pat, 0, 1), delay_insn, 0))
        {
          delete_from_delay_slot (XVECEXP (pat, 0, 1));
          next = prev_active_insn (next);
          continue;
        }

      /* Now look only at the cases where we have a filled JUMP_INSN.  */
      if (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) != JUMP_INSN
          || ! condjump_p (XVECEXP (PATTERN (insn), 0, 0)))
        continue;

      target_label = JUMP_LABEL (delay_insn);

      if (target_label)
        {
          /* If this jump goes to another unconditional jump, thread it, but
             don't convert a jump into a RETURN here.  */
          trial = follow_jumps (target_label, 1);
          trial = prev_label (next_active_insn (trial));
          if (trial == 0 && target_label != 0)
            trial = find_end_label ();

          if (trial != target_label)
            {
              redirect_jump (delay_insn, trial);
              target_label = trial;
            }

          /* If the first insn at TARGET_LABEL is redundant with a previous
             insn, redirect the jump to the following insn and process
             again.  */
          trial = next_active_insn (target_label);
          if (trial && GET_CODE (PATTERN (trial)) != SEQUENCE
              && redundant_insn_p (trial, insn, 0))
            {
              trial = next_active_insn (trial);
              if (trial == 0)
                target_label = find_end_label ();
              else
                target_label = get_label_before (trial);
              redirect_jump (delay_insn, target_label);
              next = insn;
              continue;
            }

          /* Similarly, if it is an unconditional jump with one insn in its
             delay list and that insn is redundant, thread the jump.  */
          if (trial && GET_CODE (PATTERN (trial)) == SEQUENCE
              && XVECLEN (PATTERN (trial), 0) == 2
              && GET_CODE (XVECEXP (PATTERN (trial), 0, 0)) == JUMP_INSN
              && (simplejump_p (XVECEXP (PATTERN (trial), 0, 0))
                  || GET_CODE (PATTERN (XVECEXP (PATTERN (trial), 0, 0))) == RETURN)
              && redundant_insn_p (XVECEXP (PATTERN (trial), 0, 1), insn, 0))
            {
              target_label = JUMP_LABEL (XVECEXP (PATTERN (trial), 0, 0));
              if (target_label == 0)
                target_label = find_end_label ();
              redirect_jump (delay_insn, target_label);
              next = insn;
              continue;
            }
        }

      if (! INSN_ANNULLED_BRANCH_P (delay_insn)
          && prev_active_insn (target_label) == insn
#ifdef HAVE_cc0
          /* If the last insn in the delay slot sets CC0 for some insn,
             various code assumes that it is in a delay slot.  We could
             put it back where it belonged and delete the register notes,
             but it doesn't seem worthwhile in this uncommon case.  */
          && ! find_reg_note (XVECEXP (pat, 0, XVECLEN (pat, 0) - 1),
                              REG_CC_USER, 0)
#endif
          )
        {
          /* All this insn does is execute its delay list and jump to the
             following insn.  So delete the jump and just execute the delay
             list insns.

             We do this by deleting the INSN containing the SEQUENCE, then
             re-emitting the insns separately, and then deleting the jump.
             This allows the count of the jump target to be properly
             decremented.  */

          trial = PREV_INSN (insn);
          delete_insn (insn);
          emit_insn_after (pat, trial);
          delete_scheduled_jump (delay_insn);
          continue;
        }

      /* See if this jump (with its delay slots) branches around another
         jump (without delay slots).  If so, invert this jump and point
         it to the target of the second jump.  We cannot do this for
         annulled jumps, though.  Again, don't convert a jump to a RETURN
         here.  */
      if (! INSN_ANNULLED_BRANCH_P (delay_insn)
          && next && GET_CODE (next) == JUMP_INSN
          && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN)
          && next_active_insn (target_label) == next_active_insn (next)
          && no_labels_between_p (insn, next))
        {
          rtx label = JUMP_LABEL (next);
          rtx old_label = JUMP_LABEL (delay_insn);

          if (label == 0)
            label = find_end_label ();

          /* Be careful how we do this to avoid deleting code or labels
             that are momentarily dead.  See similar optimization in
             jump.c.  */
          if (old_label)
            ++LABEL_NUSES (old_label);

          if (invert_jump (delay_insn, label))
            {
              delete_insn (next);
              next = insn;
            }

          if (old_label && --LABEL_NUSES (old_label) == 0)
            delete_insn (old_label);
          continue;
        }

      /* If we own the thread opposite the way this insn branches, see if we
         can merge its delay slots with following insns.  */
      if (INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
          && own_thread_p (NEXT_INSN (insn), 0, 1))
        try_merge_delay_insns (insn, next);
      else if (! INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
               && own_thread_p (target_label, target_label, 0))
        try_merge_delay_insns (insn, next_active_insn (target_label));

      /* If we get here, we haven't deleted INSN.  But we may have deleted
         NEXT, so recompute it.  */
      next = next_active_insn (insn);
    }
}
\f
#ifdef HAVE_return

/* Look for filled jumps to the end of function label.  We can try to convert
   them into RETURN insns if the insns in the delay slot are valid for the
   RETURN as well.  */
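/* Sketch of the intent (not literal RTL syntax): a filled branch to the
   end-of-function label,

        (sequence [jump END; insn-in-slot])   ...   END: return

   becomes

        (sequence [return; insn-in-slot])

   provided the slot insn is also valid in a RETURN's delay slot.  */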

static void
make_return_insns (first)
     rtx first;
{
  rtx insn, jump_insn, pat;
  rtx real_return_label = end_of_function_label;
  int slots, i;

  /* See if there is a RETURN insn in the function other than the one we
     made for END_OF_FUNCTION_LABEL.  If so, set up anything we can't change
     into a RETURN to jump to it.  */
  for (insn = first; insn; insn = NEXT_INSN (insn))
    if (GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) == RETURN)
      {
        real_return_label = get_label_before (insn);
        break;
      }

  /* Show an extra usage of REAL_RETURN_LABEL so it won't go away if it
     was equal to END_OF_FUNCTION_LABEL.  */
  LABEL_NUSES (real_return_label)++;

  /* Clear the list of insns to fill so we can use it.  */
  obstack_free (&unfilled_slots_obstack, unfilled_firstobj);

  for (insn = first; insn; insn = NEXT_INSN (insn))
    {
      /* Only look at filled JUMP_INSNs that go to the end of function
         label.  */
      if (GET_CODE (insn) != INSN
          || GET_CODE (PATTERN (insn)) != SEQUENCE
          || GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) != JUMP_INSN
          || JUMP_LABEL (XVECEXP (PATTERN (insn), 0, 0)) != end_of_function_label)
        continue;

      pat = PATTERN (insn);
      jump_insn = XVECEXP (pat, 0, 0);

      /* If we can't make the jump into a RETURN, redirect it to the best
         RETURN and go on to the next insn.  */
      if (! redirect_jump (jump_insn, 0))
        {
          redirect_jump (jump_insn, real_return_label);
          continue;
        }

      /* See if this RETURN can accept the insns currently in its delay slot.
         It can if it has at least as many slots as the SEQUENCE and the
         contents of each one is valid.  */

      slots = num_delay_slots (jump_insn);
      if (slots >= XVECLEN (pat, 0) - 1)
        {
          for (i = 1; i < XVECLEN (pat, 0); i++)
            if (! (
#ifdef ANNUL_IFFALSE_SLOTS
                   (INSN_ANNULLED_BRANCH_P (jump_insn)
                    && INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
                   ? eligible_for_annul_false (jump_insn, i - 1,
                                               XVECEXP (pat, 0, i)) :
#endif
#ifdef ANNUL_IFTRUE_SLOTS
                   (INSN_ANNULLED_BRANCH_P (jump_insn)
                    && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
                   ? eligible_for_annul_true (jump_insn, i - 1,
                                              XVECEXP (pat, 0, i)) :
#endif
                   eligible_for_delay (jump_insn, i - 1, XVECEXP (pat, 0, i))))
              break;
        }
      else
        i = 0;

      if (i == XVECLEN (pat, 0))
        continue;

      /* We have to do something with this insn.  If it is an unconditional
         RETURN, delete the SEQUENCE and output the individual insns,
         followed by the RETURN.  Then set things up so we try to find
         insns for its delay slots, if it needs some.  */
      if (GET_CODE (PATTERN (jump_insn)) == RETURN)
        {
          rtx prev = PREV_INSN (insn);

          delete_insn (insn);
          for (i = 1; i < XVECLEN (pat, 0); i++)
            prev = emit_insn_after (PATTERN (XVECEXP (pat, 0, i)), prev);

          insn = emit_jump_insn_after (PATTERN (jump_insn), prev);
          emit_barrier_after (insn);

          if (slots)
            obstack_ptr_grow (&unfilled_slots_obstack, insn);
        }
      else
        /* It is probably more efficient to keep this with its current
           delay slot as a branch to a RETURN.  */
        redirect_jump (jump_insn, real_return_label);
    }

  /* Now delete REAL_RETURN_LABEL if we never used it.  Then try to fill any
     new delay slots we have created.  */
  if (--LABEL_NUSES (real_return_label) == 0)
    delete_insn (real_return_label);

  fill_simple_delay_slots (first, 1);
  fill_simple_delay_slots (first, 0);
}
#endif
\f
/* Try to find insns to place in delay slots.  */

void
dbr_schedule (first, file)
     rtx first;
     FILE *file;
{
  rtx insn, next;
  int i;
#if 0
  int old_flag_no_peephole = flag_no_peephole;

  /* Execute `final' once in prescan mode to delete any insns that won't be
     used.  Don't let final try to do any peephole optimization--it will
     ruin dataflow information for this pass.  */

  flag_no_peephole = 1;
  final (first, 0, NO_DEBUG, 1, 1);
  flag_no_peephole = old_flag_no_peephole;
#endif

  /* Find the highest INSN_UID and allocate and initialize our map from
     INSN_UID's to position in code.  */
  for (max_uid = 0, insn = first; insn; insn = NEXT_INSN (insn))
    if (INSN_UID (insn) > max_uid)
      max_uid = INSN_UID (insn);

  uid_to_ruid = (int *) alloca ((max_uid + 1) * sizeof (int));
  for (i = 0, insn = first; insn; i++, insn = NEXT_INSN (insn))
    uid_to_ruid[INSN_UID (insn)] = i;

  /* Initialize the list of insns that need filling.  */
  if (unfilled_firstobj == 0)
    {
      gcc_obstack_init (&unfilled_slots_obstack);
      unfilled_firstobj = (rtx *) obstack_alloc (&unfilled_slots_obstack, 0);
    }

  for (insn = next_active_insn (first); insn; insn = next_active_insn (insn))
    {
      rtx target;

      INSN_ANNULLED_BRANCH_P (insn) = 0;
      INSN_FROM_TARGET_P (insn) = 0;

      /* Skip vector tables.  We can't get attributes for them.  */
      if (GET_CODE (insn) == JUMP_INSN
          && (GET_CODE (PATTERN (insn)) == ADDR_VEC
              || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
        continue;

      if (num_delay_slots (insn) > 0)
        obstack_ptr_grow (&unfilled_slots_obstack, insn);

      /* Ensure all jumps go to the last of a set of consecutive labels.  */
      if (GET_CODE (insn) == JUMP_INSN && condjump_p (insn)
          && JUMP_LABEL (insn) != 0
          && ((target = prev_label (next_active_insn (JUMP_LABEL (insn))))
              != JUMP_LABEL (insn)))
        redirect_jump (insn, target);
    }

  /* Indicate what resources are required to be valid at the end of the
     current function.  The condition code never is and memory always is.
     If the frame pointer is needed, it is and so is the stack pointer unless
     EXIT_IGNORE_STACK is non-zero.  If the frame pointer is not needed, the
     stack pointer is.  In addition, registers used to return the function
     value are needed.  */
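  /* For example, on a hypothetical machine that returns values in register
     r0 and needs a frame pointer, r0, the frame pointer and (unless
     EXIT_IGNORE_STACK is non-zero) the stack pointer would all be marked
     live here.  */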

  end_of_function_needs.cc = 0;
  end_of_function_needs.memory = 1;
  CLEAR_HARD_REG_SET (end_of_function_needs.regs);

  if (frame_pointer_needed)
    {
      SET_HARD_REG_BIT (end_of_function_needs.regs, FRAME_POINTER_REGNUM);
#ifdef EXIT_IGNORE_STACK
      if (! EXIT_IGNORE_STACK)
#endif
        SET_HARD_REG_BIT (end_of_function_needs.regs, STACK_POINTER_REGNUM);
    }
  else
    SET_HARD_REG_BIT (end_of_function_needs.regs, STACK_POINTER_REGNUM);

  if (current_function_return_rtx != 0
      && GET_CODE (current_function_return_rtx) == REG)
    mark_referenced_resources (current_function_return_rtx,
                               &end_of_function_needs, 0);

  /* Show we haven't computed an end-of-function label yet.  */
  end_of_function_label = 0;

  /* Allocate and initialize the tables used by mark_target_live_regs.  */
  target_hash_table
    = (struct target_info **) alloca ((TARGET_HASH_PRIME
                                       * sizeof (struct target_info *)));
  bzero (target_hash_table, TARGET_HASH_PRIME * sizeof (struct target_info *));

  bb_ticks = (int *) alloca (n_basic_blocks * sizeof (int));
  bzero (bb_ticks, n_basic_blocks * sizeof (int));

  /* Initialize the statistics for this function.  */
  bzero (num_insns_needing_delays, sizeof num_insns_needing_delays);
  bzero (num_filled_delays, sizeof num_filled_delays);

  /* Now do the delay slot filling.  Try everything twice in case earlier
     changes make more slots fillable.  */

  for (reorg_pass_number = 0;
       reorg_pass_number < MAX_REORG_PASSES;
       reorg_pass_number++)
    {
      fill_simple_delay_slots (first, 1);
      fill_simple_delay_slots (first, 0);
      fill_eager_delay_slots (first);
      relax_delay_slots (first);
    }

  /* Delete any USE insns made by update_block; subsequent passes don't need
     them or know how to deal with them.  */
  for (insn = first; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE
          && (GET_CODE (XEXP (PATTERN (insn), 0)) == INSN
              || GET_CODE (XEXP (PATTERN (insn), 0)) == JUMP_INSN
              || GET_CODE (XEXP (PATTERN (insn), 0)) == CALL_INSN))
        next = delete_insn (insn);
    }

  /* If we made an end of function label, indicate that it is now
     safe to delete it by undoing our prior adjustment to LABEL_NUSES.
     If it is now unused, delete it.  */
  if (end_of_function_label && --LABEL_NUSES (end_of_function_label) == 0)
    delete_insn (end_of_function_label);

#ifdef HAVE_return
  if (HAVE_return && end_of_function_label != 0)
    make_return_insns (first);
#endif

  obstack_free (&unfilled_slots_obstack, unfilled_firstobj);

  /* It is not clear why the line below is needed, but it does seem to be.  */
  unfilled_firstobj = (rtx *) obstack_alloc (&unfilled_slots_obstack, 0);

  if (file)
    {
      register int i, j, need_comma;

      for (reorg_pass_number = 0;
           reorg_pass_number < MAX_REORG_PASSES;
           reorg_pass_number++)
        {
          fprintf (file, ";; Reorg pass #%d:\n", reorg_pass_number + 1);
          for (i = 0; i < NUM_REORG_FUNCTIONS; i++)
            {
              need_comma = 0;
              fprintf (file, ";; Reorg function #%d\n", i);

              fprintf (file, ";; %d insns needing delay slots\n;; ",
                       num_insns_needing_delays[i][reorg_pass_number]);

              for (j = 0; j < MAX_DELAY_HISTOGRAM; j++)
                if (num_filled_delays[i][j][reorg_pass_number])
                  {
                    if (need_comma)
                      fprintf (file, ", ");
                    need_comma = 1;
                    fprintf (file, "%d got %d delays",
                             num_filled_delays[i][j][reorg_pass_number], j);
                  }
              fprintf (file, "\n");
            }
        }
    }
}
#endif /* DELAY_SLOTS */