/* Optimize jump instructions, for GNU compiler.
   Copyright (C) 1987, 88, 89, 91-98, 1999 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */


/* This is the jump-optimization pass of the compiler.
   It is run two or three times: once before cse, sometimes once after cse,
   and once after reload (before final).

   jump_optimize deletes unreachable code and labels that are not used.
   It also deletes jumps that jump to the following insn,
   and simplifies jumps around unconditional jumps and jumps
   to unconditional jumps.

   Each CODE_LABEL has a count of the times it is used
   stored in the LABEL_NUSES internal field, and each JUMP_INSN
   has one label that it refers to stored in the
   JUMP_LABEL internal field.  With this we can detect labels that
   become unused because of the deletion of all the jumps that
   formerly used them.  The JUMP_LABEL info is sometimes looked
   at by later passes.

   Optionally, cross-jumping can be done.  Currently it is done
   only the last time (when after reload and before final).
   In fact, the code for cross-jumping now assumes that register
   allocation has been done, since it uses `rtx_renumbered_equal_p'.

   Jump optimization is done after cse when cse's constant-propagation
   causes jumps to become unconditional or to be deleted.

   Unreachable loops are not detected here, because the labels
   have references and the insns appear reachable from the labels.
   find_basic_blocks in flow.c finds and deletes such loops.

   The subroutines delete_insn, redirect_jump, and invert_jump are used
   from other passes as well.  */
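
/* For illustration (this example is ours, not part of the original
   commentary): a jump around an unconditional jump, such as

	if (cond) goto L1;
	goto L2;
     L1:
	...

   is simplified by this pass into the equivalent

	if (! cond) goto L2;
     L1:
	...

   provided the target machine can reverse the condition.  */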

#include "config.h"
#include "system.h"
#include "rtl.h"
#include "flags.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "insn-config.h"
#include "insn-flags.h"
#include "insn-attr.h"
#include "recog.h"
#include "expr.h"
#include "real.h"
#include "except.h"
#include "toplev.h"

/* ??? Eventually must record somehow the labels used by jumps
   from nested functions.  */
/* Pre-record the next or previous real insn for each label?
   No, this pass is very fast anyway.  */
/* Condense consecutive labels?
   This would make life analysis faster, maybe.  */
/* Optimize  jump y; x: ... y: jumpif... x?
   Don't know if it is worth bothering with.  */
/* Optimize two cases of conditional jump to conditional jump?
   This can never delete any instruction or make anything dead,
   or even change what is live at any point.
   So perhaps let combiner do it.  */

/* Vector indexed by uid.
   For each CODE_LABEL, index by its uid to get first unconditional jump
   that jumps to the label.
   For each JUMP_INSN, index by its uid to get the next unconditional jump
   that jumps to the same label.
   Element 0 is the start of a chain of all return insns.
   (It is safe to use element 0 because insn uid 0 is not used.)  */

static rtx *jump_chain;
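
/* A sketch of the intended traversal (illustrative only, not from the
   original sources): all the unconditional jumps to a label LAB can be
   walked with

	for (x = jump_chain[INSN_UID (LAB)]; x;
	     x = jump_chain[INSN_UID (x)])
	  ...

   and chain element 0 enumerates the return insns the same way.  */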

/* List of labels referred to from initializers.
   These can never be deleted.  */
rtx forced_labels;

/* Maximum index in jump_chain.  */

static int max_jump_chain;

/* Set nonzero by jump_optimize if control can fall through
   to the end of the function.  */
int can_reach_end;

/* Indicates whether death notes are significant in cross jump analysis.
   Normally they are not significant, because if A and B jump to C and
   R dies in A, it must also die in B.  But this might not be true after
   stack register conversion, and we must compare death notes in that
   case.  */

static int cross_jump_death_matters = 0;

static int init_label_info PROTO((rtx));
static void delete_barrier_successors PROTO((rtx));
static void mark_all_labels PROTO((rtx, int));
static rtx delete_unreferenced_labels PROTO((rtx));
static void delete_noop_moves PROTO((rtx));
static int calculate_can_reach_end PROTO((rtx, int, int));
static int duplicate_loop_exit_test PROTO((rtx));
static void find_cross_jump PROTO((rtx, rtx, int, rtx *, rtx *));
static void do_cross_jump PROTO((rtx, rtx, rtx));
static int jump_back_p PROTO((rtx, rtx));
static int tension_vector_labels PROTO((rtx, int));
static void mark_jump_label PROTO((rtx, rtx, int));
static void delete_computation PROTO((rtx));
static void delete_from_jump_chain PROTO((rtx));
static int delete_labelref_insn PROTO((rtx, rtx, int));
static void mark_modified_reg PROTO((rtx, rtx));
static void redirect_tablejump PROTO((rtx, rtx));
static void jump_optimize_1 PROTO ((rtx, int, int, int, int));
#ifndef HAVE_cc0
static rtx find_insert_position PROTO((rtx, rtx));
#endif

/* Main external entry point into the jump optimizer.  See comments before
   jump_optimize_1 for descriptions of the arguments.  */
void
jump_optimize (f, cross_jump, noop_moves, after_regscan)
     rtx f;
     int cross_jump;
     int noop_moves;
     int after_regscan;
{
  jump_optimize_1 (f, cross_jump, noop_moves, after_regscan, 0);
}
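
/* A hypothetical invocation (illustrative only; the argument values
   are made up, not taken from any actual caller):

	jump_optimize (get_insns (), 0, 1, 0);

   would run the pass over the whole function without cross-jumping,
   deleting no-op moves, and without relying on regscan data.  */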

/* Alternate entry into the jump optimizer.  This entry point only rebuilds
   the JUMP_LABEL field in jumping insns and REG_LABEL notes in non-jumping
   instructions.  */
void
rebuild_jump_labels (f)
     rtx f;
{
  jump_optimize_1 (f, 0, 0, 0, 1);
}

\f
/* Delete no-op jumps and optimize jumps to jumps
   and jumps around jumps.
   Delete unused labels and unreachable code.

   If CROSS_JUMP is 1, detect matching code
   before a jump and its destination and unify them.
   If CROSS_JUMP is 2, do cross-jumping, but pay attention to death notes.

   If NOOP_MOVES is nonzero, delete no-op move insns.

   If AFTER_REGSCAN is nonzero, then this jump pass is being run immediately
   after regscan, and it is safe to use regno_first_uid and regno_last_uid.

   If MARK_LABELS_ONLY is nonzero, then we only rebuild the jump chain
   and JUMP_LABEL field for jumping insns.

   If `optimize' is zero, don't change any code,
   just determine whether control drops off the end of the function.
   This case occurs when we have -W and not -O.
   It works because `delete_insn' checks the value of `optimize'
   and refrains from actually deleting when that is 0.  */

static void
jump_optimize_1 (f, cross_jump, noop_moves, after_regscan, mark_labels_only)
     rtx f;
     int cross_jump;
     int noop_moves;
     int after_regscan;
     int mark_labels_only;
{
  register rtx insn, next;
  int changed;
  int old_max_reg;
  int first = 1;
  int max_uid = 0;
  rtx last_insn;

  cross_jump_death_matters = (cross_jump == 2);
  max_uid = init_label_info (f) + 1;

  /* If we are performing cross jump optimizations, then initialize
     tables mapping UIDs to EH regions to avoid incorrect movement
     of insns from one EH region to another.  */
  if (flag_exceptions && cross_jump)
    init_insn_eh_region (f, max_uid);

  delete_barrier_successors (f);

  /* Leave some extra room for labels and duplicate exit test insns
     we make.  */
  max_jump_chain = max_uid * 14 / 10;
  jump_chain = (rtx *) alloca (max_jump_chain * sizeof (rtx));
  bzero ((char *) jump_chain, max_jump_chain * sizeof (rtx));

  mark_all_labels (f, cross_jump);

  /* Keep track of labels used from static data;
     they cannot ever be deleted.  */

  for (insn = forced_labels; insn; insn = XEXP (insn, 1))
    LABEL_NUSES (XEXP (insn, 0))++;

  check_exception_handler_labels ();

  /* Keep track of labels used for marking handlers for exception
     regions; they cannot usually be deleted.  */

  for (insn = exception_handler_labels; insn; insn = XEXP (insn, 1))
    LABEL_NUSES (XEXP (insn, 0))++;

  /* Quit now if we just wanted to rebuild the JUMP_LABEL and REG_LABEL
     notes and recompute LABEL_NUSES.  */
  if (mark_labels_only)
    return;

  exception_optimize ();

  last_insn = delete_unreferenced_labels (f);

  if (!optimize)
    {
      can_reach_end = calculate_can_reach_end (last_insn, 1, 0);

      /* Zero the "deleted" flag of all the "deleted" insns.  */
      for (insn = f; insn; insn = NEXT_INSN (insn))
        INSN_DELETED_P (insn) = 0;

      /* Show that the jump chain is not valid.  */
      jump_chain = 0;
      return;
    }

#ifdef HAVE_return
  if (HAVE_return)
    {
      /* If we fall through to the epilogue, see if we can insert a RETURN insn
         in front of it.  If the machine allows it at this point (we might be
         after reload for a leaf routine), it will improve optimization for it
         to be there.  */
      insn = get_last_insn ();
      while (insn && GET_CODE (insn) == NOTE)
        insn = PREV_INSN (insn);

      if (insn && GET_CODE (insn) != BARRIER)
        {
          emit_jump_insn (gen_return ());
          emit_barrier ();
        }
    }
#endif

  if (noop_moves)
    delete_noop_moves (f);

  /* If we haven't yet gotten to reload and we have just run regscan,
     delete any insn that sets a register that isn't used elsewhere.
     This helps some of the optimizations below by having fewer insns
     being jumped around.  */

  if (! reload_completed && after_regscan)
    for (insn = f; insn; insn = next)
      {
        rtx set = single_set (insn);

        next = NEXT_INSN (insn);

        if (set && GET_CODE (SET_DEST (set)) == REG
            && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
            && REGNO_FIRST_UID (REGNO (SET_DEST (set))) == INSN_UID (insn)
            /* We use regno_last_note_uid so as not to delete the setting
               of a reg that's used in notes.  A subsequent optimization
               might arrange to use that reg for real.  */
            && REGNO_LAST_NOTE_UID (REGNO (SET_DEST (set))) == INSN_UID (insn)
            && ! side_effects_p (SET_SRC (set))
            && ! find_reg_note (insn, REG_RETVAL, 0))
          delete_insn (insn);
      }

  /* Now iterate optimizing jumps until nothing changes over one pass.  */
  changed = 1;
  old_max_reg = max_reg_num ();
  while (changed)
    {
      changed = 0;

      for (insn = f; insn; insn = next)
        {
          rtx reallabelprev;
          rtx temp, temp1, temp2, temp3, temp4, temp5, temp6;
          rtx nlabel;
          int this_is_simplejump, this_is_condjump, reversep = 0;
          int this_is_condjump_in_parallel;

#if 0
          /* If NOT the first iteration, if this is the last jump pass
             (just before final), do the special peephole optimizations.
             Avoiding the first iteration gives ordinary jump opts
             a chance to work before peephole opts.  */

          if (reload_completed && !first && !flag_no_peephole)
            if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
              peephole (insn);
#endif

          /* That could have deleted some insns after INSN, so check now
             what the following insn is.  */

          next = NEXT_INSN (insn);

          /* See if this is a NOTE_INSN_LOOP_BEG followed by an unconditional
             jump.  Try to optimize by duplicating the loop exit test if so.
             This is only safe immediately after regscan, because it uses
             the values of regno_first_uid and regno_last_uid.  */
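          /* A source-level sketch of that transformation (example ours,
             not from the original commentary): a loop emitted as

                  goto test;
             body:
                  ...
             test:
                  if (cond) goto body;

             becomes, once the exit test is duplicated at the entry,

                  if (! cond) goto done;
             body:
                  ...
             test:
                  if (cond) goto body;
             done:

             so that entry to the loop falls straight into the body
             instead of executing an unconditional jump first.  */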
          if (after_regscan && GET_CODE (insn) == NOTE
              && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
              && (temp1 = next_nonnote_insn (insn)) != 0
              && simplejump_p (temp1))
            {
              temp = PREV_INSN (insn);
              if (duplicate_loop_exit_test (insn))
                {
                  changed = 1;
                  next = NEXT_INSN (temp);
                  continue;
                }
            }

          if (GET_CODE (insn) != JUMP_INSN)
            continue;

          this_is_simplejump = simplejump_p (insn);
          this_is_condjump = condjump_p (insn);
          this_is_condjump_in_parallel = condjump_in_parallel_p (insn);

          /* Tension the labels in dispatch tables.  */

          if (GET_CODE (PATTERN (insn)) == ADDR_VEC)
            changed |= tension_vector_labels (PATTERN (insn), 0);
          if (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
            changed |= tension_vector_labels (PATTERN (insn), 1);

          /* If a dispatch table always goes to the same place,
             get rid of it and replace the insn that uses it.  */

          if (GET_CODE (PATTERN (insn)) == ADDR_VEC
              || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
            {
              int i;
              rtx pat = PATTERN (insn);
              int diff_vec_p = GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC;
              int len = XVECLEN (pat, diff_vec_p);
              rtx dispatch = prev_real_insn (insn);

              for (i = 0; i < len; i++)
                if (XEXP (XVECEXP (pat, diff_vec_p, i), 0)
                    != XEXP (XVECEXP (pat, diff_vec_p, 0), 0))
                  break;
              if (i == len
                  && dispatch != 0
                  && GET_CODE (dispatch) == JUMP_INSN
                  && JUMP_LABEL (dispatch) != 0
                  /* Don't mess with a casesi insn.  */
                  && !(GET_CODE (PATTERN (dispatch)) == SET
                       && (GET_CODE (SET_SRC (PATTERN (dispatch)))
                           == IF_THEN_ELSE))
                  && next_real_insn (JUMP_LABEL (dispatch)) == insn)
                {
                  redirect_tablejump (dispatch,
                                      XEXP (XVECEXP (pat, diff_vec_p, 0), 0));
                  changed = 1;
                }
            }

          reallabelprev = prev_active_insn (JUMP_LABEL (insn));

          /* If a jump references the end of the function, try to turn
             it into a RETURN insn, possibly a conditional one.  */
          if (JUMP_LABEL (insn)
              && (next_active_insn (JUMP_LABEL (insn)) == 0
                  || GET_CODE (PATTERN (next_active_insn (JUMP_LABEL (insn))))
                     == RETURN))
            changed |= redirect_jump (insn, NULL_RTX);

          /* Detect jump to following insn.  */
          if (reallabelprev == insn && condjump_p (insn))
            {
              next = next_real_insn (JUMP_LABEL (insn));
              delete_jump (insn);
              changed = 1;
              continue;
            }

          /* If we have an unconditional jump preceded by a USE, try to put
             the USE before the target and jump there.  This simplifies many
             of the optimizations below since we don't have to worry about
             dealing with these USE insns.  We only do this if the label
             being branched to already has the identical USE or if code
             never falls through to that label.  */

          if (this_is_simplejump
              && (temp = prev_nonnote_insn (insn)) != 0
              && GET_CODE (temp) == INSN && GET_CODE (PATTERN (temp)) == USE
              && (temp1 = prev_nonnote_insn (JUMP_LABEL (insn))) != 0
              && (GET_CODE (temp1) == BARRIER
                  || (GET_CODE (temp1) == INSN
                      && rtx_equal_p (PATTERN (temp), PATTERN (temp1))))
              /* Don't do this optimization if we have a loop containing
                 only the USE instruction, and the loop start label has
                 a usage count of 1.  This is because we will redo this
                 optimization every time through the outer loop, and jump
                 opt will never exit.  */
              && ! ((temp2 = prev_nonnote_insn (temp)) != 0
                    && temp2 == JUMP_LABEL (insn)
                    && LABEL_NUSES (temp2) == 1))
            {
              if (GET_CODE (temp1) == BARRIER)
                {
                  emit_insn_after (PATTERN (temp), temp1);
                  temp1 = NEXT_INSN (temp1);
                }

              delete_insn (temp);
              redirect_jump (insn, get_label_before (temp1));
              reallabelprev = prev_real_insn (temp1);
              changed = 1;
            }

          /* Simplify   if (...) x = a; else x = b;  by converting it
             to         x = b; if (...) x = a;
             if B is sufficiently simple, the test doesn't involve X,
             and nothing in the test modifies B or X.

             If we have small register classes, we also can't do this if X
             is a hard register.

             If the "x = b;" insn has any REG_NOTES, we don't do this because
             of the possibility that we are running after CSE and there is a
             REG_EQUAL note that is only valid if the branch has already been
             taken.  If we move the insn with the REG_EQUAL note, we may
             fold the comparison to always be false in a later CSE pass.
             (We could also delete the REG_NOTES when moving the insn, but it
             seems simpler to not move it.)  An exception is that we can move
             the insn if the only note is a REG_EQUAL or REG_EQUIV whose
             value is the same as "b".

             INSN is the branch over the `else' part.

             We set:

             TEMP to the jump insn preceding "x = a;"
             TEMP1 to X
             TEMP2 to the insn that sets "x = b;"
             TEMP3 to the insn that sets "x = a;"
             TEMP4 to the set of "x = b";  */

          if (this_is_simplejump
              && (temp3 = prev_active_insn (insn)) != 0
              && GET_CODE (temp3) == INSN
              && (temp4 = single_set (temp3)) != 0
              && GET_CODE (temp1 = SET_DEST (temp4)) == REG
              && (! SMALL_REGISTER_CLASSES
                  || REGNO (temp1) >= FIRST_PSEUDO_REGISTER)
              && (temp2 = next_active_insn (insn)) != 0
              && GET_CODE (temp2) == INSN
              && (temp4 = single_set (temp2)) != 0
              && rtx_equal_p (SET_DEST (temp4), temp1)
              && ! side_effects_p (SET_SRC (temp4))
              && ! may_trap_p (SET_SRC (temp4))
              && (REG_NOTES (temp2) == 0
                  || ((REG_NOTE_KIND (REG_NOTES (temp2)) == REG_EQUAL
                       || REG_NOTE_KIND (REG_NOTES (temp2)) == REG_EQUIV)
                      && XEXP (REG_NOTES (temp2), 1) == 0
                      && rtx_equal_p (XEXP (REG_NOTES (temp2), 0),
                                      SET_SRC (temp4))))
              && (temp = prev_active_insn (temp3)) != 0
              && condjump_p (temp) && ! simplejump_p (temp)
              /* TEMP must skip over the "x = a;" insn */
              && prev_real_insn (JUMP_LABEL (temp)) == insn
              && no_labels_between_p (insn, JUMP_LABEL (temp))
              /* There must be no other entries to the "x = b;" insn.  */
              && no_labels_between_p (JUMP_LABEL (temp), temp2)
              /* INSN must either branch to the insn after TEMP2 or the insn
                 after TEMP2 must branch to the same place as INSN.  */
              && (reallabelprev == temp2
                  || ((temp5 = next_active_insn (temp2)) != 0
                      && simplejump_p (temp5)
                      && JUMP_LABEL (temp5) == JUMP_LABEL (insn))))
            {
              /* The test expression, X, may be a complicated test with
                 multiple branches.  See if we can find all the uses of
                 the label that TEMP branches to without hitting a CALL_INSN
                 or a jump to somewhere else.  */
              rtx target = JUMP_LABEL (temp);
              int nuses = LABEL_NUSES (target);
              rtx p;
#ifdef HAVE_cc0
              rtx q;
#endif

              /* Set P to the first jump insn that goes around "x = a;".  */
              for (p = temp; nuses && p; p = prev_nonnote_insn (p))
                {
                  if (GET_CODE (p) == JUMP_INSN)
                    {
                      if (condjump_p (p) && ! simplejump_p (p)
                          && JUMP_LABEL (p) == target)
                        {
                          nuses--;
                          if (nuses == 0)
                            break;
                        }
                      else
                        break;
                    }
                  else if (GET_CODE (p) == CALL_INSN)
                    break;
                }

#ifdef HAVE_cc0
              /* We cannot insert anything between a set of cc and its use
                 so if P uses cc0, we must back up to the previous insn.  */
              q = prev_nonnote_insn (p);
              if (q && GET_RTX_CLASS (GET_CODE (q)) == 'i'
                  && sets_cc0_p (PATTERN (q)))
                p = q;
#endif

              if (p)
                p = PREV_INSN (p);

              /* If we found all the uses and there was no data conflict, we
                 can move the assignment unless we can branch into the middle
                 from somewhere.  */
              if (nuses == 0 && p
                  && no_labels_between_p (p, insn)
                  && ! reg_referenced_between_p (temp1, p, NEXT_INSN (temp3))
                  && ! reg_set_between_p (temp1, p, temp3)
                  && (GET_CODE (SET_SRC (temp4)) == CONST_INT
                      || ! modified_between_p (SET_SRC (temp4), p, temp2))
                  /* Verify that registers used by the jump are not clobbered
                     by the instruction being moved.  */
                  && ! regs_set_between_p (PATTERN (temp),
                                           PREV_INSN (temp2),
                                           NEXT_INSN (temp2)))
                {
                  emit_insn_after_with_line_notes (PATTERN (temp2), p, temp2);
                  delete_insn (temp2);

                  /* Set NEXT to an insn that we know won't go away.  */
                  next = next_active_insn (insn);

                  /* Delete the jump around the set.  Note that we must do
                     this before we redirect the test jumps so that it won't
                     delete the code immediately following the assignment
                     we moved (which might be a jump).  */

                  delete_insn (insn);

                  /* We either have two consecutive labels or a jump to
                     a jump, so adjust all the JUMP_INSNs to branch to where
                     INSN branches to.  */
                  for (p = NEXT_INSN (p); p != next; p = NEXT_INSN (p))
                    if (GET_CODE (p) == JUMP_INSN)
                      redirect_jump (p, target);

                  changed = 1;
                  continue;
                }
            }

          /* Simplify   if (...) { x = a; goto l; } x = b;  by converting it
             to         x = a; if (...) goto l; x = b;
             if A is sufficiently simple, the test doesn't involve X,
             and nothing in the test modifies A or X.

             If we have small register classes, we also can't do this if X
             is a hard register.

             If the "x = a;" insn has any REG_NOTES, we don't do this because
             of the possibility that we are running after CSE and there is a
             REG_EQUAL note that is only valid if the branch has already been
             taken.  If we move the insn with the REG_EQUAL note, we may
             fold the comparison to always be false in a later CSE pass.
             (We could also delete the REG_NOTES when moving the insn, but it
             seems simpler to not move it.)  An exception is that we can move
             the insn if the only note is a REG_EQUAL or REG_EQUIV whose
             value is the same as "a".

             INSN is the goto.

             We set:

             TEMP to the jump insn preceding "x = a;"
             TEMP1 to X
             TEMP2 to the insn that sets "x = b;"
             TEMP3 to the insn that sets "x = a;"
             TEMP4 to the set of "x = a";  */

          if (this_is_simplejump
              && (temp2 = next_active_insn (insn)) != 0
              && GET_CODE (temp2) == INSN
              && (temp4 = single_set (temp2)) != 0
              && GET_CODE (temp1 = SET_DEST (temp4)) == REG
              && (! SMALL_REGISTER_CLASSES
                  || REGNO (temp1) >= FIRST_PSEUDO_REGISTER)
              && (temp3 = prev_active_insn (insn)) != 0
              && GET_CODE (temp3) == INSN
              && (temp4 = single_set (temp3)) != 0
              && rtx_equal_p (SET_DEST (temp4), temp1)
              && ! side_effects_p (SET_SRC (temp4))
              && ! may_trap_p (SET_SRC (temp4))
              && (REG_NOTES (temp3) == 0
                  || ((REG_NOTE_KIND (REG_NOTES (temp3)) == REG_EQUAL
                       || REG_NOTE_KIND (REG_NOTES (temp3)) == REG_EQUIV)
                      && XEXP (REG_NOTES (temp3), 1) == 0
                      && rtx_equal_p (XEXP (REG_NOTES (temp3), 0),
                                      SET_SRC (temp4))))
              && (temp = prev_active_insn (temp3)) != 0
              && condjump_p (temp) && ! simplejump_p (temp)
              /* TEMP must skip over the "x = a;" insn */
              && prev_real_insn (JUMP_LABEL (temp)) == insn
              && no_labels_between_p (temp, insn))
            {
              rtx prev_label = JUMP_LABEL (temp);
              rtx insert_after = prev_nonnote_insn (temp);

#ifdef HAVE_cc0
              /* We cannot insert anything between a set of cc and its use.  */
              if (insert_after && GET_RTX_CLASS (GET_CODE (insert_after)) == 'i'
                  && sets_cc0_p (PATTERN (insert_after)))
                insert_after = prev_nonnote_insn (insert_after);
#endif
              ++LABEL_NUSES (prev_label);

              if (insert_after
                  && no_labels_between_p (insert_after, temp)
                  && ! reg_referenced_between_p (temp1, insert_after, temp3)
                  && ! reg_referenced_between_p (temp1, temp3,
                                                 NEXT_INSN (temp2))
                  && ! reg_set_between_p (temp1, insert_after, temp)
                  && ! modified_between_p (SET_SRC (temp4), insert_after, temp)
                  /* Verify that registers used by the jump are not clobbered
                     by the instruction being moved.  */
                  && ! regs_set_between_p (PATTERN (temp),
                                           PREV_INSN (temp3),
                                           NEXT_INSN (temp3))
                  && invert_jump (temp, JUMP_LABEL (insn)))
                {
                  emit_insn_after_with_line_notes (PATTERN (temp3),
                                                   insert_after, temp3);
                  delete_insn (temp3);
                  delete_insn (insn);
                  /* Set NEXT to an insn that we know won't go away.  */
                  next = temp2;
                  changed = 1;
                }
              if (prev_label && --LABEL_NUSES (prev_label) == 0)
                delete_insn (prev_label);
              if (changed)
                continue;
            }

#ifndef HAVE_cc0
          /* If we have  if (...) x = exp;  and branches are expensive,
             EXP is a single insn, does not have any side effects, cannot
             trap, and is not too costly, convert this to
             t = exp; if (...) x = t;

             Don't do this when we have CC0 because it is unlikely to help
             and we'd need to worry about where to place the new insn and
             the potential for conflicts.  We also can't do this when we have
             notes on the insn for the same reason as above.

             We set:

             TEMP to the "x = exp;" insn.
             TEMP1 to the single set in the "x = exp;" insn.
             TEMP2 to "x".  */

          if (! reload_completed
              && this_is_condjump && ! this_is_simplejump
              && BRANCH_COST >= 3
              && (temp = next_nonnote_insn (insn)) != 0
              && GET_CODE (temp) == INSN
              && REG_NOTES (temp) == 0
              && (reallabelprev == temp
                  || ((temp2 = next_active_insn (temp)) != 0
                      && simplejump_p (temp2)
                      && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
              && (temp1 = single_set (temp)) != 0
              && (temp2 = SET_DEST (temp1), GET_CODE (temp2) == REG)
              && (! SMALL_REGISTER_CLASSES
                  || REGNO (temp2) >= FIRST_PSEUDO_REGISTER)
              && GET_CODE (SET_SRC (temp1)) != REG
              && GET_CODE (SET_SRC (temp1)) != SUBREG
              && GET_CODE (SET_SRC (temp1)) != CONST_INT
              && ! side_effects_p (SET_SRC (temp1))
              && ! may_trap_p (SET_SRC (temp1))
              && rtx_cost (SET_SRC (temp1), SET) < 10)
            {
              rtx new = gen_reg_rtx (GET_MODE (temp2));

              if ((temp3 = find_insert_position (insn, temp))
                  && validate_change (temp, &SET_DEST (temp1), new, 0))
                {
                  next = emit_insn_after (gen_move_insn (temp2, new), insn);
                  emit_insn_after_with_line_notes (PATTERN (temp),
                                                   PREV_INSN (temp3), temp);
                  delete_insn (temp);
                  reallabelprev = prev_active_insn (JUMP_LABEL (insn));

                  if (after_regscan)
                    {
                      reg_scan_update (temp3, NEXT_INSN (next), old_max_reg);
                      old_max_reg = max_reg_num ();
                    }
                }
            }

          /* Similarly, if it takes two insns to compute EXP but they
             have the same destination.  Here TEMP3 will be the second
             insn and TEMP4 the SET from that insn.  */

          if (! reload_completed
              && this_is_condjump && ! this_is_simplejump
              && BRANCH_COST >= 4
              && (temp = next_nonnote_insn (insn)) != 0
              && GET_CODE (temp) == INSN
              && REG_NOTES (temp) == 0
              && (temp3 = next_nonnote_insn (temp)) != 0
              && GET_CODE (temp3) == INSN
              && REG_NOTES (temp3) == 0
              && (reallabelprev == temp3
                  || ((temp2 = next_active_insn (temp3)) != 0
                      && simplejump_p (temp2)
                      && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
              && (temp1 = single_set (temp)) != 0
              && (temp2 = SET_DEST (temp1), GET_CODE (temp2) == REG)
              && GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT
              && (! SMALL_REGISTER_CLASSES
                  || REGNO (temp2) >= FIRST_PSEUDO_REGISTER)
              && ! side_effects_p (SET_SRC (temp1))
              && ! may_trap_p (SET_SRC (temp1))
              && rtx_cost (SET_SRC (temp1), SET) < 10
              && (temp4 = single_set (temp3)) != 0
              && rtx_equal_p (SET_DEST (temp4), temp2)
              && ! side_effects_p (SET_SRC (temp4))
              && ! may_trap_p (SET_SRC (temp4))
              && rtx_cost (SET_SRC (temp4), SET) < 10)
            {
              rtx new = gen_reg_rtx (GET_MODE (temp2));

              if ((temp5 = find_insert_position (insn, temp))
                  && (temp6 = find_insert_position (insn, temp3))
                  && validate_change (temp, &SET_DEST (temp1), new, 0))
                {
                  /* Use the earliest of temp5 and temp6.  */
                  if (temp5 != insn)
                    temp6 = temp5;
                  next = emit_insn_after (gen_move_insn (temp2, new), insn);
                  emit_insn_after_with_line_notes (PATTERN (temp),
                                                   PREV_INSN (temp6), temp);
                  emit_insn_after_with_line_notes
                    (replace_rtx (PATTERN (temp3), temp2, new),
                     PREV_INSN (temp6), temp3);
                  delete_insn (temp);
                  delete_insn (temp3);
                  reallabelprev = prev_active_insn (JUMP_LABEL (insn));

                  if (after_regscan)
                    {
                      reg_scan_update (temp6, NEXT_INSN (next), old_max_reg);
                      old_max_reg = max_reg_num ();
                    }
                }
            }

          /* Finally, handle the case where two insns are used to
             compute EXP but a temporary register is used.  Here we must
             ensure that the temporary register is not used anywhere else.  */

          if (! reload_completed
              && after_regscan
              && this_is_condjump && ! this_is_simplejump
              && BRANCH_COST >= 4
              && (temp = next_nonnote_insn (insn)) != 0
              && GET_CODE (temp) == INSN
              && REG_NOTES (temp) == 0
              && (temp3 = next_nonnote_insn (temp)) != 0
              && GET_CODE (temp3) == INSN
              && REG_NOTES (temp3) == 0
              && (reallabelprev == temp3
                  || ((temp2 = next_active_insn (temp3)) != 0
                      && simplejump_p (temp2)
                      && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
              && (temp1 = single_set (temp)) != 0
              && (temp5 = SET_DEST (temp1),
                  (GET_CODE (temp5) == REG
                   || (GET_CODE (temp5) == SUBREG
                       && (temp5 = SUBREG_REG (temp5),
                           GET_CODE (temp5) == REG))))
              && REGNO (temp5) >= FIRST_PSEUDO_REGISTER
              && REGNO_FIRST_UID (REGNO (temp5)) == INSN_UID (temp)
              && REGNO_LAST_UID (REGNO (temp5)) == INSN_UID (temp3)
              && ! side_effects_p (SET_SRC (temp1))
              && ! may_trap_p (SET_SRC (temp1))
              && rtx_cost (SET_SRC (temp1), SET) < 10
              && (temp4 = single_set (temp3)) != 0
              && (temp2 = SET_DEST (temp4), GET_CODE (temp2) == REG)
              && GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT
              && (! SMALL_REGISTER_CLASSES
                  || REGNO (temp2) >= FIRST_PSEUDO_REGISTER)
              && rtx_equal_p (SET_DEST (temp4), temp2)
              && ! side_effects_p (SET_SRC (temp4))
              && ! may_trap_p (SET_SRC (temp4))
              && rtx_cost (SET_SRC (temp4), SET) < 10)
            {
              rtx new = gen_reg_rtx (GET_MODE (temp2));

              if ((temp5 = find_insert_position (insn, temp))
                  && (temp6 = find_insert_position (insn, temp3))
                  && validate_change (temp3, &SET_DEST (temp4), new, 0))
                {
                  /* Use the earliest of temp5 and temp6.  */
                  if (temp5 != insn)
                    temp6 = temp5;
                  next = emit_insn_after (gen_move_insn (temp2, new), insn);
                  emit_insn_after_with_line_notes (PATTERN (temp),
                                                   PREV_INSN (temp6), temp);
                  emit_insn_after_with_line_notes (PATTERN (temp3),
                                                   PREV_INSN (temp6), temp3);
                  delete_insn (temp);
                  delete_insn (temp3);
                  reallabelprev = prev_active_insn (JUMP_LABEL (insn));

                  if (after_regscan)
                    {
                      reg_scan_update (temp6, NEXT_INSN (next), old_max_reg);
                      old_max_reg = max_reg_num ();
                    }
                }
            }
#endif /* HAVE_cc0 */

          /* Try to use a conditional move (if the target has them), or a
             store-flag insn.  The general case is:

             1) x = a; if (...) x = b;  and
             2) if (...) x = b;

             If the jump would be faster, the machine should not have defined
             the movcc or scc insns!  These cases are often made by the
             previous optimization.

             The second case is treated as  x = x; if (...) x = b;

             INSN here is the jump around the store.  We set:

             TEMP to the "x = b;" insn.
             TEMP1 to X.
             TEMP2 to B.
             TEMP3 to A (X in the second case).
             TEMP4 to the condition being tested.
             TEMP5 to the earliest insn used to find the condition.  */
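
          /* Source-level sketch (example ours, not from the original
             commentary): on a machine with a conditional move, the
             general case

                  x = a; if (cond) x = b;

             becomes the branch-free equivalent of

                  x = cond ? b : a;

             which is what the emit_conditional_move attempt below tries
             first, before falling back to a store-flag insn.  */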

          if (/* We can't do this after reload has completed.  */
              ! reload_completed
              && this_is_condjump && ! this_is_simplejump
              /* Set TEMP to the "x = b;" insn.  */
              && (temp = next_nonnote_insn (insn)) != 0
              && GET_CODE (temp) == INSN
              && GET_CODE (PATTERN (temp)) == SET
              && GET_CODE (temp1 = SET_DEST (PATTERN (temp))) == REG
              && (! SMALL_REGISTER_CLASSES
                  || REGNO (temp1) >= FIRST_PSEUDO_REGISTER)
              && ! side_effects_p (temp2 = SET_SRC (PATTERN (temp)))
              && ! may_trap_p (temp2)
              /* Allow either form, but prefer the former if both apply.
                 There is no point in using the old value of TEMP1 if
                 it is a register, since cse will alias them.  It can
                 lose if the old value were a hard register since CSE
                 won't replace hard registers.  Avoid using TEMP3 if
                 small register classes and it is a hard register.  */
              && (((temp3 = reg_set_last (temp1, insn)) != 0
                   && ! (SMALL_REGISTER_CLASSES && GET_CODE (temp3) == REG
                         && REGNO (temp3) < FIRST_PSEUDO_REGISTER))
                  /* Make the latter case look like  x = x; if (...) x = b;  */
                  || (temp3 = temp1, 1))
              /* INSN must either branch to the insn after TEMP or the insn
                 after TEMP must branch to the same place as INSN.  */
              && (reallabelprev == temp
                  || ((temp4 = next_active_insn (temp)) != 0
                      && simplejump_p (temp4)
                      && JUMP_LABEL (temp4) == JUMP_LABEL (insn)))
              && (temp4 = get_condition (insn, &temp5)) != 0
              /* We must be comparing objects whose modes imply the size.
                 We could handle BLKmode if (1) emit_store_flag could
                 and (2) we could find the size reliably.  */
              && GET_MODE (XEXP (temp4, 0)) != BLKmode
              /* Even if branches are cheap, the store_flag optimization
                 can win when the operation to be performed can be
                 expressed directly.  */
#ifdef HAVE_cc0
              /* If the previous insn sets CC0 and something else, we can't
                 do this since we are going to delete that insn.  */

              && ! ((temp6 = prev_nonnote_insn (insn)) != 0
                    && GET_CODE (temp6) == INSN
                    && (sets_cc0_p (PATTERN (temp6)) == -1
                        || (sets_cc0_p (PATTERN (temp6)) == 1
                            && FIND_REG_INC_NOTE (temp6, NULL_RTX))))
#endif
              )
            {
#ifdef HAVE_conditional_move
              /* First try a conditional move.  */
              {
                enum rtx_code code = GET_CODE (temp4);
                rtx var = temp1;
                rtx cond0, cond1, aval, bval;
                rtx target;

                /* Copy the compared variables into cond0 and cond1, so
                   that any side effects performed in or after the old
                   comparison will not affect our compare, which will come
                   later.  */
                /* ??? Is it possible to just use the comparison in the jump
                   insn?  After all, we're going to delete it.  We'd have
                   to modify emit_conditional_move to take a comparison rtx
                   instead or write a new function.  */
                cond0 = gen_reg_rtx (GET_MODE (XEXP (temp4, 0)));
                /* We want the target to be able to simplify comparisons with
                   zero (and maybe other constants as well), so don't create
                   pseudos for them.  There's no need to either.  */
                if (GET_CODE (XEXP (temp4, 1)) == CONST_INT
                    || GET_CODE (XEXP (temp4, 1)) == CONST_DOUBLE)
                  cond1 = XEXP (temp4, 1);
                else
                  cond1 = gen_reg_rtx (GET_MODE (XEXP (temp4, 1)));

                aval = temp3;
                bval = temp2;

                start_sequence ();
                target = emit_conditional_move (var, code,
                                                cond0, cond1, VOIDmode,
                                                aval, bval, GET_MODE (var),
                                                (code == LTU || code == GEU
                                                 || code == LEU || code == GTU));

                if (target)
                  {
                    rtx seq1, seq2, last;

                    /* Save the conditional move sequence but don't emit it
                       yet.  On some machines, like the alpha, it is possible
                       that temp5 == insn, so next generate the sequence that
                       saves the compared values and then emit both
                       sequences ensuring seq1 occurs before seq2.  */
                    seq2 = get_insns ();
                    end_sequence ();

                    /* Now that we can't fail, generate the copy insns that
                       preserve the compared values.  */
                    start_sequence ();
                    emit_move_insn (cond0, XEXP (temp4, 0));
                    if (cond1 != XEXP (temp4, 1))
                      emit_move_insn (cond1, XEXP (temp4, 1));
                    seq1 = get_insns ();
                    end_sequence ();

                    emit_insns_before (seq1, temp5);
                    /* Insert the conditional move after INSN, to be sure
                       that the jump and a possible compare won't be
                       separated.  */
                    last = emit_insns_after (seq2, insn);

                    /* ??? We can also delete the insn that sets X to A.
                       Flow will do it too though.  */
                    delete_insn (temp);
                    next = NEXT_INSN (insn);
                    delete_jump (insn);

                    if (after_regscan)
                      {
                        reg_scan_update (seq1, NEXT_INSN (last), old_max_reg);
                        old_max_reg = max_reg_num ();
                      }

                    changed = 1;
                    continue;
                  }
                else
                  end_sequence ();
              }
#endif

              /* That didn't work, try a store-flag insn.

                 We further divide the cases into:

                 1) x = a; if (...) x = b; and either A or B is zero,
                 2) if (...) x = 0; and jumps are expensive,
                 3) x = a; if (...) x = b; and A and B are constants where all
                    the set bits in A are also set in B and jumps are expensive,
                 4) x = a; if (...) x = b; and A and B are non-zero, and
                    jumps are more expensive, and
                 5) if (...) x = b; if jumps are even more expensive.  */

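              /* A worked instance of case 2 (example ours, assuming a
                 store-flag pattern producing 0 / -1 values):

                      if (a == b) x = 0;

                 where the jump around the store is taken when a != b,
                 becomes

                      x &= -(a != b);

                 which clears X exactly when A == B, with no branch.  */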
              if (GET_MODE_CLASS (GET_MODE (temp1)) == MODE_INT
                  && ((GET_CODE (temp3) == CONST_INT)
                      /* Make the latter case look like
                         x = x; if (...) x = 0;  */
                      || (temp3 = temp1,
                          ((BRANCH_COST >= 2
                            && temp2 == const0_rtx)
                           || BRANCH_COST >= 3)))
                  /* If B is zero, OK; if A is zero, can only do (1) if we
                     can reverse the condition.  See if (3) applies possibly
                     by reversing the condition.  Prefer reversing to (4) when
                     branches are very expensive.  */
                  && (((BRANCH_COST >= 2
                        || STORE_FLAG_VALUE == -1
                        || (STORE_FLAG_VALUE == 1
                            /* Check that the mask is a power of two,
                               so that it can probably be generated
                               with a shift.  */
                            && GET_CODE (temp3) == CONST_INT
                            && exact_log2 (INTVAL (temp3)) >= 0))
                       && (reversep = 0, temp2 == const0_rtx))
                      || ((BRANCH_COST >= 2
                           || STORE_FLAG_VALUE == -1
                           || (STORE_FLAG_VALUE == 1
                               && GET_CODE (temp2) == CONST_INT
                               && exact_log2 (INTVAL (temp2)) >= 0))
                          && temp3 == const0_rtx
                          && (reversep = can_reverse_comparison_p (temp4, insn)))
                      || (BRANCH_COST >= 2
                          && GET_CODE (temp2) == CONST_INT
                          && GET_CODE (temp3) == CONST_INT
                          && ((INTVAL (temp2) & INTVAL (temp3)) == INTVAL (temp2)
                              || ((INTVAL (temp2) & INTVAL (temp3)) == INTVAL (temp3)
                                  && (reversep = can_reverse_comparison_p (temp4,
                                                                           insn)))))
                      || BRANCH_COST >= 3)
                  )
                {
                  enum rtx_code code = GET_CODE (temp4);
                  rtx uval, cval, var = temp1;
                  int normalizep;
                  rtx target;

                  /* If necessary, reverse the condition.  */
                  if (reversep)
                    code = reverse_condition (code), uval = temp2, cval = temp3;
                  else
                    uval = temp3, cval = temp2;

                  /* If CVAL is non-zero, normalize to -1.  Otherwise, if UVAL
                     is the constant 1, it is best to just compute the result
                     directly.  If UVAL is constant and STORE_FLAG_VALUE
                     includes all of its bits, it is best to compute the flag
                     value unnormalized and `and' it with UVAL.  Otherwise,
                     normalize to -1 and `and' with UVAL.  */
                  normalizep = (cval != const0_rtx ? -1
                                : (uval == const1_rtx ? 1
                                   : (GET_CODE (uval) == CONST_INT
                                      && (INTVAL (uval) & ~STORE_FLAG_VALUE) == 0)
                                   ? 0 : -1));
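
                  /* For instance (example ours): with CVAL == 0 and
                     UVAL == 1, the raw 0/1 flag is the result itself
                     (normalizep == 1); with CVAL == 0, UVAL == 8 and
                     STORE_FLAG_VALUE == -1, the unnormalized 0/-1 flag
                     is simply ANDed with 8 (normalizep == 0).  */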

                  /* We will be putting the store-flag insn immediately in
                     front of the comparison that was originally being done,
                     so we know all the variables in TEMP4 will be valid.
                     However, this might be in front of the assignment of
                     A to VAR.  If it is, it would clobber the store-flag
                     we will be emitting.

                     Therefore, emit into a temporary which will be copied to
                     VAR immediately after TEMP.  */

                  start_sequence ();
                  target = emit_store_flag (gen_reg_rtx (GET_MODE (var)), code,
                                            XEXP (temp4, 0), XEXP (temp4, 1),
                                            VOIDmode,
                                            (code == LTU || code == LEU
                                             || code == GEU || code == GTU),
                                            normalizep);
                  if (target)
                    {
                      rtx seq;
                      rtx before = insn;

                      seq = get_insns ();
                      end_sequence ();

                      /* Put the store-flag insns in front of the first insn
                         used to compute the condition to ensure that we
                         use the same values of them as the current
                         comparison.  However, the remainder of the insns we
                         generate will be placed directly in front of the
                         jump insn, in case any of the pseudos we use
                         are modified earlier.  */

                      emit_insns_before (seq, temp5);

                      start_sequence ();

                      /* Both CVAL and UVAL are non-zero.  */
                      if (cval != const0_rtx && uval != const0_rtx)
                        {
                          rtx tem1, tem2;

                          tem1 = expand_and (uval, target, NULL_RTX);
                          if (GET_CODE (cval) == CONST_INT
                              && GET_CODE (uval) == CONST_INT
                              && (INTVAL (cval) & INTVAL (uval)) == INTVAL (cval))
                            tem2 = cval;
                          else
                            {
                              tem2 = expand_unop (GET_MODE (var), one_cmpl_optab,
                                                  target, NULL_RTX, 0);
                              tem2 = expand_and (cval, tem2,
                                                 (GET_CODE (tem2) == REG
                                                  ? tem2 : 0));
                            }

                          /* If we usually make new pseudos, do so here.  This
                             turns out to help machines that have conditional
                             move insns.  */
                          /* ??? Conditional moves have already been handled.
                             This may be obsolete.  */

                          if (flag_expensive_optimizations)
                            target = 0;

                          target = expand_binop (GET_MODE (var), ior_optab,
                                                 tem1, tem2, target,
                                                 1, OPTAB_WIDEN);
                        }
                      else if (normalizep != 1)
                        {
                          /* We know that either CVAL or UVAL is zero.  If
                             UVAL is zero, negate TARGET and `and' with CVAL.
                             Otherwise, `and' with UVAL.  */
                          if (uval == const0_rtx)
                            {
                              target = expand_unop (GET_MODE (var), one_cmpl_optab,
                                                    target, NULL_RTX, 0);
                              uval = cval;
                            }

                          target = expand_and (uval, target,
                                               (GET_CODE (target) == REG
                                                && ! preserve_subexpressions_p ()
                                                ? target : NULL_RTX));
                        }

                      emit_move_insn (var, target);
                      seq = get_insns ();
                      end_sequence ();
#ifdef HAVE_cc0
                      /* If INSN uses CC0, we must not separate it from the
                         insn that sets cc0.  */
                      if (reg_mentioned_p (cc0_rtx, PATTERN (before)))
                        before = prev_nonnote_insn (before);
#endif
                      emit_insns_before (seq, before);

                      delete_insn (temp);
                      next = NEXT_INSN (insn);
                      delete_jump (insn);

                      if (after_regscan)
                        {
                          reg_scan_update (seq, NEXT_INSN (next), old_max_reg);
                          old_max_reg = max_reg_num ();
                        }

                      changed = 1;
                      continue;
                    }
                  else
                    end_sequence ();
                }
            }

          /* If branches are expensive, convert
                if (foo) bar++;    to    bar += (foo != 0);
             and similarly for "bar--;"

             INSN is the conditional branch around the arithmetic.  We set:

             TEMP is the arithmetic insn.
             TEMP1 is the SET doing the arithmetic.
             TEMP2 is the operand being incremented or decremented.
             TEMP3 to the condition being tested.
             TEMP4 to the earliest insn used to find the condition.  */

          if ((BRANCH_COST >= 2
#ifdef HAVE_incscc
               || HAVE_incscc
#endif
#ifdef HAVE_decscc
               || HAVE_decscc
#endif
               )
              && ! reload_completed
              && this_is_condjump && ! this_is_simplejump
              && (temp = next_nonnote_insn (insn)) != 0
              && (temp1 = single_set (temp)) != 0
              && (temp2 = SET_DEST (temp1),
                  GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT)
              && GET_CODE (SET_SRC (temp1)) == PLUS
              && (XEXP (SET_SRC (temp1), 1) == const1_rtx
                  || XEXP (SET_SRC (temp1), 1) == constm1_rtx)
              && rtx_equal_p (temp2, XEXP (SET_SRC (temp1), 0))
              && ! side_effects_p (temp2)
              && ! may_trap_p (temp2)
              /* INSN must either branch to the insn after TEMP or the insn
                 after TEMP must branch to the same place as INSN.  */
              && (reallabelprev == temp
                  || ((temp3 = next_active_insn (temp)) != 0
                      && simplejump_p (temp3)
                      && JUMP_LABEL (temp3) == JUMP_LABEL (insn)))
              && (temp3 = get_condition (insn, &temp4)) != 0
              /* We must be comparing objects whose modes imply the size.
                 We could handle BLKmode if (1) emit_store_flag could
                 and (2) we could find the size reliably.  */
              && GET_MODE (XEXP (temp3, 0)) != BLKmode
              && can_reverse_comparison_p (temp3, insn))
            {
              rtx temp6, target = 0, seq, init_insn = 0, init = temp2;
              enum rtx_code code = reverse_condition (GET_CODE (temp3));

              start_sequence ();

              /* It must be the case that TEMP2 is not modified in the range
                 [TEMP4, INSN).  The one exception we make is if the insn
                 before INSN sets TEMP2 to something which is also unchanged
                 in that range.  In that case, we can move the initialization
                 into our sequence.  */

              if ((temp5 = prev_active_insn (insn)) != 0
                  && no_labels_between_p (temp5, insn)
                  && GET_CODE (temp5) == INSN
                  && (temp6 = single_set (temp5)) != 0
                  && rtx_equal_p (temp2, SET_DEST (temp6))
                  && (CONSTANT_P (SET_SRC (temp6))
                      || GET_CODE (SET_SRC (temp6)) == REG
                      || GET_CODE (SET_SRC (temp6)) == SUBREG))
                {
                  emit_insn (PATTERN (temp5));
                  init_insn = temp5;
                  init = SET_SRC (temp6);
                }

              if (CONSTANT_P (init)
                  || ! reg_set_between_p (init, PREV_INSN (temp4), insn))
                target = emit_store_flag (gen_reg_rtx (GET_MODE (temp2)), code,
                                          XEXP (temp3, 0), XEXP (temp3, 1),
                                          VOIDmode,
                                          (code == LTU || code == LEU
                                           || code == GTU || code == GEU), 1);

              /* If we can do the store-flag, do the addition or
                 subtraction.  */

              if (target)
                target = expand_binop (GET_MODE (temp2),
                                       (XEXP (SET_SRC (temp1), 1) == const1_rtx
                                        ? add_optab : sub_optab),
                                       temp2, target, temp2, 0, OPTAB_WIDEN);

              if (target != 0)
                {
                  /* Put the result back in temp2 in case it isn't already.
                     Then replace the jump, possibly a CC0-setting insn in
                     front of the jump, and TEMP, with the sequence we have
                     made.  */

                  if (target != temp2)
                    emit_move_insn (temp2, target);

                  seq = get_insns ();
                  end_sequence ();

                  emit_insns_before (seq, temp4);
                  delete_insn (temp);

                  if (init_insn)
                    delete_insn (init_insn);

                  next = NEXT_INSN (insn);
#ifdef HAVE_cc0
                  delete_insn (prev_nonnote_insn (insn));
#endif
                  delete_insn (insn);

                  if (after_regscan)
                    {
                      reg_scan_update (seq, NEXT_INSN (next), old_max_reg);
                      old_max_reg = max_reg_num ();
                    }

                  changed = 1;
                  continue;
                }
              else
                end_sequence ();
            }

          /* Simplify   if (...) x = 1; else {...}  if (x) ...
             We recognize this case scanning backwards as well.

             TEMP is the assignment to x;
             TEMP1 is the label at the head of the second if.  */
          /* ?? This should call get_condition to find the values being
             compared, instead of looking for a COMPARE insn when HAVE_cc0
             is not defined.  This would allow it to work on the m88k.  */
          /* ?? This optimization is only safe before cse is run if HAVE_cc0
             is not defined and the condition is tested by a separate compare
             insn.  This is because the code below assumes that the result
             of the compare dies in the following branch.

             Not only that, but there might be other insns between the
             compare and branch whose results are live.  Those insns need
             to be executed.

             A way to fix this is to move the insns at JUMP_LABEL (insn)
             to before INSN.  If we are running before flow, they will
             be deleted if they aren't needed.  But this doesn't work
             well after flow.

             This is really a special-case of jump threading, anyway.  The
             right thing to do is to replace this and jump threading with
             much simpler code in cse.

             This code has been turned off in the non-cc0 case in the
             meantime.  */

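          /* Illustrative sketch of the case above (example ours): for

                  x = 1;
                  goto L1;
                  ...
             L1:
                  if (x) goto L2;

             the value tested at L1 is known, so the `goto L1' can be
             redirected straight to L2 (or to the fall-through point,
             when the stored constant makes the second test false).  */
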
#ifdef HAVE_cc0
          else if (this_is_simplejump
                   /* Safe to skip USE and CLOBBER insns here
                      since they will not be deleted.  */
                   && (temp = prev_active_insn (insn))
                   && no_labels_between_p (temp, insn)
                   && GET_CODE (temp) == INSN
                   && GET_CODE (PATTERN (temp)) == SET
                   && GET_CODE (SET_DEST (PATTERN (temp))) == REG
                   && CONSTANT_P (SET_SRC (PATTERN (temp)))
                   && (temp1 = next_active_insn (JUMP_LABEL (insn)))
                   /* If we find that the next value tested is `x'
                      (TEMP1 is the insn where this happens), win.  */
                   && GET_CODE (temp1) == INSN
                   && GET_CODE (PATTERN (temp1)) == SET
#ifdef HAVE_cc0
                   /* Does temp1 `tst' the value of x?  */
                   && SET_SRC (PATTERN (temp1)) == SET_DEST (PATTERN (temp))
                   && SET_DEST (PATTERN (temp1)) == cc0_rtx
                   && (temp1 = next_nonnote_insn (temp1))
#else
                   /* Does temp1 compare the value of x against zero?  */
                   && GET_CODE (SET_SRC (PATTERN (temp1))) == COMPARE
                   && XEXP (SET_SRC (PATTERN (temp1)), 1) == const0_rtx
                   && (XEXP (SET_SRC (PATTERN (temp1)), 0)
                       == SET_DEST (PATTERN (temp)))
                   && GET_CODE (SET_DEST (PATTERN (temp1))) == REG
                   && (temp1 = find_next_ref (SET_DEST (PATTERN (temp1)), temp1))
#endif
                   && condjump_p (temp1))
            {
              /* Get the if_then_else from the condjump.  */
              rtx choice = SET_SRC (PATTERN (temp1));
              if (GET_CODE (choice) == IF_THEN_ELSE)
                {
                  enum rtx_code code = GET_CODE (XEXP (choice, 0));
                  rtx val = SET_SRC (PATTERN (temp));
                  rtx cond
                    = simplify_relational_operation (code,
                                                     GET_MODE (SET_DEST (PATTERN (temp))),
                                                     val, const0_rtx);
                  rtx ultimate;

                  if (cond == const_true_rtx)
                    ultimate = XEXP (choice, 1);
                  else if (cond == const0_rtx)
                    ultimate = XEXP (choice, 2);
                  else
                    ultimate = 0;

                  if (ultimate == pc_rtx)
                    ultimate = get_label_after (temp1);
                  else if (ultimate && GET_CODE (ultimate) != RETURN)
                    ultimate = XEXP (ultimate, 0);

                  if (ultimate && JUMP_LABEL (insn) != ultimate)
                    changed |= redirect_jump (insn, ultimate);
                }
            }
#endif

#if 0
          /* @@ This needs a bit of work before it will be right.

             Any type of comparison can be accepted for the first and
             second compare.  When rewriting the first jump, we must
             compute what conditions can reach label3, and use the
             appropriate code.  We cannot simply reverse/swap the code
             of the first jump.  In some cases, the second jump must be
             rewritten also.

             For example,
             < == converts to > ==
             < != converts to == >
             etc.

             If the code is written to only accept an '==' test for the second
             compare, then all that needs to be done is to swap the condition
             of the first branch.

             It is questionable whether we want this optimization anyway,
             since if the user wrote code like this because he/she knew that
             the jump to label1 is taken most of the time, then rewriting
             this gives slower code.  */
          /* @@ This should call get_condition to find the values being
             compared, instead of looking for a COMPARE insn when HAVE_cc0
             is not defined.  This would allow it to work on the m88k.  */
          /* @@ This optimization is only safe before cse is run if HAVE_cc0
             is not defined and the condition is tested by a separate compare
             insn.  This is because the code below assumes that the result
             of the compare dies in the following branch.  */

          /* Simplify  test a ~= b
                       condjump label1;
                       test a == b
                       condjump label2;
                       jump label3;
                       label1:

             rewriting as
                       test a ~~= b
                       condjump label3
                       test a == b
                       condjump label2
                       label1:

             where ~= is an inequality, e.g. >, and ~~= is the swapped
             inequality, e.g. <.

             We recognize this case scanning backwards.

             TEMP is the conditional jump to `label2';
             TEMP1 is the test for `a == b';
             TEMP2 is the conditional jump to `label1';
             TEMP3 is the test for `a ~= b'.  */
          else if (this_is_simplejump
                   && (temp = prev_active_insn (insn))
                   && no_labels_between_p (temp, insn)
                   && condjump_p (temp)
                   && (temp1 = prev_active_insn (temp))
                   && no_labels_between_p (temp1, temp)
                   && GET_CODE (temp1) == INSN
                   && GET_CODE (PATTERN (temp1)) == SET
#ifdef HAVE_cc0
                   && sets_cc0_p (PATTERN (temp1)) == 1
#else
                   && GET_CODE (SET_SRC (PATTERN (temp1))) == COMPARE
                   && GET_CODE (SET_DEST (PATTERN (temp1))) == REG
                   && (temp == find_next_ref (SET_DEST (PATTERN (temp1)), temp1))
#endif
                   && (temp2 = prev_active_insn (temp1))
                   && no_labels_between_p (temp2, temp1)
                   && condjump_p (temp2)
                   && JUMP_LABEL (temp2) == next_nonnote_insn (NEXT_INSN (insn))
                   && (temp3 = prev_active_insn (temp2))
                   && no_labels_between_p (temp3, temp2)
                   && GET_CODE (PATTERN (temp3)) == SET
                   && rtx_equal_p (SET_DEST (PATTERN (temp3)),
                                   SET_DEST (PATTERN (temp1)))
                   && rtx_equal_p (SET_SRC (PATTERN (temp1)),
                                   SET_SRC (PATTERN (temp3)))
                   && ! inequality_comparisons_p (PATTERN (temp))
                   && inequality_comparisons_p (PATTERN (temp2)))
            {
              rtx fallthrough_label = JUMP_LABEL (temp2);

              ++LABEL_NUSES (fallthrough_label);
              if (swap_jump (temp2, JUMP_LABEL (insn)))
                {
                  delete_insn (insn);
                  changed = 1;
                }

              if (--LABEL_NUSES (fallthrough_label) == 0)
                delete_insn (fallthrough_label);
            }
#endif
          /* Simplify  if (...) {... x = 1;} if (x) ...

             We recognize this case backwards.

             TEMP is the test of `x';
             TEMP1 is the assignment to `x' at the end of the
             previous statement.  */
          /* @@ This should call get_condition to find the values being
             compared, instead of looking for a COMPARE insn when HAVE_cc0
             is not defined.  This would allow it to work on the m88k.  */
          /* @@ This optimization is only safe before cse is run if HAVE_cc0
             is not defined and the condition is tested by a separate compare
             insn.  This is because the code below assumes that the result
             of the compare dies in the following branch.  */

          /* ??? This has to be turned off.  The problem is that the
             unconditional jump might indirectly end up branching to the
             label between TEMP1 and TEMP.  We can't detect this, in general,
             since it may become a jump to there after further optimizations.
             If that jump is done, it will be deleted, so we will retry
             this optimization in the next pass, thus an infinite loop.

             The present code prevents this by putting the jump after the
             label, but this is not logically correct.  */
#if 0
          else if (this_is_condjump
                   /* Safe to skip USE and CLOBBER insns here
                      since they will not be deleted.  */
                   && (temp = prev_active_insn (insn))
                   && no_labels_between_p (temp, insn)
                   && GET_CODE (temp) == INSN
                   && GET_CODE (PATTERN (temp)) == SET
#ifdef HAVE_cc0
                   && sets_cc0_p (PATTERN (temp)) == 1
                   && GET_CODE (SET_SRC (PATTERN (temp))) == REG
#else
                   /* TEMP must be a compare insn; we cannot accept a
                      register-to-register move here, since it may not
                      be simply a tst insn.  */
                   && GET_CODE (SET_SRC (PATTERN (temp))) == COMPARE
                   && XEXP (SET_SRC (PATTERN (temp)), 1) == const0_rtx
                   && GET_CODE (XEXP (SET_SRC (PATTERN (temp)), 0)) == REG
                   && GET_CODE (SET_DEST (PATTERN (temp))) == REG
                   && insn == find_next_ref (SET_DEST (PATTERN (temp)), temp)
#endif
                   /* May skip USE or CLOBBER insns here
                      for checking for opportunity, since we
                      take care of them later.  */
                   && (temp1 = prev_active_insn (temp))
                   && GET_CODE (temp1) == INSN
                   && GET_CODE (PATTERN (temp1)) == SET
#ifdef HAVE_cc0
                   && SET_SRC (PATTERN (temp)) == SET_DEST (PATTERN (temp1))
#else
                   && (XEXP (SET_SRC (PATTERN (temp)), 0)
                       == SET_DEST (PATTERN (temp1)))
#endif
                   && CONSTANT_P (SET_SRC (PATTERN (temp1)))
                   /* If this isn't true, cse will do the job.  */
                   && ! no_labels_between_p (temp1, temp))
            {
              /* Get the if_then_else from the condjump.  */
              rtx choice = SET_SRC (PATTERN (insn));
              if (GET_CODE (choice) == IF_THEN_ELSE
                  && (GET_CODE (XEXP (choice, 0)) == EQ
                      || GET_CODE (XEXP (choice, 0)) == NE))
                {
                  int want_nonzero = (GET_CODE (XEXP (choice, 0)) == NE);
                  rtx last_insn;
                  rtx ultimate;
                  rtx p;

                  /* Get the place that condjump will jump to
                     if it is reached from here.  */
                  if ((SET_SRC (PATTERN (temp1)) != const0_rtx)
                      == want_nonzero)
                    ultimate = XEXP (choice, 1);
                  else
                    ultimate = XEXP (choice, 2);
                  /* Get it as a CODE_LABEL.  */
                  if (ultimate == pc_rtx)
                    ultimate = get_label_after (insn);
                  else
                    /* Get the label out of the LABEL_REF.  */
                    ultimate = XEXP (ultimate, 0);

                  /* Insert the jump immediately before TEMP, specifically
                     after the label that is between TEMP1 and TEMP.  */
                  last_insn = PREV_INSN (temp);

                  /* If we would be branching to the next insn, the jump
                     would immediately be deleted and then re-inserted in
                     a subsequent pass over the code.  So don't do anything
                     in that case.  */
1605 if (next_active_insn (last_insn)
1606 != next_active_insn (ultimate))
1607 {
1608 emit_barrier_after (last_insn);
1609 p = emit_jump_insn_after (gen_jump (ultimate),
1610 last_insn);
1611 JUMP_LABEL (p) = ultimate;
1612 ++LABEL_NUSES (ultimate);
1613 if (INSN_UID (ultimate) < max_jump_chain
1614 && INSN_CODE (p) < max_jump_chain)
1615 {
1616 jump_chain[INSN_UID (p)]
1617 = jump_chain[INSN_UID (ultimate)];
1618 jump_chain[INSN_UID (ultimate)] = p;
1619 }
1620 changed = 1;
1621 continue;
1622 }
1623 }
1624 }
1625 #endif
1626 /* Detect a conditional jump going to the same place
1627 as an immediately following unconditional jump. */
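/* For illustration, a hypothetical stream (simplest case, where
   both jumps name the same label):
       (jump_insn (set (pc) (if_then_else (ne ...) (label_ref L) (pc))))
       (jump_insn (set (pc) (label_ref L)))
     L: ...
   Control reaches L either way, so the conditional jump is useless
   and delete_jump removes it (and, where possible, its condition
   computation).  */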
1628 else if (this_is_condjump
1629 && (temp = next_active_insn (insn)) != 0
1630 && simplejump_p (temp)
1631 && (next_active_insn (JUMP_LABEL (insn))
1632 == next_active_insn (JUMP_LABEL (temp))))
1633 {
1634 rtx tem = temp;
1635
1636 /* ??? Optional. Disables some optimizations, but makes
1637 gcov output more accurate with -O. */
1638 if (flag_test_coverage && !reload_completed)
1639 for (tem = insn; tem != temp; tem = NEXT_INSN (tem))
1640 if (GET_CODE (tem) == NOTE && NOTE_LINE_NUMBER (tem) > 0)
1641 break;
1642
1643 if (tem == temp)
1644 {
1645 delete_jump (insn);
1646 changed = 1;
1647 continue;
1648 }
1649 }
1650 #ifdef HAVE_trap
1651 /* Detect a conditional jump jumping over an unconditional trap. */
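/* Schematically (hypothetical):
       if (x == 0) goto L;                  trap_if (x != 0);
       trap;                    becomes   L: ...
     L: ...
   i.e. the sense of the comparison is reversed and folded into a
   conditional trap, and both original insns are deleted.  */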
1652 else if (HAVE_trap
1653 && this_is_condjump && ! this_is_simplejump
1654 && reallabelprev != 0
1655 && GET_CODE (reallabelprev) == INSN
1656 && GET_CODE (PATTERN (reallabelprev)) == TRAP_IF
1657 && TRAP_CONDITION (PATTERN (reallabelprev)) == const_true_rtx
1658 && prev_active_insn (reallabelprev) == insn
1659 && no_labels_between_p (insn, reallabelprev)
1660 && (temp2 = get_condition (insn, &temp4))
1661 && can_reverse_comparison_p (temp2, insn))
1662 {
1663 rtx new = gen_cond_trap (reverse_condition (GET_CODE (temp2)),
1664 XEXP (temp2, 0), XEXP (temp2, 1),
1665 TRAP_CODE (PATTERN (reallabelprev)));
1666
1667 if (new)
1668 {
1669 emit_insn_before (new, temp4);
1670 delete_insn (reallabelprev);
1671 delete_jump (insn);
1672 changed = 1;
1673 continue;
1674 }
1675 }
1676 /* Detect a jump jumping to an unconditional trap. */
1677 else if (HAVE_trap && this_is_condjump
1678 && (temp = next_active_insn (JUMP_LABEL (insn)))
1679 && GET_CODE (temp) == INSN
1680 && GET_CODE (PATTERN (temp)) == TRAP_IF
1681 && (this_is_simplejump
1682 || (temp2 = get_condition (insn, &temp4))))
1683 {
1684 rtx tc = TRAP_CONDITION (PATTERN (temp));
1685
1686 if (tc == const_true_rtx
1687 || (! this_is_simplejump && rtx_equal_p (temp2, tc)))
1688 {
1689 rtx new;
1690 /* Replace an unconditional jump to a trap with a trap. */
1691 if (this_is_simplejump)
1692 {
1693 emit_barrier_after (emit_insn_before (gen_trap (), insn));
1694 delete_jump (insn);
1695 changed = 1;
1696 continue;
1697 }
1698 new = gen_cond_trap (GET_CODE (temp2), XEXP (temp2, 0),
1699 XEXP (temp2, 1),
1700 TRAP_CODE (PATTERN (temp)));
1701 if (new)
1702 {
1703 emit_insn_before (new, temp4);
1704 delete_jump (insn);
1705 changed = 1;
1706 continue;
1707 }
1708 }
1709 /* If the trap condition and jump condition are mutually
1710 exclusive, redirect the jump to the following insn. */
1711 else if (GET_RTX_CLASS (GET_CODE (tc)) == '<'
1712 && ! this_is_simplejump
1713 && swap_condition (GET_CODE (temp2)) == GET_CODE (tc)
1714 && rtx_equal_p (XEXP (tc, 0), XEXP (temp2, 0))
1715 && rtx_equal_p (XEXP (tc, 1), XEXP (temp2, 1))
1716 && redirect_jump (insn, get_label_after (temp)))
1717 {
1718 changed = 1;
1719 continue;
1720 }
1721 }
1722 #endif
1723
1724 /* Detect a conditional jump jumping over an unconditional jump. */
1725
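/* Schematically, the classic jump-around-a-jump (hypothetical):
       if (cond) goto L1;                  if (!cond) goto L2;
       goto L2;                becomes   L1: ...
     L1: ...
   The unconditional jump is deleted, and L1 as well once nothing
   else references it.  */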
1726 else if ((this_is_condjump || this_is_condjump_in_parallel)
1727 && ! this_is_simplejump
1728 && reallabelprev != 0
1729 && GET_CODE (reallabelprev) == JUMP_INSN
1730 && prev_active_insn (reallabelprev) == insn
1731 && no_labels_between_p (insn, reallabelprev)
1732 && simplejump_p (reallabelprev))
1733 {
1734 /* When we invert the conditional jump, we will be
1735 decrementing the usage count of its old label.
1736 Make sure that we don't delete it now because that
1737 might cause the following code to be deleted. */
1738 rtx prev_uses = prev_nonnote_insn (reallabelprev);
1739 rtx prev_label = JUMP_LABEL (insn);
1740
1741 if (prev_label)
1742 ++LABEL_NUSES (prev_label);
1743
1744 if (invert_jump (insn, JUMP_LABEL (reallabelprev)))
1745 {
1746 /* It is very likely that if there are USE insns before
1747 this jump, they hold REG_DEAD notes. These REG_DEAD
1748 notes are no longer valid due to this optimization,
1749 and will cause the life analysis done by following passes
1750 (notably delayed-branch scheduling) to think that
1751 these registers are dead when they are not.
1752
1753 To prevent this trouble, we just remove the USE insns
1754 from the insn chain. */
1755
1756 while (prev_uses && GET_CODE (prev_uses) == INSN
1757 && GET_CODE (PATTERN (prev_uses)) == USE)
1758 {
1759 rtx useless = prev_uses;
1760 prev_uses = prev_nonnote_insn (prev_uses);
1761 delete_insn (useless);
1762 }
1763
1764 delete_insn (reallabelprev);
1765 next = insn;
1766 changed = 1;
1767 }
1768
1769 /* We can now safely delete the label if it is unreferenced
1770 since the delete_insn above has deleted the BARRIER. */
1771 if (prev_label && --LABEL_NUSES (prev_label) == 0)
1772 delete_insn (prev_label);
1773 continue;
1774 }
1775 else
1776 {
1777 /* Detect a jump to a jump. */
1778
1779 nlabel = follow_jumps (JUMP_LABEL (insn));
1780 if (nlabel != JUMP_LABEL (insn)
1781 && redirect_jump (insn, nlabel))
1782 {
1783 changed = 1;
1784 next = insn;
1785 }
1786
1787 /* Look for if (foo) bar; else break; */
1788 /* The insns look like this:
1789 insn = condjump label1;
1790 ...range1 (some insns)...
1791 jump label2;
1792 label1:
1793 ...range2 (some insns)...
1794 jump somewhere unconditionally
1795 label2: */
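/* After the exchange below, range1's trailing "jump label2" sits
   immediately before label2 itself, so a later iteration of this
   pass deletes it as a jump to the following insn; that is the
   payoff of this transformation.  */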
1796 {
1797 rtx label1 = next_label (insn);
1798 rtx range1end = label1 ? prev_active_insn (label1) : 0;
1799 /* Don't do this optimization on the first round, so that
1800 jump-around-a-jump gets simplified before we ask here
1801 whether a jump is unconditional.
1802
1803 Also don't do it when we are called after reload since
1804 it will confuse reorg. */
1805 if (! first
1806 && (reload_completed ? ! flag_delayed_branch : 1)
1807 /* Make sure INSN is something we can invert. */
1808 && condjump_p (insn)
1809 && label1 != 0
1810 && JUMP_LABEL (insn) == label1
1811 && LABEL_NUSES (label1) == 1
1812 && GET_CODE (range1end) == JUMP_INSN
1813 && simplejump_p (range1end))
1814 {
1815 rtx label2 = next_label (label1);
1816 rtx range2end = label2 ? prev_active_insn (label2) : 0;
1817 if (range1end != range2end
1818 && JUMP_LABEL (range1end) == label2
1819 && GET_CODE (range2end) == JUMP_INSN
1820 && GET_CODE (NEXT_INSN (range2end)) == BARRIER
1821 /* Invert the jump condition, so we
1822 still execute the same insns in each case. */
1823 && invert_jump (insn, label1))
1824 {
1825 rtx range1beg = next_active_insn (insn);
1826 rtx range2beg = next_active_insn (label1);
1827 rtx range1after, range2after;
1828 rtx range1before, range2before;
1829 rtx rangenext;
1830
1831 /* Include in each range any notes before it, to be
1832 sure that we get the line number note if any, even
1833 if there are other notes here. */
1834 while (PREV_INSN (range1beg)
1835 && GET_CODE (PREV_INSN (range1beg)) == NOTE)
1836 range1beg = PREV_INSN (range1beg);
1837
1838 while (PREV_INSN (range2beg)
1839 && GET_CODE (PREV_INSN (range2beg)) == NOTE)
1840 range2beg = PREV_INSN (range2beg);
1841
1842 /* Don't move NOTEs for blocks or loops; shift them
1843 outside the ranges, where they'll stay put. */
1844 range1beg = squeeze_notes (range1beg, range1end);
1845 range2beg = squeeze_notes (range2beg, range2end);
1846
1847 /* Get current surrounds of the 2 ranges. */
1848 range1before = PREV_INSN (range1beg);
1849 range2before = PREV_INSN (range2beg);
1850 range1after = NEXT_INSN (range1end);
1851 range2after = NEXT_INSN (range2end);
1852
1853 /* Splice range2 where range1 was. */
1854 NEXT_INSN (range1before) = range2beg;
1855 PREV_INSN (range2beg) = range1before;
1856 NEXT_INSN (range2end) = range1after;
1857 PREV_INSN (range1after) = range2end;
1858 /* Splice range1 where range2 was. */
1859 NEXT_INSN (range2before) = range1beg;
1860 PREV_INSN (range1beg) = range2before;
1861 NEXT_INSN (range1end) = range2after;
1862 PREV_INSN (range2after) = range1end;
1863
1864 /* Check for a loop end note between the end of
1865 range2, and the next code label. If there is one,
1866 then what we have really seen is
1867 if (foo) break; end_of_loop;
1868 and moved the break sequence outside the loop.
1869 We must move the LOOP_END note to where the
1870 loop really ends now, or we will confuse loop
1871 optimization. Stop if we find a LOOP_BEG note
1872 first, since we don't want to move the LOOP_END
1873 note in that case. */
1874 for (;range2after != label2; range2after = rangenext)
1875 {
1876 rangenext = NEXT_INSN (range2after);
1877 if (GET_CODE (range2after) == NOTE)
1878 {
1879 if (NOTE_LINE_NUMBER (range2after)
1880 == NOTE_INSN_LOOP_END)
1881 {
1882 NEXT_INSN (PREV_INSN (range2after))
1883 = rangenext;
1884 PREV_INSN (rangenext)
1885 = PREV_INSN (range2after);
1886 PREV_INSN (range2after)
1887 = PREV_INSN (range1beg);
1888 NEXT_INSN (range2after) = range1beg;
1889 NEXT_INSN (PREV_INSN (range1beg))
1890 = range2after;
1891 PREV_INSN (range1beg) = range2after;
1892 }
1893 else if (NOTE_LINE_NUMBER (range2after)
1894 == NOTE_INSN_LOOP_BEG)
1895 break;
1896 }
1897 }
1898 changed = 1;
1899 continue;
1900 }
1901 }
1902 }
1903
1904 /* Now that the jump has been tensioned,
1905 try cross jumping: check for identical code
1906 before the jump and before its target label. */
1907
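/* For illustration (hypothetical): if the code just before INSN and
   the code just before its target label match, e.g.
       x = a + b;                        x = a + b;
       if (c) goto L;        ...       L: ...
   one copy of "x = a + b" can be dropped and the jump retargeted to
   a label in front of the surviving copy (see find_cross_jump and
   do_cross_jump below).  */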
1908 /* First, cross jumping of conditional jumps: */
1909
1910 if (cross_jump && condjump_p (insn))
1911 {
1912 rtx newjpos, newlpos;
1913 rtx x = prev_real_insn (JUMP_LABEL (insn));
1914
1915 /* A conditional jump may be crossjumped
1916 only if the place it jumps to follows
1917 an opposing jump that comes back here. */
1918
1919 if (x != 0 && ! jump_back_p (x, insn))
1920 /* We have no opposing jump;
1921 cannot cross jump this insn. */
1922 x = 0;
1923
1924 newjpos = 0;
1925 /* X is nonzero if it is ok to cross jump
1926 to code before X. If so, see if the insns match. */
1927 if (x != 0)
1928 find_cross_jump (insn, x, 2,
1929 &newjpos, &newlpos);
1930
1931 if (newjpos != 0)
1932 {
1933 do_cross_jump (insn, newjpos, newlpos);
1934 /* Make the old conditional jump
1935 into an unconditional one. */
1936 SET_SRC (PATTERN (insn))
1937 = gen_rtx_LABEL_REF (VOIDmode, JUMP_LABEL (insn));
1938 INSN_CODE (insn) = -1;
1939 emit_barrier_after (insn);
1940 /* Add to jump_chain unless this is a new label
1941 whose UID is too large. */
1942 if (INSN_UID (JUMP_LABEL (insn)) < max_jump_chain)
1943 {
1944 jump_chain[INSN_UID (insn)]
1945 = jump_chain[INSN_UID (JUMP_LABEL (insn))];
1946 jump_chain[INSN_UID (JUMP_LABEL (insn))] = insn;
1947 }
1948 changed = 1;
1949 next = insn;
1950 }
1951 }
1952
1953 /* Cross jumping of unconditional jumps:
1954 a few differences. */
1955
1956 if (cross_jump && simplejump_p (insn))
1957 {
1958 rtx newjpos, newlpos;
1959 rtx target;
1960
1961 newjpos = 0;
1962
1963 /* See if it is ok to cross jump to code
1964 before the target label; if so, see if the insns match. */
1965 find_cross_jump (insn, JUMP_LABEL (insn), 1,
1966 &newjpos, &newlpos);
1967
1968 /* If cannot cross jump to code before the label,
1969 see if we can cross jump to another jump to
1970 the same label. */
1971 /* Try each other jump to this label. */
1972 if (INSN_UID (JUMP_LABEL (insn)) < max_uid)
1973 for (target = jump_chain[INSN_UID (JUMP_LABEL (insn))];
1974 target != 0 && newjpos == 0;
1975 target = jump_chain[INSN_UID (target)])
1976 if (target != insn
1977 && JUMP_LABEL (target) == JUMP_LABEL (insn)
1978 /* Ignore TARGET if it's deleted. */
1979 && ! INSN_DELETED_P (target))
1980 find_cross_jump (insn, target, 2,
1981 &newjpos, &newlpos);
1982
1983 if (newjpos != 0)
1984 {
1985 do_cross_jump (insn, newjpos, newlpos);
1986 changed = 1;
1987 next = insn;
1988 }
1989 }
1990
1991 /* This code was dead in the previous jump.c! */
1992 if (cross_jump && GET_CODE (PATTERN (insn)) == RETURN)
1993 {
1994 /* Return insns all "jump to the same place"
1995 so we can cross-jump between any two of them. */
1996
1997 rtx newjpos, newlpos, target;
1998
1999 newjpos = 0;
2000
2001 /* If cannot cross jump to code before the label,
2002 see if we can cross jump to another jump to
2003 the same label. */
2004 /* Try each other jump to this label. */
2005 for (target = jump_chain[0];
2006 target != 0 && newjpos == 0;
2007 target = jump_chain[INSN_UID (target)])
2008 if (target != insn
2009 && ! INSN_DELETED_P (target)
2010 && GET_CODE (PATTERN (target)) == RETURN)
2011 find_cross_jump (insn, target, 2,
2012 &newjpos, &newlpos);
2013
2014 if (newjpos != 0)
2015 {
2016 do_cross_jump (insn, newjpos, newlpos);
2017 changed = 1;
2018 next = insn;
2019 }
2020 }
2021 }
2022 }
2023
2024 first = 0;
2025 }
2026
2027 /* Delete extraneous line number notes.
2028 Note that two consecutive notes for different lines are not really
2029 extraneous. There should be some indication where that line belonged,
2030 even if it became empty. */
2031
2032 {
2033 rtx last_note = 0;
2034
2035 for (insn = f; insn; insn = NEXT_INSN (insn))
2036 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) >= 0)
2037 {
2038 /* Delete this note if it is identical to previous note. */
2039 if (last_note
2040 && NOTE_SOURCE_FILE (insn) == NOTE_SOURCE_FILE (last_note)
2041 && NOTE_LINE_NUMBER (insn) == NOTE_LINE_NUMBER (last_note))
2042 {
2043 delete_insn (insn);
2044 continue;
2045 }
2046
2047 last_note = insn;
2048 }
2049 }
2050
2051 #ifdef HAVE_return
2052 if (HAVE_return)
2053 {
2054 /* If we fall through to the epilogue, see if we can insert a RETURN insn
2055 in front of it. If the machine allows it at this point (we might be
2056 after reload for a leaf routine), it will improve optimization for it
2057 to be there. We do this both here and at the start of this pass since
2058 the RETURN might have been deleted by some of our optimizations. */
2059 insn = get_last_insn ();
2060 while (insn && GET_CODE (insn) == NOTE)
2061 insn = PREV_INSN (insn);
2062
2063 if (insn && GET_CODE (insn) != BARRIER)
2064 {
2065 emit_jump_insn (gen_return ());
2066 emit_barrier ();
2067 }
2068 }
2069 #endif
2070
2071 can_reach_end = calculate_can_reach_end (last_insn, 0, 1);
2072
2073 /* Show JUMP_CHAIN no longer valid. */
2074 jump_chain = 0;
2075 }
2076 \f
2077 /* Initialize LABEL_NUSES and JUMP_LABEL fields. Delete any REG_LABEL
2078 notes whose labels don't occur in the insn any more. Returns the
2079 largest INSN_UID found. */
2080 static int
2081 init_label_info (f)
2082 rtx f;
2083 {
2084 int largest_uid = 0;
2085 rtx insn;
2086
2087 for (insn = f; insn; insn = NEXT_INSN (insn))
2088 {
2089 if (GET_CODE (insn) == CODE_LABEL)
2090 LABEL_NUSES (insn) = (LABEL_PRESERVE_P (insn) != 0);
2091 else if (GET_CODE (insn) == JUMP_INSN)
2092 JUMP_LABEL (insn) = 0;
2093 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2094 {
2095 rtx note, next;
2096
2097 for (note = REG_NOTES (insn); note; note = next)
2098 {
2099 next = XEXP (note, 1);
2100 if (REG_NOTE_KIND (note) == REG_LABEL
2101 && ! reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
2102 remove_note (insn, note);
2103 }
2104 }
2105 if (INSN_UID (insn) > largest_uid)
2106 largest_uid = INSN_UID (insn);
2107 }
2108
2109 return largest_uid;
2110 }
2111
2112 /* Delete insns following barriers, up to next label.
2113
2114 Also delete no-op jumps created by gcse. */
2115 static void
2116 delete_barrier_successors (f)
2117 rtx f;
2118 {
2119 rtx insn;
2120
2121 for (insn = f; insn;)
2122 {
2123 if (GET_CODE (insn) == BARRIER)
2124 {
2125 insn = NEXT_INSN (insn);
2126 while (insn != 0 && GET_CODE (insn) != CODE_LABEL)
2127 {
2128 if (GET_CODE (insn) == NOTE
2129 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_FUNCTION_END)
2130 insn = NEXT_INSN (insn);
2131 else
2132 insn = delete_insn (insn);
2133 }
2134 /* INSN is now the code_label. */
2135 }
2136 /* Also remove (set (pc) (pc)) insns which can be created by
2137 gcse. We eliminate such insns now to avoid having them
2138 cause problems later. */
2139 else if (GET_CODE (insn) == JUMP_INSN
2140 && SET_SRC (PATTERN (insn)) == pc_rtx
2141 && SET_DEST (PATTERN (insn)) == pc_rtx)
2142 insn = delete_insn (insn);
2143
2144 else
2145 insn = NEXT_INSN (insn);
2146 }
2147 }
2148
2149 /* Mark the label each jump jumps to.
2150 Combine consecutive labels, and count uses of labels.
2151
2152 For each label, make a chain (using `jump_chain')
2153 of all the *unconditional* jumps that jump to it;
2154 also make a chain of all returns.
2155
2156 CROSS_JUMP indicates whether we are doing cross jumping
2157 and if we are whether we will be paying attention to
2158 death notes or not. */
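/* For example (hypothetical insns): if simple jumps J1 and then J2
   are seen targeting label L, the result is
       jump_chain[uid (L)] == J2,
       jump_chain[uid (J2)] == J1,
       jump_chain[uid (J1)] == 0,
   and all RETURN insns are chained from jump_chain[0] likewise.  */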
2159
2160 static void
2161 mark_all_labels (f, cross_jump)
2162 rtx f;
2163 int cross_jump;
2164 {
2165 rtx insn;
2166
2167 for (insn = f; insn; insn = NEXT_INSN (insn))
2168 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2169 {
2170 mark_jump_label (PATTERN (insn), insn, cross_jump);
2171 if (! INSN_DELETED_P (insn) && GET_CODE (insn) == JUMP_INSN)
2172 {
2173 if (JUMP_LABEL (insn) != 0 && simplejump_p (insn))
2174 {
2175 jump_chain[INSN_UID (insn)]
2176 = jump_chain[INSN_UID (JUMP_LABEL (insn))];
2177 jump_chain[INSN_UID (JUMP_LABEL (insn))] = insn;
2178 }
2179 if (GET_CODE (PATTERN (insn)) == RETURN)
2180 {
2181 jump_chain[INSN_UID (insn)] = jump_chain[0];
2182 jump_chain[0] = insn;
2183 }
2184 }
2185 }
2186 }
2187
2188 /* Delete all labels that are no longer referenced.
2189 Also find and return the last insn. */
2190
2191 static rtx
2192 delete_unreferenced_labels (f)
2193 rtx f;
2194 {
2195 rtx final = NULL_RTX;
2196 rtx insn;
2197
2198 for (insn = f; insn; )
2199 {
2200 if (GET_CODE (insn) == CODE_LABEL && LABEL_NUSES (insn) == 0)
2201 insn = delete_insn (insn);
2202 else
2203 {
2204 final = insn;
2205 insn = NEXT_INSN (insn);
2206 }
2207 }
2208
2209 return final;
2210 }
2211
2212 /* Delete various simple forms of moves which have no necessary
2213 side effect. */
2214
2215 static void
2216 delete_noop_moves (f)
2217 rtx f;
2218 {
2219 rtx insn, next;
2220
2221 for (insn = f; insn; )
2222 {
2223 next = NEXT_INSN (insn);
2224
2225 if (GET_CODE (insn) == INSN)
2226 {
2227 register rtx body = PATTERN (insn);
2228
2229 /* Combine stack_adjusts with following push_insns. */
2230 #ifdef PUSH_ROUNDING
2231 if (GET_CODE (body) == SET
2232 && SET_DEST (body) == stack_pointer_rtx
2233 && GET_CODE (SET_SRC (body)) == PLUS
2234 && XEXP (SET_SRC (body), 0) == stack_pointer_rtx
2235 && GET_CODE (XEXP (SET_SRC (body), 1)) == CONST_INT
2236 && INTVAL (XEXP (SET_SRC (body), 1)) > 0)
2237 {
2238 rtx p;
2239 rtx stack_adjust_insn = insn;
2240 int stack_adjust_amount = INTVAL (XEXP (SET_SRC (body), 1));
2241 int total_pushed = 0;
2242 int pushes = 0;
2243
2244 /* Find all successive push insns. */
2245 p = insn;
2246 /* Don't convert more than three pushes;
2247 that starts adding too many displaced addresses
2248 and the whole thing starts becoming a losing
2249 proposition. */
2250 while (pushes < 3)
2251 {
2252 rtx pbody, dest;
2253 p = next_nonnote_insn (p);
2254 if (p == 0 || GET_CODE (p) != INSN)
2255 break;
2256 pbody = PATTERN (p);
2257 if (GET_CODE (pbody) != SET)
2258 break;
2259 dest = SET_DEST (pbody);
2260 /* Allow a no-op move between the adjust and the push. */
2261 if (GET_CODE (dest) == REG
2262 && GET_CODE (SET_SRC (pbody)) == REG
2263 && REGNO (dest) == REGNO (SET_SRC (pbody)))
2264 continue;
2265 if (! (GET_CODE (dest) == MEM
2266 && GET_CODE (XEXP (dest, 0)) == POST_INC
2267 && XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx))
2268 break;
2269 pushes++;
2270 if (total_pushed + GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)))
2271 > stack_adjust_amount)
2272 break;
2273 total_pushed += GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)));
2274 }
2275
2276 /* Discard the amount pushed from the stack adjust;
2277 maybe eliminate it entirely. */
2278 if (total_pushed >= stack_adjust_amount)
2279 {
2280 delete_computation (stack_adjust_insn);
2281 total_pushed = stack_adjust_amount;
2282 }
2283 else
2284 XEXP (SET_SRC (PATTERN (stack_adjust_insn)), 1)
2285 = GEN_INT (stack_adjust_amount - total_pushed);
2286
2287 /* Change the appropriate push insns to ordinary stores. */
2288 p = insn;
2289 while (total_pushed > 0)
2290 {
2291 rtx pbody, dest;
2292 p = next_nonnote_insn (p);
2293 if (GET_CODE (p) != INSN)
2294 break;
2295 pbody = PATTERN (p);
2296 if (GET_CODE (pbody) != SET)
2297 break;
2298 dest = SET_DEST (pbody);
2299 /* Allow a no-op move between the adjust and the push. */
2300 if (GET_CODE (dest) == REG
2301 && GET_CODE (SET_SRC (pbody)) == REG
2302 && REGNO (dest) == REGNO (SET_SRC (pbody)))
2303 continue;
2304 if (! (GET_CODE (dest) == MEM
2305 && GET_CODE (XEXP (dest, 0)) == POST_INC
2306 && XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx))
2307 break;
2308 total_pushed -= GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)));
2309 /* If this push doesn't fully fit in the space
2310 of the stack adjust that we deleted,
2311 make another stack adjust here for what we
2312 didn't use up. There should be peepholes
2313 to recognize the resulting sequence of insns. */
2314 if (total_pushed < 0)
2315 {
2316 emit_insn_before (gen_add2_insn (stack_pointer_rtx,
2317 GEN_INT (- total_pushed)),
2318 p);
2319 break;
2320 }
2321 XEXP (dest, 0)
2322 = plus_constant (stack_pointer_rtx, total_pushed);
2323 }
2324 }
2325 #endif
2326
2327 /* Detect and delete no-op move instructions
2328 resulting from not allocating a parameter in a register. */
2329
2330 if (GET_CODE (body) == SET
2331 && (SET_DEST (body) == SET_SRC (body)
2332 || (GET_CODE (SET_DEST (body)) == MEM
2333 && GET_CODE (SET_SRC (body)) == MEM
2334 && rtx_equal_p (SET_SRC (body), SET_DEST (body))))
2335 && ! (GET_CODE (SET_DEST (body)) == MEM
2336 && MEM_VOLATILE_P (SET_DEST (body)))
2337 && ! (GET_CODE (SET_SRC (body)) == MEM
2338 && MEM_VOLATILE_P (SET_SRC (body))))
2339 delete_computation (insn);
2340
2341 /* Detect and ignore no-op move instructions
2342 resulting from smart or fortuitous register allocation. */
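/* E.g. (hypothetical): (set (reg 3) (reg 3)), or any move whose
   source and destination resolve through true_regnum to the same
   hard register, performs no work and is deleted outright.  */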
2343
2344 else if (GET_CODE (body) == SET)
2345 {
2346 int sreg = true_regnum (SET_SRC (body));
2347 int dreg = true_regnum (SET_DEST (body));
2348
2349 if (sreg == dreg && sreg >= 0)
2350 delete_insn (insn);
2351 else if (sreg >= 0 && dreg >= 0)
2352 {
2353 rtx trial;
2354 rtx tem = find_equiv_reg (NULL_RTX, insn, 0,
2355 sreg, NULL_PTR, dreg,
2356 GET_MODE (SET_SRC (body)));
2357
2358 if (tem != 0
2359 && GET_MODE (tem) == GET_MODE (SET_DEST (body)))
2360 {
2361 /* DREG may have been the target of a REG_DEAD note in
2362 the insn which makes INSN redundant. If so, reorg
2363 would still think it is dead. So search for such a
2364 note and delete it if we find it. */
2365 if (! find_regno_note (insn, REG_UNUSED, dreg))
2366 for (trial = prev_nonnote_insn (insn);
2367 trial && GET_CODE (trial) != CODE_LABEL;
2368 trial = prev_nonnote_insn (trial))
2369 if (find_regno_note (trial, REG_DEAD, dreg))
2370 {
2371 remove_death (dreg, trial);
2372 break;
2373 }
2374
2375 /* Deleting insn could lose a death-note for SREG. */
2376 if ((trial = find_regno_note (insn, REG_DEAD, sreg)))
2377 {
2378 /* Change this into a USE so that we won't emit
2379 code for it, but still can keep the note. */
2380 PATTERN (insn)
2381 = gen_rtx_USE (VOIDmode, XEXP (trial, 0));
2382 INSN_CODE (insn) = -1;
2383 /* Remove all reg notes but the REG_DEAD one. */
2384 REG_NOTES (insn) = trial;
2385 XEXP (trial, 1) = NULL_RTX;
2386 }
2387 else
2388 delete_insn (insn);
2389 }
2390 }
2391 else if (dreg >= 0 && CONSTANT_P (SET_SRC (body))
2392 && find_equiv_reg (SET_SRC (body), insn, 0, dreg,
2393 NULL_PTR, 0,
2394 GET_MODE (SET_DEST (body))))
2395 {
2396 /* This handles the case where we have two consecutive
2397 assignments of the same constant to pseudos that didn't
2398 get a hard reg. Each SET from the constant will be
2399 converted into a SET of the spill register and an
2400 output reload will be made following it. This produces
2401 two loads of the same constant into the same spill
2402 register. */
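/* Hypothetical instance: "r100 = 5; r101 = 5;" where neither pseudo
   got a hard register and both were assigned the same spill
   register; the second load of 5 into that register is redundant.  */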
2403
2404 rtx in_insn = insn;
2405
2406 /* Look back for a death note for the first reg.
2407 If there is one, it is no longer accurate. */
2408 while (in_insn && GET_CODE (in_insn) != CODE_LABEL)
2409 {
2410 if ((GET_CODE (in_insn) == INSN
2411 || GET_CODE (in_insn) == JUMP_INSN)
2412 && find_regno_note (in_insn, REG_DEAD, dreg))
2413 {
2414 remove_death (dreg, in_insn);
2415 break;
2416 }
2417 in_insn = PREV_INSN (in_insn);
2418 }
2419
2420 /* Delete the second load of the value. */
2421 delete_insn (insn);
2422 }
2423 }
2424 else if (GET_CODE (body) == PARALLEL)
2425 {
2426 /* If each part is a set between two identical registers or
2427 a USE or CLOBBER, delete the insn. */
2428 int i, sreg, dreg;
2429 rtx tem;
2430
2431 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
2432 {
2433 tem = XVECEXP (body, 0, i);
2434 if (GET_CODE (tem) == USE || GET_CODE (tem) == CLOBBER)
2435 continue;
2436
2437 if (GET_CODE (tem) != SET
2438 || (sreg = true_regnum (SET_SRC (tem))) < 0
2439 || (dreg = true_regnum (SET_DEST (tem))) < 0
2440 || dreg != sreg)
2441 break;
2442 }
2443
2444 if (i < 0)
2445 delete_insn (insn);
2446 }
2447 /* Also delete insns to store bit fields if they are no-ops. */
2448 /* Not worth the hair to detect this in the big-endian case. */
2449 else if (! BYTES_BIG_ENDIAN
2450 && GET_CODE (body) == SET
2451 && GET_CODE (SET_DEST (body)) == ZERO_EXTRACT
2452 && XEXP (SET_DEST (body), 2) == const0_rtx
2453 && XEXP (SET_DEST (body), 0) == SET_SRC (body)
2454 && ! (GET_CODE (SET_SRC (body)) == MEM
2455 && MEM_VOLATILE_P (SET_SRC (body))))
2456 delete_insn (insn);
2457 }
2458 insn = next;
2459 }
2460 }
2461
2462 /* See if there is still a NOTE_INSN_FUNCTION_END in this function.
2463 If so indicate that this function can drop off the end by returning
2464 1, else return 0.
2465
2466 CHECK_DELETED indicates whether we must check if the note being
2467 searched for has the deleted flag set.
2468
2469 DELETE_FINAL_NOTE indicates whether we should delete the note
2470 if we find it. */
2471
2472 static int
2473 calculate_can_reach_end (last, check_deleted, delete_final_note)
2474 rtx last;
2475 int check_deleted;
2476 int delete_final_note;
2477 {
2478 rtx insn = last;
2479 int n_labels = 1;
2480
2481 while (insn != NULL_RTX)
2482 {
2483 int ok = 0;
2484
2485 /* One label can follow the end-note: the return label. */
2486 if (GET_CODE (insn) == CODE_LABEL && n_labels-- > 0)
2487 ok = 1;
2488 /* Ordinary insns can follow it if returning a structure. */
2489 else if (GET_CODE (insn) == INSN)
2490 ok = 1;
2491 /* If the machine uses explicit RETURN insns and no epilogue,
2492 then one of them follows the note. */
2493 else if (GET_CODE (insn) == JUMP_INSN
2494 && GET_CODE (PATTERN (insn)) == RETURN)
2495 ok = 1;
2496 /* A barrier can follow the return insn. */
2497 else if (GET_CODE (insn) == BARRIER)
2498 ok = 1;
2499 /* Other kinds of notes can follow also. */
2500 else if (GET_CODE (insn) == NOTE
2501 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_FUNCTION_END)
2502 ok = 1;
2503
2504 if (ok != 1)
2505 break;
2506
2507 insn = PREV_INSN (insn);
2508 }
2509
2510 /* See if we backed up to the appropriate type of note. */
2511 if (insn != NULL_RTX
2512 && GET_CODE (insn) == NOTE
2513 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_END
2514 && (check_deleted == 0
2515 || ! INSN_DELETED_P (insn)))
2516 {
2517 if (delete_final_note)
2518 delete_insn (insn);
2519 return 1;
2520 }
2521
2522 return 0;
2523 }
2524
2525 /* LOOP_START is a NOTE_INSN_LOOP_BEG note that is followed by an unconditional
2526 jump. Assume that this unconditional jump is to the exit test code. If
2527 the code is sufficiently simple, make a copy of it before LOOP_START,
2528 followed by a jump to the exit of the loop. Then delete the unconditional
2529 jump after LOOP_START.
2530
2531 Return 1 if we made the change, else 0.
2532
2533 This is only safe immediately after a regscan pass because it uses the
2534 values of regno_first_uid and regno_last_uid. */
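/* At the source level this is the familiar loop rotation
   (hypothetical):
       while (cond) body;   =>   if (cond) do body; while (cond);
   so that each iteration of the loop proper runs only the single
   test at the bottom.  */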
2535
2536 static int
2537 duplicate_loop_exit_test (loop_start)
2538 rtx loop_start;
2539 {
2540 rtx insn, set, reg, p, link;
2541 rtx copy = 0;
2542 int num_insns = 0;
2543 rtx exitcode = NEXT_INSN (JUMP_LABEL (next_nonnote_insn (loop_start)));
2544 rtx lastexit;
2545 int max_reg = max_reg_num ();
2546 rtx *reg_map = 0;
2547
2548 /* Scan the exit code. We do not perform this optimization if any insn:
2549
2550 is a CALL_INSN
2551 is a CODE_LABEL
2552 has a REG_RETVAL or REG_LIBCALL note (hard to adjust)
2553 is a NOTE_INSN_LOOP_BEG because this means we have a nested loop
2554 is a NOTE_INSN_BLOCK_{BEG,END} because duplicating these notes
2555 is not valid.
2556
2557 We also do not do this if we find an insn with ASM_OPERANDS. While
2558 this restriction should not be necessary, copying an insn with
2559 ASM_OPERANDS can confuse asm_noperands in some cases.
2560
2561 Also, don't do this if the exit code is more than 20 insns. */
2562
2563 for (insn = exitcode;
2564 insn
2565 && ! (GET_CODE (insn) == NOTE
2566 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END);
2567 insn = NEXT_INSN (insn))
2568 {
2569 switch (GET_CODE (insn))
2570 {
2571 case CODE_LABEL:
2572 case CALL_INSN:
2573 return 0;
2574 case NOTE:
2575 /* We could be in front of the wrong NOTE_INSN_LOOP_END if there is
2576 a jump immediately after the loop start that branches outside
2577 the loop but within an outer loop, near the exit test.
2578 If we copied this exit test and created a phony
2579 NOTE_INSN_LOOP_VTOP, this could make instructions immediately
2580 before the exit test look as if they could safely be moved
2581 out of the loop even though they may never actually be executed.
2582 This can be avoided by checking here for NOTE_INSN_LOOP_CONT. */
2583
2584 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
2585 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2586 return 0;
2587
2588 if (optimize < 2
2589 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG
2590 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END))
2591 /* If we were to duplicate this code, we would not move
2592 the BLOCK notes, and so debugging the moved code would
2593 be difficult. Thus, we only move the code with -O2 or
2594 higher. */
2595 return 0;
2596
2597 break;
2598 case JUMP_INSN:
2599 case INSN:
2600 /* The code below would grossly mishandle REG_WAS_0 notes,
2601 so get rid of them here. */
2602 while ((p = find_reg_note (insn, REG_WAS_0, NULL_RTX)) != 0)
2603 remove_note (insn, p);
2604 if (++num_insns > 20
2605 || find_reg_note (insn, REG_RETVAL, NULL_RTX)
2606 || find_reg_note (insn, REG_LIBCALL, NULL_RTX)
2607 || asm_noperands (PATTERN (insn)) > 0)
2608 return 0;
2609 break;
2610 default:
2611 break;
2612 }
2613 }
2614
2615 /* Unless INSN is zero, we can do the optimization. */
2616 if (insn == 0)
2617 return 0;
2618
2619 lastexit = insn;
2620
2621 /* See if any insn sets a register only used in the loop exit code and
2622 not a user variable. If so, replace it with a new register. */
2623 for (insn = exitcode; insn != lastexit; insn = NEXT_INSN (insn))
2624 if (GET_CODE (insn) == INSN
2625 && (set = single_set (insn)) != 0
2626 && ((reg = SET_DEST (set), GET_CODE (reg) == REG)
2627 || (GET_CODE (reg) == SUBREG
2628 && (reg = SUBREG_REG (reg), GET_CODE (reg) == REG)))
2629 && REGNO (reg) >= FIRST_PSEUDO_REGISTER
2630 && REGNO_FIRST_UID (REGNO (reg)) == INSN_UID (insn))
2631 {
2632 for (p = NEXT_INSN (insn); p != lastexit; p = NEXT_INSN (p))
2633 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (p))
2634 break;
2635
2636 if (p != lastexit)
2637 {
2638 /* We can do the replacement. Allocate reg_map if this is the
2639 first replacement we found. */
2640 if (reg_map == 0)
2641 {
2642 reg_map = (rtx *) alloca (max_reg * sizeof (rtx));
2643 bzero ((char *) reg_map, max_reg * sizeof (rtx));
2644 }
2645
2646 REG_LOOP_TEST_P (reg) = 1;
2647
2648 reg_map[REGNO (reg)] = gen_reg_rtx (GET_MODE (reg));
2649 }
2650 }
2651
2652 /* Now copy each insn. */
2653 for (insn = exitcode; insn != lastexit; insn = NEXT_INSN (insn))
2654 switch (GET_CODE (insn))
2655 {
2656 case BARRIER:
2657 copy = emit_barrier_before (loop_start);
2658 break;
2659 case NOTE:
2660 /* Only copy line-number notes. */
2661 if (NOTE_LINE_NUMBER (insn) >= 0)
2662 {
2663 copy = emit_note_before (NOTE_LINE_NUMBER (insn), loop_start);
2664 NOTE_SOURCE_FILE (copy) = NOTE_SOURCE_FILE (insn);
2665 }
2666 break;
2667
2668 case INSN:
2669 copy = emit_insn_before (copy_rtx (PATTERN (insn)), loop_start);
2670 if (reg_map)
2671 replace_regs (PATTERN (copy), reg_map, max_reg, 1);
2672
2673 mark_jump_label (PATTERN (copy), copy, 0);
2674
2675 /* Copy all REG_NOTES except REG_LABEL since mark_jump_label will
2676 make them. */
2677 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2678 if (REG_NOTE_KIND (link) != REG_LABEL)
2679 REG_NOTES (copy)
2680 = copy_rtx (gen_rtx_EXPR_LIST (REG_NOTE_KIND (link),
2681 XEXP (link, 0),
2682 REG_NOTES (copy)));
2683 if (reg_map && REG_NOTES (copy))
2684 replace_regs (REG_NOTES (copy), reg_map, max_reg, 1);
2685 break;
2686
2687 case JUMP_INSN:
2688 copy = emit_jump_insn_before (copy_rtx (PATTERN (insn)), loop_start);
2689 if (reg_map)
2690 replace_regs (PATTERN (copy), reg_map, max_reg, 1);
2691 mark_jump_label (PATTERN (copy), copy, 0);
2692 if (REG_NOTES (insn))
2693 {
2694 REG_NOTES (copy) = copy_rtx (REG_NOTES (insn));
2695 if (reg_map)
2696 replace_regs (REG_NOTES (copy), reg_map, max_reg, 1);
2697 }
2698
2699 /* If this is a simple jump, add it to the jump chain. */
2700
2701 if (INSN_UID (copy) < max_jump_chain && JUMP_LABEL (copy)
2702 && simplejump_p (copy))
2703 {
2704 jump_chain[INSN_UID (copy)]
2705 = jump_chain[INSN_UID (JUMP_LABEL (copy))];
2706 jump_chain[INSN_UID (JUMP_LABEL (copy))] = copy;
2707 }
2708 break;
2709
2710 default:
2711 abort ();
2712 }
2713
2714 /* Now clean up by emitting a jump to the end label and deleting the jump
2715 at the start of the loop. */
2716 if (! copy || GET_CODE (copy) != BARRIER)
2717 {
2718 copy = emit_jump_insn_before (gen_jump (get_label_after (insn)),
2719 loop_start);
2720 mark_jump_label (PATTERN (copy), copy, 0);
2721 if (INSN_UID (copy) < max_jump_chain
2722 && INSN_UID (JUMP_LABEL (copy)) < max_jump_chain)
2723 {
2724 jump_chain[INSN_UID (copy)]
2725 = jump_chain[INSN_UID (JUMP_LABEL (copy))];
2726 jump_chain[INSN_UID (JUMP_LABEL (copy))] = copy;
2727 }
2728 emit_barrier_before (loop_start);
2729 }
2730
2731 /* Mark the exit code as the virtual top of the converted loop. */
2732 emit_note_before (NOTE_INSN_LOOP_VTOP, exitcode);
2733
2734 delete_insn (next_nonnote_insn (loop_start));
2735
2736 return 1;
2737 }
2738 \f
2739 /* Move all block-beg, block-end, loop-beg, loop-cont, loop-vtop, and
2740 loop-end notes between START and END out before START. Assume that
2741 END is not such a note. START may be such a note. Returns the value
2742 of the new starting insn, which may be different if the original start
2743 was such a note. */
2744
2745 rtx
2746 squeeze_notes (start, end)
2747 rtx start, end;
2748 {
2749 rtx insn;
2750 rtx next;
2751
2752 for (insn = start; insn != end; insn = next)
2753 {
2754 next = NEXT_INSN (insn);
2755 if (GET_CODE (insn) == NOTE
2756 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END
2757 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG
2758 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
2759 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
2760 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT
2761 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_VTOP))
2762 {
2763 if (insn == start)
2764 start = next;
2765 else
2766 {
2767 rtx prev = PREV_INSN (insn);
2768 PREV_INSN (insn) = PREV_INSN (start);
2769 NEXT_INSN (insn) = start;
2770 NEXT_INSN (PREV_INSN (insn)) = insn;
2771 PREV_INSN (NEXT_INSN (insn)) = insn;
2772 NEXT_INSN (prev) = next;
2773 PREV_INSN (next) = prev;
2774 }
2775 }
2776 }
2777
2778 return start;
2779 }
2780 \f
2781 /* Compare the instructions before insn E1 with those before E2
2782 to find an opportunity for cross jumping.
2783 (This means detecting identical sequences of insns followed by
2784 jumps to the same place, or followed by a label and a jump
2785 to that label, and replacing one with a jump to the other.)
2786
2787 Assume E1 is a jump that jumps to label E2
2788 (that is not always true but it might as well be).
2789 Find the longest possible equivalent sequences
2790 and store the first insns of those sequences into *F1 and *F2.
2791 Store zero there if no equivalent preceding instructions are found.
2792
2793 We give up if we find a label in stream 1.
2794 Actually we could transfer that label into stream 2. */
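/* A minimal sketch (hypothetical): given
       x = a + b;  goto L;     ...     x = a + b;  L: ...
   the two "x = a + b" insns match, so *F1 and *F2 are set to point
   at them, and the caller can replace the first copy with a jump to
   a label placed before the second (see do_cross_jump).  */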
2795
2796 static void
2797 find_cross_jump (e1, e2, minimum, f1, f2)
2798 rtx e1, e2;
2799 int minimum;
2800 rtx *f1, *f2;
2801 {
2802 register rtx i1 = e1, i2 = e2;
2803 register rtx p1, p2;
2804 int lose = 0;
2805
2806 rtx last1 = 0, last2 = 0;
2807 rtx afterlast1 = 0, afterlast2 = 0;
2808
2809 *f1 = 0;
2810 *f2 = 0;
2811
2812 while (1)
2813 {
2814 i1 = prev_nonnote_insn (i1);
2815
2816 i2 = PREV_INSN (i2);
2817 while (i2 && (GET_CODE (i2) == NOTE || GET_CODE (i2) == CODE_LABEL))
2818 i2 = PREV_INSN (i2);
2819
2820 if (i1 == 0)
2821 break;
2822
2823 /* Don't allow the range of insns preceding E1 or E2
2824 to include the other (E2 or E1). */
2825 if (i2 == e1 || i1 == e2)
2826 break;
2827
2828 /* If we will get to this code by jumping, those jumps will be
2829 tensioned to go directly to the new label (before I2),
2830 so this cross-jumping won't cost extra. So reduce the minimum. */
2831 if (GET_CODE (i1) == CODE_LABEL)
2832 {
2833 --minimum;
2834 break;
2835 }
2836
2837 if (i2 == 0 || GET_CODE (i1) != GET_CODE (i2))
2838 break;
2839
2840 /* Avoid moving insns across EH regions if either of the insns
2841 can throw. */
2842 if (flag_exceptions
2843 && (asynchronous_exceptions || GET_CODE (i1) == CALL_INSN)
2844 && !in_same_eh_region (i1, i2))
2845 break;
2846
2847 p1 = PATTERN (i1);
2848 p2 = PATTERN (i2);
2849
2850 /* If this is a CALL_INSN, compare register usage information.
2851 If we don't check this on stack register machines, the two
2852 CALL_INSNs might be merged leaving reg-stack.c with mismatching
2853 numbers of stack registers in the same basic block.
2854 If we don't check this on machines with delay slots, a delay slot may
2855 be filled that clobbers a parameter expected by the subroutine.
2856
2857 ??? We take the simple route for now and assume that if they're
2858 equal, they were constructed identically. */
2859
2860 if (GET_CODE (i1) == CALL_INSN
2861 && ! rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
2862 CALL_INSN_FUNCTION_USAGE (i2)))
2863 lose = 1;
2864
2865 #ifdef STACK_REGS
2866 /* If cross_jump_death_matters is not 0, the insn's mode
2867 indicates whether or not the insn contains any stack-like
2868 regs. */
2869
2870 if (!lose && cross_jump_death_matters && stack_regs_mentioned (i1))
2871 {
2872 /* If register stack conversion has already been done, then
2873 death notes must also be compared before it is certain that
2874 the two instruction streams match. */
2875
2876 rtx note;
2877 HARD_REG_SET i1_regset, i2_regset;
2878
2879 CLEAR_HARD_REG_SET (i1_regset);
2880 CLEAR_HARD_REG_SET (i2_regset);
2881
2882 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
2883 if (REG_NOTE_KIND (note) == REG_DEAD
2884 && STACK_REG_P (XEXP (note, 0)))
2885 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
2886
2887 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
2888 if (REG_NOTE_KIND (note) == REG_DEAD
2889 && STACK_REG_P (XEXP (note, 0)))
2890 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
2891
2892 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
2893
2894 lose = 1;
2895
2896 done:
2897 ;
2898 }
2899 #endif
2900
2901 /* Don't allow old-style asm or volatile extended asms to be accepted
2902 for cross jumping purposes. It is conceptually correct to allow
2903 them, since cross-jumping preserves the dynamic instruction order
2904 even though it is changing the static instruction order. However,
2905 if an asm is being used to emit an assembler pseudo-op, such as
2906 the MIPS `.set reorder' pseudo-op, then the static instruction order
2907 matters and it must be preserved. */
2908 if (GET_CODE (p1) == ASM_INPUT || GET_CODE (p2) == ASM_INPUT
2909 || (GET_CODE (p1) == ASM_OPERANDS && MEM_VOLATILE_P (p1))
2910 || (GET_CODE (p2) == ASM_OPERANDS && MEM_VOLATILE_P (p2)))
2911 lose = 1;
2912
2913 if (lose || GET_CODE (p1) != GET_CODE (p2)
2914 || ! rtx_renumbered_equal_p (p1, p2))
2915 {
2916 /* The following code helps take care of G++ cleanups. */
2917 rtx equiv1;
2918 rtx equiv2;
2919
2920 if (!lose && GET_CODE (p1) == GET_CODE (p2)
2921 && ((equiv1 = find_reg_note (i1, REG_EQUAL, NULL_RTX)) != 0
2922 || (equiv1 = find_reg_note (i1, REG_EQUIV, NULL_RTX)) != 0)
2923 && ((equiv2 = find_reg_note (i2, REG_EQUAL, NULL_RTX)) != 0
2924 || (equiv2 = find_reg_note (i2, REG_EQUIV, NULL_RTX)) != 0)
2925 /* If the equivalences are not to a constant, they may
2926 reference pseudos that no longer exist, so we can't
2927 use them. */
2928 && CONSTANT_P (XEXP (equiv1, 0))
2929 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
2930 {
2931 rtx s1 = single_set (i1);
2932 rtx s2 = single_set (i2);
2933 if (s1 != 0 && s2 != 0
2934 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
2935 {
2936 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
2937 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
2938 if (! rtx_renumbered_equal_p (p1, p2))
2939 cancel_changes (0);
2940 else if (apply_change_group ())
2941 goto win;
2942 }
2943 }
2944
2945 /* Insns fail to match; cross jumping is limited to the following
2946 insns. */
2947
2948 #ifdef HAVE_cc0
2949 /* Don't allow the insn after a compare to be shared by
2950 cross-jumping unless the compare is also shared.
2951 Here, if either of these non-matching insns is a compare,
2952 exclude the following insn from possible cross-jumping. */
2953 if (sets_cc0_p (p1) || sets_cc0_p (p2))
2954 last1 = afterlast1, last2 = afterlast2, ++minimum;
2955 #endif
2956
2957 /* If cross-jumping here will feed a jump-around-jump
2958 optimization, this jump won't cost extra, so reduce
2959 the minimum. */
2960 if (GET_CODE (i1) == JUMP_INSN
2961 && JUMP_LABEL (i1)
2962 && prev_real_insn (JUMP_LABEL (i1)) == e1)
2963 --minimum;
2964 break;
2965 }
2966
2967 win:
2968 if (GET_CODE (p1) != USE && GET_CODE (p1) != CLOBBER)
2969 {
2970 /* Ok, this insn is potentially includable in a cross-jump here. */
2971 afterlast1 = last1, afterlast2 = last2;
2972 last1 = i1, last2 = i2, --minimum;
2973 }
2974 }
2975
2976 if (minimum <= 0 && last1 != 0 && last1 != e1)
2977 *f1 = last1, *f2 = last2;
2978 }
2979
2980 static void
2981 do_cross_jump (insn, newjpos, newlpos)
2982 rtx insn, newjpos, newlpos;
2983 {
2984 /* Find an existing label at this point
2985 or make a new one if there is none. */
2986 register rtx label = get_label_before (newlpos);
2987
2988 /* Make the same jump insn jump to the new point. */
2989 if (GET_CODE (PATTERN (insn)) == RETURN)
2990 {
2991 /* Remove from jump chain of returns. */
2992 delete_from_jump_chain (insn);
2993 /* Change the insn. */
2994 PATTERN (insn) = gen_jump (label);
2995 INSN_CODE (insn) = -1;
2996 JUMP_LABEL (insn) = label;
2997 LABEL_NUSES (label)++;
2998 /* Add to the new label's jump chain. */
2999 if (INSN_UID (label) < max_jump_chain
3000 && INSN_UID (insn) < max_jump_chain)
3001 {
3002 jump_chain[INSN_UID (insn)] = jump_chain[INSN_UID (label)];
3003 jump_chain[INSN_UID (label)] = insn;
3004 }
3005 }
3006 else
3007 redirect_jump (insn, label);
3008
3009 /* Delete the matching insns before the jump. Also, remove any REG_EQUAL
3010 or REG_EQUIV note in the NEWLPOS stream that isn't also present in
3011 the NEWJPOS stream. */
3012
3013 while (newjpos != insn)
3014 {
3015 rtx lnote;
3016
3017 for (lnote = REG_NOTES (newlpos); lnote; lnote = XEXP (lnote, 1))
3018 if ((REG_NOTE_KIND (lnote) == REG_EQUAL
3019 || REG_NOTE_KIND (lnote) == REG_EQUIV)
3020 && ! find_reg_note (newjpos, REG_EQUAL, XEXP (lnote, 0))
3021 && ! find_reg_note (newjpos, REG_EQUIV, XEXP (lnote, 0)))
3022 remove_note (newlpos, lnote);
3023
3024 delete_insn (newjpos);
3025 newjpos = next_real_insn (newjpos);
3026 newlpos = next_real_insn (newlpos);
3027 }
3028 }
3029 \f
3030 /* Return the label before INSN, or put a new label there. */
3031
3032 rtx
3033 get_label_before (insn)
3034 rtx insn;
3035 {
3036 rtx label;
3037
3038 /* Find an existing label at this point
3039 or make a new one if there is none. */
3040 label = prev_nonnote_insn (insn);
3041
3042 if (label == 0 || GET_CODE (label) != CODE_LABEL)
3043 {
3044 rtx prev = PREV_INSN (insn);
3045
3046 label = gen_label_rtx ();
3047 emit_label_after (label, prev);
3048 LABEL_NUSES (label) = 0;
3049 }
3050 return label;
3051 }
3052
3053 /* Return the label after INSN, or put a new label there. */
3054
3055 rtx
3056 get_label_after (insn)
3057 rtx insn;
3058 {
3059 rtx label;
3060
3061 /* Find an existing label at this point
3062 or make a new one if there is none. */
3063 label = next_nonnote_insn (insn);
3064
3065 if (label == 0 || GET_CODE (label) != CODE_LABEL)
3066 {
3067 label = gen_label_rtx ();
3068 emit_label_after (label, insn);
3069 LABEL_NUSES (label) = 0;
3070 }
3071 return label;
3072 }
3073 \f
3074 /* Return 1 if INSN is a jump that jumps to right after TARGET
3075 only on the condition that TARGET itself would drop through.
3076 Assumes that TARGET is a conditional jump. */
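/* E.g. (hypothetical): TARGET is "if (a < b) goto L1" and INSN is
   "if (a >= b) goto L2", where L2 is the label just after TARGET.
   INSN reaches the code after TARGET exactly when TARGET would have
   fallen through, so jump_back_p returns 1.  */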
3077
3078 static int
3079 jump_back_p (insn, target)
3080 rtx insn, target;
3081 {
3082 rtx cinsn, ctarget;
3083 enum rtx_code codei, codet;
3084
3085 if (simplejump_p (insn) || ! condjump_p (insn)
3086 || simplejump_p (target)
3087 || target != prev_real_insn (JUMP_LABEL (insn)))
3088 return 0;
3089
3090 cinsn = XEXP (SET_SRC (PATTERN (insn)), 0);
3091 ctarget = XEXP (SET_SRC (PATTERN (target)), 0);
3092
3093 codei = GET_CODE (cinsn);
3094 codet = GET_CODE (ctarget);
3095
3096 if (XEXP (SET_SRC (PATTERN (insn)), 1) == pc_rtx)
3097 {
3098 if (! can_reverse_comparison_p (cinsn, insn))
3099 return 0;
3100 codei = reverse_condition (codei);
3101 }
3102
3103 if (XEXP (SET_SRC (PATTERN (target)), 2) == pc_rtx)
3104 {
3105 if (! can_reverse_comparison_p (ctarget, target))
3106 return 0;
3107 codet = reverse_condition (codet);
3108 }
3109
3110 return (codei == codet
3111 && rtx_renumbered_equal_p (XEXP (cinsn, 0), XEXP (ctarget, 0))
3112 && rtx_renumbered_equal_p (XEXP (cinsn, 1), XEXP (ctarget, 1)));
3113 }
3114 \f
3115 /* Given a comparison, COMPARISON, inside a conditional jump insn, INSN,
3116 return non-zero if it is safe to reverse this comparison. It is if our
3117 floating-point is not IEEE, if this is an NE or EQ comparison, or if
3118 this is known to be an integer comparison. */
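/* E.g. with IEEE floats, "a < b" and "a >= b" are both false when
   either operand is a NaN, so rewriting LT as GE can change the
   result; rewriting NE as EQ (or vice versa) never can.  */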
3119
3120 int
3121 can_reverse_comparison_p (comparison, insn)
3122 rtx comparison;
3123 rtx insn;
3124 {
3125 rtx arg0;
3126
3127 /* If this is not actually a comparison, we can't reverse it. */
3128 if (GET_RTX_CLASS (GET_CODE (comparison)) != '<')
3129 return 0;
3130
3131 if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3132 /* If this is an NE comparison, it is safe to reverse it to an EQ
3133 comparison and vice versa, even for floating point. If no operands
3134 are NaNs, the reversal is valid. If some operand is a NaN, EQ is
3135 always false and NE is always true, so the reversal is also valid. */
3136 || flag_fast_math
3137 || GET_CODE (comparison) == NE
3138 || GET_CODE (comparison) == EQ)
3139 return 1;
3140
3141 arg0 = XEXP (comparison, 0);
3142
3143 /* Make sure ARG0 is one of the actual objects being compared. If we
3144 can't do this, we can't be sure the comparison can be reversed.
3145
3146 Handle cc0 and a MODE_CC register. */
3147 if ((GET_CODE (arg0) == REG && GET_MODE_CLASS (GET_MODE (arg0)) == MODE_CC)
3148 #ifdef HAVE_cc0
3149 || arg0 == cc0_rtx
3150 #endif
3151 )
3152 {
3153 rtx prev = prev_nonnote_insn (insn);
3154 rtx set;
3155
3156 /* If the comparison itself was a loop invariant, it could have been
3157 hoisted out of the loop. If we proceed to unroll such a loop, then
3158 we may not be able to find the comparison when copying the loop.
3159
3160 Returning zero in that case is the safe thing to do. */
3161 if (prev == 0)
3162 return 0;
3163
3164 set = single_set (prev);
3165 if (set == 0 || SET_DEST (set) != arg0)
3166 return 0;
3167
3168 arg0 = SET_SRC (set);
3169
3170 if (GET_CODE (arg0) == COMPARE)
3171 arg0 = XEXP (arg0, 0);
3172 }
3173
3174 /* We can reverse this if ARG0 is a CONST_INT or if its mode is
3175 not VOIDmode and neither a MODE_CC nor MODE_FLOAT type. */
3176 return (GET_CODE (arg0) == CONST_INT
3177 || (GET_MODE (arg0) != VOIDmode
3178 && GET_MODE_CLASS (GET_MODE (arg0)) != MODE_CC
3179 && GET_MODE_CLASS (GET_MODE (arg0)) != MODE_FLOAT));
3180 }
3181
3182 /* Given an rtx-code for a comparison, return the code
3183 for the negated comparison.
3184 WATCH OUT! reverse_condition is not safe to use on a jump
3185 that might be acting on the results of an IEEE floating point comparison,
3186 because of the special treatment of non-signaling NaNs in comparisons.
3187 Use can_reverse_comparison_p to be sure. */
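/* E.g. reverse_condition (GT) == LE and reverse_condition (LTU) == GEU.
   With a NaN operand both GT and LE are false, which is why this is
   unsafe for IEEE floating point; see above.  */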
3188
3189 enum rtx_code
3190 reverse_condition (code)
3191 enum rtx_code code;
3192 {
3193 switch (code)
3194 {
3195 case EQ:
3196 return NE;
3197
3198 case NE:
3199 return EQ;
3200
3201 case GT:
3202 return LE;
3203
3204 case GE:
3205 return LT;
3206
3207 case LT:
3208 return GE;
3209
3210 case LE:
3211 return GT;
3212
3213 case GTU:
3214 return LEU;
3215
3216 case GEU:
3217 return LTU;
3218
3219 case LTU:
3220 return GEU;
3221
3222 case LEU:
3223 return GTU;
3224
3225 default:
3226 abort ();
3227 return UNKNOWN;
3228 }
3229 }
3230
3231 /* Similar, but return the code when two operands of a comparison are swapped.
3232 This IS safe for IEEE floating-point. */
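/* E.g. swap_condition (GT) == LT, since "a > b" and "b < a" are
   equivalent for any operands, NaNs included.  */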
3233
3234 enum rtx_code
3235 swap_condition (code)
3236 enum rtx_code code;
3237 {
3238 switch (code)
3239 {
3240 case EQ:
3241 case NE:
3242 return code;
3243
3244 case GT:
3245 return LT;
3246
3247 case GE:
3248 return LE;
3249
3250 case LT:
3251 return GT;
3252
3253 case LE:
3254 return GE;
3255
3256 case GTU:
3257 return LTU;
3258
3259 case GEU:
3260 return LEU;
3261
3262 case LTU:
3263 return GTU;
3264
3265 case LEU:
3266 return GEU;
3267
3268 default:
3269 abort ();
3270 return UNKNOWN;
3271 }
3272 }
3273
3274 /* Given a comparison CODE, return the corresponding unsigned comparison.
3275 If CODE is an equality comparison or already an unsigned comparison,
3276 CODE is returned. */
3277
3278 enum rtx_code
3279 unsigned_condition (code)
3280 enum rtx_code code;
3281 {
3282 switch (code)
3283 {
3284 case EQ:
3285 case NE:
3286 case GTU:
3287 case GEU:
3288 case LTU:
3289 case LEU:
3290 return code;
3291
3292 case GT:
3293 return GTU;
3294
3295 case GE:
3296 return GEU;
3297
3298 case LT:
3299 return LTU;
3300
3301 case LE:
3302 return LEU;
3303
3304 default:
3305 abort ();
3306 }
3307 }
3308
3309 /* Similarly, return the signed version of a comparison. */
3310
3311 enum rtx_code
3312 signed_condition (code)
3313 enum rtx_code code;
3314 {
3315 switch (code)
3316 {
3317 case EQ:
3318 case NE:
3319 case GT:
3320 case GE:
3321 case LT:
3322 case LE:
3323 return code;
3324
3325 case GTU:
3326 return GT;
3327
3328 case GEU:
3329 return GE;
3330
3331 case LTU:
3332 return LT;
3333
3334 case LEU:
3335 return LE;
3336
3337 default:
3338 abort ();
3339 }
3340 }
3341 \f
3342 /* Return non-zero if CODE1 is more strict than CODE2, i.e., if the
3343 truth of CODE1 implies the truth of CODE2. */
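/* E.g. comparison_dominates_p (EQ, LE) == 1, since a == b implies
   a <= b; but comparison_dominates_p (LE, LT) == 0, since a <= b
   does not imply a < b.  */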
3344
3345 int
3346 comparison_dominates_p (code1, code2)
3347 enum rtx_code code1, code2;
3348 {
3349 if (code1 == code2)
3350 return 1;
3351
3352 switch (code1)
3353 {
3354 case EQ:
3355 if (code2 == LE || code2 == LEU || code2 == GE || code2 == GEU)
3356 return 1;
3357 break;
3358
3359 case LT:
3360 if (code2 == LE || code2 == NE)
3361 return 1;
3362 break;
3363
3364 case GT:
3365 if (code2 == GE || code2 == NE)
3366 return 1;
3367 break;
3368
3369 case LTU:
3370 if (code2 == LEU || code2 == NE)
3371 return 1;
3372 break;
3373
3374 case GTU:
3375 if (code2 == GEU || code2 == NE)
3376 return 1;
3377 break;
3378
3379 default:
3380 break;
3381 }
3382
3383 return 0;
3384 }
3385 \f
3386 /* Return 1 if INSN is an unconditional jump and nothing else. */
3387
3388 int
3389 simplejump_p (insn)
3390 rtx insn;
3391 {
3392 return (GET_CODE (insn) == JUMP_INSN
3393 && GET_CODE (PATTERN (insn)) == SET
3394 && GET_CODE (SET_DEST (PATTERN (insn))) == PC
3395 && GET_CODE (SET_SRC (PATTERN (insn))) == LABEL_REF);
3396 }
3397
3398 /* Return nonzero if INSN is a (possibly) conditional jump
3399 and nothing more. */
3400
3401 int
3402 condjump_p (insn)
3403 rtx insn;
3404 {
3405 register rtx x = PATTERN (insn);
3406 if (GET_CODE (x) != SET)
3407 return 0;
3408 if (GET_CODE (SET_DEST (x)) != PC)
3409 return 0;
3410 if (GET_CODE (SET_SRC (x)) == LABEL_REF)
3411 return 1;
3412 if (GET_CODE (SET_SRC (x)) != IF_THEN_ELSE)
3413 return 0;
3414 if (XEXP (SET_SRC (x), 2) == pc_rtx
3415 && (GET_CODE (XEXP (SET_SRC (x), 1)) == LABEL_REF
3416 || GET_CODE (XEXP (SET_SRC (x), 1)) == RETURN))
3417 return 1;
3418 if (XEXP (SET_SRC (x), 1) == pc_rtx
3419 && (GET_CODE (XEXP (SET_SRC (x), 2)) == LABEL_REF
3420 || GET_CODE (XEXP (SET_SRC (x), 2)) == RETURN))
3421 return 1;
3422 return 0;
3423 }
3424
3425 /* Return nonzero if INSN is a (possibly) conditional jump
3426 inside a PARALLEL, and nothing more. */
3427
3428 int
3429 condjump_in_parallel_p (insn)
3430 rtx insn;
3431 {
3432 register rtx x = PATTERN (insn);
3433
3434 if (GET_CODE (x) != PARALLEL)
3435 return 0;
3436 else
3437 x = XVECEXP (x, 0, 0);
3438
3439 if (GET_CODE (x) != SET)
3440 return 0;
3441 if (GET_CODE (SET_DEST (x)) != PC)
3442 return 0;
3443 if (GET_CODE (SET_SRC (x)) == LABEL_REF)
3444 return 1;
3445 if (GET_CODE (SET_SRC (x)) != IF_THEN_ELSE)
3446 return 0;
3447 if (XEXP (SET_SRC (x), 2) == pc_rtx
3448 && (GET_CODE (XEXP (SET_SRC (x), 1)) == LABEL_REF
3449 || GET_CODE (XEXP (SET_SRC (x), 1)) == RETURN))
3450 return 1;
3451 if (XEXP (SET_SRC (x), 1) == pc_rtx
3452 && (GET_CODE (XEXP (SET_SRC (x), 2)) == LABEL_REF
3453 || GET_CODE (XEXP (SET_SRC (x), 2)) == RETURN))
3454 return 1;
3455 return 0;
3456 }
3457
3458 /* Return the label of a conditional jump. */
3459
3460 rtx
3461 condjump_label (insn)
3462 rtx insn;
3463 {
3464 register rtx x = PATTERN (insn);
3465
3466 if (GET_CODE (x) == PARALLEL)
3467 x = XVECEXP (x, 0, 0);
3468 if (GET_CODE (x) != SET)
3469 return NULL_RTX;
3470 if (GET_CODE (SET_DEST (x)) != PC)
3471 return NULL_RTX;
3472 x = SET_SRC (x);
3473 if (GET_CODE (x) == LABEL_REF)
3474 return x;
3475 if (GET_CODE (x) != IF_THEN_ELSE)
3476 return NULL_RTX;
3477 if (XEXP (x, 2) == pc_rtx && GET_CODE (XEXP (x, 1)) == LABEL_REF)
3478 return XEXP (x, 1);
3479 if (XEXP (x, 1) == pc_rtx && GET_CODE (XEXP (x, 2)) == LABEL_REF)
3480 return XEXP (x, 2);
3481 return NULL_RTX;
3482 }
3483
3484 /* Return true if INSN is a (possibly conditional) return insn. */
3485
3486 static int
3487 returnjump_p_1 (loc, data)
3488 rtx *loc;
3489 void *data ATTRIBUTE_UNUSED;
3490 {
3491 rtx x = *loc;
3492 return GET_CODE (x) == RETURN;
3493 }
3494
3495 int
3496 returnjump_p (insn)
3497 rtx insn;
3498 {
3499 return for_each_rtx (&PATTERN (insn), returnjump_p_1, NULL);
3500 }
3501
3502 #ifdef HAVE_cc0
3503
3504 /* Return 1 if X is an RTX that does nothing but set the condition codes
3505 and CLOBBER or USE registers.
3506 Return -1 if X does explicitly set the condition codes,
3507 but also does other things. */
3508
3509 int
3510 sets_cc0_p (x)
3511 rtx x ATTRIBUTE_UNUSED;
3512 {
3513 if (GET_CODE (x) == SET && SET_DEST (x) == cc0_rtx)
3514 return 1;
3515 if (GET_CODE (x) == PARALLEL)
3516 {
3517 int i;
3518 int sets_cc0 = 0;
3519 int other_things = 0;
3520 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3521 {
3522 if (GET_CODE (XVECEXP (x, 0, i)) == SET
3523 && SET_DEST (XVECEXP (x, 0, i)) == cc0_rtx)
3524 sets_cc0 = 1;
3525 else if (GET_CODE (XVECEXP (x, 0, i)) == SET)
3526 other_things = 1;
3527 }
3528 return ! sets_cc0 ? 0 : other_things ? -1 : 1;
3529 }
3530 return 0;
3531 }
3532 #endif
3533 \f
3534 /* Follow any unconditional jump at LABEL;
3535 return the ultimate label reached by any such chain of jumps.
3536 If LABEL is not followed by a jump, return LABEL.
3537 If the chain loops or we can't find the end, return LABEL,
3538 since that tells caller to avoid changing the insn.
3539
3540 If RELOAD_COMPLETED is 0, we do not chain across a NOTE_INSN_LOOP_BEG or
3541 a USE or CLOBBER. */
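/* E.g. (hypothetical): given
       L1: goto L2;   ...   L2: goto L3;   ...   L3: ...
   follow_jumps (L1) returns L3.  At most 10 links are followed, and
   a chain that loops back to LABEL just returns LABEL.  */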
3542
3543 rtx
3544 follow_jumps (label)
3545 rtx label;
3546 {
3547 register rtx insn;
3548 register rtx next;
3549 register rtx value = label;
3550 register int depth;
3551
3552 for (depth = 0;
3553 (depth < 10
3554 && (insn = next_active_insn (value)) != 0
3555 && GET_CODE (insn) == JUMP_INSN
3556 && ((JUMP_LABEL (insn) != 0 && simplejump_p (insn))
3557 || GET_CODE (PATTERN (insn)) == RETURN)
3558 && (next = NEXT_INSN (insn))
3559 && GET_CODE (next) == BARRIER);
3560 depth++)
3561 {
3562 /* Don't chain through the insn that jumps into a loop
3563 from outside the loop,
3564 since that would create multiple loop entry jumps
3565 and prevent loop optimization. */
3566 rtx tem;
3567 if (!reload_completed)
3568 for (tem = value; tem != insn; tem = NEXT_INSN (tem))
3569 if (GET_CODE (tem) == NOTE
3570 && (NOTE_LINE_NUMBER (tem) == NOTE_INSN_LOOP_BEG
3571 /* ??? Optional. Disables some optimizations, but makes
3572 gcov output more accurate with -O. */
3573 || (flag_test_coverage && NOTE_LINE_NUMBER (tem) > 0)))
3574 return value;
3575
3576 /* If we have found a cycle, make the insn jump to itself. */
3577 if (JUMP_LABEL (insn) == label)
3578 return label;
3579
3580 tem = next_active_insn (JUMP_LABEL (insn));
3581 if (tem && (GET_CODE (PATTERN (tem)) == ADDR_VEC
3582 || GET_CODE (PATTERN (tem)) == ADDR_DIFF_VEC))
3583 break;
3584
3585 value = JUMP_LABEL (insn);
3586 }
3587 if (depth == 10)
3588 return label;
3589 return value;
3590 }
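
/* A sketch of the effect (labels and layout invented for this comment):

	jmp L1
	...
     L1: jmp L2
	barrier
	...
     L2: ...

   follow_jumps (L1) returns L2, so a caller can retarget the original
   branch in one step, e.g.

	rtx target = follow_jumps (JUMP_LABEL (insn));
	if (target != JUMP_LABEL (insn))
	  redirect_jump (insn, target);

   The depth limit of 10 keeps degenerate chains from looping forever.  */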
3591
3592 /* Assuming that field IDX of X is a vector of label_refs,
3593 replace each of them by the ultimate label reached by it.
3594 Return nonzero if a change is made.
3595 Before reload has completed, we do not chain across a NOTE_INSN_LOOP_BEG (see follow_jumps). */
3596
3597 static int
3598 tension_vector_labels (x, idx)
3599 register rtx x;
3600 register int idx;
3601 {
3602 int changed = 0;
3603 register int i;
3604 for (i = XVECLEN (x, idx) - 1; i >= 0; i--)
3605 {
3606 register rtx olabel = XEXP (XVECEXP (x, idx, i), 0);
3607 register rtx nlabel = follow_jumps (olabel);
3608 if (nlabel && nlabel != olabel)
3609 {
3610 XEXP (XVECEXP (x, idx, i), 0) = nlabel;
3611 ++LABEL_NUSES (nlabel);
3612 if (--LABEL_NUSES (olabel) == 0)
3613 delete_insn (olabel);
3614 changed = 1;
3615 }
3616 }
3617 return changed;
3618 }
3619 \f
3620 /* Find all CODE_LABELs referred to in X, and increment their use counts.
3621 If INSN is a JUMP_INSN and there is at least one CODE_LABEL referenced
3622 in INSN, then store one of them in JUMP_LABEL (INSN).
3623 If INSN is an INSN or a CALL_INSN and there is at least one CODE_LABEL
3624 referenced in INSN, add a REG_LABEL note containing that label to INSN.
3625 Also, when there are consecutive labels, canonicalize on the last of them.
3626
3627 Note that two labels separated by a loop-beginning note
3628 must be kept distinct if we have not yet done loop-optimization,
3629 because the gap between them is where loop-optimize
3630 will want to move invariant code to. CROSS_JUMP tells us
3631 that loop-optimization is done with.
3632
3633 Once reload has completed (CROSS_JUMP non-zero), we need not consider
3634 two labels distinct if they are separated by only USE or CLOBBER insns. */
3635
3636 static void
3637 mark_jump_label (x, insn, cross_jump)
3638 register rtx x;
3639 rtx insn;
3640 int cross_jump;
3641 {
3642 register RTX_CODE code = GET_CODE (x);
3643 register int i;
3644 register char *fmt;
3645
3646 switch (code)
3647 {
3648 case PC:
3649 case CC0:
3650 case REG:
3651 case SUBREG:
3652 case CONST_INT:
3653 case SYMBOL_REF:
3654 case CONST_DOUBLE:
3655 case CLOBBER:
3656 case CALL:
3657 return;
3658
3659 case MEM:
3660 /* If this is a constant-pool reference, see if it is a label. */
3661 if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3662 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
3663 mark_jump_label (get_pool_constant (XEXP (x, 0)), insn, cross_jump);
3664 break;
3665
3666 case LABEL_REF:
3667 {
3668 rtx label = XEXP (x, 0);
3669 rtx olabel = label;
3670 rtx note;
3671 rtx next;
3672
3673 if (GET_CODE (label) != CODE_LABEL)
3674 abort ();
3675
3676 /* Ignore references to labels of containing functions. */
3677 if (LABEL_REF_NONLOCAL_P (x))
3678 break;
3679
3680 /* If there are other labels following this one,
3681 replace it with the last of the consecutive labels. */
3682 for (next = NEXT_INSN (label); next; next = NEXT_INSN (next))
3683 {
3684 if (GET_CODE (next) == CODE_LABEL)
3685 label = next;
3686 else if (cross_jump && GET_CODE (next) == INSN
3687 && (GET_CODE (PATTERN (next)) == USE
3688 || GET_CODE (PATTERN (next)) == CLOBBER))
3689 continue;
3690 else if (GET_CODE (next) != NOTE)
3691 break;
3692 else if (! cross_jump
3693 && (NOTE_LINE_NUMBER (next) == NOTE_INSN_LOOP_BEG
3694 || NOTE_LINE_NUMBER (next) == NOTE_INSN_FUNCTION_END
3695 /* ??? Optional. Disables some optimizations, but
3696 makes gcov output more accurate with -O. */
3697 || (flag_test_coverage && NOTE_LINE_NUMBER (next) > 0)))
3698 break;
3699 }
3700
3701 XEXP (x, 0) = label;
3702 if (! insn || ! INSN_DELETED_P (insn))
3703 ++LABEL_NUSES (label);
3704
3705 if (insn)
3706 {
3707 if (GET_CODE (insn) == JUMP_INSN)
3708 JUMP_LABEL (insn) = label;
3709
3710 /* If we've changed OLABEL and we had a REG_LABEL note
3711 for it, update it as well. */
3712 else if (label != olabel
3713 && (note = find_reg_note (insn, REG_LABEL, olabel)) != 0)
3714 XEXP (note, 0) = label;
3715
3716 /* Otherwise, add a REG_LABEL note for LABEL unless there already
3717 is one. */
3718 else if (! find_reg_note (insn, REG_LABEL, label))
3719 {
3720 /* This code used to ignore labels which referred to dispatch
3721 tables to avoid flow.c generating worse code.
3722
3723 However, in the presence of global optimizations like
3724 gcse which call find_basic_blocks without calling
3725 life_analysis, not recording such labels will lead
3726 to compiler aborts because of inconsistencies in the
3727 flow graph. So we go ahead and record the label.
3728
3729 It may also be the case that the optimization argument
3730 is no longer valid because of the more accurate cfg
3731 we build in find_basic_blocks -- it no longer pessimizes
3732 code when it finds a REG_LABEL note. */
3733 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, label,
3734 REG_NOTES (insn));
3735 }
3736 }
3737 return;
3738 }
3739
3740 /* Do walk the labels in a vector, but not the first operand of an
3741 ADDR_DIFF_VEC. Don't set the JUMP_LABEL of a vector. */
3742 case ADDR_VEC:
3743 case ADDR_DIFF_VEC:
3744 if (! INSN_DELETED_P (insn))
3745 {
3746 int eltnum = code == ADDR_DIFF_VEC ? 1 : 0;
3747
3748 for (i = 0; i < XVECLEN (x, eltnum); i++)
3749 mark_jump_label (XVECEXP (x, eltnum, i), NULL_RTX, cross_jump);
3750 }
3751 return;
3752
3753 default:
3754 break;
3755 }
3756
3757 fmt = GET_RTX_FORMAT (code);
3758 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3759 {
3760 if (fmt[i] == 'e')
3761 mark_jump_label (XEXP (x, i), insn, cross_jump);
3762 else if (fmt[i] == 'E')
3763 {
3764 register int j;
3765 for (j = 0; j < XVECLEN (x, i); j++)
3766 mark_jump_label (XVECEXP (x, i, j), insn, cross_jump);
3767 }
3768 }
3769 }
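
/* Canonicalization example (layout invented for this comment): given the
   consecutive labels

     L4:
     L5:
	insn ...

   a (label_ref L4) inside a jump is rewritten to (label_ref L5), L5's use
   count is incremented, and JUMP_LABEL of the jump is set to L5.  Before
   loop optimization the scan stops at a NOTE_INSN_LOOP_BEG between two
   labels, so such labels remain distinct.  */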
3770
3771 /* If all INSN does is set the pc, delete it,
3772 and delete the insn that set the condition codes for it
3773 if that's what the previous thing was. */
3774
3775 void
3776 delete_jump (insn)
3777 rtx insn;
3778 {
3779 register rtx set = single_set (insn);
3780
3781 if (set && GET_CODE (SET_DEST (set)) == PC)
3782 delete_computation (insn);
3783 }
3784
3785 /* Delete INSN and recursively delete insns that compute values used only
3786 by INSN. This uses the REG_DEAD notes computed during flow analysis.
3787 If we are running before flow.c, we need do nothing since flow.c will
3788 delete dead code. We also can't know if the registers being used are
3789 dead or not at this point.
3790
3791 Otherwise, look at all our REG_DEAD notes. If a previous insn does
3792 nothing other than set a register that dies in this insn, we can delete
3793 that insn as well.
3794
3795 On machines with CC0, if CC0 is used in this insn, we may be able to
3796 delete the insn that set it. */
3797
3798 static void
3799 delete_computation (insn)
3800 rtx insn;
3801 {
3802 rtx note, next;
3803
3804 #ifdef HAVE_cc0
3805 if (reg_referenced_p (cc0_rtx, PATTERN (insn)))
3806 {
3807 rtx prev = prev_nonnote_insn (insn);
3808 /* We assume that at this stage
3809 CC's are always set explicitly
3810 and always immediately before the jump that
3811 will use them. So if the previous insn
3812 exists to set the CC's, delete it
3813 (unless it performs auto-increments, etc.). */
3814 if (prev && GET_CODE (prev) == INSN
3815 && sets_cc0_p (PATTERN (prev)))
3816 {
3817 if (sets_cc0_p (PATTERN (prev)) > 0
3818 && !FIND_REG_INC_NOTE (prev, NULL_RTX))
3819 delete_computation (prev);
3820 else
3821 /* Otherwise, show that cc0 won't be used. */
3822 REG_NOTES (prev) = gen_rtx_EXPR_LIST (REG_UNUSED,
3823 cc0_rtx, REG_NOTES (prev));
3824 }
3825 }
3826 #endif
3827
3828 #ifdef INSN_SCHEDULING
3829 /* ?!? The schedulers do not keep REG_DEAD notes accurate after
3830 reload has completed. The schedulers need to be fixed. Until
3831 they are, we must not rely on the death notes here. */
3832 if (reload_completed && flag_schedule_insns_after_reload)
3833 {
3834 delete_insn (insn);
3835 return;
3836 }
3837 #endif
3838
3839 for (note = REG_NOTES (insn); note; note = next)
3840 {
3841 rtx our_prev;
3842
3843 next = XEXP (note, 1);
3844
3845 if (REG_NOTE_KIND (note) != REG_DEAD
3846 /* Verify that the REG_NOTE is legitimate. */
3847 || GET_CODE (XEXP (note, 0)) != REG)
3848 continue;
3849
3850 for (our_prev = prev_nonnote_insn (insn);
3851 our_prev && GET_CODE (our_prev) == INSN;
3852 our_prev = prev_nonnote_insn (our_prev))
3853 {
3854 /* If we reach a SEQUENCE, it is too complex to try to
3855 do anything with it, so give up. */
3856 if (GET_CODE (PATTERN (our_prev)) == SEQUENCE)
3857 break;
3858
3859 if (GET_CODE (PATTERN (our_prev)) == USE
3860 && GET_CODE (XEXP (PATTERN (our_prev), 0)) == INSN)
3861 /* reorg creates USEs that look like this. We leave them
3862 alone because reorg needs them for its own purposes. */
3863 break;
3864
3865 if (reg_set_p (XEXP (note, 0), PATTERN (our_prev)))
3866 {
3867 if (FIND_REG_INC_NOTE (our_prev, NULL_RTX))
3868 break;
3869
3870 if (GET_CODE (PATTERN (our_prev)) == PARALLEL)
3871 {
3872 /* If we find a SET of something else, we can't
3873 delete the insn. */
3874
3875 int i;
3876
3877 for (i = 0; i < XVECLEN (PATTERN (our_prev), 0); i++)
3878 {
3879 rtx part = XVECEXP (PATTERN (our_prev), 0, i);
3880
3881 if (GET_CODE (part) == SET
3882 && SET_DEST (part) != XEXP (note, 0))
3883 break;
3884 }
3885
3886 if (i == XVECLEN (PATTERN (our_prev), 0))
3887 delete_computation (our_prev);
3888 }
3889 else if (GET_CODE (PATTERN (our_prev)) == SET
3890 && SET_DEST (PATTERN (our_prev)) == XEXP (note, 0))
3891 delete_computation (our_prev);
3892
3893 break;
3894 }
3895
3896 /* If OUR_PREV references the register that dies here, it is an
3897 additional use. Hence any prior SET isn't dead. However, this
3898 insn becomes the new place for the REG_DEAD note. */
3899 if (reg_overlap_mentioned_p (XEXP (note, 0),
3900 PATTERN (our_prev)))
3901 {
3902 XEXP (note, 1) = REG_NOTES (our_prev);
3903 REG_NOTES (our_prev) = note;
3904 break;
3905 }
3906 }
3907 }
3908
3909 delete_insn (insn);
3910 }
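
/* Deletion example (register numbers invented): with accurate REG_DEAD
   notes, deleting the conditional jump in

	(set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4)))
	(set (pc) (if_then_else (eq (reg:SI 100) (const_int 0)) ...))
					;; REG_DEAD (reg:SI 100)

   also deletes the PLUS insn, whose only purpose was to feed a register
   that dies in the jump.  Had an earlier insn referenced reg 100 without
   setting it, the REG_DEAD note would have migrated to that insn
   instead.  */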
3911 \f
3912 /* Delete insn INSN from the chain of insns and update label ref counts.
3913 May delete some following insns as a consequence; may even delete
3914 a label elsewhere and insns that follow it.
3915
3916 Returns the first insn after INSN that was not deleted. */
3917
3918 rtx
3919 delete_insn (insn)
3920 register rtx insn;
3921 {
3922 register rtx next = NEXT_INSN (insn);
3923 register rtx prev = PREV_INSN (insn);
3924 register int was_code_label = (GET_CODE (insn) == CODE_LABEL);
3925 register int dont_really_delete = 0;
3926
3927 while (next && INSN_DELETED_P (next))
3928 next = NEXT_INSN (next);
3929
3930 /* This insn is already deleted => return first following nondeleted. */
3931 if (INSN_DELETED_P (insn))
3932 return next;
3933
3934 if (was_code_label)
3935 remove_node_from_expr_list (insn, &nonlocal_goto_handler_labels);
3936
3937 /* Don't delete user-declared labels. Convert them to special NOTEs
3938 instead. */
3939 if (was_code_label && LABEL_NAME (insn) != 0
3940 && optimize && ! dont_really_delete)
3941 {
3942 PUT_CODE (insn, NOTE);
3943 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED_LABEL;
3944 NOTE_SOURCE_FILE (insn) = 0;
3945 dont_really_delete = 1;
3946 }
3947 else
3948 /* Mark this insn as deleted. */
3949 INSN_DELETED_P (insn) = 1;
3950
3951 /* If this is an unconditional jump, delete it from the jump chain. */
3952 if (simplejump_p (insn))
3953 delete_from_jump_chain (insn);
3954
3955 /* If instruction is followed by a barrier,
3956 delete the barrier too. */
3957
3958 if (next != 0 && GET_CODE (next) == BARRIER)
3959 {
3960 INSN_DELETED_P (next) = 1;
3961 next = NEXT_INSN (next);
3962 }
3963
3964 /* Patch out INSN (and the barrier, if any).  */
3965
3966 if (optimize && ! dont_really_delete)
3967 {
3968 if (prev)
3969 {
3970 NEXT_INSN (prev) = next;
3971 if (GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SEQUENCE)
3972 NEXT_INSN (XVECEXP (PATTERN (prev), 0,
3973 XVECLEN (PATTERN (prev), 0) - 1)) = next;
3974 }
3975
3976 if (next)
3977 {
3978 PREV_INSN (next) = prev;
3979 if (GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) == SEQUENCE)
3980 PREV_INSN (XVECEXP (PATTERN (next), 0, 0)) = prev;
3981 }
3982
3983 if (prev && NEXT_INSN (prev) == 0)
3984 set_last_insn (prev);
3985 }
3986
3987 /* If deleting a jump, decrement the count of the label,
3988 and delete the label if it is now unused. */
3989
3990 if (GET_CODE (insn) == JUMP_INSN && JUMP_LABEL (insn))
3991 if (--LABEL_NUSES (JUMP_LABEL (insn)) == 0)
3992 {
3993 /* This can delete NEXT or PREV,
3994 either directly if NEXT is JUMP_LABEL (INSN),
3995 or indirectly through more levels of jumps. */
3996 delete_insn (JUMP_LABEL (insn));
3997 /* I feel a little doubtful about this loop,
3998 but I see no clean and sure alternative way
3999 to find the first insn after INSN that is not now deleted.
4000 I hope this works. */
4001 while (next && INSN_DELETED_P (next))
4002 next = NEXT_INSN (next);
4003 return next;
4004 }
4005
4006 /* Likewise if we're deleting a dispatch table. */
4007
4008 if (GET_CODE (insn) == JUMP_INSN
4009 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
4010 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
4011 {
4012 rtx pat = PATTERN (insn);
4013 int i, diff_vec_p = GET_CODE (pat) == ADDR_DIFF_VEC;
4014 int len = XVECLEN (pat, diff_vec_p);
4015
4016 for (i = 0; i < len; i++)
4017 if (--LABEL_NUSES (XEXP (XVECEXP (pat, diff_vec_p, i), 0)) == 0)
4018 delete_insn (XEXP (XVECEXP (pat, diff_vec_p, i), 0));
4019 while (next && INSN_DELETED_P (next))
4020 next = NEXT_INSN (next);
4021 return next;
4022 }
4023
4024 while (prev && (INSN_DELETED_P (prev) || GET_CODE (prev) == NOTE))
4025 prev = PREV_INSN (prev);
4026
4027 /* If INSN was a label and a dispatch table follows it,
4028 delete the dispatch table. The tablejump must have gone already.
4029 It isn't useful to fall through into a table. */
4030
4031 if (was_code_label
4032 && NEXT_INSN (insn) != 0
4033 && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
4034 && (GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_VEC
4035 || GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_DIFF_VEC))
4036 next = delete_insn (NEXT_INSN (insn));
4037
4038 /* If INSN was a label, delete insns following it if now unreachable. */
4039
4040 if (was_code_label && prev && GET_CODE (prev) == BARRIER)
4041 {
4042 register RTX_CODE code;
4043 while (next != 0
4044 && (GET_RTX_CLASS (code = GET_CODE (next)) == 'i'
4045 || code == NOTE || code == BARRIER
4046 || (code == CODE_LABEL && INSN_DELETED_P (next))))
4047 {
4048 if (code == NOTE
4049 && NOTE_LINE_NUMBER (next) != NOTE_INSN_FUNCTION_END)
4050 next = NEXT_INSN (next);
4051 /* Keep going past other deleted labels to delete what follows. */
4052 else if (code == CODE_LABEL && INSN_DELETED_P (next))
4053 next = NEXT_INSN (next);
4054 else
4055 /* Note: if this deletes a jump, it can cause more
4056 deletion of unreachable code, after a different label.
4057 As long as the value from this recursive call is correct,
4058 this invocation functions correctly. */
4059 next = delete_insn (next);
4060 }
4061 }
4062
4063 return next;
4064 }
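
/* Cascade example (layout invented for this comment): calling delete_insn
   on the jump in

	jmp L9
	barrier
	...
     L9:			(LABEL_NUSES == 1)
	insn A

   also marks the barrier deleted; L9's use count then drops to zero, so
   L9 is deleted recursively, and if L9 was itself preceded by a barrier
   the now-unreachable insn A goes too.  The return value is the first
   insn after the original jump that survives.  */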
4065
4066 /* Advance from INSN until reaching something not deleted;
4067 then return that. May return INSN itself. */
4068
4069 rtx
4070 next_nondeleted_insn (insn)
4071 rtx insn;
4072 {
4073 while (INSN_DELETED_P (insn))
4074 insn = NEXT_INSN (insn);
4075 return insn;
4076 }
4077 \f
4078 /* Delete a range of insns from FROM to TO, inclusive.
4079 This is for the sake of peephole optimization, so assume
4080 that whatever these insns do will still be done by a new
4081 peephole insn that will replace them. */
4082
4083 void
4084 delete_for_peephole (from, to)
4085 register rtx from, to;
4086 {
4087 register rtx insn = from;
4088
4089 while (1)
4090 {
4091 register rtx next = NEXT_INSN (insn);
4092 register rtx prev = PREV_INSN (insn);
4093
4094 if (GET_CODE (insn) != NOTE)
4095 {
4096 INSN_DELETED_P (insn) = 1;
4097
4098 /* Patch this insn out of the chain. */
4099 /* We don't do this all at once, because we
4100 must preserve all NOTEs. */
4101 if (prev)
4102 NEXT_INSN (prev) = next;
4103
4104 if (next)
4105 PREV_INSN (next) = prev;
4106 }
4107
4108 if (insn == to)
4109 break;
4110 insn = next;
4111 }
4112
4113 /* Note that if TO is an unconditional jump
4114 we *do not* delete the BARRIER that follows,
4115 since the peephole that replaces this sequence
4116 is also an unconditional jump in that case. */
4117 }
4118 \f
4119 /* Invert the condition of the jump JUMP, and make it jump
4120 to label NLABEL instead of where it jumps now. */
4121
4122 int
4123 invert_jump (jump, nlabel)
4124 rtx jump, nlabel;
4125 {
4126 /* We have to either invert the condition and change the label or
4127 do neither. Either operation could fail. We first try to invert
4128 the jump. If that succeeds, we try changing the label. If that fails,
4129 we invert the jump back to what it was. */
4130
4131 if (! invert_exp (PATTERN (jump), jump))
4132 return 0;
4133
4134 if (redirect_jump (jump, nlabel))
4135 {
4136 if (flag_branch_probabilities)
4137 {
4138 rtx note = find_reg_note (jump, REG_BR_PROB, 0);
4139
4140 /* An inverted jump means that a probability taken becomes a
4141 probability not taken. Subtract the branch probability from the
4142 probability base to convert it back to a taken probability.
4143 (We don't flip the probability on a branch that's never taken.)  */
4144 if (note && XINT (XEXP (note, 0), 0) >= 0)
4145 XINT (XEXP (note, 0), 0) = REG_BR_PROB_BASE - XINT (XEXP (note, 0), 0);
4146 }
4147
4148 return 1;
4149 }
4150
4151 if (! invert_exp (PATTERN (jump), jump))
4152 /* This should just be putting it back the way it was. */
4153 abort ();
4154
4155 return 0;
4156 }
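
/* Probability example: with the usual REG_BR_PROB_BASE of 10000, a branch
   carrying a REG_BR_PROB note of 9000 (taken 90% of the time) ends up
   with a note of 10000 - 9000 = 1000 after inversion, since the inverted
   branch is taken exactly when the original was not.  */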
4157
4158 /* Invert the jump condition of rtx X contained in jump insn, INSN.
4159
4160 Return 1 if we can do so, 0 if we cannot find a way to do so that
4161 matches a pattern. */
4162
4163 int
4164 invert_exp (x, insn)
4165 rtx x;
4166 rtx insn;
4167 {
4168 register RTX_CODE code;
4169 register int i;
4170 register char *fmt;
4171
4172 code = GET_CODE (x);
4173
4174 if (code == IF_THEN_ELSE)
4175 {
4176 register rtx comp = XEXP (x, 0);
4177 register rtx tem;
4178
4179 /* We can do this in two ways: The preferable way, which can only
4180 be done if this is not an integer comparison, is to reverse
4181 the comparison code. Otherwise, swap the THEN-part and ELSE-part
4182 of the IF_THEN_ELSE. If we can't do either, fail. */
4183
4184 if (can_reverse_comparison_p (comp, insn)
4185 && validate_change (insn, &XEXP (x, 0),
4186 gen_rtx_fmt_ee (reverse_condition (GET_CODE (comp)),
4187 GET_MODE (comp), XEXP (comp, 0),
4188 XEXP (comp, 1)), 0))
4189 return 1;
4190
4191 tem = XEXP (x, 1);
4192 validate_change (insn, &XEXP (x, 1), XEXP (x, 2), 1);
4193 validate_change (insn, &XEXP (x, 2), tem, 1);
4194 return apply_change_group ();
4195 }
4196
4197 fmt = GET_RTX_FORMAT (code);
4198 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4199 {
4200 if (fmt[i] == 'e')
4201 if (! invert_exp (XEXP (x, i), insn))
4202 return 0;
4203 if (fmt[i] == 'E')
4204 {
4205 register int j;
4206 for (j = 0; j < XVECLEN (x, i); j++)
4207 if (!invert_exp (XVECEXP (x, i, j), insn))
4208 return 0;
4209 }
4210 }
4211
4212 return 1;
4213 }
4214 \f
4215 /* Make jump JUMP jump to label NLABEL instead of where it jumps now.
4216 If the old jump target label is unused as a result,
4217 it and the code following it may be deleted.
4218
4219 If NLABEL is zero, we are to turn the jump into a (possibly conditional)
4220 RETURN insn.
4221
4222 The return value will be 1 if the change was made, 0 if it wasn't (this
4223 can only occur for NLABEL == 0). */
4224
4225 int
4226 redirect_jump (jump, nlabel)
4227 rtx jump, nlabel;
4228 {
4229 register rtx olabel = JUMP_LABEL (jump);
4230
4231 if (nlabel == olabel)
4232 return 1;
4233
4234 if (! redirect_exp (&PATTERN (jump), olabel, nlabel, jump))
4235 return 0;
4236
4237 /* If this is an unconditional branch, delete it from the jump_chain of
4238 OLABEL and add it to the jump_chain of NLABEL (assuming both labels
4239 have UID's in range and JUMP_CHAIN is valid). */
4240 if (jump_chain && (simplejump_p (jump)
4241 || GET_CODE (PATTERN (jump)) == RETURN))
4242 {
4243 int label_index = nlabel ? INSN_UID (nlabel) : 0;
4244
4245 delete_from_jump_chain (jump);
4246 if (label_index < max_jump_chain
4247 && INSN_UID (jump) < max_jump_chain)
4248 {
4249 jump_chain[INSN_UID (jump)] = jump_chain[label_index];
4250 jump_chain[label_index] = jump;
4251 }
4252 }
4253
4254 JUMP_LABEL (jump) = nlabel;
4255 if (nlabel)
4256 ++LABEL_NUSES (nlabel);
4257
4258 if (olabel && --LABEL_NUSES (olabel) == 0)
4259 delete_insn (olabel);
4260
4261 return 1;
4262 }
4263
4264 /* Delete the instruction JUMP from any jump chain it might be on. */
4265
4266 static void
4267 delete_from_jump_chain (jump)
4268 rtx jump;
4269 {
4270 int index;
4271 rtx olabel = JUMP_LABEL (jump);
4272
4273 /* Handle unconditional jumps. */
4274 if (jump_chain && olabel != 0
4275 && INSN_UID (olabel) < max_jump_chain
4276 && simplejump_p (jump))
4277 index = INSN_UID (olabel);
4278 /* Handle return insns. */
4279 else if (jump_chain && GET_CODE (PATTERN (jump)) == RETURN)
4280 index = 0;
4281 else return;
4282
4283 if (jump_chain[index] == jump)
4284 jump_chain[index] = jump_chain[INSN_UID (jump)];
4285 else
4286 {
4287 rtx insn;
4288
4289 for (insn = jump_chain[index];
4290 insn != 0;
4291 insn = jump_chain[INSN_UID (insn)])
4292 if (jump_chain[INSN_UID (insn)] == jump)
4293 {
4294 jump_chain[INSN_UID (insn)] = jump_chain[INSN_UID (jump)];
4295 break;
4296 }
4297 }
4298 }
4299
4300 /* If NLABEL is nonzero, throughout the rtx at LOC,
4301 alter (LABEL_REF OLABEL) to (LABEL_REF NLABEL). If OLABEL is
4302 zero, alter (RETURN) to (LABEL_REF NLABEL).
4303
4304 If NLABEL is zero, alter (LABEL_REF OLABEL) to (RETURN) and check
4305 validity with validate_change. Convert (set (pc) (label_ref olabel))
4306 to (return).
4307
4308 Return 0 if we found a change we would like to make but it is invalid.
4309 Otherwise, return 1. */
4310
4311 int
4312 redirect_exp (loc, olabel, nlabel, insn)
4313 rtx *loc;
4314 rtx olabel, nlabel;
4315 rtx insn;
4316 {
4317 register rtx x = *loc;
4318 register RTX_CODE code = GET_CODE (x);
4319 register int i;
4320 register char *fmt;
4321
4322 if (code == LABEL_REF)
4323 {
4324 if (XEXP (x, 0) == olabel)
4325 {
4326 if (nlabel)
4327 XEXP (x, 0) = nlabel;
4328 else
4329 return validate_change (insn, loc, gen_rtx_RETURN (VOIDmode), 0);
4330 return 1;
4331 }
4332 }
4333 else if (code == RETURN && olabel == 0)
4334 {
4335 x = gen_rtx_LABEL_REF (VOIDmode, nlabel);
4336 if (loc == &PATTERN (insn))
4337 x = gen_rtx_SET (VOIDmode, pc_rtx, x);
4338 return validate_change (insn, loc, x, 0);
4339 }
4340
4341 if (code == SET && nlabel == 0 && SET_DEST (x) == pc_rtx
4342 && GET_CODE (SET_SRC (x)) == LABEL_REF
4343 && XEXP (SET_SRC (x), 0) == olabel)
4344 return validate_change (insn, loc, gen_rtx_RETURN (VOIDmode), 0);
4345
4346 fmt = GET_RTX_FORMAT (code);
4347 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4348 {
4349 if (fmt[i] == 'e')
4350 if (! redirect_exp (&XEXP (x, i), olabel, nlabel, insn))
4351 return 0;
4352 if (fmt[i] == 'E')
4353 {
4354 register int j;
4355 for (j = 0; j < XVECLEN (x, i); j++)
4356 if (! redirect_exp (&XVECEXP (x, i, j), olabel, nlabel, insn))
4357 return 0;
4358 }
4359 }
4360
4361 return 1;
4362 }
4363 \f
4364 /* Make jump JUMP jump to label NLABEL, assuming it used to be a tablejump.
4365
4366 If the old jump target label (before the dispatch table) becomes unused,
4367 it and the dispatch table may be deleted. In that case, find the insn
4368 before the jump references that label and delete it and logical successors
4369 too. */
4370
4371 static void
4372 redirect_tablejump (jump, nlabel)
4373 rtx jump, nlabel;
4374 {
4375 register rtx olabel = JUMP_LABEL (jump);
4376
4377 /* Add this jump to the jump_chain of NLABEL. */
4378 if (jump_chain && INSN_UID (nlabel) < max_jump_chain
4379 && INSN_UID (jump) < max_jump_chain)
4380 {
4381 jump_chain[INSN_UID (jump)] = jump_chain[INSN_UID (nlabel)];
4382 jump_chain[INSN_UID (nlabel)] = jump;
4383 }
4384
4385 PATTERN (jump) = gen_jump (nlabel);
4386 JUMP_LABEL (jump) = nlabel;
4387 ++LABEL_NUSES (nlabel);
4388 INSN_CODE (jump) = -1;
4389
4390 if (--LABEL_NUSES (olabel) == 0)
4391 {
4392 delete_labelref_insn (jump, olabel, 0);
4393 delete_insn (olabel);
4394 }
4395 }
4396
4397 /* Find the insn referencing LABEL that is a logical predecessor of INSN.
4398 If we found one, delete it and then delete this insn if DELETE_THIS is
4399 non-zero. Return non-zero if INSN or a predecessor references LABEL. */
4400
4401 static int
4402 delete_labelref_insn (insn, label, delete_this)
4403 rtx insn, label;
4404 int delete_this;
4405 {
4406 int deleted = 0;
4407 rtx link;
4408
4409 if (GET_CODE (insn) != NOTE
4410 && reg_mentioned_p (label, PATTERN (insn)))
4411 {
4412 if (delete_this)
4413 {
4414 delete_insn (insn);
4415 deleted = 1;
4416 }
4417 else
4418 return 1;
4419 }
4420
4421 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4422 if (delete_labelref_insn (XEXP (link, 0), label, 1))
4423 {
4424 if (delete_this)
4425 {
4426 delete_insn (insn);
4427 deleted = 1;
4428 }
4429 else
4430 return 1;
4431 }
4432
4433 return deleted;
4434 }
4435 \f
4436 /* Like rtx_equal_p except that it considers two REGs as equal
4437 if they renumber to the same value and considers two commutative
4438 operations to be the same if the order of the operands has been
4439 reversed.
4440
4441 ??? Addition is not commutative on the PA due to the weird implicit
4442 space register selection rules for memory addresses. Therefore, we
4443 don't consider a + b == b + a.
4444
4445 We could/should make this test a little tighter. Possibly only
4446 disabling it on the PA via some backend macro or only disabling this
4447 case when the PLUS is inside a MEM. */
4448
4449 int
4450 rtx_renumbered_equal_p (x, y)
4451 rtx x, y;
4452 {
4453 register int i;
4454 register RTX_CODE code = GET_CODE (x);
4455 register char *fmt;
4456
4457 if (x == y)
4458 return 1;
4459
4460 if ((code == REG || (code == SUBREG && GET_CODE (SUBREG_REG (x)) == REG))
4461 && (GET_CODE (y) == REG || (GET_CODE (y) == SUBREG
4462 && GET_CODE (SUBREG_REG (y)) == REG)))
4463 {
4464 int reg_x = -1, reg_y = -1;
4465 int word_x = 0, word_y = 0;
4466
4467 if (GET_MODE (x) != GET_MODE (y))
4468 return 0;
4469
4470 /* If we haven't done any renumbering, don't
4471 make any assumptions. */
4472 if (reg_renumber == 0)
4473 return rtx_equal_p (x, y);
4474
4475 if (code == SUBREG)
4476 {
4477 reg_x = REGNO (SUBREG_REG (x));
4478 word_x = SUBREG_WORD (x);
4479
4480 if (reg_renumber[reg_x] >= 0)
4481 {
4482 reg_x = reg_renumber[reg_x] + word_x;
4483 word_x = 0;
4484 }
4485 }
4486
4487 else
4488 {
4489 reg_x = REGNO (x);
4490 if (reg_renumber[reg_x] >= 0)
4491 reg_x = reg_renumber[reg_x];
4492 }
4493
4494 if (GET_CODE (y) == SUBREG)
4495 {
4496 reg_y = REGNO (SUBREG_REG (y));
4497 word_y = SUBREG_WORD (y);
4498
4499 if (reg_renumber[reg_y] >= 0)
4500 {
4501 reg_y = reg_renumber[reg_y];
4502 word_y = 0;
4503 }
4504 }
4505
4506 else
4507 {
4508 reg_y = REGNO (y);
4509 if (reg_renumber[reg_y] >= 0)
4510 reg_y = reg_renumber[reg_y];
4511 }
4512
4513 return reg_x >= 0 && reg_x == reg_y && word_x == word_y;
4514 }
4515
4516 /* Now we have disposed of all the cases
4517 in which different rtx codes can match. */
4518 if (code != GET_CODE (y))
4519 return 0;
4520
4521 switch (code)
4522 {
4523 case PC:
4524 case CC0:
4525 case ADDR_VEC:
4526 case ADDR_DIFF_VEC:
4527 return 0;
4528
4529 case CONST_INT:
4530 return INTVAL (x) == INTVAL (y);
4531
4532 case LABEL_REF:
4533 /* We can't assume nonlocal labels have their following insns yet. */
4534 if (LABEL_REF_NONLOCAL_P (x) || LABEL_REF_NONLOCAL_P (y))
4535 return XEXP (x, 0) == XEXP (y, 0);
4536
4537 /* Two label-refs are equivalent if they point at labels
4538 in the same position in the instruction stream. */
4539 return (next_real_insn (XEXP (x, 0))
4540 == next_real_insn (XEXP (y, 0)));
4541
4542 case SYMBOL_REF:
4543 return XSTR (x, 0) == XSTR (y, 0);
4544
4545 case CODE_LABEL:
4546 /* If we didn't match EQ equality above, they aren't the same. */
4547 return 0;
4548
4549 default:
4550 break;
4551 }
4552
4553 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
4554
4555 if (GET_MODE (x) != GET_MODE (y))
4556 return 0;
4557
4558 /* For commutative operations, two RTX's match if their operands match in
4559 either order. Also handle the simple binary and unary cases without a loop.
4560
4561 ??? Don't consider PLUS a commutative operator; see comments above. */
4562 if ((code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
4563 && code != PLUS)
4564 return ((rtx_renumbered_equal_p (XEXP (x, 0), XEXP (y, 0))
4565 && rtx_renumbered_equal_p (XEXP (x, 1), XEXP (y, 1)))
4566 || (rtx_renumbered_equal_p (XEXP (x, 0), XEXP (y, 1))
4567 && rtx_renumbered_equal_p (XEXP (x, 1), XEXP (y, 0))));
4568 else if (GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == '2')
4569 return (rtx_renumbered_equal_p (XEXP (x, 0), XEXP (y, 0))
4570 && rtx_renumbered_equal_p (XEXP (x, 1), XEXP (y, 1)));
4571 else if (GET_RTX_CLASS (code) == '1')
4572 return rtx_renumbered_equal_p (XEXP (x, 0), XEXP (y, 0));
4573
4574 /* Compare the elements. If any pair of corresponding elements
4575 fails to match, return 0 for the whole thing. */
4576
4577 fmt = GET_RTX_FORMAT (code);
4578 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4579 {
4580 register int j;
4581 switch (fmt[i])
4582 {
4583 case 'w':
4584 if (XWINT (x, i) != XWINT (y, i))
4585 return 0;
4586 break;
4587
4588 case 'i':
4589 if (XINT (x, i) != XINT (y, i))
4590 return 0;
4591 break;
4592
4593 case 's':
4594 if (strcmp (XSTR (x, i), XSTR (y, i)))
4595 return 0;
4596 break;
4597
4598 case 'e':
4599 if (! rtx_renumbered_equal_p (XEXP (x, i), XEXP (y, i)))
4600 return 0;
4601 break;
4602
4603 case 'u':
4604 if (XEXP (x, i) != XEXP (y, i))
4605 return 0;
4606 /* fall through. */
4607 case '0':
4608 break;
4609
4610 case 'E':
4611 if (XVECLEN (x, i) != XVECLEN (y, i))
4612 return 0;
4613 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4614 if (!rtx_renumbered_equal_p (XVECEXP (x, i, j), XVECEXP (y, i, j)))
4615 return 0;
4616 break;
4617
4618 default:
4619 abort ();
4620 }
4621 }
4622 return 1;
4623 }
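
/* Renumbering example (numbers invented): if reg_renumber[100] == 3, then
   (reg:SI 100) compares equal to (reg:SI 3), and (subreg:SI (reg:DI 100) 1)
   compares equal to (reg:SI 4), because the SUBREG_WORD is folded into the
   hard register number once the renumbering is known.  */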
4624 \f
4625 /* If X is a hard register or equivalent to one or a subregister of one,
4626 return the hard register number. If X is a pseudo register that was not
4627 assigned a hard register, return the pseudo register number. Otherwise,
4628 return -1. Any rtx is valid for X. */
4629
4630 int
4631 true_regnum (x)
4632 rtx x;
4633 {
4634 if (GET_CODE (x) == REG)
4635 {
4636 if (REGNO (x) >= FIRST_PSEUDO_REGISTER && reg_renumber[REGNO (x)] >= 0)
4637 return reg_renumber[REGNO (x)];
4638 return REGNO (x);
4639 }
4640 if (GET_CODE (x) == SUBREG)
4641 {
4642 int base = true_regnum (SUBREG_REG (x));
4643 if (base >= 0 && base < FIRST_PSEUDO_REGISTER)
4644 return SUBREG_WORD (x) + base;
4645 }
4646 return -1;
4647 }
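
/* Example (numbers invented): if pseudo 100 was assigned hard register 3,
   true_regnum of (reg:DI 100) is 3, and true_regnum of
   (subreg:SI (reg:DI 100) 1) is 4, the word offset being added only once
   the base is known to be a hard register.  An unassigned pseudo yields
   its own number; anything else yields -1.  */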
4648 \f
4649 /* Optimize code of the form:
4650
4651 for (x = a[i]; x; ...)
4652 ...
4653 for (x = a[i]; x; ...)
4654 ...
4655 foo:
4656
4657 Loop optimize will change the above code into
4658
4659 if (x = a[i])
4660 for (;;)
4661 { ...; if (! (x = ...)) break; }
4662 if (x = a[i])
4663 for (;;)
4664 { ...; if (! (x = ...)) break; }
4665 foo:
4666
4667 In general, if the first test fails, the program can branch
4668 directly to `foo' and skip the second try which is doomed to fail.
4669 We run this after loop optimization and before flow analysis. */
4670
4671 /* When comparing the insn patterns, we track the fact that different
4672 pseudo-register numbers may have been used in each computation.
4673 The following array stores an equivalence -- same_regs[I] == J means
4674 that pseudo register I was used in the first set of tests in a context
4675 where J was used in the second set. We also count the number of such
4676 pending equivalences. If nonzero, the expressions really aren't the
4677 same. */
4678
4679 static int *same_regs;
4680
4681 static int num_same_regs;
4682
4683 /* Track any registers modified between the target of the first jump and
4684 the second jump. They never compare equal. */
4685
4686 static char *modified_regs;
4687
4688 /* Record if memory was modified. */
4689
4690 static int modified_mem;
4691
4692 /* Called via note_stores on each insn between the target of the first
4693 branch and the second branch. It marks any changed registers. */
4694
4695 static void
4696 mark_modified_reg (dest, x)
4697 rtx dest;
4698 rtx x ATTRIBUTE_UNUSED;
4699 {
4700 int regno, i;
4701
4702 if (GET_CODE (dest) == SUBREG)
4703 dest = SUBREG_REG (dest);
4704
4705 if (GET_CODE (dest) == MEM)
4706 modified_mem = 1;
4707
4708 if (GET_CODE (dest) != REG)
4709 return;
4710
4711 regno = REGNO (dest);
4712 if (regno >= FIRST_PSEUDO_REGISTER)
4713 modified_regs[regno] = 1;
4714 else
4715 for (i = 0; i < HARD_REGNO_NREGS (regno, GET_MODE (dest)); i++)
4716 modified_regs[regno + i] = 1;
4717 }
4718
4719 /* Thread jumps. F is the first insn in the chain of insns; MAX_REG bounds the register numbers in use; FLAG_BEFORE_LOOP is nonzero if this pass runs before loop optimization. */
4720
4721 void
4722 thread_jumps (f, max_reg, flag_before_loop)
4723 rtx f;
4724 int max_reg;
4725 int flag_before_loop;
4726 {
4727 /* Basic algorithm is to find a conditional branch,
4728 the label it may branch to, and the branch after
4729 that label. If the two branches test the same condition,
4730 walk back from both branch paths until the insn patterns
4731 differ, or code labels are hit. If we make it back to
4732 the target of the first branch, then we know that the first branch
4733 will either always succeed or always fail depending on the relative
4734 senses of the two branches. So adjust the first branch accordingly
4735 in this case. */
4736
4737 rtx label, b1, b2, t1, t2;
4738 enum rtx_code code1, code2;
4739 rtx b1op0, b1op1, b2op0, b2op1;
4740 int changed = 1;
4741 int i;
4742 int *all_reset;
4743
4744 /* Allocate register tables and quick-reset table. */
4745 modified_regs = (char *) alloca (max_reg * sizeof (char));
4746 same_regs = (int *) alloca (max_reg * sizeof (int));
4747 all_reset = (int *) alloca (max_reg * sizeof (int));
4748 for (i = 0; i < max_reg; i++)
4749 all_reset[i] = -1;
4750
4751 while (changed)
4752 {
4753 changed = 0;
4754
4755 for (b1 = f; b1; b1 = NEXT_INSN (b1))
4756 {
4757 /* Get to a candidate branch insn. */
4758 if (GET_CODE (b1) != JUMP_INSN
4759 || ! condjump_p (b1) || simplejump_p (b1)
4760 || JUMP_LABEL (b1) == 0)
4761 continue;
4762
4763 bzero (modified_regs, max_reg * sizeof (char));
4764 modified_mem = 0;
4765
4766 bcopy ((char *) all_reset, (char *) same_regs,
4767 max_reg * sizeof (int));
4768 num_same_regs = 0;
4769
4770 label = JUMP_LABEL (b1);
4771
4772 /* Look for a branch after the target. Record any registers and
4773 memory modified between the target and the branch. Stop when we
4774 get to a label since we can't know what was changed there. */
4775 for (b2 = NEXT_INSN (label); b2; b2 = NEXT_INSN (b2))
4776 {
4777 if (GET_CODE (b2) == CODE_LABEL)
4778 break;
4779
4780 else if (GET_CODE (b2) == JUMP_INSN)
4781 {
4782 /* If this is an unconditional jump and is the only use of
4783 its target label, we can follow it. */
4784 if (simplejump_p (b2)
4785 && JUMP_LABEL (b2) != 0
4786 && LABEL_NUSES (JUMP_LABEL (b2)) == 1)
4787 {
4788 b2 = JUMP_LABEL (b2);
4789 continue;
4790 }
4791 else
4792 break;
4793 }
4794
4795 if (GET_CODE (b2) != CALL_INSN && GET_CODE (b2) != INSN)
4796 continue;
4797
4798 if (GET_CODE (b2) == CALL_INSN)
4799 {
4800 modified_mem = 1;
4801 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4802 if (call_used_regs[i] && ! fixed_regs[i]
4803 && i != STACK_POINTER_REGNUM
4804 && i != FRAME_POINTER_REGNUM
4805 && i != HARD_FRAME_POINTER_REGNUM
4806 && i != ARG_POINTER_REGNUM)
4807 modified_regs[i] = 1;
4808 }
4809
4810 note_stores (PATTERN (b2), mark_modified_reg);
4811 }
4812
4813 /* Check the next candidate branch insn from the label
4814 of the first. */
4815 if (b2 == 0
4816 || GET_CODE (b2) != JUMP_INSN
4817 || b2 == b1
4818 || ! condjump_p (b2)
4819 || simplejump_p (b2))
4820 continue;
4821
4822 /* Get the comparison codes and operands, reversing the
4823 codes if appropriate. If we don't have comparison codes,
4824 we can't do anything. */
4825 b1op0 = XEXP (XEXP (SET_SRC (PATTERN (b1)), 0), 0);
4826 b1op1 = XEXP (XEXP (SET_SRC (PATTERN (b1)), 0), 1);
4827 code1 = GET_CODE (XEXP (SET_SRC (PATTERN (b1)), 0));
4828 if (XEXP (SET_SRC (PATTERN (b1)), 1) == pc_rtx)
4829 code1 = reverse_condition (code1);
4830
4831 b2op0 = XEXP (XEXP (SET_SRC (PATTERN (b2)), 0), 0);
4832 b2op1 = XEXP (XEXP (SET_SRC (PATTERN (b2)), 0), 1);
4833 code2 = GET_CODE (XEXP (SET_SRC (PATTERN (b2)), 0));
4834 if (XEXP (SET_SRC (PATTERN (b2)), 1) == pc_rtx)
4835 code2 = reverse_condition (code2);
4836
4837 /* If they test the same things and knowing that B1 branches
4838 tells us whether or not B2 branches, check if we
4839 can thread the branch. */
4840 if (rtx_equal_for_thread_p (b1op0, b2op0, b2)
4841 && rtx_equal_for_thread_p (b1op1, b2op1, b2)
4842 && (comparison_dominates_p (code1, code2)
4843 || (comparison_dominates_p (code1, reverse_condition (code2))
4844 && can_reverse_comparison_p (XEXP (SET_SRC (PATTERN (b1)),
4845 0),
4846 b1))))
4847 {
4848 t1 = prev_nonnote_insn (b1);
4849 t2 = prev_nonnote_insn (b2);
4850
4851 while (t1 != 0 && t2 != 0)
4852 {
4853 if (t2 == label)
4854 {
4855 /* We have reached the target of the first branch.
4856 If there are no pending register equivalents,
4857 we know that this branch will either always
4858 succeed (if the senses of the two branches are
4859 the same) or always fail (if not). */
4860 rtx new_label;
4861
4862 if (num_same_regs != 0)
4863 break;
4864
4865 if (comparison_dominates_p (code1, code2))
4866 new_label = JUMP_LABEL (b2);
4867 else
4868 new_label = get_label_after (b2);
4869
4870 if (JUMP_LABEL (b1) != new_label)
4871 {
4872 rtx prev = PREV_INSN (new_label);
4873
4874 if (flag_before_loop
4875 && GET_CODE (prev) == NOTE
4876 && NOTE_LINE_NUMBER (prev) == NOTE_INSN_LOOP_BEG)
4877 {
4878 /* Don't thread to the loop label. If a loop
4879 label is reused, loop optimization will
4880 be disabled for that loop. */
4881 new_label = gen_label_rtx ();
4882 emit_label_after (new_label, PREV_INSN (prev));
4883 }
4884 changed |= redirect_jump (b1, new_label);
4885 }
4886 break;
4887 }
4888
4889 /* If either of these is not a normal insn (it might be
4890 a JUMP_INSN, CALL_INSN, or CODE_LABEL) we fail. (NOTEs
4891 have already been skipped above.) Similarly, fail
4892 if the insns are different. */
4893 if (GET_CODE (t1) != INSN || GET_CODE (t2) != INSN
4894 || recog_memoized (t1) != recog_memoized (t2)
4895 || ! rtx_equal_for_thread_p (PATTERN (t1),
4896 PATTERN (t2), t2))
4897 break;
4898
4899 t1 = prev_nonnote_insn (t1);
4900 t2 = prev_nonnote_insn (t2);
4901 }
4902 }
4903 }
4904 }
4905 }
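
/* Comparison-domination example (operands assumed identical): if B1 tests
   (eq a b) and B2, reached through B1's target over a matching code
   sequence, tests (le a b), then comparison_dominates_p (EQ, LE) holds --
   a == b implies a <= b -- so B1 is redirected straight to B2's target.
   If B1 instead dominates the reversed condition (and the comparison can
   be reversed safely), B1 is redirected to the label just after B2, the
   point where B2 falls through.  */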
4906 \f
4907 /* This is like RTX_EQUAL_P except that it knows about our handling of
4908 possibly equivalent registers and knows to consider volatile and
4909 modified objects as not equal.
4910
4911 YINSN is the insn containing Y. */
4912
4913 int
4914 rtx_equal_for_thread_p (x, y, yinsn)
4915 rtx x, y;
4916 rtx yinsn;
4917 {
4918 register int i;
4919 register int j;
4920 register enum rtx_code code;
4921 register char *fmt;
4922
4923 code = GET_CODE (x);
4924 /* Rtx's of different codes cannot be equal. */
4925 if (code != GET_CODE (y))
4926 return 0;
4927
4928 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
4929 (REG:SI x) and (REG:HI x) are NOT equivalent. */
4930
4931 if (GET_MODE (x) != GET_MODE (y))
4932 return 0;
4933
4934 /* For floating-point, consider everything unequal. This is a bit
4935 pessimistic, but this pass would only rarely do anything for FP
4936 anyway. */
4937 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
4938 && FLOAT_MODE_P (GET_MODE (x)) && ! flag_fast_math)
4939 return 0;
4940
4941 /* For commutative operations, two RTX's match if their operands match in
4942 either order. Also handle the simple binary and unary cases without a loop. */
4943 if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
4944 return ((rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 0), yinsn)
4945 && rtx_equal_for_thread_p (XEXP (x, 1), XEXP (y, 1), yinsn))
4946 || (rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 1), yinsn)
4947 && rtx_equal_for_thread_p (XEXP (x, 1), XEXP (y, 0), yinsn)));
4948 else if (GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == '2')
4949 return (rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 0), yinsn)
4950 && rtx_equal_for_thread_p (XEXP (x, 1), XEXP (y, 1), yinsn));
4951 else if (GET_RTX_CLASS (code) == '1')
4952 return rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 0), yinsn);
4953
4954 /* Handle special-cases first. */
4955 switch (code)
4956 {
4957 case REG:
4958 if (REGNO (x) == REGNO (y) && ! modified_regs[REGNO (x)])
4959 return 1;
4960
4961 /* If neither is a user variable or a hard register, check for possible
4962 equivalence. */
4963 if (REG_USERVAR_P (x) || REG_USERVAR_P (y)
4964 || REGNO (x) < FIRST_PSEUDO_REGISTER
4965 || REGNO (y) < FIRST_PSEUDO_REGISTER)
4966 return 0;
4967
4968 if (same_regs[REGNO (x)] == -1)
4969 {
4970 same_regs[REGNO (x)] = REGNO (y);
4971 num_same_regs++;
4972
4973 /* If this is the first time we are seeing a register on the `Y'
4974 side, see if it is the last use. If not, we can't thread the
4975 jump, so mark it as not equivalent. */
4976 if (REGNO_LAST_UID (REGNO (y)) != INSN_UID (yinsn))
4977 return 0;
4978
4979 return 1;
4980 }
4981 else
4982 return (same_regs[REGNO (x)] == REGNO (y));
4983
4984 break;
4985
4986 case MEM:
4987 /* If memory modified or either volatile, not equivalent.
4988 Else, check address. */
4989 if (modified_mem || MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
4990 return 0;
4991
4992 return rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 0), yinsn);
4993
4994 case ASM_INPUT:
4995 if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
4996 return 0;
4997
4998 break;
4999
5000 case SET:
5001 /* Cancel a pending `same_regs' if setting equivalenced registers.
5002 Then process source. */
5003 if (GET_CODE (SET_DEST (x)) == REG
5004 && GET_CODE (SET_DEST (y)) == REG)
5005 {
5006 if (same_regs[REGNO (SET_DEST (x))] == REGNO (SET_DEST (y)))
5007 {
5008 same_regs[REGNO (SET_DEST (x))] = -1;
5009 num_same_regs--;
5010 }
5011 else if (REGNO (SET_DEST (x)) != REGNO (SET_DEST (y)))
5012 return 0;
5013 }
5014 else
5015 if (rtx_equal_for_thread_p (SET_DEST (x), SET_DEST (y), yinsn) == 0)
5016 return 0;
5017
5018 return rtx_equal_for_thread_p (SET_SRC (x), SET_SRC (y), yinsn);
5019
5020 case LABEL_REF:
5021 return XEXP (x, 0) == XEXP (y, 0);
5022
5023 case SYMBOL_REF:
5024 return XSTR (x, 0) == XSTR (y, 0);
5025
5026 default:
5027 break;
5028 }
5029
5030 if (x == y)
5031 return 1;
5032
5033 fmt = GET_RTX_FORMAT (code);
5034 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5035 {
5036 switch (fmt[i])
5037 {
5038 case 'w':
5039 if (XWINT (x, i) != XWINT (y, i))
5040 return 0;
5041 break;
5042
5043 case 'n':
5044 case 'i':
5045 if (XINT (x, i) != XINT (y, i))
5046 return 0;
5047 break;
5048
5049 case 'V':
5050 case 'E':
5051 /* Two vectors must have the same length. */
5052 if (XVECLEN (x, i) != XVECLEN (y, i))
5053 return 0;
5054
5055 /* And the corresponding elements must match. */
5056 for (j = 0; j < XVECLEN (x, i); j++)
5057 if (rtx_equal_for_thread_p (XVECEXP (x, i, j),
5058 XVECEXP (y, i, j), yinsn) == 0)
5059 return 0;
5060 break;
5061
5062 case 'e':
5063 if (rtx_equal_for_thread_p (XEXP (x, i), XEXP (y, i), yinsn) == 0)
5064 return 0;
5065 break;
5066
5067 case 'S':
5068 case 's':
5069 if (strcmp (XSTR (x, i), XSTR (y, i)))
5070 return 0;
5071 break;
5072
5073 case 'u':
5074 /* These are just backpointers, so they don't matter. */
5075 break;
5076
5077 case '0':
5078 break;
5079
5080 /* It is believed that rtx's at this level will never
5081 contain anything but integers and other rtx's,
5082 except for within LABEL_REFs and SYMBOL_REFs. */
5083 default:
5084 abort ();
5085 }
5086 }
5087 return 1;
5088 }
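
/* Pairing example (register numbers invented): comparing (reg:SI 101) on
   the `X' side against (reg:SI 205) on the `Y' side records
   same_regs[101] = 205 the first time through, provided YINSN is the last
   use of reg 205; a subsequent SET of that same pair cancels the pending
   equivalence.  Only if every pending equivalence is cancelled again by
   matching SETs do the two sequences count as truly equal.  */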
5089 \f
5090
5091 #ifndef HAVE_cc0
5092 /* Return the insn that NEW can safely be inserted in front of, starting at
5093 the jump insn INSN. Return 0 if it is not safe to do this jump
5094 optimization. Note that NEW must contain a single set. */
5095
5096 static rtx
5097 find_insert_position (insn, new)
5098 rtx insn;
5099 rtx new;
5100 {
5101 int i;
5102 rtx prev;
5103
5104 /* If NEW does not clobber, it is safe to insert NEW before INSN. */
5105 if (GET_CODE (PATTERN (new)) != PARALLEL)
5106 return insn;
5107
5108 for (i = XVECLEN (PATTERN (new), 0) - 1; i >= 0; i--)
5109 if (GET_CODE (XVECEXP (PATTERN (new), 0, i)) == CLOBBER
5110 && reg_overlap_mentioned_p (XEXP (XVECEXP (PATTERN (new), 0, i), 0),
5111 insn))
5112 break;
5113
5114 if (i < 0)
5115 return insn;
5116
5117 /* There is a good chance that the previous insn PREV sets the thing
5118 being clobbered (often the CC in a hard reg). If PREV does not
5119 use what NEW sets, we can insert NEW before PREV. */
5120
5121 prev = prev_active_insn (insn);
5122 for (i = XVECLEN (PATTERN (new), 0) - 1; i >= 0; i--)
5123 if (GET_CODE (XVECEXP (PATTERN (new), 0, i)) == CLOBBER
5124 && reg_overlap_mentioned_p (XEXP (XVECEXP (PATTERN (new), 0, i), 0),
5125 insn)
5126 && ! modified_in_p (XEXP (XVECEXP (PATTERN (new), 0, i), 0),
5127 prev))
5128 return 0;
5129
5130 return reg_mentioned_p (SET_DEST (single_set (new)), prev) ? 0 : prev;
5131 }
5132 #endif /* !HAVE_cc0 */