1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 88, 92, 93, 94, 1995 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option) any later version.
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
26 #include "insn-flags.h"
27 #include "insn-codes.h"
29 #include "insn-config.h"
34 /* Each optab contains info on how this target machine
35 can perform a particular operation
36 for all sizes and kinds of operands.
38 The operation to be performed is often specified
39 by passing one of these optabs as an argument.
41 See expr.h for documentation of these optabs. */
46 optab smul_highpart_optab
;
47 optab umul_highpart_optab
;
48 optab smul_widen_optab
;
49 optab umul_widen_optab
;
72 optab movstrict_optab
;
83 optab ucmp_optab
; /* Used only for libcalls for unsigned comparisons. */
88 /* Tables of patterns for extending one integer mode to another. */
89 enum insn_code extendtab
[MAX_MACHINE_MODE
][MAX_MACHINE_MODE
][2];
91 /* Tables of patterns for converting between fixed and floating point. */
92 enum insn_code fixtab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
93 enum insn_code fixtrunctab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
94 enum insn_code floattab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
96 /* Contains the optab used for each rtx code. */
97 optab code_to_optab
[NUM_RTX_CODE
+ 1];
99 /* SYMBOL_REF rtx's for the library functions that are called
100 implicitly and not via optabs. */
102 rtx extendsfdf2_libfunc
;
103 rtx extendsfxf2_libfunc
;
104 rtx extendsftf2_libfunc
;
105 rtx extenddfxf2_libfunc
;
106 rtx extenddftf2_libfunc
;
108 rtx truncdfsf2_libfunc
;
109 rtx truncxfsf2_libfunc
;
110 rtx trunctfsf2_libfunc
;
111 rtx truncxfdf2_libfunc
;
112 rtx trunctfdf2_libfunc
;
156 rtx floatsisf_libfunc
;
157 rtx floatdisf_libfunc
;
158 rtx floattisf_libfunc
;
160 rtx floatsidf_libfunc
;
161 rtx floatdidf_libfunc
;
162 rtx floattidf_libfunc
;
164 rtx floatsixf_libfunc
;
165 rtx floatdixf_libfunc
;
166 rtx floattixf_libfunc
;
168 rtx floatsitf_libfunc
;
169 rtx floatditf_libfunc
;
170 rtx floattitf_libfunc
;
188 rtx fixunssfsi_libfunc
;
189 rtx fixunssfdi_libfunc
;
190 rtx fixunssfti_libfunc
;
192 rtx fixunsdfsi_libfunc
;
193 rtx fixunsdfdi_libfunc
;
194 rtx fixunsdfti_libfunc
;
196 rtx fixunsxfsi_libfunc
;
197 rtx fixunsxfdi_libfunc
;
198 rtx fixunsxfti_libfunc
;
200 rtx fixunstfsi_libfunc
;
201 rtx fixunstfdi_libfunc
;
202 rtx fixunstfti_libfunc
;
204 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
205 gives the gen_function to make a branch to test that condition. */
207 rtxfun bcc_gen_fctn
[NUM_RTX_CODE
];
209 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
210 gives the insn code to make a store-condition insn
211 to test that condition. */
213 enum insn_code setcc_gen_code
[NUM_RTX_CODE
];
215 #ifdef HAVE_conditional_move
216 /* Indexed by the machine mode, gives the insn code to make a conditional
217 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
218 setcc_gen_code to cut down on the number of named patterns. Consider a day
219 when a lot more rtx codes are conditional (eg: for the ARM). */
221 enum insn_code movcc_gen_code
[NUM_MACHINE_MODES
];
224 static int add_equal_note
PROTO((rtx
, rtx
, enum rtx_code
, rtx
, rtx
));
225 static rtx widen_operand
PROTO((rtx
, enum machine_mode
,
226 enum machine_mode
, int, int));
227 static enum insn_code can_fix_p
PROTO((enum machine_mode
, enum machine_mode
,
229 static enum insn_code can_float_p
PROTO((enum machine_mode
, enum machine_mode
,
231 static rtx ftruncify
PROTO((rtx
));
232 static optab init_optab
PROTO((enum rtx_code
));
233 static void init_libfuncs
PROTO((optab
, int, int, char *, int));
234 static void init_integral_libfuncs
PROTO((optab
, char *, int));
235 static void init_floating_libfuncs
PROTO((optab
, char *, int));
236 static void init_complex_libfuncs
PROTO((optab
, char *, int));
238 /* Add a REG_EQUAL note to the last insn in SEQ. TARGET is being set to
239 the result of operation CODE applied to OP0 (and OP1 if it is a binary
242 If the last insn does not set TARGET, don't do anything, but return 1.
244 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
245 don't add the REG_EQUAL note but return 0. Our caller can then try
246 again, ensuring that TARGET is not one of the operands. */
249 add_equal_note (seq
, target
, code
, op0
, op1
)
259 if ((GET_RTX_CLASS (code
) != '1' && GET_RTX_CLASS (code
) != '2'
260 && GET_RTX_CLASS (code
) != 'c' && GET_RTX_CLASS (code
) != '<')
261 || GET_CODE (seq
) != SEQUENCE
262 || (set
= single_set (XVECEXP (seq
, 0, XVECLEN (seq
, 0) - 1))) == 0
263 || GET_CODE (target
) == ZERO_EXTRACT
264 || (! rtx_equal_p (SET_DEST (set
), target
)
265 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside the
267 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
268 || ! rtx_equal_p (SUBREG_REG (XEXP (SET_DEST (set
), 0)),
272 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
273 besides the last insn. */
274 if (reg_overlap_mentioned_p (target
, op0
)
275 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
276 for (i
= XVECLEN (seq
, 0) - 2; i
>= 0; i
--)
277 if (reg_set_p (target
, XVECEXP (seq
, 0, i
)))
280 if (GET_RTX_CLASS (code
) == '1')
281 note
= gen_rtx (code
, GET_MODE (target
), copy_rtx (op0
));
283 note
= gen_rtx (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
285 REG_NOTES (XVECEXP (seq
, 0, XVECLEN (seq
, 0) - 1))
286 = gen_rtx (EXPR_LIST
, REG_EQUAL
, note
,
287 REG_NOTES (XVECEXP (seq
, 0, XVECLEN (seq
, 0) - 1)));
292 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
293 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
294 not actually do a sign-extend or zero-extend, but can leave the
295 higher-order bits of the result rtx undefined, for example, in the case
296 of logical operations, but not right shifts. */
299 widen_operand (op
, mode
, oldmode
, unsignedp
, no_extend
)
301 enum machine_mode mode
, oldmode
;
307 /* If we must extend do so. If OP is either a constant or a SUBREG
308 for a promoted object, also extend since it will be more efficient to
311 || GET_MODE (op
) == VOIDmode
312 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)))
313 return convert_modes (mode
, oldmode
, op
, unsignedp
);
315 /* If MODE is no wider than a single word, we return a paradoxical
317 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
318 return gen_rtx (SUBREG
, mode
, force_reg (GET_MODE (op
), op
), 0);
320 /* Otherwise, get an object of MODE, clobber it, and set the low-order
323 result
= gen_reg_rtx (mode
);
324 emit_insn (gen_rtx (CLOBBER
, VOIDmode
, result
));
325 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
329 /* Generate code to perform an operation specified by BINOPTAB
330 on operands OP0 and OP1, with result having machine-mode MODE.
332 UNSIGNEDP is for the case where we have to widen the operands
333 to perform the operation. It says to use zero-extension.
335 If TARGET is nonzero, the value
336 is generated there, if it is convenient to do so.
337 In all cases an rtx is returned for the locus of the value;
338 this may or may not be TARGET. */
341 expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
)
342 enum machine_mode mode
;
347 enum optab_methods methods
;
349 enum optab_methods next_methods
350 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
351 ? OPTAB_WIDEN
: methods
);
352 enum mode_class
class;
353 enum machine_mode wider_mode
;
355 int commutative_op
= 0;
356 int shift_op
= (binoptab
->code
== ASHIFT
357 || binoptab
->code
== ASHIFTRT
358 || binoptab
->code
== LSHIFTRT
359 || binoptab
->code
== ROTATE
360 || binoptab
->code
== ROTATERT
);
361 rtx entry_last
= get_last_insn ();
364 class = GET_MODE_CLASS (mode
);
366 op0
= protect_from_queue (op0
, 0);
367 op1
= protect_from_queue (op1
, 0);
369 target
= protect_from_queue (target
, 1);
373 op0
= force_not_mem (op0
);
374 op1
= force_not_mem (op1
);
377 /* If subtracting an integer constant, convert this into an addition of
378 the negated constant. */
380 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
382 op1
= negate_rtx (mode
, op1
);
383 binoptab
= add_optab
;
386 /* If we are inside an appropriately-short loop and one operand is an
387 expensive constant, force it into a register. */
388 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
389 && rtx_cost (op0
, binoptab
->code
) > 2)
390 op0
= force_reg (mode
, op0
);
392 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
393 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > 2)
394 op1
= force_reg (mode
, op1
);
396 /* Record where to delete back to if we backtrack. */
397 last
= get_last_insn ();
399 /* If operation is commutative,
400 try to make the first operand a register.
401 Even better, try to make it the same as the target.
402 Also try to make the last operand a constant. */
403 if (GET_RTX_CLASS (binoptab
->code
) == 'c'
404 || binoptab
== smul_widen_optab
405 || binoptab
== umul_widen_optab
406 || binoptab
== smul_highpart_optab
407 || binoptab
== umul_highpart_optab
)
411 if (((target
== 0 || GET_CODE (target
) == REG
)
412 ? ((GET_CODE (op1
) == REG
413 && GET_CODE (op0
) != REG
)
415 : rtx_equal_p (op1
, target
))
416 || GET_CODE (op0
) == CONST_INT
)
424 /* If we can do it with a three-operand insn, do so. */
426 if (methods
!= OPTAB_MUST_WIDEN
427 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
429 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
430 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
431 enum machine_mode mode1
= insn_operand_mode
[icode
][2];
433 rtx xop0
= op0
, xop1
= op1
;
438 temp
= gen_reg_rtx (mode
);
440 /* If it is a commutative operator and the modes would match
441 if we would swap the operands, we can save the conversions. */
444 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
445 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
449 tmp
= op0
; op0
= op1
; op1
= tmp
;
450 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
454 /* In case the insn wants input operands in modes different from
455 the result, convert the operands. */
457 if (GET_MODE (op0
) != VOIDmode
458 && GET_MODE (op0
) != mode0
459 && mode0
!= VOIDmode
)
460 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
462 if (GET_MODE (xop1
) != VOIDmode
463 && GET_MODE (xop1
) != mode1
464 && mode1
!= VOIDmode
)
465 xop1
= convert_to_mode (mode1
, xop1
, unsignedp
);
467 /* Now, if insn's predicates don't allow our operands, put them into
470 if (! (*insn_operand_predicate
[icode
][1]) (xop0
, mode0
)
471 && mode0
!= VOIDmode
)
472 xop0
= copy_to_mode_reg (mode0
, xop0
);
474 if (! (*insn_operand_predicate
[icode
][2]) (xop1
, mode1
)
475 && mode1
!= VOIDmode
)
476 xop1
= copy_to_mode_reg (mode1
, xop1
);
478 if (! (*insn_operand_predicate
[icode
][0]) (temp
, mode
))
479 temp
= gen_reg_rtx (mode
);
481 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
484 /* If PAT is a multi-insn sequence, try to add an appropriate
485 REG_EQUAL note to it. If we can't because TEMP conflicts with an
486 operand, call ourselves again, this time without a target. */
487 if (GET_CODE (pat
) == SEQUENCE
488 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
490 delete_insns_since (last
);
491 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
499 delete_insns_since (last
);
502 /* If this is a multiply, see if we can do a widening operation that
503 takes operands of this mode and makes a wider mode. */
505 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
506 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
507 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
508 != CODE_FOR_nothing
))
510 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
511 unsignedp
? umul_widen_optab
: smul_widen_optab
,
512 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
516 if (GET_MODE_CLASS (mode
) == MODE_INT
)
517 return gen_lowpart (mode
, temp
);
519 return convert_to_mode (mode
, temp
, unsignedp
);
523 /* Look for a wider mode of the same class for which we think we
524 can open-code the operation. Check for a widening multiply at the
525 wider mode as well. */
527 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
528 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
529 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
530 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
532 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
533 || (binoptab
== smul_optab
534 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
535 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
536 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
537 != CODE_FOR_nothing
)))
539 rtx xop0
= op0
, xop1
= op1
;
542 /* For certain integer operations, we need not actually extend
543 the narrow operands, as long as we will truncate
544 the results to the same narrowness. */
546 if ((binoptab
== ior_optab
|| binoptab
== and_optab
547 || binoptab
== xor_optab
548 || binoptab
== add_optab
|| binoptab
== sub_optab
549 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
550 && class == MODE_INT
)
553 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
555 /* The second operand of a shift must always be extended. */
556 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
557 no_extend
&& binoptab
!= ashl_optab
);
559 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
560 unsignedp
, OPTAB_DIRECT
);
563 if (class != MODE_INT
)
566 target
= gen_reg_rtx (mode
);
567 convert_move (target
, temp
, 0);
571 return gen_lowpart (mode
, temp
);
574 delete_insns_since (last
);
578 /* These can be done a word at a time. */
579 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
581 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
582 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
588 /* If TARGET is the same as one of the operands, the REG_EQUAL note
589 won't be accurate, so use a new target. */
590 if (target
== 0 || target
== op0
|| target
== op1
)
591 target
= gen_reg_rtx (mode
);
595 /* Do the actual arithmetic. */
596 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
598 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
599 rtx x
= expand_binop (word_mode
, binoptab
,
600 operand_subword_force (op0
, i
, mode
),
601 operand_subword_force (op1
, i
, mode
),
602 target_piece
, unsignedp
, next_methods
);
607 if (target_piece
!= x
)
608 emit_move_insn (target_piece
, x
);
611 insns
= get_insns ();
614 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
616 if (binoptab
->code
!= UNKNOWN
)
618 = gen_rtx (binoptab
->code
, mode
, copy_rtx (op0
), copy_rtx (op1
));
622 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
627 /* Synthesize double word shifts from single word shifts. */
628 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
629 || binoptab
== ashr_optab
)
631 && GET_CODE (op1
) == CONST_INT
632 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
633 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
634 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
635 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
637 rtx insns
, inter
, equiv_value
;
638 rtx into_target
, outof_target
;
639 rtx into_input
, outof_input
;
640 int shift_count
, left_shift
, outof_word
;
642 /* If TARGET is the same as one of the operands, the REG_EQUAL note
643 won't be accurate, so use a new target. */
644 if (target
== 0 || target
== op0
|| target
== op1
)
645 target
= gen_reg_rtx (mode
);
649 shift_count
= INTVAL (op1
);
651 /* OUTOF_* is the word we are shifting bits away from, and
652 INTO_* is the word that we are shifting bits towards, thus
653 they differ depending on the direction of the shift and
656 left_shift
= binoptab
== ashl_optab
;
657 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
659 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
660 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
662 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
663 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
665 if (shift_count
>= BITS_PER_WORD
)
667 inter
= expand_binop (word_mode
, binoptab
,
669 GEN_INT (shift_count
- BITS_PER_WORD
),
670 into_target
, unsignedp
, next_methods
);
672 if (inter
!= 0 && inter
!= into_target
)
673 emit_move_insn (into_target
, inter
);
675 /* For a signed right shift, we must fill the word we are shifting
676 out of with copies of the sign bit. Otherwise it is zeroed. */
677 if (inter
!= 0 && binoptab
!= ashr_optab
)
678 inter
= CONST0_RTX (word_mode
);
680 inter
= expand_binop (word_mode
, binoptab
,
682 GEN_INT (BITS_PER_WORD
- 1),
683 outof_target
, unsignedp
, next_methods
);
685 if (inter
!= 0 && inter
!= outof_target
)
686 emit_move_insn (outof_target
, inter
);
691 optab reverse_unsigned_shift
, unsigned_shift
;
693 /* For a shift of less then BITS_PER_WORD, to compute the carry,
694 we must do a logical shift in the opposite direction of the
697 reverse_unsigned_shift
= (left_shift
? lshr_optab
: ashl_optab
);
699 /* For a shift of less than BITS_PER_WORD, to compute the word
700 shifted towards, we need to unsigned shift the orig value of
703 unsigned_shift
= (left_shift
? ashl_optab
: lshr_optab
);
705 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
707 GEN_INT (BITS_PER_WORD
- shift_count
),
708 0, unsignedp
, next_methods
);
713 inter
= expand_binop (word_mode
, unsigned_shift
, into_input
,
714 op1
, 0, unsignedp
, next_methods
);
717 inter
= expand_binop (word_mode
, ior_optab
, carries
, inter
,
718 into_target
, unsignedp
, next_methods
);
720 if (inter
!= 0 && inter
!= into_target
)
721 emit_move_insn (into_target
, inter
);
724 inter
= expand_binop (word_mode
, binoptab
, outof_input
,
725 op1
, outof_target
, unsignedp
, next_methods
);
727 if (inter
!= 0 && inter
!= outof_target
)
728 emit_move_insn (outof_target
, inter
);
731 insns
= get_insns ();
736 if (binoptab
->code
!= UNKNOWN
)
737 equiv_value
= gen_rtx (binoptab
->code
, mode
, op0
, op1
);
741 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
746 /* Synthesize double word rotates from single word shifts. */
747 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
749 && GET_CODE (op1
) == CONST_INT
750 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
751 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
752 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
754 rtx insns
, equiv_value
;
755 rtx into_target
, outof_target
;
756 rtx into_input
, outof_input
;
758 int shift_count
, left_shift
, outof_word
;
760 /* If TARGET is the same as one of the operands, the REG_EQUAL note
761 won't be accurate, so use a new target. */
762 if (target
== 0 || target
== op0
|| target
== op1
)
763 target
= gen_reg_rtx (mode
);
767 shift_count
= INTVAL (op1
);
769 /* OUTOF_* is the word we are shifting bits away from, and
770 INTO_* is the word that we are shifting bits towards, thus
771 they differ depending on the direction of the shift and
774 left_shift
= (binoptab
== rotl_optab
);
775 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
777 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
778 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
780 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
781 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
783 if (shift_count
== BITS_PER_WORD
)
785 /* This is just a word swap. */
786 emit_move_insn (outof_target
, into_input
);
787 emit_move_insn (into_target
, outof_input
);
792 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
793 rtx first_shift_count
, second_shift_count
;
794 optab reverse_unsigned_shift
, unsigned_shift
;
796 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
797 ? lshr_optab
: ashl_optab
);
799 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
800 ? ashl_optab
: lshr_optab
);
802 if (shift_count
> BITS_PER_WORD
)
804 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
805 second_shift_count
= GEN_INT (2*BITS_PER_WORD
- shift_count
);
809 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
810 second_shift_count
= GEN_INT (shift_count
);
813 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
814 outof_input
, first_shift_count
,
815 NULL_RTX
, unsignedp
, next_methods
);
816 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
817 into_input
, second_shift_count
,
818 into_target
, unsignedp
, next_methods
);
820 if (into_temp1
!= 0 && into_temp2
!= 0)
821 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
822 into_target
, unsignedp
, next_methods
);
826 if (inter
!= 0 && inter
!= into_target
)
827 emit_move_insn (into_target
, inter
);
829 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
830 into_input
, first_shift_count
,
831 NULL_RTX
, unsignedp
, next_methods
);
832 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
833 outof_input
, second_shift_count
,
834 outof_target
, unsignedp
, next_methods
);
836 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
837 inter
= expand_binop (word_mode
, ior_optab
,
838 outof_temp1
, outof_temp2
,
839 outof_target
, unsignedp
, next_methods
);
841 if (inter
!= 0 && inter
!= outof_target
)
842 emit_move_insn (outof_target
, inter
);
845 insns
= get_insns ();
850 if (binoptab
->code
!= UNKNOWN
)
851 equiv_value
= gen_rtx (binoptab
->code
, mode
, op0
, op1
);
855 /* We can't make this a no conflict block if this is a word swap,
856 because the word swap case fails if the input and output values
857 are in the same register. */
858 if (shift_count
!= BITS_PER_WORD
)
859 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
868 /* These can be done a word at a time by propagating carries. */
869 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
871 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
872 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
875 rtx carry_tmp
= gen_reg_rtx (word_mode
);
876 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
877 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
878 rtx carry_in
, carry_out
;
881 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
882 value is one of those, use it. Otherwise, use 1 since it is the
883 one easiest to get. */
884 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
885 int normalizep
= STORE_FLAG_VALUE
;
890 /* Prepare the operands. */
891 xop0
= force_reg (mode
, op0
);
892 xop1
= force_reg (mode
, op1
);
894 if (target
== 0 || GET_CODE (target
) != REG
895 || target
== xop0
|| target
== xop1
)
896 target
= gen_reg_rtx (mode
);
898 /* Indicate for flow that the entire target reg is being set. */
899 if (GET_CODE (target
) == REG
)
900 emit_insn (gen_rtx (CLOBBER
, VOIDmode
, target
));
902 /* Do the actual arithmetic. */
903 for (i
= 0; i
< nwords
; i
++)
905 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
906 rtx target_piece
= operand_subword (target
, index
, 1, mode
);
907 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
908 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
911 /* Main add/subtract of the input operands. */
912 x
= expand_binop (word_mode
, binoptab
,
913 op0_piece
, op1_piece
,
914 target_piece
, unsignedp
, next_methods
);
920 /* Store carry from main add/subtract. */
921 carry_out
= gen_reg_rtx (word_mode
);
922 carry_out
= emit_store_flag (carry_out
,
923 binoptab
== add_optab
? LTU
: GTU
,
925 word_mode
, 1, normalizep
);
932 /* Add/subtract previous carry to main result. */
933 x
= expand_binop (word_mode
,
934 normalizep
== 1 ? binoptab
: otheroptab
,
936 target_piece
, 1, next_methods
);
939 else if (target_piece
!= x
)
940 emit_move_insn (target_piece
, x
);
944 /* THIS CODE HAS NOT BEEN TESTED. */
945 /* Get out carry from adding/subtracting carry in. */
946 carry_tmp
= emit_store_flag (carry_tmp
,
947 binoptab
== add_optab
950 word_mode
, 1, normalizep
);
952 /* Logical-ior the two poss. carry together. */
953 carry_out
= expand_binop (word_mode
, ior_optab
,
954 carry_out
, carry_tmp
,
955 carry_out
, 0, next_methods
);
961 carry_in
= carry_out
;
964 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
966 rtx temp
= emit_move_insn (target
, target
);
968 REG_NOTES (temp
) = gen_rtx (EXPR_LIST
, REG_EQUAL
,
969 gen_rtx (binoptab
->code
, mode
,
976 delete_insns_since (last
);
979 /* If we want to multiply two two-word values and have normal and widening
980 multiplies of single-word values, we can do this with three smaller
981 multiplications. Note that we do not make a REG_NO_CONFLICT block here
982 because we are not operating on one word at a time.
984 The multiplication proceeds as follows:
985 _______________________
986 [__op0_high_|__op0_low__]
987 _______________________
988 * [__op1_high_|__op1_low__]
989 _______________________________________________
990 _______________________
991 (1) [__op0_low__*__op1_low__]
992 _______________________
993 (2a) [__op0_low__*__op1_high_]
994 _______________________
995 (2b) [__op0_high_*__op1_low__]
996 _______________________
997 (3) [__op0_high_*__op1_high_]
1000 This gives a 4-word result. Since we are only interested in the
1001 lower 2 words, partial result (3) and the upper words of (2a) and
1002 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1003 calculated using non-widening multiplication.
1005 (1), however, needs to be calculated with an unsigned widening
1006 multiplication. If this operation is not directly supported we
1007 try using a signed widening multiplication and adjust the result.
1008 This adjustment works as follows:
1010 If both operands are positive then no adjustment is needed.
1012 If the operands have different signs, for example op0_low < 0 and
1013 op1_low >= 0, the instruction treats the most significant bit of
1014 op0_low as a sign bit instead of a bit with significance
1015 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1016 with 2**BITS_PER_WORD - op0_low, and two's complements the
1017 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1020 Similarly, if both operands are negative, we need to add
1021 (op0_low + op1_low) * 2**BITS_PER_WORD.
1023 We use a trick to adjust quickly. We logically shift op0_low right
1024 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1025 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1026 logical shift exists, we do an arithmetic right shift and subtract
1029 if (binoptab
== smul_optab
1030 && class == MODE_INT
1031 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1032 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1033 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1034 && ((umul_widen_optab
->handlers
[(int) mode
].insn_code
1035 != CODE_FOR_nothing
)
1036 || (smul_widen_optab
->handlers
[(int) mode
].insn_code
1037 != CODE_FOR_nothing
)))
1039 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
1040 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
1041 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
1042 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
1043 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
1044 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
1049 /* If the target is the same as one of the inputs, don't use it. This
1050 prevents problems with the REG_EQUAL note. */
1051 if (target
== op0
|| target
== op1
1052 || (target
!= 0 && GET_CODE (target
) != REG
))
1055 /* Multiply the two lower words to get a double-word product.
1056 If unsigned widening multiplication is available, use that;
1057 otherwise use the signed form and compensate. */
1059 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1061 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
1062 target
, 1, OPTAB_DIRECT
);
1064 /* If we didn't succeed, delete everything we did so far. */
1066 delete_insns_since (last
);
1068 op0_xhigh
= op0_high
, op1_xhigh
= op1_high
;
1072 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1073 != CODE_FOR_nothing
)
1075 rtx wordm1
= GEN_INT (BITS_PER_WORD
- 1);
1076 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
1077 target
, 1, OPTAB_DIRECT
);
1078 op0_xhigh
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
1079 NULL_RTX
, 1, next_methods
);
1081 op0_xhigh
= expand_binop (word_mode
, add_optab
, op0_high
,
1082 op0_xhigh
, op0_xhigh
, 0, next_methods
);
1085 op0_xhigh
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
1086 NULL_RTX
, 0, next_methods
);
1088 op0_xhigh
= expand_binop (word_mode
, sub_optab
, op0_high
,
1089 op0_xhigh
, op0_xhigh
, 0,
1093 op1_xhigh
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
1094 NULL_RTX
, 1, next_methods
);
1096 op1_xhigh
= expand_binop (word_mode
, add_optab
, op1_high
,
1097 op1_xhigh
, op1_xhigh
, 0, next_methods
);
1100 op1_xhigh
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
1101 NULL_RTX
, 0, next_methods
);
1103 op1_xhigh
= expand_binop (word_mode
, sub_optab
, op1_high
,
1104 op1_xhigh
, op1_xhigh
, 0,
1109 /* If we have been able to directly compute the product of the
1110 low-order words of the operands and perform any required adjustments
1111 of the operands, we proceed by trying two more multiplications
1112 and then computing the appropriate sum.
1114 We have checked above that the required addition is provided.
1115 Full-word addition will normally always succeed, especially if
1116 it is provided at all, so we don't worry about its failure. The
1117 multiplication may well fail, however, so we do handle that. */
1119 if (product
&& op0_xhigh
&& op1_xhigh
)
1121 rtx product_high
= operand_subword (product
, high
, 1, mode
);
1122 rtx temp
= expand_binop (word_mode
, binoptab
, op0_low
, op1_xhigh
,
1123 NULL_RTX
, 0, OPTAB_DIRECT
);
1126 temp
= expand_binop (word_mode
, add_optab
, temp
, product_high
,
1127 product_high
, 0, next_methods
);
1129 if (temp
!= 0 && temp
!= product_high
)
1130 emit_move_insn (product_high
, temp
);
1133 temp
= expand_binop (word_mode
, binoptab
, op1_low
, op0_xhigh
,
1134 NULL_RTX
, 0, OPTAB_DIRECT
);
1137 temp
= expand_binop (word_mode
, add_optab
, temp
,
1138 product_high
, product_high
,
1141 if (temp
!= 0 && temp
!= product_high
)
1142 emit_move_insn (product_high
, temp
);
1146 temp
= emit_move_insn (product
, product
);
1147 REG_NOTES (temp
) = gen_rtx (EXPR_LIST
, REG_EQUAL
,
1148 gen_rtx (MULT
, mode
, copy_rtx (op0
),
1156 /* If we get here, we couldn't do it for some reason even though we
1157 originally thought we could. Delete anything we've emitted in
1160 delete_insns_since (last
);
1163 /* We need to open-code the complex type operations: '+, -, * and /' */
1165 /* At this point we allow operations between two similar complex
1166 numbers, and also if one of the operands is not a complex number
1167 but rather of MODE_FLOAT or MODE_INT. However, the caller
1168 must make sure that the MODE of the non-complex operand matches
1169 the SUBMODE of the complex operand. */
1171 if (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
)
1173 rtx real0
= 0, imag0
= 0;
1174 rtx real1
= 0, imag1
= 0;
1175 rtx realr
, imagr
, res
;
1180 /* Find the correct mode for the real and imaginary parts */
1181 enum machine_mode submode
1182 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
1183 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
1186 if (submode
== BLKmode
)
1190 target
= gen_reg_rtx (mode
);
1194 realr
= gen_realpart (submode
, target
);
1195 imagr
= gen_imagpart (submode
, target
);
1197 if (GET_MODE (op0
) == mode
)
1199 real0
= gen_realpart (submode
, op0
);
1200 imag0
= gen_imagpart (submode
, op0
);
1205 if (GET_MODE (op1
) == mode
)
1207 real1
= gen_realpart (submode
, op1
);
1208 imag1
= gen_imagpart (submode
, op1
);
1213 if (real0
== 0 || real1
== 0 || ! (imag0
!= 0|| imag1
!= 0))
1216 switch (binoptab
->code
)
1219 /* (a+ib) + (c+id) = (a+c) + i(b+d) */
1221 /* (a+ib) - (c+id) = (a-c) + i(b-d) */
1222 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1223 realr
, unsignedp
, methods
);
1227 else if (res
!= realr
)
1228 emit_move_insn (realr
, res
);
1231 res
= expand_binop (submode
, binoptab
, imag0
, imag1
,
1232 imagr
, unsignedp
, methods
);
1235 else if (binoptab
->code
== MINUS
)
1236 res
= expand_unop (submode
, neg_optab
, imag1
, imagr
, unsignedp
);
1242 else if (res
!= imagr
)
1243 emit_move_insn (imagr
, res
);
1249 /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */
1255 /* Don't fetch these from memory more than once. */
1256 real0
= force_reg (submode
, real0
);
1257 real1
= force_reg (submode
, real1
);
1258 imag0
= force_reg (submode
, imag0
);
1259 imag1
= force_reg (submode
, imag1
);
1261 temp1
= expand_binop (submode
, binoptab
, real0
, real1
, NULL_RTX
,
1262 unsignedp
, methods
);
1264 temp2
= expand_binop (submode
, binoptab
, imag0
, imag1
, NULL_RTX
,
1265 unsignedp
, methods
);
1267 if (temp1
== 0 || temp2
== 0)
1270 res
= expand_binop (submode
, sub_optab
, temp1
, temp2
,
1271 realr
, unsignedp
, methods
);
1275 else if (res
!= realr
)
1276 emit_move_insn (realr
, res
);
1278 temp1
= expand_binop (submode
, binoptab
, real0
, imag1
,
1279 NULL_RTX
, unsignedp
, methods
);
1281 temp2
= expand_binop (submode
, binoptab
, real1
, imag0
,
1282 NULL_RTX
, unsignedp
, methods
);
1284 if (temp1
== 0 || temp2
== 0)
1287 res
= expand_binop (submode
, add_optab
, temp1
, temp2
,
1288 imagr
, unsignedp
, methods
);
1292 else if (res
!= imagr
)
1293 emit_move_insn (imagr
, res
);
1299 /* Don't fetch these from memory more than once. */
1300 real0
= force_reg (submode
, real0
);
1301 real1
= force_reg (submode
, real1
);
1303 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1304 realr
, unsignedp
, methods
);
1307 else if (res
!= realr
)
1308 emit_move_insn (realr
, res
);
1311 res
= expand_binop (submode
, binoptab
,
1312 real1
, imag0
, imagr
, unsignedp
, methods
);
1314 res
= expand_binop (submode
, binoptab
,
1315 real0
, imag1
, imagr
, unsignedp
, methods
);
1319 else if (res
!= imagr
)
1320 emit_move_insn (imagr
, res
);
1327 /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */
1331 /* (a+ib) / (c+i0) = (a/c) + i(b/c) */
1333 /* Don't fetch these from memory more than once. */
1334 real1
= force_reg (submode
, real1
);
1336 /* Simply divide the real and imaginary parts by `c' */
1337 if (class == MODE_COMPLEX_FLOAT
)
1338 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1339 realr
, unsignedp
, methods
);
1341 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1342 real0
, real1
, realr
, unsignedp
);
1346 else if (res
!= realr
)
1347 emit_move_insn (realr
, res
);
1349 if (class == MODE_COMPLEX_FLOAT
)
1350 res
= expand_binop (submode
, binoptab
, imag0
, real1
,
1351 imagr
, unsignedp
, methods
);
1353 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1354 imag0
, real1
, imagr
, unsignedp
);
1358 else if (res
!= imagr
)
1359 emit_move_insn (imagr
, res
);
1365 /* Divisor is of complex type:
1372 /* Don't fetch these from memory more than once. */
1373 real0
= force_reg (submode
, real0
);
1374 real1
= force_reg (submode
, real1
);
1377 imag0
= force_reg (submode
, imag0
);
1379 imag1
= force_reg (submode
, imag1
);
1381 /* Divisor: c*c + d*d */
1382 temp1
= expand_binop (submode
, smul_optab
, real1
, real1
,
1383 NULL_RTX
, unsignedp
, methods
);
1385 temp2
= expand_binop (submode
, smul_optab
, imag1
, imag1
,
1386 NULL_RTX
, unsignedp
, methods
);
1388 if (temp1
== 0 || temp2
== 0)
1391 divisor
= expand_binop (submode
, add_optab
, temp1
, temp2
,
1392 NULL_RTX
, unsignedp
, methods
);
1398 /* ((a)(c-id))/divisor */
1399 /* (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)) */
1401 /* Calculate the dividend */
1402 real_t
= expand_binop (submode
, smul_optab
, real0
, real1
,
1403 NULL_RTX
, unsignedp
, methods
);
1405 imag_t
= expand_binop (submode
, smul_optab
, real0
, imag1
,
1406 NULL_RTX
, unsignedp
, methods
);
1408 if (real_t
== 0 || imag_t
== 0)
1411 imag_t
= expand_unop (submode
, neg_optab
, imag_t
,
1412 NULL_RTX
, unsignedp
);
1416 /* ((a+ib)(c-id))/divider */
1417 /* Calculate the dividend */
1418 temp1
= expand_binop (submode
, smul_optab
, real0
, real1
,
1419 NULL_RTX
, unsignedp
, methods
);
1421 temp2
= expand_binop (submode
, smul_optab
, imag0
, imag1
,
1422 NULL_RTX
, unsignedp
, methods
);
1424 if (temp1
== 0 || temp2
== 0)
1427 real_t
= expand_binop (submode
, add_optab
, temp1
, temp2
,
1428 NULL_RTX
, unsignedp
, methods
);
1430 temp1
= expand_binop (submode
, smul_optab
, imag0
, real1
,
1431 NULL_RTX
, unsignedp
, methods
);
1433 temp2
= expand_binop (submode
, smul_optab
, real0
, imag1
,
1434 NULL_RTX
, unsignedp
, methods
);
1436 if (temp1
== 0 || temp2
== 0)
1439 imag_t
= expand_binop (submode
, sub_optab
, temp1
, temp2
,
1440 NULL_RTX
, unsignedp
, methods
);
1442 if (real_t
== 0 || imag_t
== 0)
1446 if (class == MODE_COMPLEX_FLOAT
)
1447 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
1448 realr
, unsignedp
, methods
);
1450 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1451 real_t
, divisor
, realr
, unsignedp
);
1455 else if (res
!= realr
)
1456 emit_move_insn (realr
, res
);
1458 if (class == MODE_COMPLEX_FLOAT
)
1459 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
1460 imagr
, unsignedp
, methods
);
1462 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1463 imag_t
, divisor
, imagr
, unsignedp
);
1467 else if (res
!= imagr
)
1468 emit_move_insn (imagr
, res
);
1483 if (binoptab
->code
!= UNKNOWN
)
1485 = gen_rtx (binoptab
->code
, mode
, copy_rtx (op0
), copy_rtx (op1
));
1489 emit_no_conflict_block (seq
, target
, op0
, op1
, equiv_value
);
1495 /* It can't be open-coded in this mode.
1496 Use a library call if one is available and caller says that's ok. */
1498 if (binoptab
->handlers
[(int) mode
].libfunc
1499 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1502 rtx funexp
= binoptab
->handlers
[(int) mode
].libfunc
;
1504 enum machine_mode op1_mode
= mode
;
1511 op1_mode
= word_mode
;
1512 /* Specify unsigned here,
1513 since negative shift counts are meaningless. */
1514 op1x
= convert_to_mode (word_mode
, op1
, 1);
1517 if (GET_MODE (op0
) != VOIDmode
1518 && GET_MODE (op0
) != mode
)
1519 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1521 /* Pass 1 for NO_QUEUE so we don't lose any increments
1522 if the libcall is cse'd or moved. */
1523 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1524 NULL_RTX
, 1, mode
, 2,
1525 op0
, mode
, op1x
, op1_mode
);
1527 insns
= get_insns ();
1530 target
= gen_reg_rtx (mode
);
1531 emit_libcall_block (insns
, target
, value
,
1532 gen_rtx (binoptab
->code
, mode
, op0
, op1
));
1537 delete_insns_since (last
);
1539 /* It can't be done in this mode. Can we do it in a wider mode? */
1541 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1542 || methods
== OPTAB_MUST_WIDEN
))
1544 /* Caller says, don't even try. */
1545 delete_insns_since (entry_last
);
1549 /* Compute the value of METHODS to pass to recursive calls.
1550 Don't allow widening to be tried recursively. */
1552 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1554 /* Look for a wider mode of the same class for which it appears we can do
1557 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1559 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1560 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1562 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1563 != CODE_FOR_nothing
)
1564 || (methods
== OPTAB_LIB
1565 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1567 rtx xop0
= op0
, xop1
= op1
;
1570 /* For certain integer operations, we need not actually extend
1571 the narrow operands, as long as we will truncate
1572 the results to the same narrowness. */
1574 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1575 || binoptab
== xor_optab
1576 || binoptab
== add_optab
|| binoptab
== sub_optab
1577 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1578 && class == MODE_INT
)
1581 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1582 unsignedp
, no_extend
);
1584 /* The second operand of a shift must always be extended. */
1585 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1586 no_extend
&& binoptab
!= ashl_optab
);
1588 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1589 unsignedp
, methods
);
1592 if (class != MODE_INT
)
1595 target
= gen_reg_rtx (mode
);
1596 convert_move (target
, temp
, 0);
1600 return gen_lowpart (mode
, temp
);
1603 delete_insns_since (last
);
1608 delete_insns_since (entry_last
);
1612 /* Expand a binary operator which has both signed and unsigned forms.
1613 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1616 If we widen unsigned operands, we may use a signed wider operation instead
1617 of an unsigned wider operation, since the result would be the same. */
/* NOTE(review): the text below is a line-mangled extraction -- each original
   source line is split across several physical lines (the leading numbers are
   the original file's line numbers) and some lines, including braces, are
   missing.  Comments added here annotate the visible fragments only.  */
/* sign_expand_binop: per the header comment above, expand a binary operator
   that has both an unsigned form (UOPTAB) and a signed form (SOPTAB).
   Strategy visible below: try the direct optab first, then widening
   (signed, then unsigned), then library calls.  */
1620 sign_expand_binop (mode
, uoptab
, soptab
, op0
, op1
, target
, unsignedp
, methods
)
1621 enum machine_mode mode
;
1622 optab uoptab
, soptab
;
1623 rtx op0
, op1
, target
;
1625 enum optab_methods methods
;
/* Pick the optab matching the signedness of this operation.  */
1628 optab direct_optab
= unsignedp
? uoptab
: soptab
;
1629 struct optab wide_soptab
;
1631 /* Do it without widening, if possible. */
1632 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
1633 unsignedp
, OPTAB_DIRECT
);
1634 if (temp
|| methods
== OPTAB_DIRECT
)
1637 /* Try widening to a signed int. Make a fake signed optab that
1638 hides any signed insn for direct use. */
1639 wide_soptab
= *soptab
;
1640 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
1641 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
1643 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1644 unsignedp
, OPTAB_WIDEN
);
1646 /* For unsigned operands, try widening to an unsigned int. */
1647 if (temp
== 0 && unsignedp
)
1648 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
1649 unsignedp
, OPTAB_WIDEN
);
1650 if (temp
|| methods
== OPTAB_WIDEN
)
1653 /* Use the right width lib call if that exists. */
1654 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
1655 if (temp
|| methods
== OPTAB_LIB
)
1658 /* Must widen and use a lib call, use either signed or unsigned. */
1659 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1660 unsignedp
, methods
);
/* Final fallback: the unsigned optab with full METHODS.  (Intervening
   lines were dropped by the extraction -- presumably a success check;
   TODO confirm against the original file.)  */
1664 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
1665 unsignedp
, methods
);
1669 /* Generate code to perform an operation specified by BINOPTAB
1670 on operands OP0 and OP1, with two results to TARG1 and TARG2.
1671 We assume that the order of the operands for the instruction
1672 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1673 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1675 Either TARG0 or TARG1 may be zero, but what that means is that
1676 that result is not actually wanted. We will generate it into
1677 a dummy pseudo-reg and discard it. They may not both be zero.
1679 Returns 1 if this operation can be performed; 0 if not. */
/* NOTE(review): line-mangled extraction -- each original source line is split
   across several physical lines (leading numbers are the original line
   numbers) and some lines, including braces, are missing.  Comments below
   annotate the visible fragments only.  */
/* expand_twoval_binop: per the header comment above, perform the operation
   BINOPTAB on OP0/OP1 producing TWO results (TARG0, TARG1), as for a
   divmod-style insn.  Returns 1 on success, 0 on failure.  */
1682 expand_twoval_binop (binoptab
, op0
, op1
, targ0
, targ1
, unsignedp
)
/* The common mode is taken from whichever target the caller supplied.  */
1688 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1689 enum mode_class
class;
1690 enum machine_mode wider_mode
;
1691 rtx entry_last
= get_last_insn ();
1694 class = GET_MODE_CLASS (mode
);
1696 op0
= protect_from_queue (op0
, 0);
1697 op1
= protect_from_queue (op1
, 0);
1701 op0
= force_not_mem (op0
);
1702 op1
= force_not_mem (op1
);
1705 /* If we are inside an appropriately-short loop and one operand is an
1706 expensive constant, force it into a register. */
1707 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
1708 && rtx_cost (op0
, binoptab
->code
) > 2)
1709 op0
= force_reg (mode
, op0
);
1711 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
1712 && rtx_cost (op1
, binoptab
->code
) > 2)
1713 op1
= force_reg (mode
, op1
);
/* Missing targets (zero per the header comment) get dummy pseudos.  */
1716 targ0
= protect_from_queue (targ0
, 1);
1718 targ0
= gen_reg_rtx (mode
);
1720 targ1
= protect_from_queue (targ1
, 1);
1722 targ1
= gen_reg_rtx (mode
);
1724 /* Record where to go back to if we fail. */
1725 last
= get_last_insn ();
/* Try the insn pattern registered for this optab in this mode, if any.  */
1727 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1729 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
1730 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
1731 enum machine_mode mode1
= insn_operand_mode
[icode
][2];
1733 rtx xop0
= op0
, xop1
= op1
;
1735 /* In case this insn wants input operands in modes different from the
1736 result, convert the operands. */
1737 if (GET_MODE (op0
) != VOIDmode
&& GET_MODE (op0
) != mode0
)
1738 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
1740 if (GET_MODE (op1
) != VOIDmode
&& GET_MODE (op1
) != mode1
)
1741 xop1
= convert_to_mode (mode1
, xop1
, unsignedp
);
1743 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1744 if (! (*insn_operand_predicate
[icode
][1]) (xop0
, mode0
))
1745 xop0
= copy_to_mode_reg (mode0
, xop0
);
1747 if (! (*insn_operand_predicate
[icode
][2]) (xop1
, mode1
))
1748 xop1
= copy_to_mode_reg (mode1
, xop1
);
1750 /* We could handle this, but we should always be called with a pseudo
1751 for our targets and all insns should take them as outputs. */
1752 if (! (*insn_operand_predicate
[icode
][0]) (targ0
, mode
)
1753 || ! (*insn_operand_predicate
[icode
][3]) (targ1
, mode
))
/* Operand order TARG0, OP0, OP1, TARG1 matches the pattern shape
   documented in the header comment above.  */
1756 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
1763 delete_insns_since (last
);
1766 /* It can't be done in this mode. Can we do it in a wider mode? */
1768 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1770 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1771 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1773 if (binoptab
->handlers
[(int) wider_mode
].insn_code
1774 != CODE_FOR_nothing
)
1776 register rtx t0
= gen_reg_rtx (wider_mode
);
1777 register rtx t1
= gen_reg_rtx (wider_mode
);
/* Recurse in the wider mode, then truncate both results back.  */
1779 if (expand_twoval_binop (binoptab
,
1780 convert_modes (wider_mode
, mode
, op0
,
1782 convert_modes (wider_mode
, mode
, op1
,
1786 convert_move (targ0
, t0
, unsignedp
);
1787 convert_move (targ1
, t1
, unsignedp
);
1791 delete_insns_since (last
);
/* All attempts failed: discard everything emitted and report failure.  */
1796 delete_insns_since (entry_last
);
1800 /* Generate code to perform an operation specified by UNOPTAB
1801 on operand OP0, with result having machine-mode MODE.
1803 UNSIGNEDP is for the case where we have to widen the operands
1804 to perform the operation. It says to use zero-extension.
1806 If TARGET is nonzero, the value
1807 is generated there, if it is convenient to do so.
1808 In all cases an rtx is returned for the locus of the value;
1809 this may or may not be TARGET. */
/* NOTE(review): line-mangled extraction -- each original source line is split
   across several physical lines (leading numbers are the original line
   numbers) and some lines, including braces, are missing.  Comments below
   annotate the visible fragments only.  */
/* expand_unop: per the header comment above, perform the unary operation
   UNOPTAB on OP0 in MODE, returning an rtx for the result (which may or may
   not be TARGET).  UNSIGNEDP selects zero-extension when widening.  */
1812 expand_unop (mode
, unoptab
, op0
, target
, unsignedp
)
1813 enum machine_mode mode
;
1819 enum mode_class
class;
1820 enum machine_mode wider_mode
;
1822 rtx last
= get_last_insn ();
1825 class = GET_MODE_CLASS (mode
);
1827 op0
= protect_from_queue (op0
, 0);
1831 op0
= force_not_mem (op0
);
1835 target
= protect_from_queue (target
, 1);
/* First choice: a direct insn pattern for this mode.  */
1837 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1839 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
1840 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
1846 temp
= gen_reg_rtx (mode
);
1848 if (GET_MODE (xop0
) != VOIDmode
1849 && GET_MODE (xop0
) != mode0
)
1850 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
1852 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
1854 if (! (*insn_operand_predicate
[icode
][1]) (xop0
, mode0
))
1855 xop0
= copy_to_mode_reg (mode0
, xop0
);
1857 if (! (*insn_operand_predicate
[icode
][0]) (temp
, mode
))
1858 temp
= gen_reg_rtx (mode
);
1860 pat
= GEN_FCN (icode
) (temp
, xop0
);
/* If the pattern expanded to a multi-insn sequence and we cannot attach
   an equivalence note, retry without a suggested target.  */
1863 if (GET_CODE (pat
) == SEQUENCE
1864 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
1866 delete_insns_since (last
);
1867 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
1875 delete_insns_since (last
);
1878 /* It can't be done in this mode. Can we open-code it in a wider mode? */
1880 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1881 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1882 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1884 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
1888 /* For certain operations, we need not actually extend
1889 the narrow operand, as long as we will truncate the
1890 results to the same narrowness. */
1892 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
1893 (unoptab
== neg_optab
1894 || unoptab
== one_cmpl_optab
)
1895 && class == MODE_INT
);
1897 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
/* Narrow the wider-mode result back to MODE.  */
1902 if (class != MODE_INT
)
1905 target
= gen_reg_rtx (mode
);
1906 convert_move (target
, temp
, 0);
1910 return gen_lowpart (mode
, temp
);
1913 delete_insns_since (last
);
1917 /* These can be done a word at a time. */
/* Multiword one's-complement: complement each word independently.  */
1918 if (unoptab
== one_cmpl_optab
1919 && class == MODE_INT
1920 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1921 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1926 if (target
== 0 || target
== op0
)
1927 target
= gen_reg_rtx (mode
);
1931 /* Do the actual arithmetic. */
1932 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
1934 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
1935 rtx x
= expand_unop (word_mode
, unoptab
,
1936 operand_subword_force (op0
, i
, mode
),
1937 target_piece
, unsignedp
);
1938 if (target_piece
!= x
)
1939 emit_move_insn (target_piece
, x
);
1942 insns
= get_insns ();
/* Wrap the per-word insns in a REG_NO_CONFLICT block (see
   emit_no_conflict_block below).  */
1945 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
1946 gen_rtx (unoptab
->code
, mode
, copy_rtx (op0
)));
1950 /* Open-code the complex negation operation. */
1951 else if (unoptab
== neg_optab
1952 && (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
))
1958 /* Find the correct mode for the real and imaginary parts */
1959 enum machine_mode submode
1960 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
1961 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
1964 if (submode
== BLKmode
)
1968 target
= gen_reg_rtx (mode
);
/* Negate imaginary part, then real part, each into its half of TARGET.  */
1972 target_piece
= gen_imagpart (submode
, target
);
1973 x
= expand_unop (submode
, unoptab
,
1974 gen_imagpart (submode
, op0
),
1975 target_piece
, unsignedp
);
1976 if (target_piece
!= x
)
1977 emit_move_insn (target_piece
, x
);
1979 target_piece
= gen_realpart (submode
, target
);
1980 x
= expand_unop (submode
, unoptab
,
1981 gen_realpart (submode
, op0
),
1982 target_piece
, unsignedp
);
1983 if (target_piece
!= x
)
1984 emit_move_insn (target_piece
, x
);
1989 emit_no_conflict_block (seq
, target
, op0
, 0,
1990 gen_rtx (unoptab
->code
, mode
, copy_rtx (op0
)));
1994 /* Now try a library call in this mode. */
1995 if (unoptab
->handlers
[(int) mode
].libfunc
)
1998 rtx funexp
= unoptab
->handlers
[(int) mode
].libfunc
;
2003 /* Pass 1 for NO_QUEUE so we don't lose any increments
2004 if the libcall is cse'd or moved. */
2005 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2006 NULL_RTX
, 1, mode
, 1, op0
, mode
);
2007 insns
= get_insns ();
2010 target
= gen_reg_rtx (mode
);
2011 emit_libcall_block (insns
, target
, value
,
2012 gen_rtx (unoptab
->code
, mode
, op0
));
2017 /* It can't be done in this mode. Can we do it in a wider mode? */
/* Same widening loop as above, but now a wider-mode libfunc also counts.  */
2019 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2021 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2022 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2024 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2025 != CODE_FOR_nothing
)
2026 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2030 /* For certain operations, we need not actually extend
2031 the narrow operand, as long as we will truncate the
2032 results to the same narrowness. */
2034 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2035 (unoptab
== neg_optab
2036 || unoptab
== one_cmpl_optab
)
2037 && class == MODE_INT
);
2039 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2044 if (class != MODE_INT
)
2047 target
= gen_reg_rtx (mode
);
2048 convert_move (target
, temp
, 0);
2052 return gen_lowpart (mode
, temp
);
2055 delete_insns_since (last
);
2060 /* If there is no negate operation, try doing a subtract from zero.
2061 The US Software GOFAST library needs this. */
2062 if (unoptab
== neg_optab
)
2065 temp
= expand_binop (mode
, sub_optab
, CONST0_RTX (mode
), op0
,
2066 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2074 /* Emit code to compute the absolute value of OP0, with result to
2075 TARGET if convenient. (TARGET may be 0.) The return value says
2076 where the result actually is to be found.
2078 MODE is the mode of the operand; the mode of the result is
2079 different but can be deduced from MODE.
2081 UNSIGNEDP is relevant if extension is needed. */
/* NOTE(review): line-mangled extraction -- each original source line is split
   across several physical lines (leading numbers are the original line
   numbers) and some lines, including braces, are missing.  Comments below
   annotate the visible fragments only.  */
/* expand_abs: per the header comment above, compute |OP0| in MODE, into
   TARGET if convenient; return where the result actually is.  SAFE
   apparently gates whether TARGET may be reused across the conditional
   branch below -- TODO confirm against the original file.  */
2084 expand_abs (mode
, op0
, target
, unsignedp
, safe
)
2085 enum machine_mode mode
;
2093 /* First try to do it with a special abs instruction. */
2094 temp
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
2098 /* If this machine has expensive jumps, we can do integer absolute
2099 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2100 where W is the width of MODE. */
2102 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
/* EXTENDED = x >> (W-1): all-ones if negative, zero otherwise.  */
2104 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2105 size_int (GET_MODE_BITSIZE (mode
) - 1),
2108 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2111 temp
= expand_binop (mode
, sub_optab
, temp
, extended
, target
, 0,
2118 /* If that does not win, use conditional jump and negate. */
2119 op1
= gen_label_rtx ();
/* TARGET must be a safe, writable pseudo to hold the value across the
   branch; otherwise get a fresh register.  */
2120 if (target
== 0 || ! safe
2121 || GET_MODE (target
) != mode
2122 || (GET_CODE (target
) == MEM
&& MEM_VOLATILE_P (target
))
2123 || (GET_CODE (target
) == REG
2124 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2125 target
= gen_reg_rtx (mode
);
2127 emit_move_insn (target
, op0
);
2130 /* If this mode is an integer too wide to compare properly,
2131 compare word by word. Rely on CSE to optimize constant cases. */
2132 if (GET_MODE_CLASS (mode
) == MODE_INT
&& ! can_compare_p (mode
))
2133 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2137 temp
= compare_from_rtx (target
, CONST0_RTX (mode
), GE
, 0, mode
,
/* compare_from_rtx may fold to a constant: const1_rtx means always
   non-negative (skip the negate), const0_rtx means always negative.  */
2139 if (temp
== const1_rtx
)
2141 else if (temp
!= const0_rtx
)
2143 if (bcc_gen_fctn
[(int) GET_CODE (temp
)] != 0)
2144 emit_jump_insn ((*bcc_gen_fctn
[(int) GET_CODE (temp
)]) (op1
));
/* Fell through the branch: value was negative, so negate it in place.  */
2150 op0
= expand_unop (mode
, neg_optab
, target
, target
, 0);
2152 emit_move_insn (target
, op0
);
2158 /* Emit code to compute the absolute value of OP0, with result to
2159 TARGET if convenient. (TARGET may be 0.) The return value says
2160 where the result actually is to be found.
2162 MODE is the mode of the operand; the mode of the result is
2163 different but can be deduced from MODE.
2165 UNSIGNEDP is relevant for complex integer modes. */
/* NOTE(review): line-mangled extraction -- each original source line is split
   across several physical lines (leading numbers are the original line
   numbers) and some lines, including braces, are missing.  Comments below
   annotate the visible fragments only.  */
/* expand_complex_abs: per the header comment above, compute the absolute
   value (magnitude) of complex OP0; the result mode is the component
   submode of MODE.  UNSIGNEDP matters for complex integer modes.  */
2168 expand_complex_abs (mode
, op0
, target
, unsignedp
)
2169 enum machine_mode mode
;
2174 enum mode_class
class = GET_MODE_CLASS (mode
);
2175 enum machine_mode wider_mode
;
2177 rtx entry_last
= get_last_insn ();
2181 /* Find the correct mode for the real and imaginary parts. */
2182 enum machine_mode submode
2183 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
2184 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
2187 if (submode
== BLKmode
)
2190 op0
= protect_from_queue (op0
, 0);
2194 op0
= force_not_mem (op0
);
2197 last
= get_last_insn ();
2200 target
= protect_from_queue (target
, 1);
/* First choice: a direct abs insn pattern for this complex mode.  */
2202 if (abs_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2204 int icode
= (int) abs_optab
->handlers
[(int) mode
].insn_code
;
2205 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
/* Note the output register is in SUBMODE, not MODE.  */
2211 temp
= gen_reg_rtx (submode
);
2213 if (GET_MODE (xop0
) != VOIDmode
2214 && GET_MODE (xop0
) != mode0
)
2215 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2217 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2219 if (! (*insn_operand_predicate
[icode
][1]) (xop0
, mode0
))
2220 xop0
= copy_to_mode_reg (mode0
, xop0
);
2222 if (! (*insn_operand_predicate
[icode
][0]) (temp
, submode
))
2223 temp
= gen_reg_rtx (submode
);
2225 pat
= GEN_FCN (icode
) (temp
, xop0
);
2228 if (GET_CODE (pat
) == SEQUENCE
2229 && ! add_equal_note (pat
, temp
, abs_optab
->code
, xop0
, NULL_RTX
))
2231 delete_insns_since (last
);
2232 return expand_unop (mode
, abs_optab
, op0
, NULL_RTX
, unsignedp
);
2240 delete_insns_since (last
);
2243 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2245 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2246 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2248 if (abs_optab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2252 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
2253 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
2257 if (class != MODE_COMPLEX_INT
)
2260 target
= gen_reg_rtx (submode
);
2261 convert_move (target
, temp
, 0);
2265 return gen_lowpart (submode
, temp
);
2268 delete_insns_since (last
);
2272 /* Open-code the complex absolute-value operation
2273 if we can open-code sqrt. Otherwise it's not worth while. */
/* Open-coded form: sqrt(re*re + im*im).  */
2274 if (sqrt_optab
->handlers
[(int) submode
].insn_code
!= CODE_FOR_nothing
)
2276 rtx real
, imag
, total
;
2278 real
= gen_realpart (submode
, op0
);
2279 imag
= gen_imagpart (submode
, op0
);
2281 /* Square both parts. */
2282 real
= expand_mult (submode
, real
, real
, NULL_RTX
, 0);
2283 imag
= expand_mult (submode
, imag
, imag
, NULL_RTX
, 0);
2285 /* Sum the parts. */
2286 total
= expand_binop (submode
, add_optab
, real
, imag
, NULL_RTX
,
2287 0, OPTAB_LIB_WIDEN
);
2289 /* Get sqrt in TARGET. Set TARGET to where the result is. */
2290 target
= expand_unop (submode
, sqrt_optab
, total
, target
, 0);
2292 delete_insns_since (last
);
2297 /* Now try a library call in this mode. */
2298 if (abs_optab
->handlers
[(int) mode
].libfunc
)
2301 rtx funexp
= abs_optab
->handlers
[(int) mode
].libfunc
;
2306 /* Pass 1 for NO_QUEUE so we don't lose any increments
2307 if the libcall is cse'd or moved. */
2308 value
= emit_library_call_value (abs_optab
->handlers
[(int) mode
].libfunc
,
2309 NULL_RTX
, 1, submode
, 1, op0
, mode
);
2310 insns
= get_insns ();
2313 target
= gen_reg_rtx (submode
);
2314 emit_libcall_block (insns
, target
, value
,
2315 gen_rtx (abs_optab
->code
, mode
, op0
));
2320 /* It can't be done in this mode. Can we do it in a wider mode? */
/* Same widening loop as above, but a wider-mode libfunc also counts.  */
2322 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2323 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2325 if ((abs_optab
->handlers
[(int) wider_mode
].insn_code
2326 != CODE_FOR_nothing
)
2327 || abs_optab
->handlers
[(int) wider_mode
].libfunc
)
2331 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
2333 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
2337 if (class != MODE_COMPLEX_INT
)
2340 target
= gen_reg_rtx (submode
);
2341 convert_move (target
, temp
, 0);
2345 return gen_lowpart (submode
, temp
);
2348 delete_insns_since (last
);
/* All attempts failed: discard everything emitted since entry.  */
2352 delete_insns_since (entry_last
);
2356 /* Generate an instruction whose insn-code is INSN_CODE,
2357 with two operands: an output TARGET and an input OP0.
2358 TARGET *must* be nonzero, and the output is always stored there.
2359 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2360 the value that is stored into TARGET. */
/* NOTE(review): line-mangled extraction -- each original source line is split
   across several physical lines (leading numbers are the original line
   numbers) and some lines, including braces, are missing.  Comments below
   annotate the visible fragments only.  */
/* emit_unop_insn: per the header comment above, emit the insn with code
   ICODE computing (CODE OP0) into TARGET.  TARGET must be nonzero; the
   result is always stored there.  */
2363 emit_unop_insn (icode
, target
, op0
, code
)
2370 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
/* TEMP starts as TARGET but may be replaced by a fresh pseudo below,
   with a final move back into TARGET.  */
2373 temp
= target
= protect_from_queue (target
, 1);
2375 op0
= protect_from_queue (op0
, 0);
2378 op0
= force_not_mem (op0
);
2380 /* Now, if insn does not accept our operands, put them into pseudos. */
2382 if (! (*insn_operand_predicate
[icode
][1]) (op0
, mode0
))
2383 op0
= copy_to_mode_reg (mode0
, op0
);
2385 if (! (*insn_operand_predicate
[icode
][0]) (temp
, GET_MODE (temp
))
2386 || (flag_force_mem
&& GET_CODE (temp
) == MEM
))
2387 temp
= gen_reg_rtx (GET_MODE (temp
));
2389 pat
= GEN_FCN (icode
) (temp
, op0
);
/* Attach a REG_EQUAL-style note when the pattern expanded to a sequence
   and the caller supplied a known rtx code.  */
2391 if (GET_CODE (pat
) == SEQUENCE
&& code
!= UNKNOWN
)
2392 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
/* If a scratch pseudo was used, copy the result into TARGET.  */
2397 emit_move_insn (target
, temp
);
2400 /* Emit code to perform a series of operations on a multi-word quantity, one
2403 Such a block is preceded by a CLOBBER of the output, consists of multiple
2404 insns, each setting one word of the output, and followed by a SET copying
2405 the output to itself.
2407 Each of the insns setting words of the output receives a REG_NO_CONFLICT
2408 note indicating that it doesn't conflict with the (also multi-word)
2409 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
2412 INSNS is a block of code generated to perform the operation, not including
2413 the CLOBBER and final copy. All insns that compute intermediate values
2414 are first emitted, followed by the block as described above.
2416 TARGET, OP0, and OP1 are the output and inputs of the operations,
2417 respectively. OP1 may be zero for a unary operation.
2419 EQUIV, if non-zero, is an expression to be placed into a REG_EQUAL note
2422 If TARGET is not a register, INSNS is simply emitted with no special
2423 processing. Likewise if anything in INSNS is not an INSN or if
2424 there is a libcall block inside INSNS.
2426 The final insn emitted is returned. */
/* NOTE(review): line-mangled extraction -- each original source line is split
   across several physical lines (leading numbers are the original line
   numbers) and some lines, including braces, are missing.  Comments below
   annotate the visible fragments only.  */
/* emit_no_conflict_block: per the header comment above, emit INSNS (a
   multi-word operation writing TARGET from OP0/OP1) wrapped so the block
   is handled as a unit: CLOBBER of the output, per-word sets carrying
   REG_NO_CONFLICT notes, and a final self-copy; the whole block is
   bracketed by REG_LIBCALL/REG_RETVAL notes.  */
2429 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv
)
2435 rtx prev
, next
, first
, last
, insn
;
/* Bail out to a plain emit when the special handling cannot apply.  */
2437 if (GET_CODE (target
) != REG
|| reload_in_progress
)
2438 return emit_insns (insns
);
2440 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2441 if (GET_CODE (insn
) != INSN
2442 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
2443 return emit_insns (insns
);
2445 /* First emit all insns that do not store into words of the output and remove
2446 these from the list. */
2447 for (insn
= insns
; insn
; insn
= next
)
2452 next
= NEXT_INSN (insn
);
2454 if (GET_CODE (PATTERN (insn
)) == SET
)
2455 set
= PATTERN (insn
);
2456 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
2458 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
2459 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
2461 set
= XVECEXP (PATTERN (insn
), 0, i
);
/* An insn that does not touch TARGET is unlinked from the chain
   (so it can be emitted ahead of the block).  */
2469 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
2471 if (PREV_INSN (insn
))
2472 NEXT_INSN (PREV_INSN (insn
)) = next
;
2477 PREV_INSN (next
) = PREV_INSN (insn
);
2483 prev
= get_last_insn ();
2485 /* Now write the CLOBBER of the output, followed by the setting of each
2486 of the words, followed by the final copy. */
2487 if (target
!= op0
&& target
!= op1
)
2488 emit_insn (gen_rtx (CLOBBER
, VOIDmode
, target
));
2490 for (insn
= insns
; insn
; insn
= next
)
2492 next
= NEXT_INSN (insn
);
/* Each per-word set gets a REG_NO_CONFLICT note for each register
   input, as described in the header comment.  */
2495 if (op1
&& GET_CODE (op1
) == REG
)
2496 REG_NOTES (insn
) = gen_rtx (EXPR_LIST
, REG_NO_CONFLICT
, op1
,
2499 if (op0
&& GET_CODE (op0
) == REG
)
2500 REG_NOTES (insn
) = gen_rtx (EXPR_LIST
, REG_NO_CONFLICT
, op0
,
2504 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
2505 != CODE_FOR_nothing
)
/* Final self-copy of TARGET; carries the REG_EQUAL note for EQUIV.  */
2507 last
= emit_move_insn (target
, target
);
2510 = gen_rtx (EXPR_LIST
, REG_EQUAL
, equiv
, REG_NOTES (last
))
;
2513 last
= get_last_insn ();
2516 first
= get_insns ();
2518 first
= NEXT_INSN (prev
);
2520 /* Encapsulate the block so it gets manipulated as a unit. */
2521 REG_NOTES (first
) = gen_rtx (INSN_LIST
, REG_LIBCALL
, last
,
2523 REG_NOTES (last
) = gen_rtx (INSN_LIST
, REG_RETVAL
, first
, REG_NOTES (last
));
2528 /* Emit code to make a call to a constant function or a library call.
2530 INSNS is a list containing all insns emitted in the call.
2531 These insns leave the result in RESULT. Our block is to copy RESULT
2532 to TARGET, which is logically equivalent to EQUIV.
2534 We first emit any insns that set a pseudo on the assumption that these are
2535 loading constants into registers; doing so allows them to be safely cse'ed
2536 between blocks. Then we emit all the other insns in the block, followed by
2537 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
2538 note with an operand of EQUIV.
2540 Moving assignments to pseudos outside of the block is done to improve
2541 the generated code, but is not required to generate correct code,
2542 hence being unable to move an assignment is not grounds for not making
2543 a libcall block. There are two reasons why it is safe to leave these
2544 insns inside the block: First, we know that these pseudos cannot be
2545 used in generated RTL outside the block since they are created for
2546 temporary purposes within the block. Second, CSE will not record the
2547 values of anything set inside a libcall block, so we know they must
2548 be dead at the end of the block.
2550 Except for the first group of insns (the ones setting pseudos), the
2551 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
2554 emit_libcall_block (insns
, target
, result
, equiv
)
2560 rtx prev
, next
, first
, last
, insn
;
2562 /* First emit all insns that set pseudos. Remove them from the list as
2563 we go. Avoid insns that set pseudos which were referenced in previous
2564 insns. These can be generated by move_by_pieces, for example,
2565 to update an address. Similarly, avoid insns that reference things
2566 set in previous insns. */
2568 for (insn
= insns
; insn
; insn
= next
)
2570 rtx set
= single_set (insn
);
2572 next
= NEXT_INSN (insn
);
2574 if (set
!= 0 && GET_CODE (SET_DEST (set
)) == REG
2575 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
2577 || (! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
))
2578 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
2579 && ! modified_in_p (SET_SRC (set
), insns
)
2580 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
2582 if (PREV_INSN (insn
))
2583 NEXT_INSN (PREV_INSN (insn
)) = next
;
2588 PREV_INSN (next
) = PREV_INSN (insn
);
2594 prev
= get_last_insn ();
2596 /* Write the remaining insns followed by the final copy. */
2598 for (insn
= insns
; insn
; insn
= next
)
2600 next
= NEXT_INSN (insn
);
2605 last
= emit_move_insn (target
, result
);
2606 REG_NOTES (last
) = gen_rtx (EXPR_LIST
,
2607 REG_EQUAL
, copy_rtx (equiv
), REG_NOTES (last
));
2610 first
= get_insns ();
2612 first
= NEXT_INSN (prev
);
2614 /* Encapsulate the block so it gets manipulated as a unit. */
2615 REG_NOTES (first
) = gen_rtx (INSN_LIST
, REG_LIBCALL
, last
,
2617 REG_NOTES (last
) = gen_rtx (INSN_LIST
, REG_RETVAL
, first
, REG_NOTES (last
));
2620 /* Generate code to store zero in X. */
2626 emit_move_insn (x
, const0_rtx
);
2629 /* Generate code to store 1 in X
2630 assuming it contains zero beforehand. */
2633 emit_0_to_1_insn (x
)
2636 emit_move_insn (x
, const1_rtx
);
2639 /* Generate code to compare X with Y
2640 so that the condition codes are set.
2642 MODE is the mode of the inputs (in case they are const_int).
2643 UNSIGNEDP nonzero says that X and Y are unsigned;
2644 this matters if they need to be widened.
2646 If they have mode BLKmode, then SIZE specifies the size of both X and Y,
2647 and ALIGN specifies the known shared alignment of X and Y.
2649 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
2650 It is ignored for fixed-point and block comparisons;
2651 it is used only for floating-point comparisons. */
2654 emit_cmp_insn (x
, y
, comparison
, size
, mode
, unsignedp
, align
)
2656 enum rtx_code comparison
;
2658 enum machine_mode mode
;
2662 enum mode_class
class;
2663 enum machine_mode wider_mode
;
2665 class = GET_MODE_CLASS (mode
);
2667 /* They could both be VOIDmode if both args are immediate constants,
2668 but we should fold that at an earlier stage.
2669 With no special code here, this will call abort,
2670 reminding the programmer to implement such folding. */
2672 if (mode
!= BLKmode
&& flag_force_mem
)
2674 x
= force_not_mem (x
);
2675 y
= force_not_mem (y
);
2678 /* If we are inside an appropriately-short loop and one operand is an
2679 expensive constant, force it into a register. */
2680 if (CONSTANT_P (x
) && preserve_subexpressions_p () && rtx_cost (x
, COMPARE
) > 2)
2681 x
= force_reg (mode
, x
);
2683 if (CONSTANT_P (y
) && preserve_subexpressions_p () && rtx_cost (y
, COMPARE
) > 2)
2684 y
= force_reg (mode
, y
);
2686 /* Don't let both operands fail to indicate the mode. */
2687 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
2688 x
= force_reg (mode
, x
);
2690 /* Handle all BLKmode compares. */
2692 if (mode
== BLKmode
)
2695 x
= protect_from_queue (x
, 0);
2696 y
= protect_from_queue (y
, 0);
2700 #ifdef HAVE_cmpstrqi
2702 && GET_CODE (size
) == CONST_INT
2703 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (QImode
)))
2705 enum machine_mode result_mode
2706 = insn_operand_mode
[(int) CODE_FOR_cmpstrqi
][0];
2707 rtx result
= gen_reg_rtx (result_mode
);
2708 emit_insn (gen_cmpstrqi (result
, x
, y
, size
, GEN_INT (align
)));
2709 emit_cmp_insn (result
, const0_rtx
, comparison
, NULL_RTX
,
2714 #ifdef HAVE_cmpstrhi
2716 && GET_CODE (size
) == CONST_INT
2717 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (HImode
)))
2719 enum machine_mode result_mode
2720 = insn_operand_mode
[(int) CODE_FOR_cmpstrhi
][0];
2721 rtx result
= gen_reg_rtx (result_mode
);
2722 emit_insn (gen_cmpstrhi (result
, x
, y
, size
, GEN_INT (align
)));
2723 emit_cmp_insn (result
, const0_rtx
, comparison
, NULL_RTX
,
2728 #ifdef HAVE_cmpstrsi
2731 enum machine_mode result_mode
2732 = insn_operand_mode
[(int) CODE_FOR_cmpstrsi
][0];
2733 rtx result
= gen_reg_rtx (result_mode
);
2734 size
= protect_from_queue (size
, 0);
2735 emit_insn (gen_cmpstrsi (result
, x
, y
,
2736 convert_to_mode (SImode
, size
, 1),
2738 emit_cmp_insn (result
, const0_rtx
, comparison
, NULL_RTX
,
2744 #ifdef TARGET_MEM_FUNCTIONS
2745 emit_library_call (memcmp_libfunc
, 0,
2746 TYPE_MODE (integer_type_node
), 3,
2747 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
2750 emit_library_call (bcmp_libfunc
, 0,
2751 TYPE_MODE (integer_type_node
), 3,
2752 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
2755 emit_cmp_insn (hard_libcall_value (TYPE_MODE (integer_type_node
)),
2756 const0_rtx
, comparison
, NULL_RTX
,
2757 TYPE_MODE (integer_type_node
), 0, 0);
2762 /* Handle some compares against zero. */
2764 if (y
== CONST0_RTX (mode
)
2765 && tst_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2767 int icode
= (int) tst_optab
->handlers
[(int) mode
].insn_code
;
2770 x
= protect_from_queue (x
, 0);
2771 y
= protect_from_queue (y
, 0);
2773 /* Now, if insn does accept these operands, put them into pseudos. */
2774 if (! (*insn_operand_predicate
[icode
][0])
2775 (x
, insn_operand_mode
[icode
][0]))
2776 x
= copy_to_mode_reg (insn_operand_mode
[icode
][0], x
);
2778 emit_insn (GEN_FCN (icode
) (x
));
2782 /* Handle compares for which there is a directly suitable insn. */
2784 if (cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2786 int icode
= (int) cmp_optab
->handlers
[(int) mode
].insn_code
;
2789 x
= protect_from_queue (x
, 0);
2790 y
= protect_from_queue (y
, 0);
2792 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2793 if (! (*insn_operand_predicate
[icode
][0])
2794 (x
, insn_operand_mode
[icode
][0]))
2795 x
= copy_to_mode_reg (insn_operand_mode
[icode
][0], x
);
2797 if (! (*insn_operand_predicate
[icode
][1])
2798 (y
, insn_operand_mode
[icode
][1]))
2799 y
= copy_to_mode_reg (insn_operand_mode
[icode
][1], y
);
2801 emit_insn (GEN_FCN (icode
) (x
, y
));
2805 /* Try widening if we can find a direct insn that way. */
2807 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2809 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2810 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2812 if (cmp_optab
->handlers
[(int) wider_mode
].insn_code
2813 != CODE_FOR_nothing
)
2815 x
= protect_from_queue (x
, 0);
2816 y
= protect_from_queue (y
, 0);
2817 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
2818 y
= convert_modes (wider_mode
, mode
, y
, unsignedp
);
2819 emit_cmp_insn (x
, y
, comparison
, NULL_RTX
,
2820 wider_mode
, unsignedp
, align
);
2826 /* Handle a lib call just for the mode we are using. */
2828 if (cmp_optab
->handlers
[(int) mode
].libfunc
2829 && class != MODE_FLOAT
)
2831 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
2832 /* If we want unsigned, and this mode has a distinct unsigned
2833 comparison routine, use that. */
2834 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
2835 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
2837 emit_library_call (libfunc
, 1,
2838 word_mode
, 2, x
, mode
, y
, mode
);
2840 /* Integer comparison returns a result that must be compared against 1,
2841 so that even if we do an unsigned compare afterward,
2842 there is still a value that can represent the result "less than". */
2844 emit_cmp_insn (hard_libcall_value (word_mode
), const1_rtx
,
2845 comparison
, NULL_RTX
, word_mode
, unsignedp
, 0);
2849 if (class == MODE_FLOAT
)
2850 emit_float_lib_cmp (x
, y
, comparison
);
2856 /* Nonzero if a compare of mode MODE can be done straightforwardly
2857 (without splitting it into pieces). */
2860 can_compare_p (mode
)
2861 enum machine_mode mode
;
2865 if (cmp_optab
->handlers
[(int)mode
].insn_code
!= CODE_FOR_nothing
)
2867 mode
= GET_MODE_WIDER_MODE (mode
);
2868 } while (mode
!= VOIDmode
);
2873 /* Emit a library call comparison between floating point X and Y.
2874 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
2877 emit_float_lib_cmp (x
, y
, comparison
)
2879 enum rtx_code comparison
;
2881 enum machine_mode mode
= GET_MODE (x
);
2888 libfunc
= eqhf2_libfunc
;
2892 libfunc
= nehf2_libfunc
;
2896 libfunc
= gthf2_libfunc
;
2900 libfunc
= gehf2_libfunc
;
2904 libfunc
= lthf2_libfunc
;
2908 libfunc
= lehf2_libfunc
;
2911 else if (mode
== SFmode
)
2915 libfunc
= eqsf2_libfunc
;
2919 libfunc
= nesf2_libfunc
;
2923 libfunc
= gtsf2_libfunc
;
2927 libfunc
= gesf2_libfunc
;
2931 libfunc
= ltsf2_libfunc
;
2935 libfunc
= lesf2_libfunc
;
2938 else if (mode
== DFmode
)
2942 libfunc
= eqdf2_libfunc
;
2946 libfunc
= nedf2_libfunc
;
2950 libfunc
= gtdf2_libfunc
;
2954 libfunc
= gedf2_libfunc
;
2958 libfunc
= ltdf2_libfunc
;
2962 libfunc
= ledf2_libfunc
;
2965 else if (mode
== XFmode
)
2969 libfunc
= eqxf2_libfunc
;
2973 libfunc
= nexf2_libfunc
;
2977 libfunc
= gtxf2_libfunc
;
2981 libfunc
= gexf2_libfunc
;
2985 libfunc
= ltxf2_libfunc
;
2989 libfunc
= lexf2_libfunc
;
2992 else if (mode
== TFmode
)
2996 libfunc
= eqtf2_libfunc
;
3000 libfunc
= netf2_libfunc
;
3004 libfunc
= gttf2_libfunc
;
3008 libfunc
= getf2_libfunc
;
3012 libfunc
= lttf2_libfunc
;
3016 libfunc
= letf2_libfunc
;
3021 enum machine_mode wider_mode
;
3023 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
3024 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3026 if ((cmp_optab
->handlers
[(int) wider_mode
].insn_code
3027 != CODE_FOR_nothing
)
3028 || (cmp_optab
->handlers
[(int) wider_mode
].libfunc
!= 0))
3030 x
= protect_from_queue (x
, 0);
3031 y
= protect_from_queue (y
, 0);
3032 x
= convert_to_mode (wider_mode
, x
, 0);
3033 y
= convert_to_mode (wider_mode
, y
, 0);
3034 emit_float_lib_cmp (x
, y
, comparison
);
3044 emit_library_call (libfunc
, 1,
3045 word_mode
, 2, x
, mode
, y
, mode
);
3047 emit_cmp_insn (hard_libcall_value (word_mode
), const0_rtx
, comparison
,
3048 NULL_RTX
, word_mode
, 0, 0);
3051 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3054 emit_indirect_jump (loc
)
3057 if (! ((*insn_operand_predicate
[(int)CODE_FOR_indirect_jump
][0])
3059 loc
= copy_to_mode_reg (Pmode
, loc
);
3061 emit_jump_insn (gen_indirect_jump (loc
));
3065 #ifdef HAVE_conditional_move
3067 /* Emit a conditional move instruction if the machine supports one for that
3068 condition and machine mode.
3070 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3071 the mode to use should they be constants. If it is VOIDmode, they cannot
3074 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3075 should be stored there. MODE is the mode to use should they be constants.
3076 If it is VOIDmode, they cannot both be constants.
3078 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3079 is not supported. */
3082 emit_conditional_move (target
, code
, op0
, op1
, cmode
, op2
, op3
, mode
,
3087 enum machine_mode cmode
;
3089 enum machine_mode mode
;
3092 rtx tem
, subtarget
, comparison
, insn
;
3093 enum insn_code icode
;
3095 /* If one operand is constant, make it the second one. Only do this
3096 if the other operand is not constant as well. */
3098 if ((CONSTANT_P (op0
) && ! CONSTANT_P (op1
))
3099 || (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) != CONST_INT
))
3104 code
= swap_condition (code
);
3107 if (cmode
== VOIDmode
)
3108 cmode
= GET_MODE (op0
);
3110 if ((CONSTANT_P (op2
) && ! CONSTANT_P (op3
))
3111 || (GET_CODE (op2
) == CONST_INT
&& GET_CODE (op3
) != CONST_INT
))
3116 /* ??? This may not be appropriate (consider IEEE). Perhaps we should
3117 call can_reverse_comparison_p here and bail out if necessary.
3118 It's not clear whether we need to do this canonicalization though. */
3119 code
= reverse_condition (code
);
3122 if (mode
== VOIDmode
)
3123 mode
= GET_MODE (op2
);
3125 icode
= movcc_gen_code
[mode
];
3127 if (icode
== CODE_FOR_nothing
)
3132 op2
= force_not_mem (op2
);
3133 op3
= force_not_mem (op3
);
3137 target
= protect_from_queue (target
, 1);
3139 target
= gen_reg_rtx (mode
);
3145 op2
= protect_from_queue (op2
, 0);
3146 op3
= protect_from_queue (op3
, 0);
3148 /* If the insn doesn't accept these operands, put them in pseudos. */
3150 if (! (*insn_operand_predicate
[icode
][0])
3151 (subtarget
, insn_operand_mode
[icode
][0]))
3152 subtarget
= gen_reg_rtx (insn_operand_mode
[icode
][0]);
3154 if (! (*insn_operand_predicate
[icode
][2])
3155 (op2
, insn_operand_mode
[icode
][2]))
3156 op2
= copy_to_mode_reg (insn_operand_mode
[icode
][2], op2
);
3158 if (! (*insn_operand_predicate
[icode
][3])
3159 (op3
, insn_operand_mode
[icode
][3]))
3160 op3
= copy_to_mode_reg (insn_operand_mode
[icode
][3], op3
);
3162 /* Everything should now be in the suitable form, so emit the compare insn
3163 and then the conditional move. */
3166 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
, 0);
3168 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3169 if (GET_CODE (comparison
) != code
)
3170 /* This shouldn't happen. */
3173 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3175 /* If that failed, then give up. */
3181 if (subtarget
!= target
)
3182 convert_move (target
, subtarget
, 0);
3187 /* Return non-zero if a conditional move of mode MODE is supported.
3189 This function is for combine so it can tell whether an insn that looks
3190 like a conditional move is actually supported by the hardware. If we
3191 guess wrong we lose a bit on optimization, but that's it. */
3192 /* ??? sparc64 supports conditionally moving integers values based on fp
3193 comparisons, and vice versa. How do we handle them? */
3196 can_conditionally_move_p (mode
)
3197 enum machine_mode mode
;
3199 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
3205 #endif /* HAVE_conditional_move */
3207 /* These three functions generate an insn body and return it
3208 rather than emitting the insn.
3210 They do not protect from queued increments,
3211 because they may be used 1) in protect_from_queue itself
3212 and 2) in other passes where there is no queue. */
3214 /* Generate and return an insn body to add Y to X. */
3217 gen_add2_insn (x
, y
)
3220 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3222 if (! (*insn_operand_predicate
[icode
][0]) (x
, insn_operand_mode
[icode
][0])
3223 || ! (*insn_operand_predicate
[icode
][1]) (x
, insn_operand_mode
[icode
][1])
3224 || ! (*insn_operand_predicate
[icode
][2]) (y
, insn_operand_mode
[icode
][2]))
3227 return (GEN_FCN (icode
) (x
, x
, y
));
3231 have_add2_insn (mode
)
3232 enum machine_mode mode
;
3234 return add_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
;
3237 /* Generate and return an insn body to subtract Y from X. */
3240 gen_sub2_insn (x
, y
)
3243 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3245 if (! (*insn_operand_predicate
[icode
][0]) (x
, insn_operand_mode
[icode
][0])
3246 || ! (*insn_operand_predicate
[icode
][1]) (x
, insn_operand_mode
[icode
][1])
3247 || ! (*insn_operand_predicate
[icode
][2]) (y
, insn_operand_mode
[icode
][2]))
3250 return (GEN_FCN (icode
) (x
, x
, y
));
3254 have_sub2_insn (mode
)
3255 enum machine_mode mode
;
3257 return sub_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
;
3260 /* Generate the body of an instruction to copy Y into X.
3261 It may be a SEQUENCE, if one insn isn't enough. */
3264 gen_move_insn (x
, y
)
3267 register enum machine_mode mode
= GET_MODE (x
);
3268 enum insn_code insn_code
;
3271 if (mode
== VOIDmode
)
3272 mode
= GET_MODE (y
);
3274 insn_code
= mov_optab
->handlers
[(int) mode
].insn_code
;
3276 /* Handle MODE_CC modes: If we don't have a special move insn for this mode,
3277 find a mode to do it in. If we have a movcc, use it. Otherwise,
3278 find the MODE_INT mode of the same width. */
3280 if (GET_MODE_CLASS (mode
) == MODE_CC
&& insn_code
== CODE_FOR_nothing
)
3282 enum machine_mode tmode
= VOIDmode
;
3286 && mov_optab
->handlers
[(int) CCmode
].insn_code
!= CODE_FOR_nothing
)
3289 for (tmode
= QImode
; tmode
!= VOIDmode
;
3290 tmode
= GET_MODE_WIDER_MODE (tmode
))
3291 if (GET_MODE_SIZE (tmode
) == GET_MODE_SIZE (mode
))
3294 if (tmode
== VOIDmode
)
3297 /* Get X and Y in TMODE. We can't use gen_lowpart here because it
3298 may call change_address which is not appropriate if we were
3299 called when a reload was in progress. We don't have to worry
3300 about changing the address since the size in bytes is supposed to
3301 be the same. Copy the MEM to change the mode and move any
3302 substitutions from the old MEM to the new one. */
3304 if (reload_in_progress
)
3306 x
= gen_lowpart_common (tmode
, x1
);
3307 if (x
== 0 && GET_CODE (x1
) == MEM
)
3309 x
= gen_rtx (MEM
, tmode
, XEXP (x1
, 0));
3310 RTX_UNCHANGING_P (x
) = RTX_UNCHANGING_P (x1
);
3311 MEM_IN_STRUCT_P (x
) = MEM_IN_STRUCT_P (x1
);
3312 MEM_VOLATILE_P (x
) = MEM_VOLATILE_P (x1
);
3313 copy_replacements (x1
, x
);
3316 y
= gen_lowpart_common (tmode
, y1
);
3317 if (y
== 0 && GET_CODE (y1
) == MEM
)
3319 y
= gen_rtx (MEM
, tmode
, XEXP (y1
, 0));
3320 RTX_UNCHANGING_P (y
) = RTX_UNCHANGING_P (y1
);
3321 MEM_IN_STRUCT_P (y
) = MEM_IN_STRUCT_P (y1
);
3322 MEM_VOLATILE_P (y
) = MEM_VOLATILE_P (y1
);
3323 copy_replacements (y1
, y
);
3328 x
= gen_lowpart (tmode
, x
);
3329 y
= gen_lowpart (tmode
, y
);
3332 insn_code
= mov_optab
->handlers
[(int) tmode
].insn_code
;
3333 return (GEN_FCN (insn_code
) (x
, y
));
3337 emit_move_insn_1 (x
, y
);
3338 seq
= gen_sequence ();
3343 /* Return the insn code used to extend FROM_MODE to TO_MODE.
3344 UNSIGNEDP specifies zero-extension instead of sign-extension. If
3345 no such operation exists, CODE_FOR_nothing will be returned. */
3348 can_extend_p (to_mode
, from_mode
, unsignedp
)
3349 enum machine_mode to_mode
, from_mode
;
3352 return extendtab
[(int) to_mode
][(int) from_mode
][unsignedp
];
3355 /* Generate the body of an insn to extend Y (with mode MFROM)
3356 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
3359 gen_extend_insn (x
, y
, mto
, mfrom
, unsignedp
)
3361 enum machine_mode mto
, mfrom
;
3364 return (GEN_FCN (extendtab
[(int) mto
][(int) mfrom
][unsignedp
]) (x
, y
));
3367 /* can_fix_p and can_float_p say whether the target machine
3368 can directly convert a given fixed point type to
3369 a given floating point type, or vice versa.
3370 The returned value is the CODE_FOR_... value to use,
3371 or CODE_FOR_nothing if these modes cannot be directly converted.
3373 *TRUNCP_PTR is set to 1 if it is necessary to output
3374 an explicit FTRUNC insn before the fix insn; otherwise 0. */
3376 static enum insn_code
3377 can_fix_p (fixmode
, fltmode
, unsignedp
, truncp_ptr
)
3378 enum machine_mode fltmode
, fixmode
;
3383 if (fixtrunctab
[(int) fltmode
][(int) fixmode
][unsignedp
] != CODE_FOR_nothing
)
3384 return fixtrunctab
[(int) fltmode
][(int) fixmode
][unsignedp
];
3386 if (ftrunc_optab
->handlers
[(int) fltmode
].insn_code
!= CODE_FOR_nothing
)
3389 return fixtab
[(int) fltmode
][(int) fixmode
][unsignedp
];
3391 return CODE_FOR_nothing
;
3394 static enum insn_code
3395 can_float_p (fltmode
, fixmode
, unsignedp
)
3396 enum machine_mode fixmode
, fltmode
;
3399 return floattab
[(int) fltmode
][(int) fixmode
][unsignedp
];
3402 /* Generate code to convert FROM to floating point
3403 and store in TO. FROM must be fixed point and not VOIDmode.
3404 UNSIGNEDP nonzero means regard FROM as unsigned.
3405 Normally this is done by correcting the final value
3406 if it is negative. */
3409 expand_float (to
, from
, unsignedp
)
3413 enum insn_code icode
;
3414 register rtx target
= to
;
3415 enum machine_mode fmode
, imode
;
3417 /* Crash now, because we won't be able to decide which mode to use. */
3418 if (GET_MODE (from
) == VOIDmode
)
3421 /* Look for an insn to do the conversion. Do it in the specified
3422 modes if possible; otherwise convert either input, output or both to
3423 wider mode. If the integer mode is wider than the mode of FROM,
3424 we can do the conversion signed even if the input is unsigned. */
3426 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
3427 imode
= GET_MODE_WIDER_MODE (imode
))
3428 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
3429 fmode
= GET_MODE_WIDER_MODE (fmode
))
3431 int doing_unsigned
= unsignedp
;
3433 icode
= can_float_p (fmode
, imode
, unsignedp
);
3434 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
3435 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
3437 if (icode
!= CODE_FOR_nothing
)
3439 to
= protect_from_queue (to
, 1);
3440 from
= protect_from_queue (from
, 0);
3442 if (imode
!= GET_MODE (from
))
3443 from
= convert_to_mode (imode
, from
, unsignedp
);
3445 if (fmode
!= GET_MODE (to
))
3446 target
= gen_reg_rtx (fmode
);
3448 emit_unop_insn (icode
, target
, from
,
3449 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
3452 convert_move (to
, target
, 0);
3457 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3459 /* Unsigned integer, and no way to convert directly.
3460 Convert as signed, then conditionally adjust the result. */
3463 rtx label
= gen_label_rtx ();
3465 REAL_VALUE_TYPE offset
;
3469 to
= protect_from_queue (to
, 1);
3470 from
= protect_from_queue (from
, 0);
3473 from
= force_not_mem (from
);
3475 /* Look for a usable floating mode FMODE wider than the source and at
3476 least as wide as the target. Using FMODE will avoid rounding woes
3477 with unsigned values greater than the signed maximum value. */
3479 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
3480 fmode
= GET_MODE_WIDER_MODE (fmode
))
3481 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
3482 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
3485 if (fmode
== VOIDmode
)
3487 /* There is no such mode. Pretend the target is wide enough. */
3488 fmode
= GET_MODE (to
);
3490 /* Avoid double-rounding when TO is narrower than FROM. */
3491 if ((significand_size (fmode
) + 1)
3492 < GET_MODE_BITSIZE (GET_MODE (from
)))
3495 rtx neglabel
= gen_label_rtx ();
3497 /* Don't use TARGET if it isn't a register, is a hard register,
3498 or is the wrong mode. */
3499 if (GET_CODE (target
) != REG
3500 || REGNO (target
) < FIRST_PSEUDO_REGISTER
3501 || GET_MODE (target
) != fmode
)
3502 target
= gen_reg_rtx (fmode
);
3504 imode
= GET_MODE (from
);
3505 do_pending_stack_adjust ();
3507 /* Test whether the sign bit is set. */
3508 emit_cmp_insn (from
, const0_rtx
, GE
, NULL_RTX
, imode
, 0, 0);
3509 emit_jump_insn (gen_blt (neglabel
));
3511 /* The sign bit is not set. Convert as signed. */
3512 expand_float (target
, from
, 0);
3513 emit_jump_insn (gen_jump (label
));
3515 /* The sign bit is set.
3516 Convert to a usable (positive signed) value by shifting right
3517 one bit, while remembering if a nonzero bit was shifted
3518 out; i.e., compute (from & 1) | (from >> 1). */
3520 emit_label (neglabel
);
3521 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
3522 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3523 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
3525 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
3527 expand_float (target
, temp
, 0);
3529 /* Multiply by 2 to undo the shift above. */
3530 temp
= expand_binop (fmode
, add_optab
, target
, target
,
3531 target
, 0, OPTAB_LIB_WIDEN
);
3533 emit_move_insn (target
, temp
);
3535 do_pending_stack_adjust ();
3541 /* If we are about to do some arithmetic to correct for an
3542 unsigned operand, do it in a pseudo-register. */
3544 if (GET_MODE (to
) != fmode
3545 || GET_CODE (to
) != REG
|| REGNO (to
) < FIRST_PSEUDO_REGISTER
)
3546 target
= gen_reg_rtx (fmode
);
3548 /* Convert as signed integer to floating. */
3549 expand_float (target
, from
, 0);
3551 /* If FROM is negative (and therefore TO is negative),
3552 correct its value by 2**bitwidth. */
3554 do_pending_stack_adjust ();
3555 emit_cmp_insn (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
), 0, 0);
3556 emit_jump_insn (gen_bge (label
));
3558 /* On SCO 3.2.1, ldexp rejects values outside [0.5, 1).
3559 Rather than setting up a dconst_dot_5, let's hope SCO
3561 offset
= REAL_VALUE_LDEXP (dconst1
, GET_MODE_BITSIZE (GET_MODE (from
)));
3562 temp
= expand_binop (fmode
, add_optab
, target
,
3563 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
3564 target
, 0, OPTAB_LIB_WIDEN
);
3566 emit_move_insn (target
, temp
);
3568 do_pending_stack_adjust ();
3574 /* No hardware instruction available; call a library routine to convert from
3575 SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode. */
3581 to
= protect_from_queue (to
, 1);
3582 from
= protect_from_queue (from
, 0);
3584 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
3585 from
= convert_to_mode (SImode
, from
, unsignedp
);
3588 from
= force_not_mem (from
);
3590 if (GET_MODE (to
) == SFmode
)
3592 if (GET_MODE (from
) == SImode
)
3593 libfcn
= floatsisf_libfunc
;
3594 else if (GET_MODE (from
) == DImode
)
3595 libfcn
= floatdisf_libfunc
;
3596 else if (GET_MODE (from
) == TImode
)
3597 libfcn
= floattisf_libfunc
;
3601 else if (GET_MODE (to
) == DFmode
)
3603 if (GET_MODE (from
) == SImode
)
3604 libfcn
= floatsidf_libfunc
;
3605 else if (GET_MODE (from
) == DImode
)
3606 libfcn
= floatdidf_libfunc
;
3607 else if (GET_MODE (from
) == TImode
)
3608 libfcn
= floattidf_libfunc
;
3612 else if (GET_MODE (to
) == XFmode
)
3614 if (GET_MODE (from
) == SImode
)
3615 libfcn
= floatsixf_libfunc
;
3616 else if (GET_MODE (from
) == DImode
)
3617 libfcn
= floatdixf_libfunc
;
3618 else if (GET_MODE (from
) == TImode
)
3619 libfcn
= floattixf_libfunc
;
3623 else if (GET_MODE (to
) == TFmode
)
3625 if (GET_MODE (from
) == SImode
)
3626 libfcn
= floatsitf_libfunc
;
3627 else if (GET_MODE (from
) == DImode
)
3628 libfcn
= floatditf_libfunc
;
3629 else if (GET_MODE (from
) == TImode
)
3630 libfcn
= floattitf_libfunc
;
3639 value
= emit_library_call_value (libfcn
, NULL_RTX
, 1,
3641 1, from
, GET_MODE (from
));
3642 insns
= get_insns ();
3645 emit_libcall_block (insns
, target
, value
,
3646 gen_rtx (FLOAT
, GET_MODE (to
), from
));
3651 /* Copy result to requested destination
3652 if we have been computing in a temp location. */
3656 if (GET_MODE (target
) == GET_MODE (to
))
3657 emit_move_insn (to
, target
);
3659 convert_move (to
, target
, 0);
3663 /* expand_fix: generate code to convert FROM to fixed point
3664 and store in TO. FROM must be floating point. */
3670 rtx temp
= gen_reg_rtx (GET_MODE (x
));
3671 return expand_unop (GET_MODE (x
), ftrunc_optab
, x
, temp
, 0);
3675 expand_fix (to
, from
, unsignedp
)
3676 register rtx to
, from
;
3679 enum insn_code icode
;
3680 register rtx target
= to
;
3681 enum machine_mode fmode
, imode
;
3685 /* We first try to find a pair of modes, one real and one integer, at
3686 least as wide as FROM and TO, respectively, in which we can open-code
3687 this conversion. If the integer mode is wider than the mode of TO,
3688 we can do the conversion either signed or unsigned. */
3690 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
3691 imode
= GET_MODE_WIDER_MODE (imode
))
3692 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
3693 fmode
= GET_MODE_WIDER_MODE (fmode
))
3695 int doing_unsigned
= unsignedp
;
3697 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
3698 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
3699 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
3701 if (icode
!= CODE_FOR_nothing
)
3703 to
= protect_from_queue (to
, 1);
3704 from
= protect_from_queue (from
, 0);
3706 if (fmode
!= GET_MODE (from
))
3707 from
= convert_to_mode (fmode
, from
, 0);
3710 from
= ftruncify (from
);
3712 if (imode
!= GET_MODE (to
))
3713 target
= gen_reg_rtx (imode
);
3715 emit_unop_insn (icode
, target
, from
,
3716 doing_unsigned
? UNSIGNED_FIX
: FIX
);
3718 convert_move (to
, target
, unsignedp
);
3723 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3724 /* For an unsigned conversion, there is one more way to do it.
3725 If we have a signed conversion, we generate code that compares
3726 the real value to the largest representable positive number. If if
3727 is smaller, the conversion is done normally. Otherwise, subtract
3728 one plus the highest signed number, convert, and add it back.
3730 We only need to check all real modes, since we know we didn't find
3731 anything with a wider integer mode. */
3733 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
3734 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
3735 fmode
= GET_MODE_WIDER_MODE (fmode
))
3736 /* Make sure we won't lose significant bits doing this. */
3737 if (GET_MODE_BITSIZE (fmode
) > GET_MODE_BITSIZE (GET_MODE (to
))
3738 && CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
3742 REAL_VALUE_TYPE offset
;
3743 rtx limit
, lab1
, lab2
, insn
;
3745 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
3746 offset
= REAL_VALUE_LDEXP (dconst1
, bitsize
- 1);
3747 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
3748 lab1
= gen_label_rtx ();
3749 lab2
= gen_label_rtx ();
3752 to
= protect_from_queue (to
, 1);
3753 from
= protect_from_queue (from
, 0);
3756 from
= force_not_mem (from
);
3758 if (fmode
!= GET_MODE (from
))
3759 from
= convert_to_mode (fmode
, from
, 0);
3761 /* See if we need to do the subtraction. */
3762 do_pending_stack_adjust ();
3763 emit_cmp_insn (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
), 0, 0);
3764 emit_jump_insn (gen_bge (lab1
));
3766 /* If not, do the signed "fix" and branch around fixup code. */
3767 expand_fix (to
, from
, 0);
3768 emit_jump_insn (gen_jump (lab2
));
3771 /* Otherwise, subtract 2**(N-1), convert to signed number,
3772 then add 2**(N-1). Do the addition using XOR since this
3773 will often generate better code. */
3775 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
3776 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
3777 expand_fix (to
, target
, 0);
3778 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
3779 GEN_INT ((HOST_WIDE_INT
) 1 << (bitsize
- 1)),
3780 to
, 1, OPTAB_LIB_WIDEN
);
3783 emit_move_insn (to
, target
);
3787 /* Make a place for a REG_NOTE and add it. */
3788 insn
= emit_move_insn (to
, to
);
3789 REG_NOTES (insn
) = gen_rtx (EXPR_LIST
, REG_EQUAL
,
3790 gen_rtx (UNSIGNED_FIX
, GET_MODE (to
),
3798 /* We can't do it with an insn, so use a library call. But first ensure
3799 that the mode of TO is at least as wide as SImode, since those are the
3800 only library calls we know about. */
3802 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
3804 target
= gen_reg_rtx (SImode
);
3806 expand_fix (target
, from
, unsignedp
);
3808 else if (GET_MODE (from
) == SFmode
)
3810 if (GET_MODE (to
) == SImode
)
3811 libfcn
= unsignedp
? fixunssfsi_libfunc
: fixsfsi_libfunc
;
3812 else if (GET_MODE (to
) == DImode
)
3813 libfcn
= unsignedp
? fixunssfdi_libfunc
: fixsfdi_libfunc
;
3814 else if (GET_MODE (to
) == TImode
)
3815 libfcn
= unsignedp
? fixunssfti_libfunc
: fixsfti_libfunc
;
3819 else if (GET_MODE (from
) == DFmode
)
3821 if (GET_MODE (to
) == SImode
)
3822 libfcn
= unsignedp
? fixunsdfsi_libfunc
: fixdfsi_libfunc
;
3823 else if (GET_MODE (to
) == DImode
)
3824 libfcn
= unsignedp
? fixunsdfdi_libfunc
: fixdfdi_libfunc
;
3825 else if (GET_MODE (to
) == TImode
)
3826 libfcn
= unsignedp
? fixunsdfti_libfunc
: fixdfti_libfunc
;
3830 else if (GET_MODE (from
) == XFmode
)
3832 if (GET_MODE (to
) == SImode
)
3833 libfcn
= unsignedp
? fixunsxfsi_libfunc
: fixxfsi_libfunc
;
3834 else if (GET_MODE (to
) == DImode
)
3835 libfcn
= unsignedp
? fixunsxfdi_libfunc
: fixxfdi_libfunc
;
3836 else if (GET_MODE (to
) == TImode
)
3837 libfcn
= unsignedp
? fixunsxfti_libfunc
: fixxfti_libfunc
;
3841 else if (GET_MODE (from
) == TFmode
)
3843 if (GET_MODE (to
) == SImode
)
3844 libfcn
= unsignedp
? fixunstfsi_libfunc
: fixtfsi_libfunc
;
3845 else if (GET_MODE (to
) == DImode
)
3846 libfcn
= unsignedp
? fixunstfdi_libfunc
: fixtfdi_libfunc
;
3847 else if (GET_MODE (to
) == TImode
)
3848 libfcn
= unsignedp
? fixunstfti_libfunc
: fixtfti_libfunc
;
3860 to
= protect_from_queue (to
, 1);
3861 from
= protect_from_queue (from
, 0);
3864 from
= force_not_mem (from
);
3868 value
= emit_library_call_value (libfcn
, NULL_RTX
, 1, GET_MODE (to
),
3870 1, from
, GET_MODE (from
));
3871 insns
= get_insns ();
3874 emit_libcall_block (insns
, target
, value
,
3875 gen_rtx (unsignedp
? UNSIGNED_FIX
: FIX
,
3876 GET_MODE (to
), from
));
3879 if (GET_MODE (to
) == GET_MODE (target
))
3880 emit_move_insn (to
, target
);
3882 convert_move (to
, target
, 0);
3890 optab op
= (optab
) xmalloc (sizeof (struct optab
));
3892 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
3894 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
3895 op
->handlers
[i
].libfunc
= 0;
3898 if (code
!= UNKNOWN
)
3899 code_to_optab
[(int) code
] = op
;
3904 /* Initialize the libfunc fields of an entire group of entries in some
3905 optab. Each entry is set equal to a string consisting of a leading
3906 pair of underscores followed by a generic operation name followed by
3907 a mode name (downshifted to lower case) followed by a single character
3908 representing the number of operands for the given operation (which is
3909 usually one of the characters '2', '3', or '4').
3911 OPTABLE is the table in which libfunc fields are to be initialized.
3912 FIRST_MODE is the first machine mode index in the given optab to
3914 LAST_MODE is the last machine mode index in the given optab to
3916 OPNAME is the generic (string) name of the operation.
3917 SUFFIX is the character which specifies the number of operands for
3918 the given generic operation.
3922 init_libfuncs (optable
, first_mode
, last_mode
, opname
, suffix
)
3923 register optab optable
;
3924 register int first_mode
;
3925 register int last_mode
;
3926 register char *opname
;
3927 register int suffix
;
3930 register unsigned opname_len
= strlen (opname
);
3932 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
3933 mode
= (enum machine_mode
) ((int) mode
+ 1))
3935 register char *mname
= mode_name
[(int) mode
];
3936 register unsigned mname_len
= strlen (mname
);
3937 register char *libfunc_name
3938 = (char *) xmalloc (2 + opname_len
+ mname_len
+ 1 + 1);
3945 for (q
= opname
; *q
; )
3947 for (q
= mname
; *q
; q
++)
3948 *p
++ = tolower (*q
);
3951 optable
->handlers
[(int) mode
].libfunc
3952 = gen_rtx (SYMBOL_REF
, Pmode
, libfunc_name
);
3956 /* Initialize the libfunc fields of an entire group of entries in some
3957 optab which correspond to all integer mode operations. The parameters
3958 have the same meaning as similarly named ones for the `init_libfuncs'
3959 routine. (See above). */
3962 init_integral_libfuncs (optable
, opname
, suffix
)
3963 register optab optable
;
3964 register char *opname
;
3965 register int suffix
;
3967 init_libfuncs (optable
, SImode
, TImode
, opname
, suffix
);
3970 /* Initialize the libfunc fields of an entire group of entries in some
3971 optab which correspond to all real mode operations. The parameters
3972 have the same meaning as similarly named ones for the `init_libfuncs'
3973 routine. (See above). */
3976 init_floating_libfuncs (optable
, opname
, suffix
)
3977 register optab optable
;
3978 register char *opname
;
3979 register int suffix
;
3981 init_libfuncs (optable
, SFmode
, TFmode
, opname
, suffix
);
3984 /* Initialize the libfunc fields of an entire group of entries in some
3985 optab which correspond to all complex floating modes. The parameters
3986 have the same meaning as similarly named ones for the `init_libfuncs'
3987 routine. (See above). */
3990 init_complex_libfuncs (optable
, opname
, suffix
)
3991 register optab optable
;
3992 register char *opname
;
3993 register int suffix
;
3995 init_libfuncs (optable
, SCmode
, TCmode
, opname
, suffix
);
3998 /* Call this once to initialize the contents of the optabs
3999 appropriately for the current target machine. */
4007 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4009 for (p
= fixtab
[0][0];
4010 p
< fixtab
[0][0] + sizeof fixtab
/ sizeof (fixtab
[0][0][0]);
4012 *p
= CODE_FOR_nothing
;
4014 for (p
= fixtrunctab
[0][0];
4015 p
< fixtrunctab
[0][0] + sizeof fixtrunctab
/ sizeof (fixtrunctab
[0][0][0]);
4017 *p
= CODE_FOR_nothing
;
4019 for (p
= floattab
[0][0];
4020 p
< floattab
[0][0] + sizeof floattab
/ sizeof (floattab
[0][0][0]);
4022 *p
= CODE_FOR_nothing
;
4024 for (p
= extendtab
[0][0];
4025 p
< extendtab
[0][0] + sizeof extendtab
/ sizeof extendtab
[0][0][0];
4027 *p
= CODE_FOR_nothing
;
4029 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
4030 setcc_gen_code
[i
] = CODE_FOR_nothing
;
4032 #ifdef HAVE_conditional_move
4033 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4034 movcc_gen_code
[i
] = CODE_FOR_nothing
;
4037 add_optab
= init_optab (PLUS
);
4038 sub_optab
= init_optab (MINUS
);
4039 smul_optab
= init_optab (MULT
);
4040 smul_highpart_optab
= init_optab (UNKNOWN
);
4041 umul_highpart_optab
= init_optab (UNKNOWN
);
4042 smul_widen_optab
= init_optab (UNKNOWN
);
4043 umul_widen_optab
= init_optab (UNKNOWN
);
4044 sdiv_optab
= init_optab (DIV
);
4045 sdivmod_optab
= init_optab (UNKNOWN
);
4046 udiv_optab
= init_optab (UDIV
);
4047 udivmod_optab
= init_optab (UNKNOWN
);
4048 smod_optab
= init_optab (MOD
);
4049 umod_optab
= init_optab (UMOD
);
4050 flodiv_optab
= init_optab (DIV
);
4051 ftrunc_optab
= init_optab (UNKNOWN
);
4052 and_optab
= init_optab (AND
);
4053 ior_optab
= init_optab (IOR
);
4054 xor_optab
= init_optab (XOR
);
4055 ashl_optab
= init_optab (ASHIFT
);
4056 ashr_optab
= init_optab (ASHIFTRT
);
4057 lshr_optab
= init_optab (LSHIFTRT
);
4058 rotl_optab
= init_optab (ROTATE
);
4059 rotr_optab
= init_optab (ROTATERT
);
4060 smin_optab
= init_optab (SMIN
);
4061 smax_optab
= init_optab (SMAX
);
4062 umin_optab
= init_optab (UMIN
);
4063 umax_optab
= init_optab (UMAX
);
4064 mov_optab
= init_optab (UNKNOWN
);
4065 movstrict_optab
= init_optab (UNKNOWN
);
4066 cmp_optab
= init_optab (UNKNOWN
);
4067 ucmp_optab
= init_optab (UNKNOWN
);
4068 tst_optab
= init_optab (UNKNOWN
);
4069 neg_optab
= init_optab (NEG
);
4070 abs_optab
= init_optab (ABS
);
4071 one_cmpl_optab
= init_optab (NOT
);
4072 ffs_optab
= init_optab (FFS
);
4073 sqrt_optab
= init_optab (SQRT
);
4074 sin_optab
= init_optab (UNKNOWN
);
4075 cos_optab
= init_optab (UNKNOWN
);
4076 strlen_optab
= init_optab (UNKNOWN
);
4078 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4080 movstr_optab
[i
] = CODE_FOR_nothing
;
4082 #ifdef HAVE_SECONDARY_RELOADS
4083 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
4087 /* Fill in the optabs with the insns we support. */
4090 #ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
4091 /* This flag says the same insns that convert to a signed fixnum
4092 also convert validly to an unsigned one. */
4093 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4094 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
4095 fixtrunctab
[i
][j
][1] = fixtrunctab
[i
][j
][0];
4098 #ifdef EXTRA_CC_MODES
4102 /* Initialize the optabs with the names of the library functions. */
4103 init_integral_libfuncs (add_optab
, "add", '3');
4104 init_floating_libfuncs (add_optab
, "add", '3');
4105 init_integral_libfuncs (sub_optab
, "sub", '3');
4106 init_floating_libfuncs (sub_optab
, "sub", '3');
4107 init_integral_libfuncs (smul_optab
, "mul", '3');
4108 init_floating_libfuncs (smul_optab
, "mul", '3');
4109 init_integral_libfuncs (sdiv_optab
, "div", '3');
4110 init_integral_libfuncs (udiv_optab
, "udiv", '3');
4111 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
4112 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
4113 init_integral_libfuncs (smod_optab
, "mod", '3');
4114 init_integral_libfuncs (umod_optab
, "umod", '3');
4115 init_floating_libfuncs (flodiv_optab
, "div", '3');
4116 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
4117 init_integral_libfuncs (and_optab
, "and", '3');
4118 init_integral_libfuncs (ior_optab
, "ior", '3');
4119 init_integral_libfuncs (xor_optab
, "xor", '3');
4120 init_integral_libfuncs (ashl_optab
, "ashl", '3');
4121 init_integral_libfuncs (ashr_optab
, "ashr", '3');
4122 init_integral_libfuncs (lshr_optab
, "lshr", '3');
4123 init_integral_libfuncs (smin_optab
, "min", '3');
4124 init_floating_libfuncs (smin_optab
, "min", '3');
4125 init_integral_libfuncs (smax_optab
, "max", '3');
4126 init_floating_libfuncs (smax_optab
, "max", '3');
4127 init_integral_libfuncs (umin_optab
, "umin", '3');
4128 init_integral_libfuncs (umax_optab
, "umax", '3');
4129 init_integral_libfuncs (neg_optab
, "neg", '2');
4130 init_floating_libfuncs (neg_optab
, "neg", '2');
4131 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
4132 init_integral_libfuncs (ffs_optab
, "ffs", '2');
4134 /* Comparison libcalls for integers MUST come in pairs, signed/unsigned. */
4135 init_integral_libfuncs (cmp_optab
, "cmp", '2');
4136 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
4137 init_floating_libfuncs (cmp_optab
, "cmp", '2');
4139 #ifdef MULSI3_LIBCALL
4140 smul_optab
->handlers
[(int) SImode
].libfunc
4141 = gen_rtx (SYMBOL_REF
, Pmode
, MULSI3_LIBCALL
);
4143 #ifdef MULDI3_LIBCALL
4144 smul_optab
->handlers
[(int) DImode
].libfunc
4145 = gen_rtx (SYMBOL_REF
, Pmode
, MULDI3_LIBCALL
);
4148 #ifdef DIVSI3_LIBCALL
4149 sdiv_optab
->handlers
[(int) SImode
].libfunc
4150 = gen_rtx (SYMBOL_REF
, Pmode
, DIVSI3_LIBCALL
);
4152 #ifdef DIVDI3_LIBCALL
4153 sdiv_optab
->handlers
[(int) DImode
].libfunc
4154 = gen_rtx (SYMBOL_REF
, Pmode
, DIVDI3_LIBCALL
);
4157 #ifdef UDIVSI3_LIBCALL
4158 udiv_optab
->handlers
[(int) SImode
].libfunc
4159 = gen_rtx (SYMBOL_REF
, Pmode
, UDIVSI3_LIBCALL
);
4161 #ifdef UDIVDI3_LIBCALL
4162 udiv_optab
->handlers
[(int) DImode
].libfunc
4163 = gen_rtx (SYMBOL_REF
, Pmode
, UDIVDI3_LIBCALL
);
4166 #ifdef MODSI3_LIBCALL
4167 smod_optab
->handlers
[(int) SImode
].libfunc
4168 = gen_rtx (SYMBOL_REF
, Pmode
, MODSI3_LIBCALL
);
4170 #ifdef MODDI3_LIBCALL
4171 smod_optab
->handlers
[(int) DImode
].libfunc
4172 = gen_rtx (SYMBOL_REF
, Pmode
, MODDI3_LIBCALL
);
4175 #ifdef UMODSI3_LIBCALL
4176 umod_optab
->handlers
[(int) SImode
].libfunc
4177 = gen_rtx (SYMBOL_REF
, Pmode
, UMODSI3_LIBCALL
);
4179 #ifdef UMODDI3_LIBCALL
4180 umod_optab
->handlers
[(int) DImode
].libfunc
4181 = gen_rtx (SYMBOL_REF
, Pmode
, UMODDI3_LIBCALL
);
4184 /* Use cabs for DC complex abs, since systems generally have cabs.
4185 Don't define any libcall for SCmode, so that cabs will be used. */
4186 abs_optab
->handlers
[(int) DCmode
].libfunc
4187 = gen_rtx (SYMBOL_REF
, Pmode
, "cabs");
4189 /* The ffs function operates on `int'. */
4190 #ifndef INT_TYPE_SIZE
4191 #define INT_TYPE_SIZE BITS_PER_WORD
4193 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)] .libfunc
4194 = gen_rtx (SYMBOL_REF
, Pmode
, "ffs");
4196 extendsfdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extendsfdf2");
4197 extendsfxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extendsfxf2");
4198 extendsftf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extendsftf2");
4199 extenddfxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extenddfxf2");
4200 extenddftf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extenddftf2");
4202 truncdfsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__truncdfsf2");
4203 truncxfsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__truncxfsf2");
4204 trunctfsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__trunctfsf2");
4205 truncxfdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__truncxfdf2");
4206 trunctfdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__trunctfdf2");
4208 memcpy_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "memcpy");
4209 bcopy_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "bcopy");
4210 memcmp_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "memcmp");
4211 bcmp_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gcc_bcmp");
4212 memset_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "memset");
4213 bzero_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "bzero");
4215 eqhf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqhf2");
4216 nehf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__nehf2");
4217 gthf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gthf2");
4218 gehf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gehf2");
4219 lthf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lthf2");
4220 lehf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lehf2");
4222 eqsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqsf2");
4223 nesf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__nesf2");
4224 gtsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gtsf2");
4225 gesf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gesf2");
4226 ltsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__ltsf2");
4227 lesf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lesf2");
4229 eqdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqdf2");
4230 nedf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__nedf2");
4231 gtdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gtdf2");
4232 gedf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gedf2");
4233 ltdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__ltdf2");
4234 ledf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__ledf2");
4236 eqxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqxf2");
4237 nexf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__nexf2");
4238 gtxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gtxf2");
4239 gexf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gexf2");
4240 ltxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__ltxf2");
4241 lexf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lexf2");
4243 eqtf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqtf2");
4244 netf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__netf2");
4245 gttf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gttf2");
4246 getf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__getf2");
4247 lttf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lttf2");
4248 letf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__letf2");
4250 floatsisf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatsisf");
4251 floatdisf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatdisf");
4252 floattisf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floattisf");
4254 floatsidf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatsidf");
4255 floatdidf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatdidf");
4256 floattidf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floattidf");
4258 floatsixf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatsixf");
4259 floatdixf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatdixf");
4260 floattixf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floattixf");
4262 floatsitf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatsitf");
4263 floatditf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatditf");
4264 floattitf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floattitf");
4266 fixsfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixsfsi");
4267 fixsfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixsfdi");
4268 fixsfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixsfti");
4270 fixdfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixdfsi");
4271 fixdfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixdfdi");
4272 fixdfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixdfti");
4274 fixxfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixxfsi");
4275 fixxfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixxfdi");
4276 fixxfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixxfti");
4278 fixtfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixtfsi");
4279 fixtfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixtfdi");
4280 fixtfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixtfti");
4282 fixunssfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunssfsi");
4283 fixunssfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunssfdi");
4284 fixunssfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunssfti");
4286 fixunsdfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsdfsi");
4287 fixunsdfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsdfdi");
4288 fixunsdfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsdfti");
4290 fixunsxfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsxfsi");
4291 fixunsxfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsxfdi");
4292 fixunsxfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsxfti");
4294 fixunstfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunstfsi");
4295 fixunstfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunstfdi");
4296 fixunstfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunstfti");
4298 #ifdef INIT_TARGET_OPTABS
4299 /* Allow the target to add more libcalls or rename some, etc. */
4306 /* SCO 3.2 apparently has a broken ldexp. */
4319 #endif /* BROKEN_LDEXP */