1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 88, 92, 93, 94, 95, 1996 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
26 #include "insn-flags.h"
27 #include "insn-codes.h"
29 #include "insn-config.h"
34 /* Each optab contains info on how this target machine
35 can perform a particular operation
36 for all sizes and kinds of operands.
38 The operation to be performed is often specified
39 by passing one of these optabs as an argument.
41 See expr.h for documentation of these optabs. */
/* Optabs for multiply variants and related operations.
   NOTE(review): neighboring declarations were dropped by the extraction
   (embedded line numbers jump 49 -> 72 -> 83); verify against the full
   source before relying on this list being complete.  */
/* High-part multiply: produce the upper half of a double-width product,
   signed and unsigned variants.  */
46 optab smul_highpart_optab
;
47 optab umul_highpart_optab
;
/* Widening multiply: narrow x narrow -> wide product, signed/unsigned.  */
48 optab smul_widen_optab
;
49 optab umul_widen_optab
;
/* Move that stores only into the low part of a wider register
   (STRICT_LOW_PART-style move) -- presumably; confirm in full source.  */
72 optab movstrict_optab
;
83 optab ucmp_optab
; /* Used only for libcalls for unsigned comparisons. */
88 /* Tables of patterns for extending one integer mode to another. */
/* Indexed by [destination mode][source mode][x]; the final [2] index
   presumably selects the unsignedness variant (zero- vs sign-extend) --
   TODO confirm against the initialization code, which is not visible
   in this extraction.  */
89 enum insn_code extendtab
[MAX_MACHINE_MODE
][MAX_MACHINE_MODE
][2];
91 /* Tables of patterns for converting between fixed and floating point. */
/* fixtab: float -> integer conversion insns (rounding variant).  */
92 enum insn_code fixtab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
/* fixtrunctab: float -> integer conversion insns that truncate.  */
93 enum insn_code fixtrunctab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
/* floattab: integer -> float conversion insns.  */
94 enum insn_code floattab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
96 /* Contains the optab used for each rtx code. */
/* One extra slot beyond NUM_RTX_CODE, hence the "+ 1".  */
97 optab code_to_optab
[NUM_RTX_CODE
+ 1];
99 /* SYMBOL_REF rtx's for the library functions that are called
100 implicitly and not via optabs. */
/* Names encode source/destination modes: sf=SFmode, df=DFmode,
   xf=XFmode, tf=TFmode, si=SImode, di=DImode, ti=TImode.
   NOTE(review): the extraction skips runs of original lines between
   groups; the full source may declare additional libfuncs.  */
/* Float-to-wider-float extension libcalls.  */
102 rtx extendsfdf2_libfunc
;
103 rtx extendsfxf2_libfunc
;
104 rtx extendsftf2_libfunc
;
105 rtx extenddfxf2_libfunc
;
106 rtx extenddftf2_libfunc
;
/* Float-to-narrower-float truncation libcalls.  */
108 rtx truncdfsf2_libfunc
;
109 rtx truncxfsf2_libfunc
;
110 rtx trunctfsf2_libfunc
;
111 rtx truncxfdf2_libfunc
;
112 rtx trunctfdf2_libfunc
;
/* Integer-to-float conversion libcalls (SI/DI/TI source).  */
156 rtx floatsisf_libfunc
;
157 rtx floatdisf_libfunc
;
158 rtx floattisf_libfunc
;
160 rtx floatsidf_libfunc
;
161 rtx floatdidf_libfunc
;
162 rtx floattidf_libfunc
;
164 rtx floatsixf_libfunc
;
165 rtx floatdixf_libfunc
;
166 rtx floattixf_libfunc
;
168 rtx floatsitf_libfunc
;
169 rtx floatditf_libfunc
;
170 rtx floattitf_libfunc
;
/* Float-to-unsigned-integer conversion libcalls.  */
188 rtx fixunssfsi_libfunc
;
189 rtx fixunssfdi_libfunc
;
190 rtx fixunssfti_libfunc
;
192 rtx fixunsdfsi_libfunc
;
193 rtx fixunsdfdi_libfunc
;
194 rtx fixunsdfti_libfunc
;
196 rtx fixunsxfsi_libfunc
;
197 rtx fixunsxfdi_libfunc
;
198 rtx fixunsxfti_libfunc
;
200 rtx fixunstfsi_libfunc
;
201 rtx fixunstfdi_libfunc
;
202 rtx fixunstfti_libfunc
;
204 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
205 gives the gen_function to make a branch to test that condition. */
207 rtxfun bcc_gen_fctn
[NUM_RTX_CODE
];
209 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
210 gives the insn code to make a store-condition insn
211 to test that condition. */
213 enum insn_code setcc_gen_code
[NUM_RTX_CODE
];
215 #ifdef HAVE_conditional_move
216 /* Indexed by the machine mode, gives the insn code to make a conditional
217 move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
218 setcc_gen_code to cut down on the number of named patterns.  Consider a day
219 when a lot more rtx codes are conditional (eg: for the ARM). */
221 enum insn_code movcc_gen_code
[NUM_MACHINE_MODES
];
/* NOTE(review): the matching #endif (original line 222) is missing from
   this extraction; it must be restored from the full source.  */
/* Forward declarations of this file's static helpers.  PROTO is the
   old GCC compatibility macro that expands to an ANSI prototype or to
   empty parens under a K&R compiler.  */
224 static int add_equal_note
PROTO((rtx
, rtx
, enum rtx_code
, rtx
, rtx
));
225 static rtx widen_operand
PROTO((rtx
, enum machine_mode
,
226 enum machine_mode
, int, int));
/* NOTE(review): the trailing arguments and closing parens of the
   can_fix_p and can_float_p prototypes (original lines 228 and 230)
   are missing from this extraction -- restore from the full source.  */
227 static enum insn_code can_fix_p
PROTO((enum machine_mode
, enum machine_mode
,
229 static enum insn_code can_float_p
PROTO((enum machine_mode
, enum machine_mode
,
231 static rtx ftruncify
PROTO((rtx
));
232 static optab init_optab
PROTO((enum rtx_code
));
233 static void init_libfuncs
PROTO((optab
, int, int, char *, int));
234 static void init_integral_libfuncs
PROTO((optab
, char *, int));
235 static void init_floating_libfuncs
PROTO((optab
, char *, int));
236 static void init_complex_libfuncs
PROTO((optab
, char *, int));
238 /* Add a REG_EQUAL note to the last insn in SEQ. TARGET is being set to
239 the result of operation CODE applied to OP0 (and OP1 if it is a binary
242 If the last insn does not set TARGET, don't do anything, but return 1.
244 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
245 don't add the REG_EQUAL note but return 0. Our caller can then try
246 again, ensuring that TARGET is not one of the operands. */
/* NOTE(review): the return type, K&R parameter declarations, local
   declarations and several body lines (original lines 250-258, 266,
   269-271, 278-279, 282, 284, 288-290 -- including the return
   statements and closing brace) are missing from this extraction.
   The tokens below are preserved verbatim; restore the gaps from the
   full source before compiling.  */
249 add_equal_note (seq
, target
, code
, op0
, op1
)
/* Bail out (returning 1, per the comment above) unless CODE is a unary,
   binary, commutative or comparison code, SEQ is a SEQUENCE whose last
   insn is a single_set of TARGET (or of a STRICT_LOW_PART of it), and
   TARGET is not a ZERO_EXTRACT.  */
259 if ((GET_RTX_CLASS (code
) != '1' && GET_RTX_CLASS (code
) != '2'
260 && GET_RTX_CLASS (code
) != 'c' && GET_RTX_CLASS (code
) != '<')
261 || GET_CODE (seq
) != SEQUENCE
262 || (set
= single_set (XVECEXP (seq
, 0, XVECLEN (seq
, 0) - 1))) == 0
263 || GET_CODE (target
) == ZERO_EXTRACT
264 || (! rtx_equal_p (SET_DEST (set
), target
)
265 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside the
267 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
268 || ! rtx_equal_p (SUBREG_REG (XEXP (SET_DEST (set
), 0)),
272 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
273 besides the last insn. */
274 if (reg_overlap_mentioned_p (target
, op0
)
275 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
/* Scan every insn in SEQ except the last; a hit presumably returns 0
   (the "caller should retry without TARGET" case) -- the body of this
   if (original lines 278-279) is missing here.  */
276 for (i
= XVECLEN (seq
, 0) - 2; i
>= 0; i
--)
277 if (reg_set_p (target
, XVECEXP (seq
, 0, i
)))
/* Build the note rtx: unary codes take one operand, all others two.
   Operands are copied so the note does not share structure with the
   insn stream.  */
280 if (GET_RTX_CLASS (code
) == '1')
281 note
= gen_rtx (code
, GET_MODE (target
), copy_rtx (op0
));
283 note
= gen_rtx (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
/* Prepend the REG_EQUAL note onto the last insn of SEQ.  */
285 REG_NOTES (XVECEXP (seq
, 0, XVECLEN (seq
, 0) - 1))
286 = gen_rtx (EXPR_LIST
, REG_EQUAL
, note
,
287 REG_NOTES (XVECEXP (seq
, 0, XVECLEN (seq
, 0) - 1)));
292 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
293 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
294 not actually do a sign-extend or zero-extend, but can leave the
295 higher-order bits of the result rtx undefined, for example, in the case
296 of logical operations, but not right shifts. */
/* NOTE(review): the return type, remaining parameter declarations,
   several condition/body lines, and the trailing "return result;" plus
   closing brace (original lines 300, 302-306, 309-310, 314, 316, 319,
   321-322, 326-328) are missing from this extraction.  Tokens below
   are preserved verbatim; restore the gaps from the full source.  */
299 widen_operand (op
, mode
, oldmode
, unsignedp
, no_extend
)
301 enum machine_mode mode
, oldmode
;
307 /* If we must extend do so. If OP is either a constant or a SUBREG
308 for a promoted object, also extend since it will be more efficient to
/* (First arm of this condition -- presumably "if (! no_extend" or
   similar, original lines 309-310 -- is missing here.)  */
311 || GET_MODE (op
) == VOIDmode
312 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)))
313 return convert_modes (mode
, oldmode
, op
, unsignedp
);
315 /* If MODE is no wider than a single word, we return a paradoxical
317 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
/* Paradoxical SUBREG of OP forced into a register: upper bits are left
   undefined, which NO_EXTEND permits.  */
318 return gen_rtx (SUBREG
, mode
, force_reg (GET_MODE (op
), op
), 0);
320 /* Otherwise, get an object of MODE, clobber it, and set the low-order
323 result
= gen_reg_rtx (mode
);
/* The CLOBBER tells flow analysis the whole register is being set,
   so the partial move below is not a partial-register update.  */
324 emit_insn (gen_rtx (CLOBBER
, VOIDmode
, result
));
325 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
329 /* Generate code to perform an operation specified by BINOPTAB
330 on operands OP0 and OP1, with result having machine-mode MODE.
332 UNSIGNEDP is for the case where we have to widen the operands
333 to perform the operation. It says to use zero-extension.
335 If TARGET is nonzero, the value
336 is generated there, if it is convenient to do so.
337 In all cases an rtx is returned for the locus of the value;
338 this may or may not be TARGET. */
341 expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
)
342 enum machine_mode mode
;
347 enum optab_methods methods
;
349 enum optab_methods next_methods
350 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
351 ? OPTAB_WIDEN
: methods
);
352 enum mode_class
class;
353 enum machine_mode wider_mode
;
355 int commutative_op
= 0;
356 int shift_op
= (binoptab
->code
== ASHIFT
357 || binoptab
->code
== ASHIFTRT
358 || binoptab
->code
== LSHIFTRT
359 || binoptab
->code
== ROTATE
360 || binoptab
->code
== ROTATERT
);
361 rtx entry_last
= get_last_insn ();
364 class = GET_MODE_CLASS (mode
);
366 op0
= protect_from_queue (op0
, 0);
367 op1
= protect_from_queue (op1
, 0);
369 target
= protect_from_queue (target
, 1);
373 op0
= force_not_mem (op0
);
374 op1
= force_not_mem (op1
);
377 /* If subtracting an integer constant, convert this into an addition of
378 the negated constant. */
380 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
382 op1
= negate_rtx (mode
, op1
);
383 binoptab
= add_optab
;
386 /* If we are inside an appropriately-short loop and one operand is an
387 expensive constant, force it into a register. */
388 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
389 && rtx_cost (op0
, binoptab
->code
) > 2)
390 op0
= force_reg (mode
, op0
);
392 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
393 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > 2)
394 op1
= force_reg (mode
, op1
);
396 /* Record where to delete back to if we backtrack. */
397 last
= get_last_insn ();
399 /* If operation is commutative,
400 try to make the first operand a register.
401 Even better, try to make it the same as the target.
402 Also try to make the last operand a constant. */
403 if (GET_RTX_CLASS (binoptab
->code
) == 'c'
404 || binoptab
== smul_widen_optab
405 || binoptab
== umul_widen_optab
406 || binoptab
== smul_highpart_optab
407 || binoptab
== umul_highpart_optab
)
411 if (((target
== 0 || GET_CODE (target
) == REG
)
412 ? ((GET_CODE (op1
) == REG
413 && GET_CODE (op0
) != REG
)
415 : rtx_equal_p (op1
, target
))
416 || GET_CODE (op0
) == CONST_INT
)
424 /* If we can do it with a three-operand insn, do so. */
426 if (methods
!= OPTAB_MUST_WIDEN
427 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
429 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
430 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
431 enum machine_mode mode1
= insn_operand_mode
[icode
][2];
433 rtx xop0
= op0
, xop1
= op1
;
438 temp
= gen_reg_rtx (mode
);
440 /* If it is a commutative operator and the modes would match
441 if we would swap the operands, we can save the conversions. */
444 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
445 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
449 tmp
= op0
; op0
= op1
; op1
= tmp
;
450 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
454 /* In case the insn wants input operands in modes different from
455 the result, convert the operands. */
457 if (GET_MODE (op0
) != VOIDmode
458 && GET_MODE (op0
) != mode0
459 && mode0
!= VOIDmode
)
460 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
462 if (GET_MODE (xop1
) != VOIDmode
463 && GET_MODE (xop1
) != mode1
464 && mode1
!= VOIDmode
)
465 xop1
= convert_to_mode (mode1
, xop1
, unsignedp
);
467 /* Now, if insn's predicates don't allow our operands, put them into
470 if (! (*insn_operand_predicate
[icode
][1]) (xop0
, mode0
)
471 && mode0
!= VOIDmode
)
472 xop0
= copy_to_mode_reg (mode0
, xop0
);
474 if (! (*insn_operand_predicate
[icode
][2]) (xop1
, mode1
)
475 && mode1
!= VOIDmode
)
476 xop1
= copy_to_mode_reg (mode1
, xop1
);
478 if (! (*insn_operand_predicate
[icode
][0]) (temp
, mode
))
479 temp
= gen_reg_rtx (mode
);
481 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
484 /* If PAT is a multi-insn sequence, try to add an appropriate
485 REG_EQUAL note to it. If we can't because TEMP conflicts with an
486 operand, call ourselves again, this time without a target. */
487 if (GET_CODE (pat
) == SEQUENCE
488 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
490 delete_insns_since (last
);
491 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
499 delete_insns_since (last
);
502 /* If this is a multiply, see if we can do a widening operation that
503 takes operands of this mode and makes a wider mode. */
505 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
506 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
507 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
508 != CODE_FOR_nothing
))
510 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
511 unsignedp
? umul_widen_optab
: smul_widen_optab
,
512 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
516 if (GET_MODE_CLASS (mode
) == MODE_INT
)
517 return gen_lowpart (mode
, temp
);
519 return convert_to_mode (mode
, temp
, unsignedp
);
523 /* Look for a wider mode of the same class for which we think we
524 can open-code the operation. Check for a widening multiply at the
525 wider mode as well. */
527 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
528 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
529 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
530 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
532 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
533 || (binoptab
== smul_optab
534 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
535 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
536 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
537 != CODE_FOR_nothing
)))
539 rtx xop0
= op0
, xop1
= op1
;
542 /* For certain integer operations, we need not actually extend
543 the narrow operands, as long as we will truncate
544 the results to the same narrowness. */
546 if ((binoptab
== ior_optab
|| binoptab
== and_optab
547 || binoptab
== xor_optab
548 || binoptab
== add_optab
|| binoptab
== sub_optab
549 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
550 && class == MODE_INT
)
553 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
555 /* The second operand of a shift must always be extended. */
556 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
557 no_extend
&& binoptab
!= ashl_optab
);
559 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
560 unsignedp
, OPTAB_DIRECT
);
563 if (class != MODE_INT
)
566 target
= gen_reg_rtx (mode
);
567 convert_move (target
, temp
, 0);
571 return gen_lowpart (mode
, temp
);
574 delete_insns_since (last
);
578 /* These can be done a word at a time. */
579 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
581 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
582 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
588 /* If TARGET is the same as one of the operands, the REG_EQUAL note
589 won't be accurate, so use a new target. */
590 if (target
== 0 || target
== op0
|| target
== op1
)
591 target
= gen_reg_rtx (mode
);
595 /* Do the actual arithmetic. */
596 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
598 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
599 rtx x
= expand_binop (word_mode
, binoptab
,
600 operand_subword_force (op0
, i
, mode
),
601 operand_subword_force (op1
, i
, mode
),
602 target_piece
, unsignedp
, next_methods
);
607 if (target_piece
!= x
)
608 emit_move_insn (target_piece
, x
);
611 insns
= get_insns ();
614 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
616 if (binoptab
->code
!= UNKNOWN
)
618 = gen_rtx (binoptab
->code
, mode
, copy_rtx (op0
), copy_rtx (op1
));
622 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
627 /* Synthesize double word shifts from single word shifts. */
628 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
629 || binoptab
== ashr_optab
)
631 && GET_CODE (op1
) == CONST_INT
632 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
633 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
634 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
635 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
637 rtx insns
, inter
, equiv_value
;
638 rtx into_target
, outof_target
;
639 rtx into_input
, outof_input
;
640 int shift_count
, left_shift
, outof_word
;
642 /* If TARGET is the same as one of the operands, the REG_EQUAL note
643 won't be accurate, so use a new target. */
644 if (target
== 0 || target
== op0
|| target
== op1
)
645 target
= gen_reg_rtx (mode
);
649 shift_count
= INTVAL (op1
);
651 /* OUTOF_* is the word we are shifting bits away from, and
652 INTO_* is the word that we are shifting bits towards, thus
653 they differ depending on the direction of the shift and
656 left_shift
= binoptab
== ashl_optab
;
657 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
659 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
660 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
662 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
663 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
665 if (shift_count
>= BITS_PER_WORD
)
667 inter
= expand_binop (word_mode
, binoptab
,
669 GEN_INT (shift_count
- BITS_PER_WORD
),
670 into_target
, unsignedp
, next_methods
);
672 if (inter
!= 0 && inter
!= into_target
)
673 emit_move_insn (into_target
, inter
);
675 /* For a signed right shift, we must fill the word we are shifting
676 out of with copies of the sign bit. Otherwise it is zeroed. */
677 if (inter
!= 0 && binoptab
!= ashr_optab
)
678 inter
= CONST0_RTX (word_mode
);
680 inter
= expand_binop (word_mode
, binoptab
,
682 GEN_INT (BITS_PER_WORD
- 1),
683 outof_target
, unsignedp
, next_methods
);
685 if (inter
!= 0 && inter
!= outof_target
)
686 emit_move_insn (outof_target
, inter
);
691 optab reverse_unsigned_shift
, unsigned_shift
;
693 /* For a shift of less then BITS_PER_WORD, to compute the carry,
694 we must do a logical shift in the opposite direction of the
697 reverse_unsigned_shift
= (left_shift
? lshr_optab
: ashl_optab
);
699 /* For a shift of less than BITS_PER_WORD, to compute the word
700 shifted towards, we need to unsigned shift the orig value of
703 unsigned_shift
= (left_shift
? ashl_optab
: lshr_optab
);
705 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
707 GEN_INT (BITS_PER_WORD
- shift_count
),
708 0, unsignedp
, next_methods
);
713 inter
= expand_binop (word_mode
, unsigned_shift
, into_input
,
714 op1
, 0, unsignedp
, next_methods
);
717 inter
= expand_binop (word_mode
, ior_optab
, carries
, inter
,
718 into_target
, unsignedp
, next_methods
);
720 if (inter
!= 0 && inter
!= into_target
)
721 emit_move_insn (into_target
, inter
);
724 inter
= expand_binop (word_mode
, binoptab
, outof_input
,
725 op1
, outof_target
, unsignedp
, next_methods
);
727 if (inter
!= 0 && inter
!= outof_target
)
728 emit_move_insn (outof_target
, inter
);
731 insns
= get_insns ();
736 if (binoptab
->code
!= UNKNOWN
)
737 equiv_value
= gen_rtx (binoptab
->code
, mode
, op0
, op1
);
741 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
746 /* Synthesize double word rotates from single word shifts. */
747 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
749 && GET_CODE (op1
) == CONST_INT
750 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
751 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
752 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
754 rtx insns
, equiv_value
;
755 rtx into_target
, outof_target
;
756 rtx into_input
, outof_input
;
758 int shift_count
, left_shift
, outof_word
;
760 /* If TARGET is the same as one of the operands, the REG_EQUAL note
761 won't be accurate, so use a new target. */
762 if (target
== 0 || target
== op0
|| target
== op1
)
763 target
= gen_reg_rtx (mode
);
767 shift_count
= INTVAL (op1
);
769 /* OUTOF_* is the word we are shifting bits away from, and
770 INTO_* is the word that we are shifting bits towards, thus
771 they differ depending on the direction of the shift and
774 left_shift
= (binoptab
== rotl_optab
);
775 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
777 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
778 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
780 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
781 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
783 if (shift_count
== BITS_PER_WORD
)
785 /* This is just a word swap. */
786 emit_move_insn (outof_target
, into_input
);
787 emit_move_insn (into_target
, outof_input
);
792 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
793 rtx first_shift_count
, second_shift_count
;
794 optab reverse_unsigned_shift
, unsigned_shift
;
796 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
797 ? lshr_optab
: ashl_optab
);
799 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
800 ? ashl_optab
: lshr_optab
);
802 if (shift_count
> BITS_PER_WORD
)
804 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
805 second_shift_count
= GEN_INT (2*BITS_PER_WORD
- shift_count
);
809 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
810 second_shift_count
= GEN_INT (shift_count
);
813 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
814 outof_input
, first_shift_count
,
815 NULL_RTX
, unsignedp
, next_methods
);
816 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
817 into_input
, second_shift_count
,
818 into_target
, unsignedp
, next_methods
);
820 if (into_temp1
!= 0 && into_temp2
!= 0)
821 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
822 into_target
, unsignedp
, next_methods
);
826 if (inter
!= 0 && inter
!= into_target
)
827 emit_move_insn (into_target
, inter
);
829 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
830 into_input
, first_shift_count
,
831 NULL_RTX
, unsignedp
, next_methods
);
832 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
833 outof_input
, second_shift_count
,
834 outof_target
, unsignedp
, next_methods
);
836 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
837 inter
= expand_binop (word_mode
, ior_optab
,
838 outof_temp1
, outof_temp2
,
839 outof_target
, unsignedp
, next_methods
);
841 if (inter
!= 0 && inter
!= outof_target
)
842 emit_move_insn (outof_target
, inter
);
845 insns
= get_insns ();
850 if (binoptab
->code
!= UNKNOWN
)
851 equiv_value
= gen_rtx (binoptab
->code
, mode
, op0
, op1
);
855 /* We can't make this a no conflict block if this is a word swap,
856 because the word swap case fails if the input and output values
857 are in the same register. */
858 if (shift_count
!= BITS_PER_WORD
)
859 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
868 /* These can be done a word at a time by propagating carries. */
869 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
871 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
872 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
875 rtx carry_tmp
= gen_reg_rtx (word_mode
);
876 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
877 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
878 rtx carry_in
, carry_out
;
881 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
882 value is one of those, use it. Otherwise, use 1 since it is the
883 one easiest to get. */
884 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
885 int normalizep
= STORE_FLAG_VALUE
;
890 /* Prepare the operands. */
891 xop0
= force_reg (mode
, op0
);
892 xop1
= force_reg (mode
, op1
);
894 if (target
== 0 || GET_CODE (target
) != REG
895 || target
== xop0
|| target
== xop1
)
896 target
= gen_reg_rtx (mode
);
898 /* Indicate for flow that the entire target reg is being set. */
899 if (GET_CODE (target
) == REG
)
900 emit_insn (gen_rtx (CLOBBER
, VOIDmode
, target
));
902 /* Do the actual arithmetic. */
903 for (i
= 0; i
< nwords
; i
++)
905 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
906 rtx target_piece
= operand_subword (target
, index
, 1, mode
);
907 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
908 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
911 /* Main add/subtract of the input operands. */
912 x
= expand_binop (word_mode
, binoptab
,
913 op0_piece
, op1_piece
,
914 target_piece
, unsignedp
, next_methods
);
920 /* Store carry from main add/subtract. */
921 carry_out
= gen_reg_rtx (word_mode
);
922 carry_out
= emit_store_flag (carry_out
,
923 binoptab
== add_optab
? LTU
: GTU
,
925 word_mode
, 1, normalizep
);
932 /* Add/subtract previous carry to main result. */
933 x
= expand_binop (word_mode
,
934 normalizep
== 1 ? binoptab
: otheroptab
,
936 target_piece
, 1, next_methods
);
939 else if (target_piece
!= x
)
940 emit_move_insn (target_piece
, x
);
944 /* THIS CODE HAS NOT BEEN TESTED. */
945 /* Get out carry from adding/subtracting carry in. */
946 carry_tmp
= emit_store_flag (carry_tmp
,
947 binoptab
== add_optab
950 word_mode
, 1, normalizep
);
952 /* Logical-ior the two poss. carry together. */
953 carry_out
= expand_binop (word_mode
, ior_optab
,
954 carry_out
, carry_tmp
,
955 carry_out
, 0, next_methods
);
961 carry_in
= carry_out
;
964 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
966 rtx temp
= emit_move_insn (target
, target
);
968 REG_NOTES (temp
) = gen_rtx (EXPR_LIST
, REG_EQUAL
,
969 gen_rtx (binoptab
->code
, mode
,
976 delete_insns_since (last
);
979 /* If we want to multiply two two-word values and have normal and widening
980 multiplies of single-word values, we can do this with three smaller
981 multiplications. Note that we do not make a REG_NO_CONFLICT block here
982 because we are not operating on one word at a time.
984 The multiplication proceeds as follows:
985 _______________________
986 [__op0_high_|__op0_low__]
987 _______________________
988 * [__op1_high_|__op1_low__]
989 _______________________________________________
990 _______________________
991 (1) [__op0_low__*__op1_low__]
992 _______________________
993 (2a) [__op0_low__*__op1_high_]
994 _______________________
995 (2b) [__op0_high_*__op1_low__]
996 _______________________
997 (3) [__op0_high_*__op1_high_]
1000 This gives a 4-word result. Since we are only interested in the
1001 lower 2 words, partial result (3) and the upper words of (2a) and
1002 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1003 calculated using non-widening multiplication.
1005 (1), however, needs to be calculated with an unsigned widening
1006 multiplication. If this operation is not directly supported we
1007 try using a signed widening multiplication and adjust the result.
1008 This adjustment works as follows:
1010 If both operands are positive then no adjustment is needed.
1012 If the operands have different signs, for example op0_low < 0 and
1013 op1_low >= 0, the instruction treats the most significant bit of
1014 op0_low as a sign bit instead of a bit with significance
1015 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1016 with 2**BITS_PER_WORD - op0_low, and two's complements the
1017 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1020 Similarly, if both operands are negative, we need to add
1021 (op0_low + op1_low) * 2**BITS_PER_WORD.
1023 We use a trick to adjust quickly. We logically shift op0_low right
1024 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1025 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1026 logical shift exists, we do an arithmetic right shift and subtract
1029 if (binoptab
== smul_optab
1030 && class == MODE_INT
1031 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1032 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1033 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1034 && ((umul_widen_optab
->handlers
[(int) mode
].insn_code
1035 != CODE_FOR_nothing
)
1036 || (smul_widen_optab
->handlers
[(int) mode
].insn_code
1037 != CODE_FOR_nothing
)))
1039 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
1040 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
1041 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
1042 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
1043 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
1044 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
1049 /* If the target is the same as one of the inputs, don't use it. This
1050 prevents problems with the REG_EQUAL note. */
1051 if (target
== op0
|| target
== op1
1052 || (target
!= 0 && GET_CODE (target
) != REG
))
1055 /* Multiply the two lower words to get a double-word product.
1056 If unsigned widening multiplication is available, use that;
1057 otherwise use the signed form and compensate. */
1059 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1061 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
1062 target
, 1, OPTAB_DIRECT
);
1064 /* If we didn't succeed, delete everything we did so far. */
1066 delete_insns_since (last
);
1068 op0_xhigh
= op0_high
, op1_xhigh
= op1_high
;
1072 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1073 != CODE_FOR_nothing
)
1075 rtx wordm1
= GEN_INT (BITS_PER_WORD
- 1);
1076 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
1077 target
, 1, OPTAB_DIRECT
);
1078 op0_xhigh
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
1079 NULL_RTX
, 1, next_methods
);
1081 op0_xhigh
= expand_binop (word_mode
, add_optab
, op0_high
,
1082 op0_xhigh
, op0_xhigh
, 0, next_methods
);
1085 op0_xhigh
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
1086 NULL_RTX
, 0, next_methods
);
1088 op0_xhigh
= expand_binop (word_mode
, sub_optab
, op0_high
,
1089 op0_xhigh
, op0_xhigh
, 0,
1093 op1_xhigh
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
1094 NULL_RTX
, 1, next_methods
);
1096 op1_xhigh
= expand_binop (word_mode
, add_optab
, op1_high
,
1097 op1_xhigh
, op1_xhigh
, 0, next_methods
);
1100 op1_xhigh
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
1101 NULL_RTX
, 0, next_methods
);
1103 op1_xhigh
= expand_binop (word_mode
, sub_optab
, op1_high
,
1104 op1_xhigh
, op1_xhigh
, 0,
1109 /* If we have been able to directly compute the product of the
1110 low-order words of the operands and perform any required adjustments
1111 of the operands, we proceed by trying two more multiplications
1112 and then computing the appropriate sum.
1114 We have checked above that the required addition is provided.
1115 Full-word addition will normally always succeed, especially if
1116 it is provided at all, so we don't worry about its failure. The
1117 multiplication may well fail, however, so we do handle that. */
1119 if (product
&& op0_xhigh
&& op1_xhigh
)
1121 rtx product_high
= operand_subword (product
, high
, 1, mode
);
1122 rtx temp
= expand_binop (word_mode
, binoptab
, op0_low
, op1_xhigh
,
1123 NULL_RTX
, 0, OPTAB_DIRECT
);
1126 temp
= expand_binop (word_mode
, add_optab
, temp
, product_high
,
1127 product_high
, 0, next_methods
);
1129 if (temp
!= 0 && temp
!= product_high
)
1130 emit_move_insn (product_high
, temp
);
1133 temp
= expand_binop (word_mode
, binoptab
, op1_low
, op0_xhigh
,
1134 NULL_RTX
, 0, OPTAB_DIRECT
);
1137 temp
= expand_binop (word_mode
, add_optab
, temp
,
1138 product_high
, product_high
,
1141 if (temp
!= 0 && temp
!= product_high
)
1142 emit_move_insn (product_high
, temp
);
1146 temp
= emit_move_insn (product
, product
);
1147 REG_NOTES (temp
) = gen_rtx (EXPR_LIST
, REG_EQUAL
,
1148 gen_rtx (MULT
, mode
, copy_rtx (op0
),
1156 /* If we get here, we couldn't do it for some reason even though we
1157 originally thought we could. Delete anything we've emitted in
1160 delete_insns_since (last
);
1163 /* We need to open-code the complex type operations: '+, -, * and /' */
1165 /* At this point we allow operations between two similar complex
1166 numbers, and also if one of the operands is not a complex number
1167 but rather of MODE_FLOAT or MODE_INT. However, the caller
1168 must make sure that the MODE of the non-complex operand matches
1169 the SUBMODE of the complex operand. */
1171 if (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
)
1173 rtx real0
= 0, imag0
= 0;
1174 rtx real1
= 0, imag1
= 0;
1175 rtx realr
, imagr
, res
;
1180 /* Find the correct mode for the real and imaginary parts */
1181 enum machine_mode submode
1182 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
1183 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
1186 if (submode
== BLKmode
)
1190 target
= gen_reg_rtx (mode
);
1194 realr
= gen_realpart (submode
, target
);
1195 imagr
= gen_imagpart (submode
, target
);
1197 if (GET_MODE (op0
) == mode
)
1199 real0
= gen_realpart (submode
, op0
);
1200 imag0
= gen_imagpart (submode
, op0
);
1205 if (GET_MODE (op1
) == mode
)
1207 real1
= gen_realpart (submode
, op1
);
1208 imag1
= gen_imagpart (submode
, op1
);
1213 if (real0
== 0 || real1
== 0 || ! (imag0
!= 0|| imag1
!= 0))
1216 switch (binoptab
->code
)
1219 /* (a+ib) + (c+id) = (a+c) + i(b+d) */
1221 /* (a+ib) - (c+id) = (a-c) + i(b-d) */
1222 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1223 realr
, unsignedp
, methods
);
1227 else if (res
!= realr
)
1228 emit_move_insn (realr
, res
);
1231 res
= expand_binop (submode
, binoptab
, imag0
, imag1
,
1232 imagr
, unsignedp
, methods
);
1235 else if (binoptab
->code
== MINUS
)
1236 res
= expand_unop (submode
, neg_optab
, imag1
, imagr
, unsignedp
);
1242 else if (res
!= imagr
)
1243 emit_move_insn (imagr
, res
);
1249 /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */
1255 /* Don't fetch these from memory more than once. */
1256 real0
= force_reg (submode
, real0
);
1257 real1
= force_reg (submode
, real1
);
1258 imag0
= force_reg (submode
, imag0
);
1259 imag1
= force_reg (submode
, imag1
);
1261 temp1
= expand_binop (submode
, binoptab
, real0
, real1
, NULL_RTX
,
1262 unsignedp
, methods
);
1264 temp2
= expand_binop (submode
, binoptab
, imag0
, imag1
, NULL_RTX
,
1265 unsignedp
, methods
);
1267 if (temp1
== 0 || temp2
== 0)
1270 res
= expand_binop (submode
, sub_optab
, temp1
, temp2
,
1271 realr
, unsignedp
, methods
);
1275 else if (res
!= realr
)
1276 emit_move_insn (realr
, res
);
1278 temp1
= expand_binop (submode
, binoptab
, real0
, imag1
,
1279 NULL_RTX
, unsignedp
, methods
);
1281 temp2
= expand_binop (submode
, binoptab
, real1
, imag0
,
1282 NULL_RTX
, unsignedp
, methods
);
1284 if (temp1
== 0 || temp2
== 0)
1287 res
= expand_binop (submode
, add_optab
, temp1
, temp2
,
1288 imagr
, unsignedp
, methods
);
1292 else if (res
!= imagr
)
1293 emit_move_insn (imagr
, res
);
1299 /* Don't fetch these from memory more than once. */
1300 real0
= force_reg (submode
, real0
);
1301 real1
= force_reg (submode
, real1
);
1303 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1304 realr
, unsignedp
, methods
);
1307 else if (res
!= realr
)
1308 emit_move_insn (realr
, res
);
1311 res
= expand_binop (submode
, binoptab
,
1312 real1
, imag0
, imagr
, unsignedp
, methods
);
1314 res
= expand_binop (submode
, binoptab
,
1315 real0
, imag1
, imagr
, unsignedp
, methods
);
1319 else if (res
!= imagr
)
1320 emit_move_insn (imagr
, res
);
1327 /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */
1331 /* (a+ib) / (c+i0) = (a/c) + i(b/c) */
1333 /* Don't fetch these from memory more than once. */
1334 real1
= force_reg (submode
, real1
);
1336 /* Simply divide the real and imaginary parts by `c' */
1337 if (class == MODE_COMPLEX_FLOAT
)
1338 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1339 realr
, unsignedp
, methods
);
1341 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1342 real0
, real1
, realr
, unsignedp
);
1346 else if (res
!= realr
)
1347 emit_move_insn (realr
, res
);
1349 if (class == MODE_COMPLEX_FLOAT
)
1350 res
= expand_binop (submode
, binoptab
, imag0
, real1
,
1351 imagr
, unsignedp
, methods
);
1353 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1354 imag0
, real1
, imagr
, unsignedp
);
1358 else if (res
!= imagr
)
1359 emit_move_insn (imagr
, res
);
1365 /* Divisor is of complex type:
1372 /* Don't fetch these from memory more than once. */
1373 real0
= force_reg (submode
, real0
);
1374 real1
= force_reg (submode
, real1
);
1377 imag0
= force_reg (submode
, imag0
);
1379 imag1
= force_reg (submode
, imag1
);
1381 /* Divisor: c*c + d*d */
1382 temp1
= expand_binop (submode
, smul_optab
, real1
, real1
,
1383 NULL_RTX
, unsignedp
, methods
);
1385 temp2
= expand_binop (submode
, smul_optab
, imag1
, imag1
,
1386 NULL_RTX
, unsignedp
, methods
);
1388 if (temp1
== 0 || temp2
== 0)
1391 divisor
= expand_binop (submode
, add_optab
, temp1
, temp2
,
1392 NULL_RTX
, unsignedp
, methods
);
1398 /* ((a)(c-id))/divisor */
1399 /* (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)) */
1401 /* Calculate the dividend */
1402 real_t
= expand_binop (submode
, smul_optab
, real0
, real1
,
1403 NULL_RTX
, unsignedp
, methods
);
1405 imag_t
= expand_binop (submode
, smul_optab
, real0
, imag1
,
1406 NULL_RTX
, unsignedp
, methods
);
1408 if (real_t
== 0 || imag_t
== 0)
1411 imag_t
= expand_unop (submode
, neg_optab
, imag_t
,
1412 NULL_RTX
, unsignedp
);
1416 /* ((a+ib)(c-id))/divider */
1417 /* Calculate the dividend */
1418 temp1
= expand_binop (submode
, smul_optab
, real0
, real1
,
1419 NULL_RTX
, unsignedp
, methods
);
1421 temp2
= expand_binop (submode
, smul_optab
, imag0
, imag1
,
1422 NULL_RTX
, unsignedp
, methods
);
1424 if (temp1
== 0 || temp2
== 0)
1427 real_t
= expand_binop (submode
, add_optab
, temp1
, temp2
,
1428 NULL_RTX
, unsignedp
, methods
);
1430 temp1
= expand_binop (submode
, smul_optab
, imag0
, real1
,
1431 NULL_RTX
, unsignedp
, methods
);
1433 temp2
= expand_binop (submode
, smul_optab
, real0
, imag1
,
1434 NULL_RTX
, unsignedp
, methods
);
1436 if (temp1
== 0 || temp2
== 0)
1439 imag_t
= expand_binop (submode
, sub_optab
, temp1
, temp2
,
1440 NULL_RTX
, unsignedp
, methods
);
1442 if (real_t
== 0 || imag_t
== 0)
1446 if (class == MODE_COMPLEX_FLOAT
)
1447 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
1448 realr
, unsignedp
, methods
);
1450 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1451 real_t
, divisor
, realr
, unsignedp
);
1455 else if (res
!= realr
)
1456 emit_move_insn (realr
, res
);
1458 if (class == MODE_COMPLEX_FLOAT
)
1459 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
1460 imagr
, unsignedp
, methods
);
1462 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1463 imag_t
, divisor
, imagr
, unsignedp
);
1467 else if (res
!= imagr
)
1468 emit_move_insn (imagr
, res
);
1483 if (binoptab
->code
!= UNKNOWN
)
1485 = gen_rtx (binoptab
->code
, mode
, copy_rtx (op0
), copy_rtx (op1
));
1489 emit_no_conflict_block (seq
, target
, op0
, op1
, equiv_value
);
1495 /* It can't be open-coded in this mode.
1496 Use a library call if one is available and caller says that's ok. */
1498 if (binoptab
->handlers
[(int) mode
].libfunc
1499 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1502 rtx funexp
= binoptab
->handlers
[(int) mode
].libfunc
;
1504 enum machine_mode op1_mode
= mode
;
1511 op1_mode
= word_mode
;
1512 /* Specify unsigned here,
1513 since negative shift counts are meaningless. */
1514 op1x
= convert_to_mode (word_mode
, op1
, 1);
1517 if (GET_MODE (op0
) != VOIDmode
1518 && GET_MODE (op0
) != mode
)
1519 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1521 /* Pass 1 for NO_QUEUE so we don't lose any increments
1522 if the libcall is cse'd or moved. */
1523 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1524 NULL_RTX
, 1, mode
, 2,
1525 op0
, mode
, op1x
, op1_mode
);
1527 insns
= get_insns ();
1530 target
= gen_reg_rtx (mode
);
1531 emit_libcall_block (insns
, target
, value
,
1532 gen_rtx (binoptab
->code
, mode
, op0
, op1
));
1537 delete_insns_since (last
);
1539 /* It can't be done in this mode. Can we do it in a wider mode? */
1541 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1542 || methods
== OPTAB_MUST_WIDEN
))
1544 /* Caller says, don't even try. */
1545 delete_insns_since (entry_last
);
1549 /* Compute the value of METHODS to pass to recursive calls.
1550 Don't allow widening to be tried recursively. */
1552 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1554 /* Look for a wider mode of the same class for which it appears we can do
1557 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1559 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1560 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1562 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1563 != CODE_FOR_nothing
)
1564 || (methods
== OPTAB_LIB
1565 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1567 rtx xop0
= op0
, xop1
= op1
;
1570 /* For certain integer operations, we need not actually extend
1571 the narrow operands, as long as we will truncate
1572 the results to the same narrowness. */
1574 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1575 || binoptab
== xor_optab
1576 || binoptab
== add_optab
|| binoptab
== sub_optab
1577 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1578 && class == MODE_INT
)
1581 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1582 unsignedp
, no_extend
);
1584 /* The second operand of a shift must always be extended. */
1585 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1586 no_extend
&& binoptab
!= ashl_optab
);
1588 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1589 unsignedp
, methods
);
1592 if (class != MODE_INT
)
1595 target
= gen_reg_rtx (mode
);
1596 convert_move (target
, temp
, 0);
1600 return gen_lowpart (mode
, temp
);
1603 delete_insns_since (last
);
1608 delete_insns_since (entry_last
);
1612 /* Expand a binary operator which has both signed and unsigned forms.
1613 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1616 If we widen unsigned operands, we may use a signed wider operation instead
1617 of an unsigned wider operation, since the result would be the same. */
1620 sign_expand_binop (mode
, uoptab
, soptab
, op0
, op1
, target
, unsignedp
, methods
)
1621 enum machine_mode mode
;
1622 optab uoptab
, soptab
;
1623 rtx op0
, op1
, target
;
1625 enum optab_methods methods
;
1628 optab direct_optab
= unsignedp
? uoptab
: soptab
;
1629 struct optab wide_soptab
;
1631 /* Do it without widening, if possible. */
1632 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
1633 unsignedp
, OPTAB_DIRECT
);
1634 if (temp
|| methods
== OPTAB_DIRECT
)
1637 /* Try widening to a signed int. Make a fake signed optab that
1638 hides any signed insn for direct use. */
1639 wide_soptab
= *soptab
;
1640 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
1641 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
1643 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1644 unsignedp
, OPTAB_WIDEN
);
1646 /* For unsigned operands, try widening to an unsigned int. */
1647 if (temp
== 0 && unsignedp
)
1648 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
1649 unsignedp
, OPTAB_WIDEN
);
1650 if (temp
|| methods
== OPTAB_WIDEN
)
1653 /* Use the right width lib call if that exists. */
1654 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
1655 if (temp
|| methods
== OPTAB_LIB
)
1658 /* Must widen and use a lib call, use either signed or unsigned. */
1659 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1660 unsignedp
, methods
);
1664 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
1665 unsignedp
, methods
);
1669 /* Generate code to perform an operation specified by BINOPTAB
1670 on operands OP0 and OP1, with two results to TARG1 and TARG2.
1671 We assume that the order of the operands for the instruction
1672 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1673 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1675 Either TARG0 or TARG1 may be zero, but what that means is that
1676 that result is not actually wanted. We will generate it into
1677 a dummy pseudo-reg and discard it. They may not both be zero.
1679 Returns 1 if this operation can be performed; 0 if not. */
1682 expand_twoval_binop (binoptab
, op0
, op1
, targ0
, targ1
, unsignedp
)
1688 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1689 enum mode_class
class;
1690 enum machine_mode wider_mode
;
1691 rtx entry_last
= get_last_insn ();
1694 class = GET_MODE_CLASS (mode
);
1696 op0
= protect_from_queue (op0
, 0);
1697 op1
= protect_from_queue (op1
, 0);
1701 op0
= force_not_mem (op0
);
1702 op1
= force_not_mem (op1
);
1705 /* If we are inside an appropriately-short loop and one operand is an
1706 expensive constant, force it into a register. */
1707 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
1708 && rtx_cost (op0
, binoptab
->code
) > 2)
1709 op0
= force_reg (mode
, op0
);
1711 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
1712 && rtx_cost (op1
, binoptab
->code
) > 2)
1713 op1
= force_reg (mode
, op1
);
1716 targ0
= protect_from_queue (targ0
, 1);
1718 targ0
= gen_reg_rtx (mode
);
1720 targ1
= protect_from_queue (targ1
, 1);
1722 targ1
= gen_reg_rtx (mode
);
1724 /* Record where to go back to if we fail. */
1725 last
= get_last_insn ();
1727 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1729 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
1730 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
1731 enum machine_mode mode1
= insn_operand_mode
[icode
][2];
1733 rtx xop0
= op0
, xop1
= op1
;
1735 /* In case this insn wants input operands in modes different from the
1736 result, convert the operands. */
1737 if (GET_MODE (op0
) != VOIDmode
&& GET_MODE (op0
) != mode0
)
1738 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
1740 if (GET_MODE (op1
) != VOIDmode
&& GET_MODE (op1
) != mode1
)
1741 xop1
= convert_to_mode (mode1
, xop1
, unsignedp
);
1743 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1744 if (! (*insn_operand_predicate
[icode
][1]) (xop0
, mode0
))
1745 xop0
= copy_to_mode_reg (mode0
, xop0
);
1747 if (! (*insn_operand_predicate
[icode
][2]) (xop1
, mode1
))
1748 xop1
= copy_to_mode_reg (mode1
, xop1
);
1750 /* We could handle this, but we should always be called with a pseudo
1751 for our targets and all insns should take them as outputs. */
1752 if (! (*insn_operand_predicate
[icode
][0]) (targ0
, mode
)
1753 || ! (*insn_operand_predicate
[icode
][3]) (targ1
, mode
))
1756 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
1763 delete_insns_since (last
);
1766 /* It can't be done in this mode. Can we do it in a wider mode? */
1768 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1770 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1771 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1773 if (binoptab
->handlers
[(int) wider_mode
].insn_code
1774 != CODE_FOR_nothing
)
1776 register rtx t0
= gen_reg_rtx (wider_mode
);
1777 register rtx t1
= gen_reg_rtx (wider_mode
);
1779 if (expand_twoval_binop (binoptab
,
1780 convert_modes (wider_mode
, mode
, op0
,
1782 convert_modes (wider_mode
, mode
, op1
,
1786 convert_move (targ0
, t0
, unsignedp
);
1787 convert_move (targ1
, t1
, unsignedp
);
1791 delete_insns_since (last
);
1796 delete_insns_since (entry_last
);
1800 /* Generate code to perform an operation specified by UNOPTAB
1801 on operand OP0, with result having machine-mode MODE.
1803 UNSIGNEDP is for the case where we have to widen the operands
1804 to perform the operation. It says to use zero-extension.
1806 If TARGET is nonzero, the value
1807 is generated there, if it is convenient to do so.
1808 In all cases an rtx is returned for the locus of the value;
1809 this may or may not be TARGET. */
1812 expand_unop (mode
, unoptab
, op0
, target
, unsignedp
)
1813 enum machine_mode mode
;
1819 enum mode_class
class;
1820 enum machine_mode wider_mode
;
1822 rtx last
= get_last_insn ();
1825 class = GET_MODE_CLASS (mode
);
1827 op0
= protect_from_queue (op0
, 0);
1831 op0
= force_not_mem (op0
);
1835 target
= protect_from_queue (target
, 1);
1837 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1839 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
1840 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
1846 temp
= gen_reg_rtx (mode
);
1848 if (GET_MODE (xop0
) != VOIDmode
1849 && GET_MODE (xop0
) != mode0
)
1850 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
1852 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
1854 if (! (*insn_operand_predicate
[icode
][1]) (xop0
, mode0
))
1855 xop0
= copy_to_mode_reg (mode0
, xop0
);
1857 if (! (*insn_operand_predicate
[icode
][0]) (temp
, mode
))
1858 temp
= gen_reg_rtx (mode
);
1860 pat
= GEN_FCN (icode
) (temp
, xop0
);
1863 if (GET_CODE (pat
) == SEQUENCE
1864 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
1866 delete_insns_since (last
);
1867 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
1875 delete_insns_since (last
);
1878 /* It can't be done in this mode. Can we open-code it in a wider mode? */
1880 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1881 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1882 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1884 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
1888 /* For certain operations, we need not actually extend
1889 the narrow operand, as long as we will truncate the
1890 results to the same narrowness. */
1892 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
1893 (unoptab
== neg_optab
1894 || unoptab
== one_cmpl_optab
)
1895 && class == MODE_INT
);
1897 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
1902 if (class != MODE_INT
)
1905 target
= gen_reg_rtx (mode
);
1906 convert_move (target
, temp
, 0);
1910 return gen_lowpart (mode
, temp
);
1913 delete_insns_since (last
);
1917 /* These can be done a word at a time. */
1918 if (unoptab
== one_cmpl_optab
1919 && class == MODE_INT
1920 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1921 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1926 if (target
== 0 || target
== op0
)
1927 target
= gen_reg_rtx (mode
);
1931 /* Do the actual arithmetic. */
1932 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
1934 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
1935 rtx x
= expand_unop (word_mode
, unoptab
,
1936 operand_subword_force (op0
, i
, mode
),
1937 target_piece
, unsignedp
);
1938 if (target_piece
!= x
)
1939 emit_move_insn (target_piece
, x
);
1942 insns
= get_insns ();
1945 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
1946 gen_rtx (unoptab
->code
, mode
, copy_rtx (op0
)));
1950 /* Open-code the complex negation operation. */
1951 else if (unoptab
== neg_optab
1952 && (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
))
1958 /* Find the correct mode for the real and imaginary parts */
1959 enum machine_mode submode
1960 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
1961 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
1964 if (submode
== BLKmode
)
1968 target
= gen_reg_rtx (mode
);
1972 target_piece
= gen_imagpart (submode
, target
);
1973 x
= expand_unop (submode
, unoptab
,
1974 gen_imagpart (submode
, op0
),
1975 target_piece
, unsignedp
);
1976 if (target_piece
!= x
)
1977 emit_move_insn (target_piece
, x
);
1979 target_piece
= gen_realpart (submode
, target
);
1980 x
= expand_unop (submode
, unoptab
,
1981 gen_realpart (submode
, op0
),
1982 target_piece
, unsignedp
);
1983 if (target_piece
!= x
)
1984 emit_move_insn (target_piece
, x
);
1989 emit_no_conflict_block (seq
, target
, op0
, 0,
1990 gen_rtx (unoptab
->code
, mode
, copy_rtx (op0
)));
1994 /* Now try a library call in this mode. */
1995 if (unoptab
->handlers
[(int) mode
].libfunc
)
1998 rtx funexp
= unoptab
->handlers
[(int) mode
].libfunc
;
2003 /* Pass 1 for NO_QUEUE so we don't lose any increments
2004 if the libcall is cse'd or moved. */
2005 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2006 NULL_RTX
, 1, mode
, 1, op0
, mode
);
2007 insns
= get_insns ();
2010 target
= gen_reg_rtx (mode
);
2011 emit_libcall_block (insns
, target
, value
,
2012 gen_rtx (unoptab
->code
, mode
, op0
));
2017 /* It can't be done in this mode. Can we do it in a wider mode? */
2019 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2021 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2022 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2024 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2025 != CODE_FOR_nothing
)
2026 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2030 /* For certain operations, we need not actually extend
2031 the narrow operand, as long as we will truncate the
2032 results to the same narrowness. */
2034 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2035 (unoptab
== neg_optab
2036 || unoptab
== one_cmpl_optab
)
2037 && class == MODE_INT
);
2039 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2044 if (class != MODE_INT
)
2047 target
= gen_reg_rtx (mode
);
2048 convert_move (target
, temp
, 0);
2052 return gen_lowpart (mode
, temp
);
2055 delete_insns_since (last
);
2060 /* If there is no negate operation, try doing a subtract from zero.
2061 The US Software GOFAST library needs this. */
2062 if (unoptab
== neg_optab
)
2065 temp
= expand_binop (mode
, sub_optab
, CONST0_RTX (mode
), op0
,
2066 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2074 /* Emit code to compute the absolute value of OP0, with result to
2075 TARGET if convenient. (TARGET may be 0.) The return value says
2076 where the result actually is to be found.
2078 MODE is the mode of the operand; the mode of the result is
2079 different but can be deduced from MODE.
2081 UNSIGNEDP is relevant if extension is needed. */
2084 expand_abs (mode
, op0
, target
, unsignedp
, safe
)
2085 enum machine_mode mode
;
2093 /* First try to do it with a special abs instruction. */
2094 temp
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
2098 /* If this machine has expensive jumps, we can do integer absolute
2099 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2100 where W is the width of MODE. */
2102 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2104 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2105 size_int (GET_MODE_BITSIZE (mode
) - 1),
2108 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2111 temp
= expand_binop (mode
, sub_optab
, temp
, extended
, target
, 0,
2118 /* If that does not win, use conditional jump and negate. */
2119 op1
= gen_label_rtx ();
2120 if (target
== 0 || ! safe
2121 || GET_MODE (target
) != mode
2122 || (GET_CODE (target
) == MEM
&& MEM_VOLATILE_P (target
))
2123 || (GET_CODE (target
) == REG
2124 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2125 target
= gen_reg_rtx (mode
);
2127 emit_move_insn (target
, op0
);
2130 /* If this mode is an integer too wide to compare properly,
2131 compare word by word. Rely on CSE to optimize constant cases. */
2132 if (GET_MODE_CLASS (mode
) == MODE_INT
&& ! can_compare_p (mode
))
2133 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2137 temp
= compare_from_rtx (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2139 if (temp
== const1_rtx
)
2141 else if (temp
!= const0_rtx
)
2143 if (bcc_gen_fctn
[(int) GET_CODE (temp
)] != 0)
2144 emit_jump_insn ((*bcc_gen_fctn
[(int) GET_CODE (temp
)]) (op1
));
2150 op0
= expand_unop (mode
, neg_optab
, target
, target
, 0);
2152 emit_move_insn (target
, op0
);
2158 /* Emit code to compute the absolute value of OP0, with result to
2159 TARGET if convenient. (TARGET may be 0.) The return value says
2160 where the result actually is to be found.
2162 MODE is the mode of the operand; the mode of the result is
2163 different but can be deduced from MODE.
2165 UNSIGNEDP is relevant for complex integer modes. */
2168 expand_complex_abs (mode
, op0
, target
, unsignedp
)
2169 enum machine_mode mode
;
2174 enum mode_class
class = GET_MODE_CLASS (mode
);
2175 enum machine_mode wider_mode
;
2177 rtx entry_last
= get_last_insn ();
2181 /* Find the correct mode for the real and imaginary parts. */
2182 enum machine_mode submode
2183 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
2184 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
2187 if (submode
== BLKmode
)
2190 op0
= protect_from_queue (op0
, 0);
2194 op0
= force_not_mem (op0
);
2197 last
= get_last_insn ();
2200 target
= protect_from_queue (target
, 1);
2202 if (abs_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2204 int icode
= (int) abs_optab
->handlers
[(int) mode
].insn_code
;
2205 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
2211 temp
= gen_reg_rtx (submode
);
2213 if (GET_MODE (xop0
) != VOIDmode
2214 && GET_MODE (xop0
) != mode0
)
2215 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2217 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2219 if (! (*insn_operand_predicate
[icode
][1]) (xop0
, mode0
))
2220 xop0
= copy_to_mode_reg (mode0
, xop0
);
2222 if (! (*insn_operand_predicate
[icode
][0]) (temp
, submode
))
2223 temp
= gen_reg_rtx (submode
);
2225 pat
= GEN_FCN (icode
) (temp
, xop0
);
2228 if (GET_CODE (pat
) == SEQUENCE
2229 && ! add_equal_note (pat
, temp
, abs_optab
->code
, xop0
, NULL_RTX
))
2231 delete_insns_since (last
);
2232 return expand_unop (mode
, abs_optab
, op0
, NULL_RTX
, unsignedp
);
2240 delete_insns_since (last
);
2243 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2245 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2246 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2248 if (abs_optab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2252 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
2253 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
2257 if (class != MODE_COMPLEX_INT
)
2260 target
= gen_reg_rtx (submode
);
2261 convert_move (target
, temp
, 0);
2265 return gen_lowpart (submode
, temp
);
2268 delete_insns_since (last
);
2272 /* Open-code the complex absolute-value operation
2273 if we can open-code sqrt. Otherwise it's not worth while. */
2274 if (sqrt_optab
->handlers
[(int) submode
].insn_code
!= CODE_FOR_nothing
)
2276 rtx real
, imag
, total
;
2278 real
= gen_realpart (submode
, op0
);
2279 imag
= gen_imagpart (submode
, op0
);
2281 /* Square both parts. */
2282 real
= expand_mult (submode
, real
, real
, NULL_RTX
, 0);
2283 imag
= expand_mult (submode
, imag
, imag
, NULL_RTX
, 0);
2285 /* Sum the parts. */
2286 total
= expand_binop (submode
, add_optab
, real
, imag
, NULL_RTX
,
2287 0, OPTAB_LIB_WIDEN
);
2289 /* Get sqrt in TARGET. Set TARGET to where the result is. */
2290 target
= expand_unop (submode
, sqrt_optab
, total
, target
, 0);
2292 delete_insns_since (last
);
2297 /* Now try a library call in this mode. */
2298 if (abs_optab
->handlers
[(int) mode
].libfunc
)
2301 rtx funexp
= abs_optab
->handlers
[(int) mode
].libfunc
;
2306 /* Pass 1 for NO_QUEUE so we don't lose any increments
2307 if the libcall is cse'd or moved. */
2308 value
= emit_library_call_value (abs_optab
->handlers
[(int) mode
].libfunc
,
2309 NULL_RTX
, 1, submode
, 1, op0
, mode
);
2310 insns
= get_insns ();
2313 target
= gen_reg_rtx (submode
);
2314 emit_libcall_block (insns
, target
, value
,
2315 gen_rtx (abs_optab
->code
, mode
, op0
));
2320 /* It can't be done in this mode. Can we do it in a wider mode? */
2322 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2323 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2325 if ((abs_optab
->handlers
[(int) wider_mode
].insn_code
2326 != CODE_FOR_nothing
)
2327 || abs_optab
->handlers
[(int) wider_mode
].libfunc
)
2331 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
2333 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
2337 if (class != MODE_COMPLEX_INT
)
2340 target
= gen_reg_rtx (submode
);
2341 convert_move (target
, temp
, 0);
2345 return gen_lowpart (submode
, temp
);
2348 delete_insns_since (last
);
2352 delete_insns_since (entry_last
);
2356 /* Generate an instruction whose insn-code is INSN_CODE,
2357 with two operands: an output TARGET and an input OP0.
2358 TARGET *must* be nonzero, and the output is always stored there.
2359 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2360 the value that is stored into TARGET. */
2363 emit_unop_insn (icode
, target
, op0
, code
)
2370 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
2373 temp
= target
= protect_from_queue (target
, 1);
2375 op0
= protect_from_queue (op0
, 0);
2377 /* Sign extension from memory is often done specially on RISC
2378 machines, so forcing into a register here can pessimize code. */
2379 if (flag_force_mem
&& code
!= SIGN_EXTEND
)
2380 op0
= force_not_mem (op0
);
2382 /* Now, if insn does not accept our operands, put them into pseudos. */
2384 if (! (*insn_operand_predicate
[icode
][1]) (op0
, mode0
))
2385 op0
= copy_to_mode_reg (mode0
, op0
);
2387 if (! (*insn_operand_predicate
[icode
][0]) (temp
, GET_MODE (temp
))
2388 || (flag_force_mem
&& GET_CODE (temp
) == MEM
))
2389 temp
= gen_reg_rtx (GET_MODE (temp
));
2391 pat
= GEN_FCN (icode
) (temp
, op0
);
2393 if (GET_CODE (pat
) == SEQUENCE
&& code
!= UNKNOWN
)
2394 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
2399 emit_move_insn (target
, temp
);
2402 /* Emit code to perform a series of operations on a multi-word quantity, one
2405 Such a block is preceded by a CLOBBER of the output, consists of multiple
2406 insns, each setting one word of the output, and followed by a SET copying
2407 the output to itself.
2409 Each of the insns setting words of the output receives a REG_NO_CONFLICT
2410 note indicating that it doesn't conflict with the (also multi-word)
2411 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
2414 INSNS is a block of code generated to perform the operation, not including
2415 the CLOBBER and final copy. All insns that compute intermediate values
2416 are first emitted, followed by the block as described above.
2418 TARGET, OP0, and OP1 are the output and inputs of the operations,
2419 respectively. OP1 may be zero for a unary operation.
2421 EQUIV, if non-zero, is an expression to be placed into a REG_EQUAL note
2424 If TARGET is not a register, INSNS is simply emitted with no special
2425 processing. Likewise if anything in INSNS is not an INSN or if
2426 there is a libcall block inside INSNS.
2428 The final insn emitted is returned. */
2431 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv
)
2437 rtx prev
, next
, first
, last
, insn
;
2439 if (GET_CODE (target
) != REG
|| reload_in_progress
)
2440 return emit_insns (insns
);
2442 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2443 if (GET_CODE (insn
) != INSN
2444 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
2445 return emit_insns (insns
);
2447 /* First emit all insns that do not store into words of the output and remove
2448 these from the list. */
2449 for (insn
= insns
; insn
; insn
= next
)
2454 next
= NEXT_INSN (insn
);
2456 if (GET_CODE (PATTERN (insn
)) == SET
)
2457 set
= PATTERN (insn
);
2458 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
2460 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
2461 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
2463 set
= XVECEXP (PATTERN (insn
), 0, i
);
2471 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
2473 if (PREV_INSN (insn
))
2474 NEXT_INSN (PREV_INSN (insn
)) = next
;
2479 PREV_INSN (next
) = PREV_INSN (insn
);
2485 prev
= get_last_insn ();
2487 /* Now write the CLOBBER of the output, followed by the setting of each
2488 of the words, followed by the final copy. */
2489 if (target
!= op0
&& target
!= op1
)
2490 emit_insn (gen_rtx (CLOBBER
, VOIDmode
, target
));
2492 for (insn
= insns
; insn
; insn
= next
)
2494 next
= NEXT_INSN (insn
);
2497 if (op1
&& GET_CODE (op1
) == REG
)
2498 REG_NOTES (insn
) = gen_rtx (EXPR_LIST
, REG_NO_CONFLICT
, op1
,
2501 if (op0
&& GET_CODE (op0
) == REG
)
2502 REG_NOTES (insn
) = gen_rtx (EXPR_LIST
, REG_NO_CONFLICT
, op0
,
2506 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
2507 != CODE_FOR_nothing
)
2509 last
= emit_move_insn (target
, target
);
2512 = gen_rtx (EXPR_LIST
, REG_EQUAL
, equiv
, REG_NOTES (last
));
2515 last
= get_last_insn ();
2518 first
= get_insns ();
2520 first
= NEXT_INSN (prev
);
2522 /* Encapsulate the block so it gets manipulated as a unit. */
2523 REG_NOTES (first
) = gen_rtx (INSN_LIST
, REG_LIBCALL
, last
,
2525 REG_NOTES (last
) = gen_rtx (INSN_LIST
, REG_RETVAL
, first
, REG_NOTES (last
));
2530 /* Emit code to make a call to a constant function or a library call.
2532 INSNS is a list containing all insns emitted in the call.
2533 These insns leave the result in RESULT. Our block is to copy RESULT
2534 to TARGET, which is logically equivalent to EQUIV.
2536 We first emit any insns that set a pseudo on the assumption that these are
2537 loading constants into registers; doing so allows them to be safely cse'ed
2538 between blocks. Then we emit all the other insns in the block, followed by
2539 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
2540 note with an operand of EQUIV.
2542 Moving assignments to pseudos outside of the block is done to improve
2543 the generated code, but is not required to generate correct code,
2544 hence being unable to move an assignment is not grounds for not making
2545 a libcall block. There are two reasons why it is safe to leave these
2546 insns inside the block: First, we know that these pseudos cannot be
2547 used in generated RTL outside the block since they are created for
2548 temporary purposes within the block. Second, CSE will not record the
2549 values of anything set inside a libcall block, so we know they must
2550 be dead at the end of the block.
2552 Except for the first group of insns (the ones setting pseudos), the
2553 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
2556 emit_libcall_block (insns
, target
, result
, equiv
)
2562 rtx prev
, next
, first
, last
, insn
;
2564 /* First emit all insns that set pseudos. Remove them from the list as
2565 we go. Avoid insns that set pseudos which were referenced in previous
2566 insns. These can be generated by move_by_pieces, for example,
2567 to update an address. Similarly, avoid insns that reference things
2568 set in previous insns. */
2570 for (insn
= insns
; insn
; insn
= next
)
2572 rtx set
= single_set (insn
);
2574 next
= NEXT_INSN (insn
);
2576 if (set
!= 0 && GET_CODE (SET_DEST (set
)) == REG
2577 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
2579 || (! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
))
2580 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
2581 && ! modified_in_p (SET_SRC (set
), insns
)
2582 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
2584 if (PREV_INSN (insn
))
2585 NEXT_INSN (PREV_INSN (insn
)) = next
;
2590 PREV_INSN (next
) = PREV_INSN (insn
);
2596 prev
= get_last_insn ();
2598 /* Write the remaining insns followed by the final copy. */
2600 for (insn
= insns
; insn
; insn
= next
)
2602 next
= NEXT_INSN (insn
);
2607 last
= emit_move_insn (target
, result
);
2608 REG_NOTES (last
) = gen_rtx (EXPR_LIST
,
2609 REG_EQUAL
, copy_rtx (equiv
), REG_NOTES (last
));
2612 first
= get_insns ();
2614 first
= NEXT_INSN (prev
);
2616 /* Encapsulate the block so it gets manipulated as a unit. */
2617 REG_NOTES (first
) = gen_rtx (INSN_LIST
, REG_LIBCALL
, last
,
2619 REG_NOTES (last
) = gen_rtx (INSN_LIST
, REG_RETVAL
, first
, REG_NOTES (last
));
2622 /* Generate code to store zero in X. */
2628 emit_move_insn (x
, const0_rtx
);
2631 /* Generate code to store 1 in X
2632 assuming it contains zero beforehand. */
2635 emit_0_to_1_insn (x
)
2638 emit_move_insn (x
, const1_rtx
);
2641 /* Generate code to compare X with Y
2642 so that the condition codes are set.
2644 MODE is the mode of the inputs (in case they are const_int).
2645 UNSIGNEDP nonzero says that X and Y are unsigned;
2646 this matters if they need to be widened.
2648 If they have mode BLKmode, then SIZE specifies the size of both X and Y,
2649 and ALIGN specifies the known shared alignment of X and Y.
2651 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
2652 It is ignored for fixed-point and block comparisons;
2653 it is used only for floating-point comparisons. */
2656 emit_cmp_insn (x
, y
, comparison
, size
, mode
, unsignedp
, align
)
2658 enum rtx_code comparison
;
2660 enum machine_mode mode
;
2664 enum mode_class
class;
2665 enum machine_mode wider_mode
;
2667 class = GET_MODE_CLASS (mode
);
2669 /* They could both be VOIDmode if both args are immediate constants,
2670 but we should fold that at an earlier stage.
2671 With no special code here, this will call abort,
2672 reminding the programmer to implement such folding. */
2674 if (mode
!= BLKmode
&& flag_force_mem
)
2676 x
= force_not_mem (x
);
2677 y
= force_not_mem (y
);
2680 /* If we are inside an appropriately-short loop and one operand is an
2681 expensive constant, force it into a register. */
2682 if (CONSTANT_P (x
) && preserve_subexpressions_p () && rtx_cost (x
, COMPARE
) > 2)
2683 x
= force_reg (mode
, x
);
2685 if (CONSTANT_P (y
) && preserve_subexpressions_p () && rtx_cost (y
, COMPARE
) > 2)
2686 y
= force_reg (mode
, y
);
2688 /* Don't let both operands fail to indicate the mode. */
2689 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
2690 x
= force_reg (mode
, x
);
2692 /* Handle all BLKmode compares. */
2694 if (mode
== BLKmode
)
2697 x
= protect_from_queue (x
, 0);
2698 y
= protect_from_queue (y
, 0);
2702 #ifdef HAVE_cmpstrqi
2704 && GET_CODE (size
) == CONST_INT
2705 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (QImode
)))
2707 enum machine_mode result_mode
2708 = insn_operand_mode
[(int) CODE_FOR_cmpstrqi
][0];
2709 rtx result
= gen_reg_rtx (result_mode
);
2710 emit_insn (gen_cmpstrqi (result
, x
, y
, size
, GEN_INT (align
)));
2711 emit_cmp_insn (result
, const0_rtx
, comparison
, NULL_RTX
,
2716 #ifdef HAVE_cmpstrhi
2718 && GET_CODE (size
) == CONST_INT
2719 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (HImode
)))
2721 enum machine_mode result_mode
2722 = insn_operand_mode
[(int) CODE_FOR_cmpstrhi
][0];
2723 rtx result
= gen_reg_rtx (result_mode
);
2724 emit_insn (gen_cmpstrhi (result
, x
, y
, size
, GEN_INT (align
)));
2725 emit_cmp_insn (result
, const0_rtx
, comparison
, NULL_RTX
,
2730 #ifdef HAVE_cmpstrsi
2733 enum machine_mode result_mode
2734 = insn_operand_mode
[(int) CODE_FOR_cmpstrsi
][0];
2735 rtx result
= gen_reg_rtx (result_mode
);
2736 size
= protect_from_queue (size
, 0);
2737 emit_insn (gen_cmpstrsi (result
, x
, y
,
2738 convert_to_mode (SImode
, size
, 1),
2740 emit_cmp_insn (result
, const0_rtx
, comparison
, NULL_RTX
,
2746 #ifdef TARGET_MEM_FUNCTIONS
2747 emit_library_call (memcmp_libfunc
, 0,
2748 TYPE_MODE (integer_type_node
), 3,
2749 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
2750 convert_to_mode (TYPE_MODE (sizetype
), size
,
2751 TREE_UNSIGNED (sizetype
)),
2752 TYPE_MODE (sizetype
));
2754 emit_library_call (bcmp_libfunc
, 0,
2755 TYPE_MODE (integer_type_node
), 3,
2756 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
2757 convert_to_mode (TYPE_MODE (integer_type_node
),
2759 TREE_UNSIGNED (integer_type_node
)),
2760 TYPE_MODE (integer_type_node
));
2762 emit_cmp_insn (hard_libcall_value (TYPE_MODE (integer_type_node
)),
2763 const0_rtx
, comparison
, NULL_RTX
,
2764 TYPE_MODE (integer_type_node
), 0, 0);
2769 /* Handle some compares against zero. */
2771 if (y
== CONST0_RTX (mode
)
2772 && tst_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2774 int icode
= (int) tst_optab
->handlers
[(int) mode
].insn_code
;
2777 x
= protect_from_queue (x
, 0);
2778 y
= protect_from_queue (y
, 0);
2780 /* Now, if insn does accept these operands, put them into pseudos. */
2781 if (! (*insn_operand_predicate
[icode
][0])
2782 (x
, insn_operand_mode
[icode
][0]))
2783 x
= copy_to_mode_reg (insn_operand_mode
[icode
][0], x
);
2785 emit_insn (GEN_FCN (icode
) (x
));
2789 /* Handle compares for which there is a directly suitable insn. */
2791 if (cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2793 int icode
= (int) cmp_optab
->handlers
[(int) mode
].insn_code
;
2796 x
= protect_from_queue (x
, 0);
2797 y
= protect_from_queue (y
, 0);
2799 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2800 if (! (*insn_operand_predicate
[icode
][0])
2801 (x
, insn_operand_mode
[icode
][0]))
2802 x
= copy_to_mode_reg (insn_operand_mode
[icode
][0], x
);
2804 if (! (*insn_operand_predicate
[icode
][1])
2805 (y
, insn_operand_mode
[icode
][1]))
2806 y
= copy_to_mode_reg (insn_operand_mode
[icode
][1], y
);
2808 emit_insn (GEN_FCN (icode
) (x
, y
));
2812 /* Try widening if we can find a direct insn that way. */
2814 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2816 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2817 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2819 if (cmp_optab
->handlers
[(int) wider_mode
].insn_code
2820 != CODE_FOR_nothing
)
2822 x
= protect_from_queue (x
, 0);
2823 y
= protect_from_queue (y
, 0);
2824 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
2825 y
= convert_modes (wider_mode
, mode
, y
, unsignedp
);
2826 emit_cmp_insn (x
, y
, comparison
, NULL_RTX
,
2827 wider_mode
, unsignedp
, align
);
2833 /* Handle a lib call just for the mode we are using. */
2835 if (cmp_optab
->handlers
[(int) mode
].libfunc
2836 && class != MODE_FLOAT
)
2838 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
2839 /* If we want unsigned, and this mode has a distinct unsigned
2840 comparison routine, use that. */
2841 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
2842 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
2844 emit_library_call (libfunc
, 1,
2845 word_mode
, 2, x
, mode
, y
, mode
);
2847 /* Integer comparison returns a result that must be compared against 1,
2848 so that even if we do an unsigned compare afterward,
2849 there is still a value that can represent the result "less than". */
2851 emit_cmp_insn (hard_libcall_value (word_mode
), const1_rtx
,
2852 comparison
, NULL_RTX
, word_mode
, unsignedp
, 0);
2856 if (class == MODE_FLOAT
)
2857 emit_float_lib_cmp (x
, y
, comparison
);
2863 /* Nonzero if a compare of mode MODE can be done straightforwardly
2864 (without splitting it into pieces). */
2867 can_compare_p (mode
)
2868 enum machine_mode mode
;
2872 if (cmp_optab
->handlers
[(int)mode
].insn_code
!= CODE_FOR_nothing
)
2874 mode
= GET_MODE_WIDER_MODE (mode
);
2875 } while (mode
!= VOIDmode
);
2880 /* Emit a library call comparison between floating point X and Y.
2881 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
2884 emit_float_lib_cmp (x
, y
, comparison
)
2886 enum rtx_code comparison
;
2888 enum machine_mode mode
= GET_MODE (x
);
2895 libfunc
= eqhf2_libfunc
;
2899 libfunc
= nehf2_libfunc
;
2903 libfunc
= gthf2_libfunc
;
2907 libfunc
= gehf2_libfunc
;
2911 libfunc
= lthf2_libfunc
;
2915 libfunc
= lehf2_libfunc
;
2918 else if (mode
== SFmode
)
2922 libfunc
= eqsf2_libfunc
;
2926 libfunc
= nesf2_libfunc
;
2930 libfunc
= gtsf2_libfunc
;
2934 libfunc
= gesf2_libfunc
;
2938 libfunc
= ltsf2_libfunc
;
2942 libfunc
= lesf2_libfunc
;
2945 else if (mode
== DFmode
)
2949 libfunc
= eqdf2_libfunc
;
2953 libfunc
= nedf2_libfunc
;
2957 libfunc
= gtdf2_libfunc
;
2961 libfunc
= gedf2_libfunc
;
2965 libfunc
= ltdf2_libfunc
;
2969 libfunc
= ledf2_libfunc
;
2972 else if (mode
== XFmode
)
2976 libfunc
= eqxf2_libfunc
;
2980 libfunc
= nexf2_libfunc
;
2984 libfunc
= gtxf2_libfunc
;
2988 libfunc
= gexf2_libfunc
;
2992 libfunc
= ltxf2_libfunc
;
2996 libfunc
= lexf2_libfunc
;
2999 else if (mode
== TFmode
)
3003 libfunc
= eqtf2_libfunc
;
3007 libfunc
= netf2_libfunc
;
3011 libfunc
= gttf2_libfunc
;
3015 libfunc
= getf2_libfunc
;
3019 libfunc
= lttf2_libfunc
;
3023 libfunc
= letf2_libfunc
;
3028 enum machine_mode wider_mode
;
3030 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
3031 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3033 if ((cmp_optab
->handlers
[(int) wider_mode
].insn_code
3034 != CODE_FOR_nothing
)
3035 || (cmp_optab
->handlers
[(int) wider_mode
].libfunc
!= 0))
3037 x
= protect_from_queue (x
, 0);
3038 y
= protect_from_queue (y
, 0);
3039 x
= convert_to_mode (wider_mode
, x
, 0);
3040 y
= convert_to_mode (wider_mode
, y
, 0);
3041 emit_float_lib_cmp (x
, y
, comparison
);
3051 emit_library_call (libfunc
, 1,
3052 word_mode
, 2, x
, mode
, y
, mode
);
3054 emit_cmp_insn (hard_libcall_value (word_mode
), const0_rtx
, comparison
,
3055 NULL_RTX
, word_mode
, 0, 0);
3058 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3061 emit_indirect_jump (loc
)
3064 if (! ((*insn_operand_predicate
[(int)CODE_FOR_indirect_jump
][0])
3066 loc
= copy_to_mode_reg (Pmode
, loc
);
3068 emit_jump_insn (gen_indirect_jump (loc
));
3072 #ifdef HAVE_conditional_move
3074 /* Emit a conditional move instruction if the machine supports one for that
3075 condition and machine mode.
3077 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3078 the mode to use should they be constants. If it is VOIDmode, they cannot
3081 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3082 should be stored there. MODE is the mode to use should they be constants.
3083 If it is VOIDmode, they cannot both be constants.
3085 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3086 is not supported. */
3089 emit_conditional_move (target
, code
, op0
, op1
, cmode
, op2
, op3
, mode
,
3094 enum machine_mode cmode
;
3096 enum machine_mode mode
;
3099 rtx tem
, subtarget
, comparison
, insn
;
3100 enum insn_code icode
;
3102 /* If one operand is constant, make it the second one. Only do this
3103 if the other operand is not constant as well. */
3105 if ((CONSTANT_P (op0
) && ! CONSTANT_P (op1
))
3106 || (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) != CONST_INT
))
3111 code
= swap_condition (code
);
3114 if (cmode
== VOIDmode
)
3115 cmode
= GET_MODE (op0
);
3117 if ((CONSTANT_P (op2
) && ! CONSTANT_P (op3
))
3118 || (GET_CODE (op2
) == CONST_INT
&& GET_CODE (op3
) != CONST_INT
))
3123 /* ??? This may not be appropriate (consider IEEE). Perhaps we should
3124 call can_reverse_comparison_p here and bail out if necessary.
3125 It's not clear whether we need to do this canonicalization though. */
3126 code
= reverse_condition (code
);
3129 if (mode
== VOIDmode
)
3130 mode
= GET_MODE (op2
);
3132 icode
= movcc_gen_code
[mode
];
3134 if (icode
== CODE_FOR_nothing
)
3139 op2
= force_not_mem (op2
);
3140 op3
= force_not_mem (op3
);
3144 target
= protect_from_queue (target
, 1);
3146 target
= gen_reg_rtx (mode
);
3152 op2
= protect_from_queue (op2
, 0);
3153 op3
= protect_from_queue (op3
, 0);
3155 /* If the insn doesn't accept these operands, put them in pseudos. */
3157 if (! (*insn_operand_predicate
[icode
][0])
3158 (subtarget
, insn_operand_mode
[icode
][0]))
3159 subtarget
= gen_reg_rtx (insn_operand_mode
[icode
][0]);
3161 if (! (*insn_operand_predicate
[icode
][2])
3162 (op2
, insn_operand_mode
[icode
][2]))
3163 op2
= copy_to_mode_reg (insn_operand_mode
[icode
][2], op2
);
3165 if (! (*insn_operand_predicate
[icode
][3])
3166 (op3
, insn_operand_mode
[icode
][3]))
3167 op3
= copy_to_mode_reg (insn_operand_mode
[icode
][3], op3
);
3169 /* Everything should now be in the suitable form, so emit the compare insn
3170 and then the conditional move. */
3173 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
, 0);
3175 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3176 if (GET_CODE (comparison
) != code
)
3177 /* This shouldn't happen. */
3180 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3182 /* If that failed, then give up. */
3188 if (subtarget
!= target
)
3189 convert_move (target
, subtarget
, 0);
3194 /* Return non-zero if a conditional move of mode MODE is supported.
3196 This function is for combine so it can tell whether an insn that looks
3197 like a conditional move is actually supported by the hardware. If we
3198 guess wrong we lose a bit on optimization, but that's it. */
3199 /* ??? sparc64 supports conditionally moving integers values based on fp
3200 comparisons, and vice versa. How do we handle them? */
3203 can_conditionally_move_p (mode
)
3204 enum machine_mode mode
;
3206 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
3212 #endif /* HAVE_conditional_move */
3214 /* These three functions generate an insn body and return it
3215 rather than emitting the insn.
3217 They do not protect from queued increments,
3218 because they may be used 1) in protect_from_queue itself
3219 and 2) in other passes where there is no queue. */
3221 /* Generate and return an insn body to add Y to X. */
3224 gen_add2_insn (x
, y
)
3227 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3229 if (! (*insn_operand_predicate
[icode
][0]) (x
, insn_operand_mode
[icode
][0])
3230 || ! (*insn_operand_predicate
[icode
][1]) (x
, insn_operand_mode
[icode
][1])
3231 || ! (*insn_operand_predicate
[icode
][2]) (y
, insn_operand_mode
[icode
][2]))
3234 return (GEN_FCN (icode
) (x
, x
, y
));
3238 have_add2_insn (mode
)
3239 enum machine_mode mode
;
3241 return add_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
;
3244 /* Generate and return an insn body to subtract Y from X. */
3247 gen_sub2_insn (x
, y
)
3250 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3252 if (! (*insn_operand_predicate
[icode
][0]) (x
, insn_operand_mode
[icode
][0])
3253 || ! (*insn_operand_predicate
[icode
][1]) (x
, insn_operand_mode
[icode
][1])
3254 || ! (*insn_operand_predicate
[icode
][2]) (y
, insn_operand_mode
[icode
][2]))
3257 return (GEN_FCN (icode
) (x
, x
, y
));
3261 have_sub2_insn (mode
)
3262 enum machine_mode mode
;
3264 return sub_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
;
3267 /* Generate the body of an instruction to copy Y into X.
3268 It may be a SEQUENCE, if one insn isn't enough. */
3271 gen_move_insn (x
, y
)
3274 register enum machine_mode mode
= GET_MODE (x
);
3275 enum insn_code insn_code
;
3278 if (mode
== VOIDmode
)
3279 mode
= GET_MODE (y
);
3281 insn_code
= mov_optab
->handlers
[(int) mode
].insn_code
;
3283 /* Handle MODE_CC modes: If we don't have a special move insn for this mode,
3284 find a mode to do it in. If we have a movcc, use it. Otherwise,
3285 find the MODE_INT mode of the same width. */
3287 if (GET_MODE_CLASS (mode
) == MODE_CC
&& insn_code
== CODE_FOR_nothing
)
3289 enum machine_mode tmode
= VOIDmode
;
3293 && mov_optab
->handlers
[(int) CCmode
].insn_code
!= CODE_FOR_nothing
)
3296 for (tmode
= QImode
; tmode
!= VOIDmode
;
3297 tmode
= GET_MODE_WIDER_MODE (tmode
))
3298 if (GET_MODE_SIZE (tmode
) == GET_MODE_SIZE (mode
))
3301 if (tmode
== VOIDmode
)
3304 /* Get X and Y in TMODE. We can't use gen_lowpart here because it
3305 may call change_address which is not appropriate if we were
3306 called when a reload was in progress. We don't have to worry
3307 about changing the address since the size in bytes is supposed to
3308 be the same. Copy the MEM to change the mode and move any
3309 substitutions from the old MEM to the new one. */
3311 if (reload_in_progress
)
3313 x
= gen_lowpart_common (tmode
, x1
);
3314 if (x
== 0 && GET_CODE (x1
) == MEM
)
3316 x
= gen_rtx (MEM
, tmode
, XEXP (x1
, 0));
3317 RTX_UNCHANGING_P (x
) = RTX_UNCHANGING_P (x1
);
3318 MEM_IN_STRUCT_P (x
) = MEM_IN_STRUCT_P (x1
);
3319 MEM_VOLATILE_P (x
) = MEM_VOLATILE_P (x1
);
3320 copy_replacements (x1
, x
);
3323 y
= gen_lowpart_common (tmode
, y1
);
3324 if (y
== 0 && GET_CODE (y1
) == MEM
)
3326 y
= gen_rtx (MEM
, tmode
, XEXP (y1
, 0));
3327 RTX_UNCHANGING_P (y
) = RTX_UNCHANGING_P (y1
);
3328 MEM_IN_STRUCT_P (y
) = MEM_IN_STRUCT_P (y1
);
3329 MEM_VOLATILE_P (y
) = MEM_VOLATILE_P (y1
);
3330 copy_replacements (y1
, y
);
3335 x
= gen_lowpart (tmode
, x
);
3336 y
= gen_lowpart (tmode
, y
);
3339 insn_code
= mov_optab
->handlers
[(int) tmode
].insn_code
;
3340 return (GEN_FCN (insn_code
) (x
, y
));
3344 emit_move_insn_1 (x
, y
);
3345 seq
= gen_sequence ();
3350 /* Return the insn code used to extend FROM_MODE to TO_MODE.
3351 UNSIGNEDP specifies zero-extension instead of sign-extension. If
3352 no such operation exists, CODE_FOR_nothing will be returned. */
3355 can_extend_p (to_mode
, from_mode
, unsignedp
)
3356 enum machine_mode to_mode
, from_mode
;
3359 return extendtab
[(int) to_mode
][(int) from_mode
][unsignedp
];
3362 /* Generate the body of an insn to extend Y (with mode MFROM)
3363 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
3366 gen_extend_insn (x
, y
, mto
, mfrom
, unsignedp
)
3368 enum machine_mode mto
, mfrom
;
3371 return (GEN_FCN (extendtab
[(int) mto
][(int) mfrom
][unsignedp
]) (x
, y
));
3374 /* can_fix_p and can_float_p say whether the target machine
3375 can directly convert a given fixed point type to
3376 a given floating point type, or vice versa.
3377 The returned value is the CODE_FOR_... value to use,
3378 or CODE_FOR_nothing if these modes cannot be directly converted.
3380 *TRUNCP_PTR is set to 1 if it is necessary to output
3381 an explicit FTRUNC insn before the fix insn; otherwise 0. */
3383 static enum insn_code
3384 can_fix_p (fixmode
, fltmode
, unsignedp
, truncp_ptr
)
3385 enum machine_mode fltmode
, fixmode
;
3390 if (fixtrunctab
[(int) fltmode
][(int) fixmode
][unsignedp
] != CODE_FOR_nothing
)
3391 return fixtrunctab
[(int) fltmode
][(int) fixmode
][unsignedp
];
3393 if (ftrunc_optab
->handlers
[(int) fltmode
].insn_code
!= CODE_FOR_nothing
)
3396 return fixtab
[(int) fltmode
][(int) fixmode
][unsignedp
];
3398 return CODE_FOR_nothing
;
3401 static enum insn_code
3402 can_float_p (fltmode
, fixmode
, unsignedp
)
3403 enum machine_mode fixmode
, fltmode
;
3406 return floattab
[(int) fltmode
][(int) fixmode
][unsignedp
];
3409 /* Generate code to convert FROM to floating point
3410 and store in TO. FROM must be fixed point and not VOIDmode.
3411 UNSIGNEDP nonzero means regard FROM as unsigned.
3412 Normally this is done by correcting the final value
3413 if it is negative. */
3416 expand_float (to
, from
, unsignedp
)
3420 enum insn_code icode
;
3421 register rtx target
= to
;
3422 enum machine_mode fmode
, imode
;
3424 /* Crash now, because we won't be able to decide which mode to use. */
3425 if (GET_MODE (from
) == VOIDmode
)
3428 /* Look for an insn to do the conversion. Do it in the specified
3429 modes if possible; otherwise convert either input, output or both to
3430 wider mode. If the integer mode is wider than the mode of FROM,
3431 we can do the conversion signed even if the input is unsigned. */
3433 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
3434 imode
= GET_MODE_WIDER_MODE (imode
))
3435 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
3436 fmode
= GET_MODE_WIDER_MODE (fmode
))
3438 int doing_unsigned
= unsignedp
;
3440 icode
= can_float_p (fmode
, imode
, unsignedp
);
3441 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
3442 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
3444 if (icode
!= CODE_FOR_nothing
)
3446 to
= protect_from_queue (to
, 1);
3447 from
= protect_from_queue (from
, 0);
3449 if (imode
!= GET_MODE (from
))
3450 from
= convert_to_mode (imode
, from
, unsignedp
);
3452 if (fmode
!= GET_MODE (to
))
3453 target
= gen_reg_rtx (fmode
);
3455 emit_unop_insn (icode
, target
, from
,
3456 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
3459 convert_move (to
, target
, 0);
3464 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3466 /* Unsigned integer, and no way to convert directly.
3467 Convert as signed, then conditionally adjust the result. */
3470 rtx label
= gen_label_rtx ();
3472 REAL_VALUE_TYPE offset
;
3476 to
= protect_from_queue (to
, 1);
3477 from
= protect_from_queue (from
, 0);
3480 from
= force_not_mem (from
);
3482 /* Look for a usable floating mode FMODE wider than the source and at
3483 least as wide as the target. Using FMODE will avoid rounding woes
3484 with unsigned values greater than the signed maximum value. */
3486 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
3487 fmode
= GET_MODE_WIDER_MODE (fmode
))
3488 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
3489 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
3492 if (fmode
== VOIDmode
)
3494 /* There is no such mode. Pretend the target is wide enough. */
3495 fmode
= GET_MODE (to
);
3497 /* Avoid double-rounding when TO is narrower than FROM. */
3498 if ((significand_size (fmode
) + 1)
3499 < GET_MODE_BITSIZE (GET_MODE (from
)))
3502 rtx neglabel
= gen_label_rtx ();
3504 /* Don't use TARGET if it isn't a register, is a hard register,
3505 or is the wrong mode. */
3506 if (GET_CODE (target
) != REG
3507 || REGNO (target
) < FIRST_PSEUDO_REGISTER
3508 || GET_MODE (target
) != fmode
)
3509 target
= gen_reg_rtx (fmode
);
3511 imode
= GET_MODE (from
);
3512 do_pending_stack_adjust ();
3514 /* Test whether the sign bit is set. */
3515 emit_cmp_insn (from
, const0_rtx
, GE
, NULL_RTX
, imode
, 0, 0);
3516 emit_jump_insn (gen_blt (neglabel
));
3518 /* The sign bit is not set. Convert as signed. */
3519 expand_float (target
, from
, 0);
3520 emit_jump_insn (gen_jump (label
));
3523 /* The sign bit is set.
3524 Convert to a usable (positive signed) value by shifting right
3525 one bit, while remembering if a nonzero bit was shifted
3526 out; i.e., compute (from & 1) | (from >> 1). */
3528 emit_label (neglabel
);
3529 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
3530 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3531 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
3533 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
3535 expand_float (target
, temp
, 0);
3537 /* Multiply by 2 to undo the shift above. */
3538 temp
= expand_binop (fmode
, add_optab
, target
, target
,
3539 target
, 0, OPTAB_LIB_WIDEN
);
3541 emit_move_insn (target
, temp
);
3543 do_pending_stack_adjust ();
3549 /* If we are about to do some arithmetic to correct for an
3550 unsigned operand, do it in a pseudo-register. */
3552 if (GET_MODE (to
) != fmode
3553 || GET_CODE (to
) != REG
|| REGNO (to
) < FIRST_PSEUDO_REGISTER
)
3554 target
= gen_reg_rtx (fmode
);
3556 /* Convert as signed integer to floating. */
3557 expand_float (target
, from
, 0);
3559 /* If FROM is negative (and therefore TO is negative),
3560 correct its value by 2**bitwidth. */
3562 do_pending_stack_adjust ();
3563 emit_cmp_insn (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
), 0, 0);
3564 emit_jump_insn (gen_bge (label
));
3566 /* On SCO 3.2.1, ldexp rejects values outside [0.5, 1).
3567 Rather than setting up a dconst_dot_5, let's hope SCO
3569 offset
= REAL_VALUE_LDEXP (dconst1
, GET_MODE_BITSIZE (GET_MODE (from
)));
3570 temp
= expand_binop (fmode
, add_optab
, target
,
3571 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
3572 target
, 0, OPTAB_LIB_WIDEN
);
3574 emit_move_insn (target
, temp
);
3576 do_pending_stack_adjust ();
3582 /* No hardware instruction available; call a library routine to convert from
3583 SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode. */
3589 to
= protect_from_queue (to
, 1);
3590 from
= protect_from_queue (from
, 0);
3592 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
3593 from
= convert_to_mode (SImode
, from
, unsignedp
);
3596 from
= force_not_mem (from
);
3598 if (GET_MODE (to
) == SFmode
)
3600 if (GET_MODE (from
) == SImode
)
3601 libfcn
= floatsisf_libfunc
;
3602 else if (GET_MODE (from
) == DImode
)
3603 libfcn
= floatdisf_libfunc
;
3604 else if (GET_MODE (from
) == TImode
)
3605 libfcn
= floattisf_libfunc
;
3609 else if (GET_MODE (to
) == DFmode
)
3611 if (GET_MODE (from
) == SImode
)
3612 libfcn
= floatsidf_libfunc
;
3613 else if (GET_MODE (from
) == DImode
)
3614 libfcn
= floatdidf_libfunc
;
3615 else if (GET_MODE (from
) == TImode
)
3616 libfcn
= floattidf_libfunc
;
3620 else if (GET_MODE (to
) == XFmode
)
3622 if (GET_MODE (from
) == SImode
)
3623 libfcn
= floatsixf_libfunc
;
3624 else if (GET_MODE (from
) == DImode
)
3625 libfcn
= floatdixf_libfunc
;
3626 else if (GET_MODE (from
) == TImode
)
3627 libfcn
= floattixf_libfunc
;
3631 else if (GET_MODE (to
) == TFmode
)
3633 if (GET_MODE (from
) == SImode
)
3634 libfcn
= floatsitf_libfunc
;
3635 else if (GET_MODE (from
) == DImode
)
3636 libfcn
= floatditf_libfunc
;
3637 else if (GET_MODE (from
) == TImode
)
3638 libfcn
= floattitf_libfunc
;
3647 value
= emit_library_call_value (libfcn
, NULL_RTX
, 1,
3649 1, from
, GET_MODE (from
));
3650 insns
= get_insns ();
3653 emit_libcall_block (insns
, target
, value
,
3654 gen_rtx (FLOAT
, GET_MODE (to
), from
));
3659 /* Copy result to requested destination
3660 if we have been computing in a temp location. */
3664 if (GET_MODE (target
) == GET_MODE (to
))
3665 emit_move_insn (to
, target
);
3667 convert_move (to
, target
, 0);
3671 /* expand_fix: generate code to convert FROM to fixed point
3672 and store in TO. FROM must be floating point. */
3678 rtx temp
= gen_reg_rtx (GET_MODE (x
));
3679 return expand_unop (GET_MODE (x
), ftrunc_optab
, x
, temp
, 0);
3683 expand_fix (to
, from
, unsignedp
)
3684 register rtx to
, from
;
3687 enum insn_code icode
;
3688 register rtx target
= to
;
3689 enum machine_mode fmode
, imode
;
3693 /* We first try to find a pair of modes, one real and one integer, at
3694 least as wide as FROM and TO, respectively, in which we can open-code
3695 this conversion. If the integer mode is wider than the mode of TO,
3696 we can do the conversion either signed or unsigned. */
3698 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
3699 imode
= GET_MODE_WIDER_MODE (imode
))
3700 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
3701 fmode
= GET_MODE_WIDER_MODE (fmode
))
3703 int doing_unsigned
= unsignedp
;
3705 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
3706 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
3707 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
3709 if (icode
!= CODE_FOR_nothing
)
3711 to
= protect_from_queue (to
, 1);
3712 from
= protect_from_queue (from
, 0);
3714 if (fmode
!= GET_MODE (from
))
3715 from
= convert_to_mode (fmode
, from
, 0);
3718 from
= ftruncify (from
);
3720 if (imode
!= GET_MODE (to
))
3721 target
= gen_reg_rtx (imode
);
3723 emit_unop_insn (icode
, target
, from
,
3724 doing_unsigned
? UNSIGNED_FIX
: FIX
);
3726 convert_move (to
, target
, unsignedp
);
3731 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3732 /* For an unsigned conversion, there is one more way to do it.
3733 If we have a signed conversion, we generate code that compares
3734 the real value to the largest representable positive number. If if
3735 is smaller, the conversion is done normally. Otherwise, subtract
3736 one plus the highest signed number, convert, and add it back.
3738 We only need to check all real modes, since we know we didn't find
3739 anything with a wider integer mode. */
3741 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
3742 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
3743 fmode
= GET_MODE_WIDER_MODE (fmode
))
3744 /* Make sure we won't lose significant bits doing this. */
3745 if (GET_MODE_BITSIZE (fmode
) > GET_MODE_BITSIZE (GET_MODE (to
))
3746 && CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
3750 REAL_VALUE_TYPE offset
;
3751 rtx limit
, lab1
, lab2
, insn
;
3753 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
3754 offset
= REAL_VALUE_LDEXP (dconst1
, bitsize
- 1);
3755 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
3756 lab1
= gen_label_rtx ();
3757 lab2
= gen_label_rtx ();
3760 to
= protect_from_queue (to
, 1);
3761 from
= protect_from_queue (from
, 0);
3764 from
= force_not_mem (from
);
3766 if (fmode
!= GET_MODE (from
))
3767 from
= convert_to_mode (fmode
, from
, 0);
3769 /* See if we need to do the subtraction. */
3770 do_pending_stack_adjust ();
3771 emit_cmp_insn (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
), 0, 0);
3772 emit_jump_insn (gen_bge (lab1
));
3774 /* If not, do the signed "fix" and branch around fixup code. */
3775 expand_fix (to
, from
, 0);
3776 emit_jump_insn (gen_jump (lab2
));
3779 /* Otherwise, subtract 2**(N-1), convert to signed number,
3780 then add 2**(N-1). Do the addition using XOR since this
3781 will often generate better code. */
3783 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
3784 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
3785 expand_fix (to
, target
, 0);
3786 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
3787 GEN_INT ((HOST_WIDE_INT
) 1 << (bitsize
- 1)),
3788 to
, 1, OPTAB_LIB_WIDEN
);
3791 emit_move_insn (to
, target
);
3795 /* Make a place for a REG_NOTE and add it. */
3796 insn
= emit_move_insn (to
, to
);
3797 REG_NOTES (insn
) = gen_rtx (EXPR_LIST
, REG_EQUAL
,
3798 gen_rtx (UNSIGNED_FIX
, GET_MODE (to
),
3806 /* We can't do it with an insn, so use a library call. But first ensure
3807 that the mode of TO is at least as wide as SImode, since those are the
3808 only library calls we know about. */
3810 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
3812 target
= gen_reg_rtx (SImode
);
3814 expand_fix (target
, from
, unsignedp
);
3816 else if (GET_MODE (from
) == SFmode
)
3818 if (GET_MODE (to
) == SImode
)
3819 libfcn
= unsignedp
? fixunssfsi_libfunc
: fixsfsi_libfunc
;
3820 else if (GET_MODE (to
) == DImode
)
3821 libfcn
= unsignedp
? fixunssfdi_libfunc
: fixsfdi_libfunc
;
3822 else if (GET_MODE (to
) == TImode
)
3823 libfcn
= unsignedp
? fixunssfti_libfunc
: fixsfti_libfunc
;
3827 else if (GET_MODE (from
) == DFmode
)
3829 if (GET_MODE (to
) == SImode
)
3830 libfcn
= unsignedp
? fixunsdfsi_libfunc
: fixdfsi_libfunc
;
3831 else if (GET_MODE (to
) == DImode
)
3832 libfcn
= unsignedp
? fixunsdfdi_libfunc
: fixdfdi_libfunc
;
3833 else if (GET_MODE (to
) == TImode
)
3834 libfcn
= unsignedp
? fixunsdfti_libfunc
: fixdfti_libfunc
;
3838 else if (GET_MODE (from
) == XFmode
)
3840 if (GET_MODE (to
) == SImode
)
3841 libfcn
= unsignedp
? fixunsxfsi_libfunc
: fixxfsi_libfunc
;
3842 else if (GET_MODE (to
) == DImode
)
3843 libfcn
= unsignedp
? fixunsxfdi_libfunc
: fixxfdi_libfunc
;
3844 else if (GET_MODE (to
) == TImode
)
3845 libfcn
= unsignedp
? fixunsxfti_libfunc
: fixxfti_libfunc
;
3849 else if (GET_MODE (from
) == TFmode
)
3851 if (GET_MODE (to
) == SImode
)
3852 libfcn
= unsignedp
? fixunstfsi_libfunc
: fixtfsi_libfunc
;
3853 else if (GET_MODE (to
) == DImode
)
3854 libfcn
= unsignedp
? fixunstfdi_libfunc
: fixtfdi_libfunc
;
3855 else if (GET_MODE (to
) == TImode
)
3856 libfcn
= unsignedp
? fixunstfti_libfunc
: fixtfti_libfunc
;
3868 to
= protect_from_queue (to
, 1);
3869 from
= protect_from_queue (from
, 0);
3872 from
= force_not_mem (from
);
3876 value
= emit_library_call_value (libfcn
, NULL_RTX
, 1, GET_MODE (to
),
3878 1, from
, GET_MODE (from
));
3879 insns
= get_insns ();
3882 emit_libcall_block (insns
, target
, value
,
3883 gen_rtx (unsignedp
? UNSIGNED_FIX
: FIX
,
3884 GET_MODE (to
), from
));
3889 if (GET_MODE (to
) == GET_MODE (target
))
3890 emit_move_insn (to
, target
);
3892 convert_move (to
, target
, 0);
3901 optab op
= (optab
) xmalloc (sizeof (struct optab
));
3903 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
3905 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
3906 op
->handlers
[i
].libfunc
= 0;
3909 if (code
!= UNKNOWN
)
3910 code_to_optab
[(int) code
] = op
;
3915 /* Initialize the libfunc fields of an entire group of entries in some
3916 optab. Each entry is set equal to a string consisting of a leading
3917 pair of underscores followed by a generic operation name followed by
3918 a mode name (downshifted to lower case) followed by a single character
3919 representing the number of operands for the given operation (which is
3920 usually one of the characters '2', '3', or '4').
3922 OPTABLE is the table in which libfunc fields are to be initialized.
3923 FIRST_MODE is the first machine mode index in the given optab to
3925 LAST_MODE is the last machine mode index in the given optab to
3927 OPNAME is the generic (string) name of the operation.
3928 SUFFIX is the character which specifies the number of operands for
3929 the given generic operation.
3933 init_libfuncs (optable
, first_mode
, last_mode
, opname
, suffix
)
3934 register optab optable
;
3935 register int first_mode
;
3936 register int last_mode
;
3937 register char *opname
;
3938 register int suffix
;
3941 register unsigned opname_len
= strlen (opname
);
3943 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
3944 mode
= (enum machine_mode
) ((int) mode
+ 1))
3946 register char *mname
= mode_name
[(int) mode
];
3947 register unsigned mname_len
= strlen (mname
);
3948 register char *libfunc_name
3949 = (char *) xmalloc (2 + opname_len
+ mname_len
+ 1 + 1);
3956 for (q
= opname
; *q
; )
3958 for (q
= mname
; *q
; q
++)
3959 *p
++ = tolower (*q
);
3962 optable
->handlers
[(int) mode
].libfunc
3963 = gen_rtx (SYMBOL_REF
, Pmode
, libfunc_name
);
3967 /* Initialize the libfunc fields of an entire group of entries in some
3968 optab which correspond to all integer mode operations. The parameters
3969 have the same meaning as similarly named ones for the `init_libfuncs'
3970 routine. (See above). */
3973 init_integral_libfuncs (optable
, opname
, suffix
)
3974 register optab optable
;
3975 register char *opname
;
3976 register int suffix
;
3978 init_libfuncs (optable
, SImode
, TImode
, opname
, suffix
);
3981 /* Initialize the libfunc fields of an entire group of entries in some
3982 optab which correspond to all real mode operations. The parameters
3983 have the same meaning as similarly named ones for the `init_libfuncs'
3984 routine. (See above). */
3987 init_floating_libfuncs (optable
, opname
, suffix
)
3988 register optab optable
;
3989 register char *opname
;
3990 register int suffix
;
3992 init_libfuncs (optable
, SFmode
, TFmode
, opname
, suffix
);
3995 /* Initialize the libfunc fields of an entire group of entries in some
3996 optab which correspond to all complex floating modes. The parameters
3997 have the same meaning as similarly named ones for the `init_libfuncs'
3998 routine. (See above). */
4001 init_complex_libfuncs (optable
, opname
, suffix
)
4002 register optab optable
;
4003 register char *opname
;
4004 register int suffix
;
4006 init_libfuncs (optable
, SCmode
, TCmode
, opname
, suffix
);
4009 /* Call this once to initialize the contents of the optabs
4010 appropriately for the current target machine. */
4018 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4020 for (p
= fixtab
[0][0];
4021 p
< fixtab
[0][0] + sizeof fixtab
/ sizeof (fixtab
[0][0][0]);
4023 *p
= CODE_FOR_nothing
;
4025 for (p
= fixtrunctab
[0][0];
4026 p
< fixtrunctab
[0][0] + sizeof fixtrunctab
/ sizeof (fixtrunctab
[0][0][0]);
4028 *p
= CODE_FOR_nothing
;
4030 for (p
= floattab
[0][0];
4031 p
< floattab
[0][0] + sizeof floattab
/ sizeof (floattab
[0][0][0]);
4033 *p
= CODE_FOR_nothing
;
4035 for (p
= extendtab
[0][0];
4036 p
< extendtab
[0][0] + sizeof extendtab
/ sizeof extendtab
[0][0][0];
4038 *p
= CODE_FOR_nothing
;
4040 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
4041 setcc_gen_code
[i
] = CODE_FOR_nothing
;
4043 #ifdef HAVE_conditional_move
4044 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4045 movcc_gen_code
[i
] = CODE_FOR_nothing
;
4048 add_optab
= init_optab (PLUS
);
4049 sub_optab
= init_optab (MINUS
);
4050 smul_optab
= init_optab (MULT
);
4051 smul_highpart_optab
= init_optab (UNKNOWN
);
4052 umul_highpart_optab
= init_optab (UNKNOWN
);
4053 smul_widen_optab
= init_optab (UNKNOWN
);
4054 umul_widen_optab
= init_optab (UNKNOWN
);
4055 sdiv_optab
= init_optab (DIV
);
4056 sdivmod_optab
= init_optab (UNKNOWN
);
4057 udiv_optab
= init_optab (UDIV
);
4058 udivmod_optab
= init_optab (UNKNOWN
);
4059 smod_optab
= init_optab (MOD
);
4060 umod_optab
= init_optab (UMOD
);
4061 flodiv_optab
= init_optab (DIV
);
4062 ftrunc_optab
= init_optab (UNKNOWN
);
4063 and_optab
= init_optab (AND
);
4064 ior_optab
= init_optab (IOR
);
4065 xor_optab
= init_optab (XOR
);
4066 ashl_optab
= init_optab (ASHIFT
);
4067 ashr_optab
= init_optab (ASHIFTRT
);
4068 lshr_optab
= init_optab (LSHIFTRT
);
4069 rotl_optab
= init_optab (ROTATE
);
4070 rotr_optab
= init_optab (ROTATERT
);
4071 smin_optab
= init_optab (SMIN
);
4072 smax_optab
= init_optab (SMAX
);
4073 umin_optab
= init_optab (UMIN
);
4074 umax_optab
= init_optab (UMAX
);
4075 mov_optab
= init_optab (UNKNOWN
);
4076 movstrict_optab
= init_optab (UNKNOWN
);
4077 cmp_optab
= init_optab (UNKNOWN
);
4078 ucmp_optab
= init_optab (UNKNOWN
);
4079 tst_optab
= init_optab (UNKNOWN
);
4080 neg_optab
= init_optab (NEG
);
4081 abs_optab
= init_optab (ABS
);
4082 one_cmpl_optab
= init_optab (NOT
);
4083 ffs_optab
= init_optab (FFS
);
4084 sqrt_optab
= init_optab (SQRT
);
4085 sin_optab
= init_optab (UNKNOWN
);
4086 cos_optab
= init_optab (UNKNOWN
);
4087 strlen_optab
= init_optab (UNKNOWN
);
4089 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4091 movstr_optab
[i
] = CODE_FOR_nothing
;
4092 clrstr_optab
[i
] = CODE_FOR_nothing
;
4094 #ifdef HAVE_SECONDARY_RELOADS
4095 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
4099 /* Fill in the optabs with the insns we support. */
4102 #ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
4103 /* This flag says the same insns that convert to a signed fixnum
4104 also convert validly to an unsigned one. */
4105 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4106 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
4107 fixtrunctab
[i
][j
][1] = fixtrunctab
[i
][j
][0];
4110 #ifdef EXTRA_CC_MODES
4114 /* Initialize the optabs with the names of the library functions. */
4115 init_integral_libfuncs (add_optab
, "add", '3');
4116 init_floating_libfuncs (add_optab
, "add", '3');
4117 init_integral_libfuncs (sub_optab
, "sub", '3');
4118 init_floating_libfuncs (sub_optab
, "sub", '3');
4119 init_integral_libfuncs (smul_optab
, "mul", '3');
4120 init_floating_libfuncs (smul_optab
, "mul", '3');
4121 init_integral_libfuncs (sdiv_optab
, "div", '3');
4122 init_integral_libfuncs (udiv_optab
, "udiv", '3');
4123 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
4124 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
4125 init_integral_libfuncs (smod_optab
, "mod", '3');
4126 init_integral_libfuncs (umod_optab
, "umod", '3');
4127 init_floating_libfuncs (flodiv_optab
, "div", '3');
4128 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
4129 init_integral_libfuncs (and_optab
, "and", '3');
4130 init_integral_libfuncs (ior_optab
, "ior", '3');
4131 init_integral_libfuncs (xor_optab
, "xor", '3');
4132 init_integral_libfuncs (ashl_optab
, "ashl", '3');
4133 init_integral_libfuncs (ashr_optab
, "ashr", '3');
4134 init_integral_libfuncs (lshr_optab
, "lshr", '3');
4135 init_integral_libfuncs (smin_optab
, "min", '3');
4136 init_floating_libfuncs (smin_optab
, "min", '3');
4137 init_integral_libfuncs (smax_optab
, "max", '3');
4138 init_floating_libfuncs (smax_optab
, "max", '3');
4139 init_integral_libfuncs (umin_optab
, "umin", '3');
4140 init_integral_libfuncs (umax_optab
, "umax", '3');
4141 init_integral_libfuncs (neg_optab
, "neg", '2');
4142 init_floating_libfuncs (neg_optab
, "neg", '2');
4143 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
4144 init_integral_libfuncs (ffs_optab
, "ffs", '2');
4146 /* Comparison libcalls for integers MUST come in pairs, signed/unsigned. */
4147 init_integral_libfuncs (cmp_optab
, "cmp", '2');
4148 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
4149 init_floating_libfuncs (cmp_optab
, "cmp", '2');
4151 #ifdef MULSI3_LIBCALL
4152 smul_optab
->handlers
[(int) SImode
].libfunc
4153 = gen_rtx (SYMBOL_REF
, Pmode
, MULSI3_LIBCALL
);
4155 #ifdef MULDI3_LIBCALL
4156 smul_optab
->handlers
[(int) DImode
].libfunc
4157 = gen_rtx (SYMBOL_REF
, Pmode
, MULDI3_LIBCALL
);
4160 #ifdef DIVSI3_LIBCALL
4161 sdiv_optab
->handlers
[(int) SImode
].libfunc
4162 = gen_rtx (SYMBOL_REF
, Pmode
, DIVSI3_LIBCALL
);
4164 #ifdef DIVDI3_LIBCALL
4165 sdiv_optab
->handlers
[(int) DImode
].libfunc
4166 = gen_rtx (SYMBOL_REF
, Pmode
, DIVDI3_LIBCALL
);
4169 #ifdef UDIVSI3_LIBCALL
4170 udiv_optab
->handlers
[(int) SImode
].libfunc
4171 = gen_rtx (SYMBOL_REF
, Pmode
, UDIVSI3_LIBCALL
);
4173 #ifdef UDIVDI3_LIBCALL
4174 udiv_optab
->handlers
[(int) DImode
].libfunc
4175 = gen_rtx (SYMBOL_REF
, Pmode
, UDIVDI3_LIBCALL
);
4178 #ifdef MODSI3_LIBCALL
4179 smod_optab
->handlers
[(int) SImode
].libfunc
4180 = gen_rtx (SYMBOL_REF
, Pmode
, MODSI3_LIBCALL
);
4182 #ifdef MODDI3_LIBCALL
4183 smod_optab
->handlers
[(int) DImode
].libfunc
4184 = gen_rtx (SYMBOL_REF
, Pmode
, MODDI3_LIBCALL
);
4187 #ifdef UMODSI3_LIBCALL
4188 umod_optab
->handlers
[(int) SImode
].libfunc
4189 = gen_rtx (SYMBOL_REF
, Pmode
, UMODSI3_LIBCALL
);
4191 #ifdef UMODDI3_LIBCALL
4192 umod_optab
->handlers
[(int) DImode
].libfunc
4193 = gen_rtx (SYMBOL_REF
, Pmode
, UMODDI3_LIBCALL
);
4196 /* Use cabs for DC complex abs, since systems generally have cabs.
4197 Don't define any libcall for SCmode, so that cabs will be used. */
4198 abs_optab
->handlers
[(int) DCmode
].libfunc
4199 = gen_rtx (SYMBOL_REF
, Pmode
, "cabs");
4201 /* The ffs function operates on `int'. */
4202 #ifndef INT_TYPE_SIZE
4203 #define INT_TYPE_SIZE BITS_PER_WORD
4205 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)] .libfunc
4206 = gen_rtx (SYMBOL_REF
, Pmode
, "ffs");
4208 extendsfdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extendsfdf2");
4209 extendsfxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extendsfxf2");
4210 extendsftf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extendsftf2");
4211 extenddfxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extenddfxf2");
4212 extenddftf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extenddftf2");
4214 truncdfsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__truncdfsf2");
4215 truncxfsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__truncxfsf2");
4216 trunctfsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__trunctfsf2");
4217 truncxfdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__truncxfdf2");
4218 trunctfdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__trunctfdf2");
4220 memcpy_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "memcpy");
4221 bcopy_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "bcopy");
4222 memcmp_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "memcmp");
4223 bcmp_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gcc_bcmp");
4224 memset_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "memset");
4225 bzero_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "bzero");
4227 eqhf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqhf2");
4228 nehf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__nehf2");
4229 gthf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gthf2");
4230 gehf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gehf2");
4231 lthf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lthf2");
4232 lehf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lehf2");
4234 eqsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqsf2");
4235 nesf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__nesf2");
4236 gtsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gtsf2");
4237 gesf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gesf2");
4238 ltsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__ltsf2");
4239 lesf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lesf2");
4241 eqdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqdf2");
4242 nedf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__nedf2");
4243 gtdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gtdf2");
4244 gedf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gedf2");
4245 ltdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__ltdf2");
4246 ledf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__ledf2");
4248 eqxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqxf2");
4249 nexf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__nexf2");
4250 gtxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gtxf2");
4251 gexf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gexf2");
4252 ltxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__ltxf2");
4253 lexf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lexf2");
4255 eqtf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqtf2");
4256 netf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__netf2");
4257 gttf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gttf2");
4258 getf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__getf2");
4259 lttf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lttf2");
4260 letf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__letf2");
4262 floatsisf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatsisf");
4263 floatdisf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatdisf");
4264 floattisf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floattisf");
4266 floatsidf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatsidf");
4267 floatdidf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatdidf");
4268 floattidf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floattidf");
4270 floatsixf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatsixf");
4271 floatdixf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatdixf");
4272 floattixf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floattixf");
4274 floatsitf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatsitf");
4275 floatditf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatditf");
4276 floattitf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floattitf");
4278 fixsfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixsfsi");
4279 fixsfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixsfdi");
4280 fixsfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixsfti");
4282 fixdfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixdfsi");
4283 fixdfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixdfdi");
4284 fixdfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixdfti");
4286 fixxfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixxfsi");
4287 fixxfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixxfdi");
4288 fixxfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixxfti");
4290 fixtfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixtfsi");
4291 fixtfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixtfdi");
4292 fixtfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixtfti");
4294 fixunssfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunssfsi");
4295 fixunssfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunssfdi");
4296 fixunssfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunssfti");
4298 fixunsdfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsdfsi");
4299 fixunsdfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsdfdi");
4300 fixunsdfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsdfti");
4302 fixunsxfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsxfsi");
4303 fixunsxfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsxfdi");
4304 fixunsxfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsxfti");
4306 fixunstfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunstfsi");
4307 fixunstfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunstfdi");
4308 fixunstfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunstfti");
4310 #ifdef INIT_TARGET_OPTABS
4311 /* Allow the target to add more libcalls or rename some, etc. */
4318 /* SCO 3.2 apparently has a broken ldexp. */
4331 #endif /* BROKEN_LDEXP */