1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 1988, 1989, 1992 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
any later version.
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
26 #include "insn-flags.h"
27 #include "insn-codes.h"
28 #include "insn-config.h"
33 static rtx
extract_split_bit_field ();
34 static rtx
extract_fixed_bit_field ();
35 static void store_split_bit_field ();
36 static void store_fixed_bit_field ();
37 static rtx
mask_rtx ();
38 static rtx
lshift_value ();
40 #define CEIL(x,y) (((x) + (y) - 1) / (y))
42 /* Non-zero means multiply instructions are cheaper than shifts. */
43 int mult_is_very_cheap
;
45 /* Non-zero means divides or modulus operations are relatively cheap for
46 powers of two, so don't use branches; emit the operation instead.
47 Usually, this will mean that the MD file will emit non-branch
50 static int sdiv_pow2_cheap
, smod_pow2_cheap
;
52 /* Cost of various pieces of RTL. */
53 static int add_cost
, shift_cost
, mult_cost
, negate_cost
, lea_cost
;
55 /* Max scale factor for scaled address in lea instruction. */
56 static int lea_max_mul
;
61 char *free_point
= (char *) oballoc (1);
62 /* This is "some random pseudo register" for purposes of calling recog
63 to see what insns exist. */
64 rtx reg
= gen_rtx (REG
, word_mode
, FIRST_PSEUDO_REGISTER
);
65 rtx pow2
= GEN_INT (32);
70 add_cost
= rtx_cost (gen_rtx (PLUS
, word_mode
, reg
, reg
), SET
);
71 shift_cost
= rtx_cost (gen_rtx (LSHIFT
, word_mode
, reg
,
72 /* Using a constant gives better
73 estimate of typical costs.
74 1 or 2 might have quirks. */
76 mult_cost
= rtx_cost (gen_rtx (MULT
, word_mode
, reg
, reg
), SET
);
77 negate_cost
= rtx_cost (gen_rtx (NEG
, word_mode
, reg
), SET
);
79 /* 999999 is chosen to avoid any plausible faster special case. */
81 = (rtx_cost (gen_rtx (MULT
, word_mode
, reg
, GEN_INT (999999)), SET
)
82 < rtx_cost (gen_rtx (LSHIFT
, word_mode
, reg
, GEN_INT (7)), SET
));
85 = rtx_cost (gen_rtx (DIV
, word_mode
, reg
, pow2
), SET
) <= 2 * add_cost
;
87 = rtx_cost (gen_rtx (MOD
, word_mode
, reg
, pow2
), SET
) <= 2 * add_cost
;
92 lea
= gen_rtx (SET
, VOIDmode
, reg
,
93 gen_rtx (PLUS
, word_mode
,
94 gen_rtx (MULT
, word_mode
, reg
, GEN_INT (i
)),
96 /* Using 0 as second argument is not quite right,
97 but what else is there to do? */
98 if (recog (lea
, 0, &dummy
) < 0)
101 lea_cost
= rtx_cost (SET_SRC (lea
), SET
);
104 /* Free the objects we just allocated. */
108 /* Return an rtx representing minus the value of X.
109 MODE is the intended mode of the result,
110 useful if X is a CONST_INT. */
114 enum machine_mode mode
;
117 if (GET_CODE (x
) == CONST_INT
)
119 HOST_WIDE_INT val
= - INTVAL (x
);
120 if (GET_MODE_BITSIZE (mode
) < HOST_BITS_PER_WIDE_INT
)
122 /* Sign extend the value from the bits that are significant. */
123 if (val
& ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (mode
) - 1)))
124 val
|= (HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (mode
);
126 val
&= ((HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (mode
)) - 1;
128 return GEN_INT (val
);
131 return expand_unop (GET_MODE (x
), neg_optab
, x
, NULL_RTX
, 0);
134 /* Generate code to store value from rtx VALUE
135 into a bit-field within structure STR_RTX
136 containing BITSIZE bits starting at bit BITNUM.
137 FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
138 ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
139 TOTAL_SIZE is the size of the structure in bytes, or -1 if varying. */
141 /* ??? Note that there are two different ideas here for how
142 to determine the size to count bits within, for a register.
143 One is BITS_PER_WORD, and the other is the size of operand 3
144 of the insv pattern. (The latter assumes that an n-bit machine
145 will be able to insert bit fields up to n bits wide.)
146 It isn't certain that either of these is right.
147 extract_bit_field has the same quandary. */
150 store_bit_field (str_rtx
, bitsize
, bitnum
, fieldmode
, value
, align
, total_size
)
152 register int bitsize
;
154 enum machine_mode fieldmode
;
159 int unit
= (GET_CODE (str_rtx
) == MEM
) ? BITS_PER_UNIT
: BITS_PER_WORD
;
160 register int offset
= bitnum
/ unit
;
161 register int bitpos
= bitnum
% unit
;
162 register rtx op0
= str_rtx
;
164 if (GET_CODE (str_rtx
) == MEM
&& ! MEM_IN_STRUCT_P (str_rtx
))
167 /* Discount the part of the structure before the desired byte.
168 We need to know how many bytes are safe to reference after it. */
170 total_size
-= (bitpos
/ BIGGEST_ALIGNMENT
171 * (BIGGEST_ALIGNMENT
/ BITS_PER_UNIT
));
173 while (GET_CODE (op0
) == SUBREG
)
175 /* The following line once was done only if WORDS_BIG_ENDIAN,
176 but I think that is a mistake. WORDS_BIG_ENDIAN is
177 meaningful at a much higher level; when structures are copied
178 between memory and regs, the higher-numbered regs
179 always get higher addresses. */
180 offset
+= SUBREG_WORD (op0
);
181 /* We used to adjust BITPOS here, but now we do the whole adjustment
182 right after the loop. */
183 op0
= SUBREG_REG (op0
);
187 /* If OP0 is a register, BITPOS must count within a word.
188 But as we have it, it counts within whatever size OP0 now has.
189 On a bigendian machine, these are not the same, so convert. */
190 if (GET_CODE (op0
) != MEM
&& unit
> GET_MODE_BITSIZE (GET_MODE (op0
)))
191 bitpos
+= unit
- GET_MODE_BITSIZE (GET_MODE (op0
));
194 value
= protect_from_queue (value
, 0);
197 value
= force_not_mem (value
);
199 /* Note that the adjustment of BITPOS above has no effect on whether
200 BITPOS is 0 in a REG bigger than a word. */
201 if (GET_MODE_SIZE (fieldmode
) >= UNITS_PER_WORD
202 && (! STRICT_ALIGNMENT
|| GET_CODE (op0
) != MEM
)
203 && bitpos
== 0 && bitsize
== GET_MODE_BITSIZE (fieldmode
))
205 /* Storing in a full-word or multi-word field in a register
206 can be done with just SUBREG. */
207 if (GET_MODE (op0
) != fieldmode
)
208 if (GET_CODE (op0
) == REG
)
209 op0
= gen_rtx (SUBREG
, fieldmode
, op0
, offset
);
211 op0
= change_address (op0
, fieldmode
,
212 plus_constant (XEXP (op0
, 0), offset
));
213 emit_move_insn (op0
, value
);
217 /* Storing an lsb-aligned field in a register
218 can be done with a movestrict instruction. */
220 if (GET_CODE (op0
) != MEM
222 && bitpos
+ bitsize
== unit
226 && bitsize
== GET_MODE_BITSIZE (fieldmode
)
227 && (GET_MODE (op0
) == fieldmode
228 || (movstrict_optab
->handlers
[(int) fieldmode
].insn_code
229 != CODE_FOR_nothing
)))
231 /* Get appropriate low part of the value being stored. */
232 if (GET_CODE (value
) == CONST_INT
|| GET_CODE (value
) == REG
)
233 value
= gen_lowpart (fieldmode
, value
);
234 else if (!(GET_CODE (value
) == SYMBOL_REF
235 || GET_CODE (value
) == LABEL_REF
236 || GET_CODE (value
) == CONST
))
237 value
= convert_to_mode (fieldmode
, value
, 0);
239 if (GET_MODE (op0
) == fieldmode
)
240 emit_move_insn (op0
, value
);
243 int icode
= movstrict_optab
->handlers
[(int) fieldmode
].insn_code
;
244 if(! (*insn_operand_predicate
[icode
][1]) (value
, fieldmode
))
245 value
= copy_to_mode_reg (fieldmode
, value
);
246 emit_insn (GEN_FCN (icode
)
247 (gen_rtx (SUBREG
, fieldmode
, op0
, offset
), value
));
252 /* Handle fields bigger than a word. */
254 if (bitsize
> BITS_PER_WORD
)
256 /* Here we transfer the words of the field
257 in the order least significant first.
258 This is because the most significant word is the one which may
259 be less than full. */
261 int nwords
= (bitsize
+ (BITS_PER_WORD
- 1)) / BITS_PER_WORD
;
264 /* This is the mode we must force value to, so that there will be enough
265 subwords to extract. Note that fieldmode will often (always?) be
266 VOIDmode, because that is what store_field uses to indicate that this
267 is a bit field, but passing VOIDmode to operand_subword_force will
268 result in an abort. */
269 fieldmode
= mode_for_size (nwords
* BITS_PER_WORD
, MODE_INT
, 0);
271 for (i
= 0; i
< nwords
; i
++)
273 /* If I is 0, use the low-order word in both field and target;
274 if I is 1, use the next to lowest word; and so on. */
275 int wordnum
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
276 int bit_offset
= (WORDS_BIG_ENDIAN
277 ? MAX (bitsize
- (i
+ 1) * BITS_PER_WORD
, 0)
278 : i
* BITS_PER_WORD
);
279 store_bit_field (op0
, MIN (BITS_PER_WORD
,
280 bitsize
- i
* BITS_PER_WORD
),
281 bitnum
+ bit_offset
, word_mode
,
282 operand_subword_force (value
, wordnum
, fieldmode
),
288 /* From here on we can assume that the field to be stored in is
289 a full-word (whatever type that is), since it is shorter than a word. */
291 /* OFFSET is the number of words or bytes (UNIT says which)
292 from STR_RTX to the first word or byte containing part of the field. */
294 if (GET_CODE (op0
) == REG
)
297 || GET_MODE_SIZE (GET_MODE (op0
)) > UNITS_PER_WORD
)
298 op0
= gen_rtx (SUBREG
, TYPE_MODE (type_for_size (BITS_PER_WORD
, 0)),
304 op0
= protect_from_queue (op0
, 1);
307 /* Now OFFSET is nonzero only if OP0 is memory
308 and is therefore always measured in bytes. */
312 && !(bitsize
== 1 && GET_CODE (value
) == CONST_INT
)
313 /* Ensure insv's size is wide enough for this field. */
314 && (GET_MODE_BITSIZE (insn_operand_mode
[(int) CODE_FOR_insv
][3])
317 int xbitpos
= bitpos
;
320 rtx last
= get_last_insn ();
322 enum machine_mode maxmode
323 = insn_operand_mode
[(int) CODE_FOR_insv
][3];
325 int save_volatile_ok
= volatile_ok
;
328 /* If this machine's insv can only insert into a register, or if we
329 are to force MEMs into a register, copy OP0 into a register and
330 save it back later. */
331 if (GET_CODE (op0
) == MEM
333 || ! ((*insn_operand_predicate
[(int) CODE_FOR_insv
][0])
337 enum machine_mode bestmode
;
339 /* Get the mode to use for inserting into this field. If OP0 is
340 BLKmode, get the smallest mode consistent with the alignment. If
341 OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
342 mode. Otherwise, use the smallest mode containing the field. */
344 if (GET_MODE (op0
) == BLKmode
345 || GET_MODE_SIZE (GET_MODE (op0
)) > GET_MODE_SIZE (maxmode
))
347 = get_best_mode (bitsize
, bitnum
, align
* BITS_PER_UNIT
, maxmode
,
348 MEM_VOLATILE_P (op0
));
350 bestmode
= GET_MODE (op0
);
352 if (bestmode
== VOIDmode
)
355 /* Adjust address to point to the containing unit of that mode. */
356 unit
= GET_MODE_BITSIZE (bestmode
);
357 /* Compute offset as multiple of this unit, counting in bytes. */
358 offset
= (bitnum
/ unit
) * GET_MODE_SIZE (bestmode
);
359 bitpos
= bitnum
% unit
;
360 op0
= change_address (op0
, bestmode
,
361 plus_constant (XEXP (op0
, 0), offset
));
363 /* Fetch that unit, store the bitfield in it, then store the unit. */
364 tempreg
= copy_to_reg (op0
);
365 store_bit_field (tempreg
, bitsize
, bitpos
, fieldmode
, value
,
367 emit_move_insn (op0
, tempreg
);
370 volatile_ok
= save_volatile_ok
;
372 /* Add OFFSET into OP0's address. */
373 if (GET_CODE (xop0
) == MEM
)
374 xop0
= change_address (xop0
, byte_mode
,
375 plus_constant (XEXP (xop0
, 0), offset
));
377 /* If xop0 is a register, we need it in MAXMODE
378 to make it acceptable to the format of insv. */
379 if (GET_CODE (xop0
) == SUBREG
)
380 PUT_MODE (xop0
, maxmode
);
381 if (GET_CODE (xop0
) == REG
&& GET_MODE (xop0
) != maxmode
)
382 xop0
= gen_rtx (SUBREG
, maxmode
, xop0
, 0);
384 /* On big-endian machines, we count bits from the most significant.
385 If the bit field insn does not, we must invert. */
387 #if BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN
388 xbitpos
= unit
- bitsize
- xbitpos
;
390 /* We have been counting XBITPOS within UNIT.
391 Count instead within the size of the register. */
393 if (GET_CODE (xop0
) != MEM
)
394 xbitpos
+= GET_MODE_BITSIZE (maxmode
) - unit
;
396 unit
= GET_MODE_BITSIZE (maxmode
);
398 /* Convert VALUE to maxmode (which insv insn wants) in VALUE1. */
400 if (GET_MODE (value
) != maxmode
)
402 if (GET_MODE_BITSIZE (GET_MODE (value
)) >= bitsize
)
404 /* Optimization: Don't bother really extending VALUE
405 if it has all the bits we will actually use. However,
406 if we must narrow it, be sure we do it correctly. */
408 if (GET_MODE_SIZE (GET_MODE (value
)) < GET_MODE_SIZE (maxmode
))
410 /* Avoid making subreg of a subreg, or of a mem. */
411 if (GET_CODE (value1
) != REG
)
412 value1
= copy_to_reg (value1
);
413 value1
= gen_rtx (SUBREG
, maxmode
, value1
, 0);
416 value1
= gen_lowpart (maxmode
, value1
);
418 else if (!CONSTANT_P (value
))
419 /* Parse phase is supposed to make VALUE's data type
420 match that of the component reference, which is a type
421 at least as wide as the field; so VALUE should have
422 a mode that corresponds to that type. */
426 /* If this machine's insv insists on a register,
427 get VALUE1 into a register. */
428 if (! ((*insn_operand_predicate
[(int) CODE_FOR_insv
][3])
430 value1
= force_reg (maxmode
, value1
);
432 pat
= gen_insv (xop0
, GEN_INT (bitsize
), GEN_INT (xbitpos
), value1
);
437 delete_insns_since (last
);
438 store_fixed_bit_field (op0
, offset
, bitsize
, bitpos
, value
, align
);
444 /* Insv is not available; store using shifts and boolean ops. */
445 store_fixed_bit_field (op0
, offset
, bitsize
, bitpos
, value
, align
);
449 /* Use shifts and boolean operations to store VALUE
450 into a bit field of width BITSIZE
451 in a memory location specified by OP0 except offset by OFFSET bytes.
452 (OFFSET must be 0 if OP0 is a register.)
453 The field starts at position BITPOS within the byte.
454 (If OP0 is a register, it may be a full word or a narrower mode,
455 but BITPOS still counts within a full word,
456 which is significant on bigendian machines.)
457 STRUCT_ALIGN is the alignment the structure is known to have (in bytes).
459 Note that protect_from_queue has already been done on OP0 and VALUE. */
462 store_fixed_bit_field (op0
, offset
, bitsize
, bitpos
, value
, struct_align
)
464 register int offset
, bitsize
, bitpos
;
468 register enum machine_mode mode
;
469 int total_bits
= BITS_PER_WORD
;
474 /* Add OFFSET to OP0's address (if it is in memory)
475 and if a single byte contains the whole bit field
476 change OP0 to a byte. */
478 /* There is a case not handled here:
479 a structure with a known alignment of just a halfword
480 and a field split across two aligned halfwords within the structure.
481 Or likewise a structure with a known alignment of just a byte
482 and a field split across two bytes.
483 Such cases are not supposed to be able to occur. */
485 if (GET_CODE (op0
) == REG
|| GET_CODE (op0
) == SUBREG
)
489 /* Special treatment for a bit field split across two registers. */
490 if (bitsize
+ bitpos
> BITS_PER_WORD
)
492 store_split_bit_field (op0
, bitsize
, bitpos
, value
, BITS_PER_WORD
);
498 /* Get the proper mode to use for this field. We want a mode that
499 includes the entire field. If such a mode would be larger than
500 a word, we won't be doing the extraction the normal way. */
502 mode
= get_best_mode (bitsize
, bitpos
+ offset
* BITS_PER_UNIT
,
503 struct_align
* BITS_PER_UNIT
, word_mode
,
504 GET_CODE (op0
) == MEM
&& MEM_VOLATILE_P (op0
));
506 if (mode
== VOIDmode
)
508 /* The only way this should occur is if the field spans word
510 store_split_bit_field (op0
, bitsize
, bitpos
+ offset
* BITS_PER_UNIT
,
511 value
, struct_align
);
515 total_bits
= GET_MODE_BITSIZE (mode
);
517 /* Get ref to an aligned byte, halfword, or word containing the field.
518 Adjust BITPOS to be position within a word,
519 and OFFSET to be the offset of that word.
520 Then alter OP0 to refer to that word. */
521 bitpos
+= (offset
% (total_bits
/ BITS_PER_UNIT
)) * BITS_PER_UNIT
;
522 offset
-= (offset
% (total_bits
/ BITS_PER_UNIT
));
523 op0
= change_address (op0
, mode
,
524 plus_constant (XEXP (op0
, 0), offset
));
527 mode
= GET_MODE (op0
);
529 /* Now MODE is either some integral mode for a MEM as OP0,
530 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
531 The bit field is contained entirely within OP0.
532 BITPOS is the starting bit number within OP0.
533 (OP0's mode may actually be narrower than MODE.) */
536 /* BITPOS is the distance between our msb
537 and that of the containing datum.
538 Convert it to the distance from the lsb. */
540 bitpos
= total_bits
- bitsize
- bitpos
;
542 /* Now BITPOS is always the distance between our lsb
545 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
546 we must first convert its mode to MODE. */
548 if (GET_CODE (value
) == CONST_INT
)
550 register HOST_WIDE_INT v
= INTVAL (value
);
552 if (bitsize
< HOST_BITS_PER_WIDE_INT
)
553 v
&= ((HOST_WIDE_INT
) 1 << bitsize
) - 1;
557 else if ((bitsize
< HOST_BITS_PER_WIDE_INT
558 && v
== ((HOST_WIDE_INT
) 1 << bitsize
) - 1)
559 || (bitsize
== HOST_BITS_PER_WIDE_INT
&& v
== -1))
562 value
= lshift_value (mode
, value
, bitpos
, bitsize
);
566 int must_and
= (GET_MODE_BITSIZE (GET_MODE (value
)) != bitsize
567 && bitpos
+ bitsize
!= GET_MODE_BITSIZE (mode
));
569 if (GET_MODE (value
) != mode
)
571 /* If VALUE is a floating-point mode, access it as an integer
572 of the corresponding size, then convert it. This can occur on
573 a machine with 64 bit registers that uses SFmode for float. */
574 if (GET_MODE_CLASS (GET_MODE (value
)) == MODE_FLOAT
)
576 if (GET_CODE (value
) != REG
)
577 value
= copy_to_reg (value
);
579 = gen_rtx (SUBREG
, word_mode
, value
, 0);
582 if ((GET_CODE (value
) == REG
|| GET_CODE (value
) == SUBREG
)
583 && GET_MODE_SIZE (mode
) < GET_MODE_SIZE (GET_MODE (value
)))
584 value
= gen_lowpart (mode
, value
);
586 value
= convert_to_mode (mode
, value
, 1);
590 value
= expand_binop (mode
, and_optab
, value
,
591 mask_rtx (mode
, 0, bitsize
, 0),
592 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
594 value
= expand_shift (LSHIFT_EXPR
, mode
, value
,
595 build_int_2 (bitpos
, 0), NULL_RTX
, 1);
598 /* Now clear the chosen bits in OP0,
599 except that if VALUE is -1 we need not bother. */
601 subtarget
= (GET_CODE (op0
) == REG
|| ! flag_force_mem
) ? op0
: 0;
605 temp
= expand_binop (mode
, and_optab
, op0
,
606 mask_rtx (mode
, bitpos
, bitsize
, 1),
607 subtarget
, 1, OPTAB_LIB_WIDEN
);
613 /* Now logical-or VALUE into OP0, unless it is zero. */
616 temp
= expand_binop (mode
, ior_optab
, temp
, value
,
617 subtarget
, 1, OPTAB_LIB_WIDEN
);
619 emit_move_insn (op0
, temp
);
622 /* Store a bit field that is split across two words.
624 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
625 BITSIZE is the field width; BITPOS the position of its first bit
627 VALUE is the value to store. */
630 store_split_bit_field (op0
, bitsize
, bitpos
, value
, align
)
636 /* BITSIZE_1 is size of the part in the first word. */
637 int bitsize_1
= BITS_PER_WORD
- bitpos
% BITS_PER_WORD
;
638 /* BITSIZE_2 is size of the rest (in the following word). */
639 int bitsize_2
= bitsize
- bitsize_1
;
641 int unit
= GET_CODE (op0
) == MEM
? BITS_PER_UNIT
: BITS_PER_WORD
;
642 int offset
= bitpos
/ unit
;
645 /* The field must span exactly one word boundary. */
646 if (bitpos
/ BITS_PER_WORD
!= (bitpos
+ bitsize
- 1) / BITS_PER_WORD
- 1)
649 if (GET_MODE (value
) != VOIDmode
)
650 value
= convert_to_mode (word_mode
, value
, 1);
651 if (CONSTANT_P (value
) && GET_CODE (value
) != CONST_INT
)
652 value
= copy_to_reg (value
);
654 /* Split the value into two parts:
655 PART1 gets that which goes in the first word; PART2 the other. */
657 /* PART1 gets the more significant part. */
658 if (GET_CODE (value
) == CONST_INT
)
660 part1
= GEN_INT ((unsigned HOST_WIDE_INT
) (INTVAL (value
)) >> bitsize_2
);
661 part2
= GEN_INT ((unsigned HOST_WIDE_INT
) (INTVAL (value
))
662 & (((HOST_WIDE_INT
) 1 << bitsize_2
) - 1));
666 part1
= extract_fixed_bit_field (word_mode
, value
, 0, bitsize_1
,
667 BITS_PER_WORD
- bitsize
, NULL_RTX
, 1,
669 part2
= extract_fixed_bit_field (word_mode
, value
, 0, bitsize_2
,
670 BITS_PER_WORD
- bitsize_2
, NULL_RTX
, 1,
674 /* PART1 gets the less significant part. */
675 if (GET_CODE (value
) == CONST_INT
)
677 part1
= GEN_INT ((unsigned HOST_WIDE_INT
) (INTVAL (value
))
678 & (((HOST_WIDE_INT
) 1 << bitsize_1
) - 1));
679 part2
= GEN_INT ((unsigned HOST_WIDE_INT
) (INTVAL (value
)) >> bitsize_1
);
683 part1
= extract_fixed_bit_field (word_mode
, value
, 0, bitsize_1
, 0,
684 NULL_RTX
, 1, BITS_PER_WORD
);
685 part2
= extract_fixed_bit_field (word_mode
, value
, 0, bitsize_2
,
686 bitsize_1
, NULL_RTX
, 1, BITS_PER_WORD
);
690 /* Store PART1 into the first word. If OP0 is a MEM, pass OP0 and the
691 offset computed above. Otherwise, get the proper word and pass an
693 word
= (GET_CODE (op0
) == MEM
? op0
694 : operand_subword (op0
, offset
, 1, GET_MODE (op0
)));
698 store_fixed_bit_field (word
, GET_CODE (op0
) == MEM
? offset
: 0,
699 bitsize_1
, bitpos
% unit
, part1
, align
);
701 /* Offset op0 by 1 word to get to the following one. */
702 if (GET_CODE (op0
) == SUBREG
)
703 word
= operand_subword (SUBREG_REG (op0
), SUBREG_WORD (op0
) + offset
+ 1,
705 else if (GET_CODE (op0
) == MEM
)
708 word
= operand_subword (op0
, offset
+ 1, 1, GET_MODE (op0
));
713 /* Store PART2 into the second word. */
714 store_fixed_bit_field (word
,
715 (GET_CODE (op0
) == MEM
716 ? CEIL (offset
+ 1, UNITS_PER_WORD
) * UNITS_PER_WORD
718 bitsize_2
, 0, part2
, align
);
721 /* Generate code to extract a byte-field from STR_RTX
722 containing BITSIZE bits, starting at BITNUM,
723 and put it in TARGET if possible (if TARGET is nonzero).
724 Regardless of TARGET, we return the rtx for where the value is placed.
727 STR_RTX is the structure containing the byte (a REG or MEM).
728 UNSIGNEDP is nonzero if this is an unsigned bit field.
729 MODE is the natural mode of the field value once extracted.
730 TMODE is the mode the caller would like the value to have;
731 but the value may be returned with type MODE instead.
733 ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
734 TOTAL_SIZE is the size in bytes of the containing structure,
737 If a TARGET is specified and we can store in it at no extra cost,
738 we do so, and return TARGET.
739 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
740 if they are equally easy. */
743 extract_bit_field (str_rtx
, bitsize
, bitnum
, unsignedp
,
744 target
, mode
, tmode
, align
, total_size
)
746 register int bitsize
;
750 enum machine_mode mode
, tmode
;
754 int unit
= (GET_CODE (str_rtx
) == MEM
) ? BITS_PER_UNIT
: BITS_PER_WORD
;
755 register int offset
= bitnum
/ unit
;
756 register int bitpos
= bitnum
% unit
;
757 register rtx op0
= str_rtx
;
758 rtx spec_target
= target
;
759 rtx spec_target_subreg
= 0;
761 if (GET_CODE (str_rtx
) == MEM
&& ! MEM_IN_STRUCT_P (str_rtx
))
764 /* Discount the part of the structure before the desired byte.
765 We need to know how many bytes are safe to reference after it. */
767 total_size
-= (bitpos
/ BIGGEST_ALIGNMENT
768 * (BIGGEST_ALIGNMENT
/ BITS_PER_UNIT
));
770 if (tmode
== VOIDmode
)
772 while (GET_CODE (op0
) == SUBREG
)
774 offset
+= SUBREG_WORD (op0
);
775 op0
= SUBREG_REG (op0
);
779 /* If OP0 is a register, BITPOS must count within a word.
780 But as we have it, it counts within whatever size OP0 now has.
781 On a bigendian machine, these are not the same, so convert. */
782 if (GET_CODE (op0
) != MEM
&& unit
> GET_MODE_BITSIZE (GET_MODE (op0
)))
783 bitpos
+= unit
- GET_MODE_BITSIZE (GET_MODE (op0
));
786 /* Extracting a full-word or multi-word value
787 from a structure in a register.
788 This can be done with just SUBREG.
789 So too extracting a subword value in
790 the least significant part of the register. */
792 if (GET_CODE (op0
) == REG
793 && ((bitsize
>= BITS_PER_WORD
&& bitsize
== GET_MODE_BITSIZE (mode
)
794 && bitpos
% BITS_PER_WORD
== 0)
795 || (mode_for_size (bitsize
, GET_MODE_CLASS (tmode
), 0) != BLKmode
797 && bitpos
+ bitsize
== BITS_PER_WORD
803 enum machine_mode mode1
804 = mode_for_size (bitsize
, GET_MODE_CLASS (tmode
), 0);
806 if (mode1
!= GET_MODE (op0
))
807 op0
= gen_rtx (SUBREG
, mode1
, op0
, offset
);
810 return convert_to_mode (tmode
, op0
, unsignedp
);
814 /* Handle fields bigger than a word. */
816 if (bitsize
> BITS_PER_WORD
)
818 /* Here we transfer the words of the field
819 in the order least significant first.
820 This is because the most significant word is the one which may
821 be less than full. */
823 int nwords
= (bitsize
+ (BITS_PER_WORD
- 1)) / BITS_PER_WORD
;
826 if (target
== 0 || GET_CODE (target
) != REG
)
827 target
= gen_reg_rtx (mode
);
829 for (i
= 0; i
< nwords
; i
++)
831 /* If I is 0, use the low-order word in both field and target;
832 if I is 1, use the next to lowest word; and so on. */
833 int wordnum
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
834 int bit_offset
= (WORDS_BIG_ENDIAN
835 ? MAX (0, bitsize
- (i
+ 1) * BITS_PER_WORD
)
836 : i
* BITS_PER_WORD
);
837 rtx target_part
= operand_subword (target
, wordnum
, 1, VOIDmode
);
839 = extract_bit_field (op0
, MIN (BITS_PER_WORD
,
840 bitsize
- i
* BITS_PER_WORD
),
842 1, target_part
, mode
, word_mode
,
845 if (target_part
== 0)
848 if (result_part
!= target_part
)
849 emit_move_insn (target_part
, result_part
);
855 /* From here on we know the desired field is smaller than a word
856 so we can assume it is an integer. So we can safely extract it as one
857 size of integer, if necessary, and then truncate or extend
858 to the size that is wanted. */
860 /* OFFSET is the number of words or bytes (UNIT says which)
861 from STR_RTX to the first word or byte containing part of the field. */
863 if (GET_CODE (op0
) == REG
)
866 || GET_MODE_SIZE (GET_MODE (op0
)) > UNITS_PER_WORD
)
867 op0
= gen_rtx (SUBREG
, TYPE_MODE (type_for_size (BITS_PER_WORD
, 0)),
873 op0
= protect_from_queue (str_rtx
, 1);
876 /* Now OFFSET is nonzero only for memory operands. */
882 && (GET_MODE_BITSIZE (insn_operand_mode
[(int) CODE_FOR_extzv
][0])
885 int xbitpos
= bitpos
, xoffset
= offset
;
886 rtx bitsize_rtx
, bitpos_rtx
;
887 rtx last
= get_last_insn();
889 rtx xtarget
= target
;
890 rtx xspec_target
= spec_target
;
891 rtx xspec_target_subreg
= spec_target_subreg
;
893 enum machine_mode maxmode
894 = insn_operand_mode
[(int) CODE_FOR_extzv
][0];
896 if (GET_CODE (xop0
) == MEM
)
898 int save_volatile_ok
= volatile_ok
;
901 /* Is the memory operand acceptable? */
903 || ! ((*insn_operand_predicate
[(int) CODE_FOR_extzv
][1])
904 (xop0
, GET_MODE (xop0
))))
906 /* No, load into a reg and extract from there. */
907 enum machine_mode bestmode
;
909 /* Get the mode to use for inserting into this field. If
910 OP0 is BLKmode, get the smallest mode consistent with the
911 alignment. If OP0 is a non-BLKmode object that is no
912 wider than MAXMODE, use its mode. Otherwise, use the
913 smallest mode containing the field. */
915 if (GET_MODE (xop0
) == BLKmode
916 || (GET_MODE_SIZE (GET_MODE (op0
))
917 > GET_MODE_SIZE (maxmode
)))
918 bestmode
= get_best_mode (bitsize
, bitnum
,
919 align
* BITS_PER_UNIT
, maxmode
,
920 MEM_VOLATILE_P (xop0
));
922 bestmode
= GET_MODE (xop0
);
924 if (bestmode
== VOIDmode
)
927 /* Compute offset as multiple of this unit,
928 counting in bytes. */
929 unit
= GET_MODE_BITSIZE (bestmode
);
930 xoffset
= (bitnum
/ unit
) * GET_MODE_SIZE (bestmode
);
931 xbitpos
= bitnum
% unit
;
932 xop0
= change_address (xop0
, bestmode
,
933 plus_constant (XEXP (xop0
, 0),
935 /* Fetch it to a register in that size. */
936 xop0
= force_reg (bestmode
, xop0
);
938 /* XBITPOS counts within UNIT, which is what is expected. */
941 /* Get ref to first byte containing part of the field. */
942 xop0
= change_address (xop0
, byte_mode
,
943 plus_constant (XEXP (xop0
, 0), xoffset
));
945 volatile_ok
= save_volatile_ok
;
948 /* If op0 is a register, we need it in MAXMODE (which is usually
949 SImode). to make it acceptable to the format of extzv. */
950 if (GET_CODE (xop0
) == SUBREG
&& GET_MODE (xop0
) != maxmode
)
952 if (GET_CODE (xop0
) == REG
&& GET_MODE (xop0
) != maxmode
)
953 xop0
= gen_rtx (SUBREG
, maxmode
, xop0
, 0);
955 /* On big-endian machines, we count bits from the most significant.
956 If the bit field insn does not, we must invert. */
957 #if BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN
958 xbitpos
= unit
- bitsize
- xbitpos
;
960 /* Now convert from counting within UNIT to counting in MAXMODE. */
962 if (GET_CODE (xop0
) != MEM
)
963 xbitpos
+= GET_MODE_BITSIZE (maxmode
) - unit
;
965 unit
= GET_MODE_BITSIZE (maxmode
);
968 || (flag_force_mem
&& GET_CODE (xtarget
) == MEM
))
969 xtarget
= xspec_target
= gen_reg_rtx (tmode
);
971 if (GET_MODE (xtarget
) != maxmode
)
973 if (GET_CODE (xtarget
) == REG
)
975 int wider
= (GET_MODE_SIZE (maxmode
)
976 > GET_MODE_SIZE (GET_MODE (xtarget
)));
977 xtarget
= gen_lowpart (maxmode
, xtarget
);
979 xspec_target_subreg
= xtarget
;
982 xtarget
= gen_reg_rtx (maxmode
);
985 /* If this machine's extzv insists on a register target,
986 make sure we have one. */
987 if (! ((*insn_operand_predicate
[(int) CODE_FOR_extzv
][0])
989 xtarget
= gen_reg_rtx (maxmode
);
991 bitsize_rtx
= GEN_INT (bitsize
);
992 bitpos_rtx
= GEN_INT (xbitpos
);
994 pat
= gen_extzv (protect_from_queue (xtarget
, 1),
995 xop0
, bitsize_rtx
, bitpos_rtx
);
1000 spec_target
= xspec_target
;
1001 spec_target_subreg
= xspec_target_subreg
;
1005 delete_insns_since (last
);
1006 target
= extract_fixed_bit_field (tmode
, op0
, offset
, bitsize
,
1007 bitpos
, target
, 1, align
);
1013 target
= extract_fixed_bit_field (tmode
, op0
, offset
, bitsize
, bitpos
,
1020 && (GET_MODE_BITSIZE (insn_operand_mode
[(int) CODE_FOR_extv
][0])
1023 int xbitpos
= bitpos
, xoffset
= offset
;
1024 rtx bitsize_rtx
, bitpos_rtx
;
1025 rtx last
= get_last_insn();
1026 rtx xop0
= op0
, xtarget
= target
;
1027 rtx xspec_target
= spec_target
;
1028 rtx xspec_target_subreg
= spec_target_subreg
;
1030 enum machine_mode maxmode
1031 = insn_operand_mode
[(int) CODE_FOR_extv
][0];
1033 if (GET_CODE (xop0
) == MEM
)
1035 /* Is the memory operand acceptable? */
1036 if (! ((*insn_operand_predicate
[(int) CODE_FOR_extv
][1])
1037 (xop0
, GET_MODE (xop0
))))
1039 /* No, load into a reg and extract from there. */
1040 enum machine_mode bestmode
;
1042 /* Get the mode to use for inserting into this field. If
1043 OP0 is BLKmode, get the smallest mode consistent with the
1044 alignment. If OP0 is a non-BLKmode object that is no
1045 wider than MAXMODE, use its mode. Otherwise, use the
1046 smallest mode containing the field. */
1048 if (GET_MODE (xop0
) == BLKmode
1049 || (GET_MODE_SIZE (GET_MODE (op0
))
1050 > GET_MODE_SIZE (maxmode
)))
1051 bestmode
= get_best_mode (bitsize
, bitnum
,
1052 align
* BITS_PER_UNIT
, maxmode
,
1053 MEM_VOLATILE_P (xop0
));
1055 bestmode
= GET_MODE (xop0
);
1057 if (bestmode
== VOIDmode
)
1060 /* Compute offset as multiple of this unit,
1061 counting in bytes. */
1062 unit
= GET_MODE_BITSIZE (bestmode
);
1063 xoffset
= (bitnum
/ unit
) * GET_MODE_SIZE (bestmode
);
1064 xbitpos
= bitnum
% unit
;
1065 xop0
= change_address (xop0
, bestmode
,
1066 plus_constant (XEXP (xop0
, 0),
1068 /* Fetch it to a register in that size. */
1069 xop0
= force_reg (bestmode
, xop0
);
1071 /* XBITPOS counts within UNIT, which is what is expected. */
1074 /* Get ref to first byte containing part of the field. */
1075 xop0
= change_address (xop0
, byte_mode
,
1076 plus_constant (XEXP (xop0
, 0), xoffset
));
1079 /* If op0 is a register, we need it in MAXMODE (which is usually
1080 SImode) to make it acceptable to the format of extv. */
1081 if (GET_CODE (xop0
) == SUBREG
&& GET_MODE (xop0
) != maxmode
)
1083 if (GET_CODE (xop0
) == REG
&& GET_MODE (xop0
) != maxmode
)
1084 xop0
= gen_rtx (SUBREG
, maxmode
, xop0
, 0);
1086 /* On big-endian machines, we count bits from the most significant.
1087 If the bit field insn does not, we must invert. */
1088 #if BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN
1089 xbitpos
= unit
- bitsize
- xbitpos
;
1091 /* XBITPOS counts within a size of UNIT.
1092 Adjust to count within a size of MAXMODE. */
1094 if (GET_CODE (xop0
) != MEM
)
1095 xbitpos
+= (GET_MODE_BITSIZE (maxmode
) - unit
);
1097 unit
= GET_MODE_BITSIZE (maxmode
);
1100 || (flag_force_mem
&& GET_CODE (xtarget
) == MEM
))
1101 xtarget
= xspec_target
= gen_reg_rtx (tmode
);
1103 if (GET_MODE (xtarget
) != maxmode
)
1105 if (GET_CODE (xtarget
) == REG
)
1107 int wider
= (GET_MODE_SIZE (maxmode
)
1108 > GET_MODE_SIZE (GET_MODE (xtarget
)));
1109 xtarget
= gen_lowpart (maxmode
, xtarget
);
1111 xspec_target_subreg
= xtarget
;
1114 xtarget
= gen_reg_rtx (maxmode
);
1117 /* If this machine's extv insists on a register target,
1118 make sure we have one. */
1119 if (! ((*insn_operand_predicate
[(int) CODE_FOR_extv
][0])
1120 (xtarget
, maxmode
)))
1121 xtarget
= gen_reg_rtx (maxmode
);
1123 bitsize_rtx
= GEN_INT (bitsize
);
1124 bitpos_rtx
= GEN_INT (xbitpos
);
1126 pat
= gen_extv (protect_from_queue (xtarget
, 1),
1127 xop0
, bitsize_rtx
, bitpos_rtx
);
1132 spec_target
= xspec_target
;
1133 spec_target_subreg
= xspec_target_subreg
;
1137 delete_insns_since (last
);
1138 target
= extract_fixed_bit_field (tmode
, op0
, offset
, bitsize
,
1139 bitpos
, target
, 0, align
);
1145 target
= extract_fixed_bit_field (tmode
, op0
, offset
, bitsize
, bitpos
,
1148 if (target
== spec_target
)
1150 if (target
== spec_target_subreg
)
1152 if (GET_MODE (target
) != tmode
&& GET_MODE (target
) != mode
)
1154 /* If the target mode is floating-point, first convert to the
1155 integer mode of that size and then access it as a floating-point
1156 value via a SUBREG. */
1157 if (GET_MODE_CLASS (tmode
) == MODE_FLOAT
)
1159 target
= convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode
),
1162 if (GET_CODE (target
) != REG
)
1163 target
= copy_to_reg (target
);
1164 return gen_rtx (SUBREG
, tmode
, target
, 0);
1167 return convert_to_mode (tmode
, target
, unsignedp
);
1172 /* Extract a bit field using shifts and boolean operations
1173 Returns an rtx to represent the value.
1174 OP0 addresses a register (word) or memory (byte).
1175 BITPOS says which bit within the word or byte the bit field starts in.
1176 OFFSET says how many bytes farther the bit field starts;
1177 it is 0 if OP0 is a register.
1178 BITSIZE says how many bits long the bit field is.
1179 (If OP0 is a register, it may be narrower than a full word,
1180 but BITPOS still counts within a full word,
1181 which is significant on bigendian machines.)
1183 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1184 If TARGET is nonzero, attempts to store the value there
1185 and return TARGET, but this is not guaranteed.
1186 If TARGET is not used, create a pseudo-reg of mode TMODE for the value.
1188 ALIGN is the alignment that STR_RTX is known to have, measured in bytes. */
/* Extract a bit field from OP0 using only shifts and boolean (AND) ops;
   returns an rtx holding the value, converted to TMODE.
   NOTE(review): this copy of the file is fragmentary -- the numbers at the
   left margin are the original file's line numbers, and gaps in them mark
   lines (mostly braces, `else' arms and trailing arguments) that are
   missing here.  Comments below describe only what the visible code shows. */
1191 extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
1192 target, unsignedp, align)
1193 enum machine_mode tmode;
1194 register rtx op0, target;
1195 register int offset, bitsize, bitpos;
/* NOTE(review): declarations for TARGET/UNSIGNEDP/ALIGN and the opening
   brace (original lines 1196-1198) are absent from this copy. */
1199 int total_bits = BITS_PER_WORD;
1200 enum machine_mode mode;
/* Register case: no memory addressing needed. */
1202 if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
1204 /* Special treatment for a bit field split across two registers. */
1205 if (bitsize + bitpos > BITS_PER_WORD)
1206 return extract_split_bit_field (op0, bitsize, bitpos,
/* NOTE(review): the remaining arguments of this call (original lines
   1207-1210) are missing here. */
1211 /* Get the proper mode to use for this field. We want a mode that
1212 includes the entire field. If such a mode would be larger than
1213 a word, we won't be doing the extraction the normal way. */
1215 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1216 align * BITS_PER_UNIT, word_mode,
1217 GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));
1219 if (mode == VOIDmode)
1220 /* The only way this should occur is if the field spans word
/* NOTE(review): continuation of this comment (original line 1221) lost. */
1222 return extract_split_bit_field (op0, bitsize,
1223 bitpos + offset * BITS_PER_UNIT,
/* NOTE(review): trailing arguments (original lines 1224-1225) lost. */
1226 total_bits = GET_MODE_BITSIZE (mode);
1228 /* Get ref to an aligned byte, halfword, or word containing the field.
1229 Adjust BITPOS to be position within a word,
1230 and OFFSET to be the offset of that word.
1231 Then alter OP0 to refer to that word. */
1232 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1233 offset -= (offset % (total_bits / BITS_PER_UNIT));
1234 op0 = change_address (op0, mode,
1235 plus_constant (XEXP (op0, 0), offset));
1238 mode = GET_MODE (op0);
1240 #if BYTES_BIG_ENDIAN
1241 /* BITPOS is the distance between our msb and that of OP0.
1242 Convert it to the distance from the lsb. */
1244 bitpos = total_bits - bitsize - bitpos;
1246 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1247 We have reduced the big-endian case to the little-endian case. */
/* Unsigned extraction: right-shift so the field starts at bit 0,
   zero-extend, then mask off any bits above BITSIZE. */
1253 /* If the field does not already start at the lsb,
1254 shift it so it does. */
1255 tree amount = build_int_2 (bitpos, 0);
1256 /* Maybe propagate the target for the shift. */
1257 /* But not if we will return it--could confuse integrate.c. */
1258 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1259 && !REG_FUNCTION_VALUE_P (target)
/* NOTE(review): the rest of this conditional expression (original line
   1260) is missing here. */
1261 if (tmode != mode) subtarget = 0;
1262 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1264 /* Convert the value to the desired mode. */
1266 op0 = convert_to_mode (tmode, op0, 1);
1268 /* Unless the msb of the field used to be the msb when we shifted,
1269 mask out the upper bits. */
1271 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
1273 #ifdef SLOW_ZERO_EXTEND
1274 /* Always generate an `and' if
1275 we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
1276 will combine fruitfully with the zero-extend. */
1281 return expand_binop (GET_MODE (op0), and_optab, op0,
1282 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1283 target, 1, OPTAB_LIB_WIDEN);
1287 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1288 then arithmetic-shift its lsb to the lsb of the word. */
1289 op0 = force_reg (mode, op0);
1293 /* Find the narrowest integer mode that contains the field. */
1295 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1296 mode = GET_MODE_WIDER_MODE (mode))
1297 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1299 op0 = convert_to_mode (mode, op0, 0);
/* If the field's msb is not already the mode's msb, left-shift it there
   first so the closing arithmetic right shift sign-extends correctly. */
1303 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1305 tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
1306 /* Maybe propagate the target for the shift. */
1307 /* But not if we will return the result--could confuse integrate.c. */
1308 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1309 && ! REG_FUNCTION_VALUE_P (target)
1311 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
/* Arithmetic right shift brings the field's lsb to bit 0 with sign bits
   filled in above it. */
1314 return expand_shift (RSHIFT_EXPR, mode, op0,
1315 build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1319 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1320 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1321 complement of that if COMPLEMENT. The mask is truncated if
1322 necessary to the width of mode MODE. */
/* Build the CONST_INT/CONST_DOUBLE mask described in the comment above:
   BITSIZE one-bits starting BITPOS bits up from the lsb, optionally
   complemented, truncated to MODE.  The value is computed as a
   low/high pair of HOST_WIDE_INTs for immed_double_const.
   NOTE(review): gaps in the embedded line numbers mark lines missing
   from this copy (e.g. the `else' arms zeroing/filling MASKLOW and
   MASKHIGH, and the complement of MASKLOW). */
1325 mask_rtx (mode, bitpos, bitsize, complement)
1326 enum machine_mode mode;
1327 int bitpos, bitsize, complement;
1329 HOST_WIDE_INT masklow, maskhigh;
/* Low word: all ones from bit BITPOS upward... */
1331 if (bitpos < HOST_BITS_PER_WIDE_INT)
1332 masklow = (HOST_WIDE_INT) -1 << bitpos;
/* ...then clear everything at or above BITPOS + BITSIZE. */
1336 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1337 masklow &= ((unsigned HOST_WIDE_INT) -1
1338 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
/* High word: if the field starts within the low word, the high word
   begins all-ones (the `then' arm is among the missing lines); otherwise
   ones start at bit BITPOS - HOST_BITS_PER_WIDE_INT. */
1340 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1343 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
/* Clear high-word bits beyond the top of the field. */
1345 if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1346 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1347 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
/* COMPLEMENT case: invert (the guard and MASKLOW inversion are among the
   missing lines). */
1353 maskhigh = ~maskhigh;
1357 return immed_double_const (masklow, maskhigh, mode);
1360 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1361 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
/* Build a double-word constant equal to INTVAL (VALUE), truncated to
   BITSIZE bits, shifted left by BITPOS bits, as a low/high pair for
   immed_double_const.
   NOTE(review): some lines are missing from this copy (the VALUE
   parameter declaration and the assignments of LOW in both branches). */
1364 lshift_value (mode, value, bitpos, bitsize)
1365 enum machine_mode mode;
1367 int bitpos, bitsize;
1369 unsigned HOST_WIDE_INT v = INTVAL (value);
1370 HOST_WIDE_INT low, high;
/* Truncate V to its low BITSIZE bits. */
1372 if (bitsize < HOST_BITS_PER_WIDE_INT)
1373 v &= ~((HOST_WIDE_INT) -1 << bitsize);
/* Shift entirely or partly within the low word: the high word receives
   the bits of V that overflow past the low word's top. */
1375 if (bitpos < HOST_BITS_PER_WIDE_INT)
1378 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
/* Shift of a full low-word or more: everything lands in the high word. */
1383 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1386 return immed_double_const (low, high, mode);
1389 /* Extract a bit field that is split across two words
1390 and return an RTX for the result.
1392 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1393 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1394 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
/* Extract a field crossing exactly one word boundary: fetch each half
   with extract_fixed_bit_field as an unsigned field, shift the more
   significant half into place, OR them together, and sign-extend at the
   end if !UNSIGNEDP.
   NOTE(review): gaps in the embedded line numbers mark lines missing
   from this copy (the OP0 declaration, `word' declaration, abort on the
   span check, and some call arguments). */
1397 extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
1399 int bitsize, bitpos, unsignedp, align;
1401 /* BITSIZE_1 is size of the part in the first word. */
1402 int bitsize_1 = BITS_PER_WORD - bitpos % BITS_PER_WORD;
1403 /* BITSIZE_2 is size of the rest (in the following word). */
1404 int bitsize_2 = bitsize - bitsize_1;
1405 rtx part1, part2, result;
/* For MEM, offsets count in bytes; for registers, in words. */
1406 int unit = GET_CODE (op0) == MEM ? BITS_PER_UNIT : BITS_PER_WORD;
1407 int offset = bitpos / unit;
1410 /* The field must span exactly one word boundary. */
1411 if (bitpos / BITS_PER_WORD != (bitpos + bitsize - 1) / BITS_PER_WORD - 1)
1414 /* Get the part of the bit field from the first word. If OP0 is a MEM,
1415 pass OP0 and the offset computed above. Otherwise, get the proper
1416 word and pass an offset of zero. */
1417 word = (GET_CODE (op0) == MEM ? op0
1418 : operand_subword_force (op0, offset, GET_MODE (op0)));
1419 part1 = extract_fixed_bit_field (word_mode, word,
1420 GET_CODE (op0) == MEM ? offset : 0,
1421 bitsize_1, bitpos % unit, NULL_RTX,
/* NOTE(review): trailing arguments of this call (original lines
   1422-1423) are missing here. */
1424 /* Offset op0 by 1 word to get to the following one. */
1425 if (GET_CODE (op0) == SUBREG)
1426 word = operand_subword_force (SUBREG_REG (op0),
1427 SUBREG_WORD (op0) + offset + 1, VOIDmode);
1428 else if (GET_CODE (op0) == MEM)
1431 word = operand_subword_force (op0, offset + 1, GET_MODE (op0));
1433 /* Get the part of the bit field from the second word. */
1434 part2 = extract_fixed_bit_field (word_mode, word,
1435 (GET_CODE (op0) == MEM
1436 ? CEIL (offset + 1, UNITS_PER_WORD) * UNITS_PER_WORD
1438 bitsize_2, 0, NULL_RTX, 1, align);
1440 /* Shift the more significant part up to fit above the other part. */
1441 #if BYTES_BIG_ENDIAN
1442 part1 = expand_shift (LSHIFT_EXPR, word_mode, part1,
1443 build_int_2 (bitsize_2, 0), 0, 1);
1445 part2 = expand_shift (LSHIFT_EXPR, word_mode, part2,
1446 build_int_2 (bitsize_1, 0), 0, 1);
1449 /* Combine the two parts with bitwise or. This works
1450 because we extracted both parts as unsigned bit fields. */
1451 result = expand_binop (word_mode, ior_optab, part1, part2, NULL_RTX, 1,
1454 /* Unsigned bit field: we are done. */
1457 /* Signed bit field: sign-extend with two arithmetic shifts. */
1458 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1459 build_int_2 (BITS_PER_WORD - bitsize, 0),
1461 return expand_shift (RSHIFT_EXPR, word_mode, result,
1462 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
1465 /* Add INC into TARGET. */
/* Emit insns computing TARGET + INC into TARGET, via expand_binop with
   OPTAB_LIB_WIDEN (so a library call may be used); copy the result back
   if expand_binop placed it somewhere other than TARGET. */
1468 expand_inc (target, inc)
/* NOTE(review): the K&R parameter declarations and opening brace are
   missing from this copy. */
1471 rtx value = expand_binop (GET_MODE (target), add_optab,
/* NOTE(review): original line 1472 -- presumably `target, inc,' -- is
   absent here; confirm against the complete source. */
1473 target, 0, OPTAB_LIB_WIDEN);
1474 if (value != target)
1475 emit_move_insn (target, value);
1478 /* Subtract DEC from TARGET. */
/* Emit insns computing TARGET - DEC into TARGET; mirror image of
   expand_inc using sub_optab. */
1481 expand_dec (target, dec)
/* NOTE(review): the K&R parameter declarations and opening brace are
   missing from this copy. */
1484 rtx value = expand_binop (GET_MODE (target), sub_optab,
/* NOTE(review): original line 1485 -- presumably `target, dec,' -- is
   absent here; confirm against the complete source. */
1486 target, 0, OPTAB_LIB_WIDEN);
1487 if (value != target)
1488 emit_move_insn (target, value);
1491 /* Output a shift instruction for expression code CODE,
1492 with SHIFTED being the rtx for the value to shift,
1493 and AMOUNT the tree for the amount to shift by.
1494 Store the result in the rtx TARGET, if that is convenient.
1495 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1496 Return the rtx for where the value is. */
/* Emit a shift or rotate of SHIFTED by AMOUNT (a tree), per CODE.
   Strategy: three passes of increasing desperation (OPTAB_DIRECT,
   OPTAB_WIDEN, OPTAB_LIB_WIDEN), trying rotate/logical/arithmetic
   optabs as appropriate, with a final HAVE_extzv fallback that does a
   logical right shift as a zero-extending bit-field extract.
   NOTE(review): this copy is fragmentary; gaps in the embedded line
   numbers mark missing lines (declarations of `try', `pat', `xop1',
   `width', `target1', several braces, and the final return). */
1499 expand_shift (code, mode, shifted, amount, target, unsignedp)
1500 enum tree_code code;
1501 register enum machine_mode mode;
1504 register rtx target;
1507 register rtx op1, temp = 0;
/* Direction and rotation flags decoded once from CODE. */
1508 register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1509 register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1512 /* Previously detected shift-counts computed by NEGATE_EXPR
1513 and shifted in the other direction; but that does not work
1516 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
/* Shift by zero is a no-op (the body of this test is among the missing
   lines). */
1518 if (op1 == const0_rtx)
/* Three attempts: direct insn, widened insn, then library call. */
1521 for (try = 0; temp == 0 && try < 3; try++)
1523 enum optab_methods methods;
1526 methods = OPTAB_DIRECT;
1528 methods = OPTAB_WIDEN;
1530 methods = OPTAB_LIB_WIDEN;
1534 /* Widening does not work for rotation. */
1535 if (methods == OPTAB_WIDEN)
1537 else if (methods == OPTAB_LIB_WIDEN)
1539 /* If we are rotating by a constant that is valid and
1540 we have been unable to open-code this by a rotation,
1541 do it as the IOR of two shifts. I.e., to rotate A
1542 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1543 where C is the bitsize of A.
1545 It is theoretically possible that the target machine might
1546 not be able to perform either shift and hence we would
1547 be making two libcalls rather than just the one for the
1548 shift (similarly if IOR could not be done). We will allow
1549 this extremely unlikely lossage to avoid complicating the
1552 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) > 0
1553 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
1555 rtx subtarget = target == shifted ? 0 : target;
/* other_amount = bitsize - INTVAL (op1); its declaration line is
   missing from this copy. */
1558 = build_int_2 (GET_MODE_BITSIZE (mode) - INTVAL (op1), 0);
1560 shifted = force_reg (mode, shifted);
1562 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1563 mode, shifted, amount, subtarget, 1);
1564 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1565 mode, shifted, other_amount, 0, 1);
1566 return expand_binop (mode, ior_optab, temp, temp1, target,
1567 unsignedp, methods);
1570 methods = OPTAB_LIB;
/* First choice for a rotate: the matching rotate optab. */
1573 temp = expand_binop (mode,
1574 left ? rotl_optab : rotr_optab,
1575 shifted, op1, target, unsignedp, methods);
1577 /* If we don't have the rotate, but we are rotating by a constant
1578 that is in range, try a rotate in the opposite direction. */
1580 if (temp == 0 && GET_CODE (op1) == CONST_INT
1581 && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
1582 temp = expand_binop (mode,
1583 left ? rotr_optab : rotl_optab,
/* Opposite-direction count: bitsize - INTVAL (op1); part of this
   argument list is among the missing lines. */
1585 GEN_INT (GET_MODE_BITSIZE (mode)
1587 target, unsignedp, methods);
/* Logical shift path (non-rotate, unsigned). */
1591 temp = expand_binop (mode,
1592 left ? lshl_optab : lshr_optab,
1593 shifted, op1, target, unsignedp, methods);
/* A left logical shift is the same as a left arithmetic shift. */
1594 if (temp == 0 && left)
1595 temp = expand_binop (mode, ashl_optab,
1596 shifted, op1, target, unsignedp, methods);
1599 /* Do arithmetic shifts.
1600 Also, if we are going to widen the operand, we can just as well
1601 use an arithmetic right-shift instead of a logical one. */
1602 if (temp == 0 && ! rotate
1603 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
1605 enum optab_methods methods1 = methods;
1607 /* If trying to widen a log shift to an arithmetic shift,
1608 don't accept an arithmetic shift of the same size. */
1610 methods1 = OPTAB_MUST_WIDEN;
1612 /* Arithmetic shift */
1614 temp = expand_binop (mode,
1615 left ? ashl_optab : ashr_optab,
1616 shifted, op1, target, unsignedp, methods1);
1620 /* We can do a logical (unsigned) right shift with a bit-field
1621 extract insn. But first check if one of the above methods worked. */
1625 if (unsignedp && code == RSHIFT_EXPR && ! BITS_BIG_ENDIAN && HAVE_extzv)
1627 enum machine_mode output_mode
1628 = insn_operand_mode[(int) CODE_FOR_extzv][0];
1630 if ((methods == OPTAB_DIRECT && mode == output_mode)
1631 || (methods == OPTAB_WIDEN
1632 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (output_mode)))
1634 rtx shifted1 = convert_to_mode (output_mode,
1635 protect_from_queue (shifted, 0),
1637 enum machine_mode length_mode
1638 = insn_operand_mode[(int) CODE_FOR_extzv][2];
1639 enum machine_mode pos_mode
1640 = insn_operand_mode[(int) CODE_FOR_extzv][3];
/* Remember insn position so a failed gen_extzv can be undone. */
1642 rtx last = get_last_insn ();
1648 target1 = protect_from_queue (target, 1);
1650 /* We define extract insns as having OUTPUT_MODE in a register
1651 and the mode of operand 1 in memory. Since we want
1652 OUTPUT_MODE, we will always force the operand into a
1653 register. At some point we might want to support MEM
1655 shifted1 = force_reg (output_mode, shifted1);
1657 /* If we don't have or cannot use a suggested target,
1658 make a place for the result, in the proper mode. */
1659 if (methods == OPTAB_WIDEN || target1 == 0
1660 || ! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
1661 (target1, output_mode)))
1662 target1 = gen_reg_rtx (output_mode);
1664 xop1 = protect_from_queue (xop1, 0);
1665 xop1 = convert_to_mode (pos_mode, xop1,
1666 TREE_UNSIGNED (TREE_TYPE (amount)));
1668 /* If this machine's extzv insists on a register for
1669 operand 3 (position), arrange for that. */
1670 if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][3])
1672 xop1 = force_reg (pos_mode, xop1);
1674 /* WIDTH gets the width of the bit field to extract:
1675 wordsize minus # bits to shift by. */
1676 if (GET_CODE (xop1) == CONST_INT)
1677 width = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
1680 /* Now get the width in the proper mode. */
1681 op1 = protect_from_queue (op1, 0);
1682 width = convert_to_mode (length_mode, op1,
1683 TREE_UNSIGNED (TREE_TYPE (amount)));
/* Non-constant count: compute bitsize - count at run time. */
1685 width = expand_binop (length_mode, sub_optab,
1686 GEN_INT (GET_MODE_BITSIZE (mode)),
1687 width, NULL_RTX, 0, OPTAB_LIB_WIDEN);
1690 /* If this machine's extzv insists on a register for
1691 operand 2 (length), arrange for that. */
1692 if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][2])
1693 (width, length_mode)))
1694 width = force_reg (length_mode, width);
1696 /* Now extract with WIDTH, omitting OP1 least sig bits. */
1697 pat = gen_extzv (target1, shifted1, width, xop1);
1701 temp = convert_to_mode (mode, target1, 1);
/* gen_extzv failed: discard any insns emitted while trying. */
1704 delete_insns_since (last);
1707 /* Can also do logical shift with signed bit-field extract
1708 followed by inserting the bit-field at a different position.
1709 That strategy is not yet implemented. */
1711 #endif /* HAVE_extzv */
/* Operation codes recorded by synth_mult; their meanings are spelled out
   in the `struct algorithm' comment that follows. */
1719 enum alg_code { alg_add, alg_subtract, alg_compound };
1721 /* This structure records a sequence of operations.
1722 `ops' is the number of operations recorded.
1723 `cost' is their total cost.
1724 The operations are stored in `op' and the corresponding
1725 integer coefficients in `coeff'.
1726 These are the operations:
1727 alg_add Add to the total the multiplicand times the coefficient.
1728 alg_subtract Subtract the multiplicand times the coefficient.
1729 alg_compound This coefficient plus or minus the following one
1730 is multiplied into the total. The following operation
1731 is alg_add or alg_subtract to indicate whether to add
1732 or subtract the two coefficients. */
1734 #ifndef MAX_BITS_PER_WORD
1735 #define MAX_BITS_PER_WORD BITS_PER_WORD
1742 enum alg_code op
[MAX_BITS_PER_WORD
];
1743 unsigned HOST_WIDE_INT coeff
[MAX_BITS_PER_WORD
];
1746 /* Compute and return the best algorithm for multiplying by T.
1747 Assume that add insns cost ADD_COST and shifts cost SHIFT_COST.
1748 Return cost -1 if would cost more than MAX_COST. */
/* Search recursively for the cheapest add/subtract/shift sequence that
   multiplies by T, given per-operation costs; a result with cost -1
   means "no sequence within MAX_COST".  Four strategies are tried:
   power-of-two shift, factoring out 2**m +- 1, lea-style a*3/a*5/a*9,
   and clearing/setting the leftmost run of one bits.
   NOTE(review): this copy is fragmentary; gaps in the embedded line
   numbers mark missing lines (declarations of m/q/w/d/cost, loop
   braces, the best_alg/alg_in pointer swaps, early returns, and the
   final `return *best_alg'). */
1750 static struct algorithm
1751 synth_mult (t, add_cost, shift_cost, max_cost)
1752 unsigned HOST_WIDE_INT t;
1753 int add_cost, shift_cost;
/* Scratch algorithms live on the stack via alloca; the winner is copied
   out by value on return. */
1757 struct algorithm *best_alg
1758 = (struct algorithm *)alloca (sizeof (struct algorithm));
1759 struct algorithm *alg_in
1760 = (struct algorithm *)alloca (sizeof (struct algorithm));
1763 /* No matter what happens, we want to return a valid algorithm. */
1764 best_alg->cost = max_cost;
1767 /* Is t an exponent of 2, so we can just do a shift? */
1773 if (max_cost >= shift_cost)
1775 best_alg->cost = shift_cost;
/* A single alg_add of coefficient T encodes "shift by log2(T)". */
1777 best_alg->op[0] = alg_add;
1778 best_alg->coeff[0] = t;
1781 best_alg->cost = -1;
1794 /* If MAX_COST just permits as little as an addition (or less), we won't
1795 succeed in synthesizing an algorithm for t. Return immediately with
1796 an indication of failure. */
1797 if (max_cost <= add_cost)
1799 best_alg->cost = -1;
1803 /* Look for factors of t of the form
1804 t = q(2**m +- 1), 2 <= m <= floor(log2(t)) - 1.
1805 If we find such a factor, we can multiply by t using an algorithm that
1806 multiplies by q, shift the result by m and add/subtract it to itself. */
1808 for (m = floor_log2 (t) - 1; m >= 2; m--)
1810 HOST_WIDE_INT m_exp_2 = (HOST_WIDE_INT) 1 << m;
/* Case d = 2**m + 1 divides t (the divisibility test line is missing
   from this copy). */
1816 HOST_WIDE_INT q = t / d;
1818 cost = add_cost + shift_cost * 2;
1820 *alg_in = synth_mult (q, add_cost, shift_cost,
1821 MIN (max_cost, best_alg->cost) - cost);
1823 if (alg_in->cost >= 0)
1825 cost += alg_in->cost;
1827 if (cost < best_alg->cost)
1829 struct algorithm *x;
/* Record: factor (2**m + 1) via alg_compound followed by alg_add. */
1833 best_alg->coeff[best_alg->ops] = m_exp_2;
1834 best_alg->op[best_alg->ops++] = alg_compound;
1835 best_alg->coeff[best_alg->ops] = 1;
1836 best_alg->op[best_alg->ops++] = alg_add;
1837 best_alg->cost = cost;
/* Case d = 2**m - 1 divides t (guard line missing from this copy). */
1845 HOST_WIDE_INT q = t / d;
1847 cost = add_cost + shift_cost * 2;
1849 *alg_in = synth_mult (q, add_cost, shift_cost,
1850 MIN (max_cost, best_alg->cost) - cost);
1852 if (alg_in->cost >= 0)
1854 cost += alg_in->cost;
1856 if (cost < best_alg->cost)
1858 struct algorithm *x;
/* Record: factor (2**m - 1) via alg_compound followed by alg_subtract. */
1862 best_alg->coeff[best_alg->ops] = m_exp_2;
1863 best_alg->op[best_alg->ops++] = alg_compound;
1864 best_alg->coeff[best_alg->ops] = 1;
1865 best_alg->op[best_alg->ops++] = alg_subtract;
1866 best_alg->cost = cost;
1872 /* Try load effective address instructions, i.e. do a*3, a*5, a*9. */
1878 q = t & -t; /* get out lsb */
1879 w = (t - q) & -(t - q); /* get out next lsb */
1881 if (w / q <= lea_max_mul)
1883 cost = lea_cost + (q != 1 ? shift_cost : 0);
1885 *alg_in = synth_mult (t - q - w, add_cost, shift_cost,
1886 MIN (max_cost, best_alg->cost) - cost);
1888 if (alg_in->cost >= 0)
1890 cost += alg_in->cost;
1892 /* Use <= to prefer this method to the factoring method
1893 when the cost appears the same, because this method
1894 uses fewer temporary registers. */
1895 if (cost <= best_alg->cost)
1897 struct algorithm *x;
/* Record: add the two lowest set bits as one lea-style step each. */
1901 best_alg->coeff[best_alg->ops] = w;
1902 best_alg->op[best_alg->ops++] = alg_add;
1903 best_alg->coeff[best_alg->ops] = q;
1904 best_alg->op[best_alg->ops++] = alg_add;
1905 best_alg->cost = cost;
1911 /* Now, use the good old method to add or subtract at the leftmost
1918 q = t & -t; /* get out lsb */
/* Walk W up through the contiguous run of one bits starting at the lsb. */
1919 for (w = q; (w & t) != 0; w <<= 1)
1922 /* Reject the case where t has only two bits.
1923 Thus we prefer addition in that case. */
1924 && !(t < w && w == q << 2))
1926 /* There are many bits in a row. Make 'em by subtraction. */
/* Recurse on t + q: adding the lsb turns the run of ones into a single
   higher bit, which is then subtracted back out. */
1932 *alg_in = synth_mult (t + q, add_cost, shift_cost,
1933 MIN (max_cost, best_alg->cost) - cost);
1935 if (alg_in->cost >= 0)
1937 cost += alg_in->cost;
1939 /* Use <= to prefer this method to the factoring method
1940 when the cost appears the same, because this method
1941 uses fewer temporary registers. */
1942 if (cost <= best_alg->cost)
1944 struct algorithm *x;
1948 best_alg->coeff[best_alg->ops] = q;
1949 best_alg->op[best_alg->ops++] = alg_subtract;
1950 best_alg->cost = cost;
1956 /* There's only one bit at the left. Make it by addition. */
1962 *alg_in = synth_mult (t - q, add_cost, shift_cost,
1963 MIN (max_cost, best_alg->cost) - cost);
1965 if (alg_in->cost >= 0)
1967 cost += alg_in->cost;
1969 if (cost <= best_alg->cost)
1971 struct algorithm *x;
1975 best_alg->coeff[best_alg->ops] = q;
1976 best_alg->op[best_alg->ops++] = alg_add;
1977 best_alg->cost = cost;
/* Normalize: a result that is not strictly cheaper than MAX_COST is a
   failure. */
1983 if (best_alg->cost >= max_cost)
1984 best_alg->cost = -1;
1988 /* Perform a multiplication and return an rtx for the result.
1989 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
1990 TARGET is a suggestion for where to store the result (an rtx).
1992 We check specially for a constant integer as OP1.
1993 If you want this check for OP0 as well, then before calling
1994 you should swap the two operands if OP0 would be constant. */
/* Expand MODE multiply of OP0 by OP1 into TARGET (a suggestion only).
   For a constant OP1 (and !mult_is_very_cheap) a shift/add sequence from
   synth_mult is tried for both OP1 and -OP1; otherwise fall through to a
   plain smul_optab expand_binop.
   NOTE(review): this copy is fragmentary; gaps in the embedded line
   numbers mark missing lines (declarations of accum/tem/opno/negate,
   several braces/else arms, case labels of the switch, and the final
   return of op0). */
1997 expand_mult (mode, op0, op1, target, unsignedp)
1998 enum machine_mode mode;
1999 register rtx op0, op1, target;
2002 rtx const_op1 = op1;
2004 /* If we are multiplying in DImode, it may still be a win
2005 to try to work with shifts and adds. */
2006 if (GET_CODE (op1) == CONST_DOUBLE
2007 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2008 && HOST_BITS_PER_INT <= BITS_PER_WORD)
/* A CONST_DOUBLE whose value fits in one HOST_WIDE_INT is narrowed to a
   CONST_INT so the synth_mult path below applies. */
2010 if ((CONST_DOUBLE_HIGH (op1) == 0 && CONST_DOUBLE_LOW (op1) >= 0)
2011 || (CONST_DOUBLE_HIGH (op1) == -1 && CONST_DOUBLE_LOW (op1) < 0))
2012 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2015 /* We used to test optimize here, on the grounds that it's better to
2016 produce a smaller program when -O is not used.
2017 But this causes such a terrible slowdown sometimes
2018 that it seems better to use synth_mult always. */
2019 if (GET_CODE (const_op1) == CONST_INT && ! mult_is_very_cheap)
2021 struct algorithm alg;
2022 struct algorithm neg_alg;
2024 HOST_WIDE_INT absval = INTVAL (op1);
2027 /* Try to do the computation two ways: multiply by the negative of OP1
2028 and then negate, or do the multiplication directly. The latter is
2029 usually faster for positive numbers and the former for negative
2030 numbers, but the opposite can be faster if the original value
2031 has a factor of 2**m +/- 1, while the negated value does not or
2034 alg = synth_mult (absval, add_cost, shift_cost, mult_cost);
2035 neg_alg = synth_mult (- absval, add_cost, shift_cost,
2036 (alg.cost >= 0 ? alg.cost : mult_cost)
/* Negated plan wins only if its cost plus a final negation beats the
   direct plan. */
2039 if (neg_alg.cost >= 0 && neg_alg.cost + negate_cost < alg.cost)
2040 alg = neg_alg, negate = 1, absval = - absval;
2044 /* If we found something, it must be cheaper than multiply.
2048 int factors_seen = 0;
2050 op0 = protect_from_queue (op0, 0);
2052 /* Avoid referencing memory over and over.
2053 For speed, but also for correctness when mem is volatile. */
2054 if (GET_CODE (op0) == MEM)
2055 op0 = force_reg (mode, op0);
/* First step seeds ACCUM; alg_add starts from a shifted copy of op0. */
2058 accum = copy_to_mode_reg (mode, op0);
2061 /* 1 if this is the last in a series of adds and subtracts. */
2062 int last = (1 == alg.ops || alg.op[1] == alg_compound);
2063 int log = floor_log2 (alg.coeff[0]);
/* Use relative shift counts within a run of adds/subtracts (see the
   factoring comment below). */
2064 if (! factors_seen && ! last)
2065 log -= floor_log2 (alg.coeff[1]);
2067 if (alg.op[0] != alg_add)
2069 accum = expand_shift (LSHIFT_EXPR, mode, op0,
2070 build_int_2 (log, 0), NULL_RTX, 0);
/* Apply the remaining recorded operations in order. */
2073 while (++opno < alg.ops)
2075 int log = floor_log2 (alg.coeff[opno]);
2076 /* 1 if this is the last in a series of adds and subtracts. */
2077 int last = (opno + 1 == alg.ops
2078 || alg.op[opno + 1] == alg_compound);
2080 /* If we have not yet seen any separate factors (alg_compound)
2081 then turn op0<<a1 + op0<<a2 + op0<<a3... into
2082 (op0<<(a1-a2) + op0)<<(a2-a3) + op0... */
2083 switch (alg.op[opno])
/* alg_add, absolute-shift form: accum += op0 << log. */
2088 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2089 build_int_2 (log, 0), NULL_RTX, 0);
2090 accum = force_operand (gen_rtx (PLUS, mode, accum, tem),
/* alg_add, relative-shift form: (accum + op0) << (log delta). */
2096 log -= floor_log2 (alg.coeff[opno + 1]);
2097 accum = force_operand (gen_rtx (PLUS, mode, accum, op0),
2099 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2100 build_int_2 (log, 0), accum, 0);
/* alg_subtract, absolute-shift form: accum -= op0 << log. */
2107 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2108 build_int_2 (log, 0), NULL_RTX, 0);
2109 accum = force_operand (gen_rtx (MINUS, mode, accum, tem),
/* alg_subtract, relative-shift form. */
2115 log -= floor_log2 (alg.coeff[opno + 1]);
2116 accum = force_operand (gen_rtx (MINUS, mode, accum, op0),
2118 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2119 build_int_2 (log, 0), accum, 0);
/* alg_compound: multiply accum by (2**a +- 1) -- shift a copy, shift by
   the partner coefficient, then add or subtract per the next op. */
2126 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2127 build_int_2 (log, 0), NULL_RTX, 0);
2129 log = floor_log2 (alg.coeff[opno + 1]);
2130 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2131 build_int_2 (log, 0), NULL_RTX, 0);
2133 if (alg.op[opno] == alg_add)
2134 accum = force_operand (gen_rtx (PLUS, mode, tem, accum),
2137 accum = force_operand (gen_rtx (MINUS, mode, tem, accum),
2142 /* Write a REG_EQUAL note on the last insn so that we can cse
2143 multiplication sequences. We need not do this if we were
2144 multiplying by a power of two, since only one insn would have
2147 ??? We could also write REG_EQUAL notes on the last insn of
2148 each sequence that uses a single temporary, but it is not
2149 clear how to calculate the partial product so far.
2151 Torbjorn: Can you do this? */
2153 if (exact_log2 (absval) < 0)
2155 last = get_last_insn ();
2157 = gen_rtx (EXPR_LIST, REG_EQUAL,
2158 gen_rtx (MULT, mode, op0,
2159 negate ? GEN_INT (absval) : op1),
/* If the negated plan was chosen, undo the negation at the end. */
2163 return (negate ? expand_unop (mode, neg_optab, accum, target, 0)
2168 /* This used to use umul_optab if unsigned,
2169 but I think that for non-widening multiply there is no difference
2170 between signed and unsigned. */
2171 op0 = expand_binop (mode, smul_optab,
2172 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2178 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2179 if that is convenient, and returning where the result is.
2180 You may request either the quotient or the remainder as the result;
2181 specify REM_FLAG nonzero to get the remainder.
2183 CODE is the expression code for which kind of division this is;
2184 it controls how rounding is done. MODE is the machine mode to use.
2185 UNSIGNEDP nonzero means do unsigned division. */
2187 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2188 and then correct it by or'ing in missing high bits
2189 if result of ANDI is nonzero.
2190 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2191 This could optimize to a bfexts instruction.
2192 But C doesn't use these operations, so their optimizations are
2196 expand_divmod (rem_flag
, code
, mode
, op0
, op1
, target
, unsignedp
)
2198 enum tree_code code
;
2199 enum machine_mode mode
;
2200 register rtx op0
, op1
, target
;
2203 register rtx result
= 0;
2204 enum machine_mode compute_mode
;
2207 int can_clobber_op0
;
2208 int mod_insn_no_good
= 0;
2209 rtx adjusted_op0
= op0
;
2210 optab optab1
, optab2
;
2212 /* We shouldn't be called with op1 == const1_rtx, but some of the
2213 code below will malfunction if we are, so check here and handle
2214 the special case if so. */
2215 if (op1
== const1_rtx
)
2216 return rem_flag
? const0_rtx
: op0
;
2218 /* Don't use the function value register as a target
2219 since we have to read it as well as write it,
2220 and function-inlining gets confused by this. */
2221 if (target
&& REG_P (target
) && REG_FUNCTION_VALUE_P (target
))
2224 /* Don't clobber an operand while doing a multi-step calculation. */
2226 if ((rem_flag
&& (reg_mentioned_p (target
, op0
)
2227 || (GET_CODE (op0
) == MEM
&& GET_CODE (target
) == MEM
)))
2228 || reg_mentioned_p (target
, op1
)
2229 || (GET_CODE (op1
) == MEM
&& GET_CODE (target
) == MEM
))
2232 can_clobber_op0
= (GET_CODE (op0
) == REG
&& op0
== target
);
2234 if (GET_CODE (op1
) == CONST_INT
)
2235 log
= exact_log2 (INTVAL (op1
));
2237 /* If log is >= 0, we are dividing by 2**log, and will do it by shifting,
2238 which is really floor-division. Otherwise we will really do a divide,
2239 and we assume that is trunc-division.
2241 We must correct the dividend by adding or subtracting something
2242 based on the divisor, in order to do the kind of rounding specified
2243 by CODE. The correction depends on what kind of rounding is actually
2244 available, and that depends on whether we will shift or divide.
2246 In many of these cases it is possible to perform the operation by a
2247 clever series of logical operations (shifts and/or exclusive-ors).
2248 Although avoiding the jump has the advantage that it extends the basic
2249 block and allows further optimization, the branch-free code is normally
2250 at least one instruction longer in the (most common) case where the
2251 dividend is non-negative. Performance measurements of the two
2252 alternatives show that the branch-free code is slightly faster on the
2253 IBM ROMP but slower on CISC processors (significantly slower on the
2254 VAX). Accordingly, the jump code has been retained.
2256 On machines where the jump code is slower, the cost of a DIV or MOD
2257 operation can be set small (less than twice that of an addition); in
2258 that case, we pretend that we don't have a power of two and perform
2259 a normal division or modulus operation. */
2261 if ((code
== TRUNC_MOD_EXPR
|| code
== TRUNC_DIV_EXPR
)
2263 && (rem_flag
? smod_pow2_cheap
: sdiv_pow2_cheap
))
2266 /* Get the mode in which to perform this computation. Normally it will
2267 be MODE, but sometimes we can't do the desired operation in MODE.
2268 If so, pick a wider mode in which we can do the operation. Convert
2269 to that mode at the start to avoid repeated conversions.
2271 First see what operations we need. These depend on the expression
2272 we are evaluating. (We assume that divxx3 insns exist under the
2273 same conditions that modxx3 insns and that these insns don't normally
2274 fail. If these assumptions are not correct, we may generate less
2275 efficient code in some cases.)
2277 Then see if we find a mode in which we can open-code that operation
2278 (either a division, modulus, or shift). Finally, check for the smallest
2279 mode for which we can do the operation with a library call. */
2281 optab1
= (log
>= 0 ? (unsignedp
? lshr_optab
: ashr_optab
)
2282 : (unsignedp
? udiv_optab
: sdiv_optab
));
2283 optab2
= (log
>= 0 ? optab1
: (unsignedp
? udivmod_optab
: sdivmod_optab
));
2285 for (compute_mode
= mode
; compute_mode
!= VOIDmode
;
2286 compute_mode
= GET_MODE_WIDER_MODE (compute_mode
))
2287 if (optab1
->handlers
[(int) compute_mode
].insn_code
!= CODE_FOR_nothing
2288 || optab2
->handlers
[(int) compute_mode
].insn_code
!= CODE_FOR_nothing
)
2291 if (compute_mode
== VOIDmode
)
2292 for (compute_mode
= mode
; compute_mode
!= VOIDmode
;
2293 compute_mode
= GET_MODE_WIDER_MODE (compute_mode
))
2294 if (optab1
->handlers
[(int) compute_mode
].libfunc
2295 || optab2
->handlers
[(int) compute_mode
].libfunc
)
2298 /* If we still couldn't find a mode, use MODE; we'll probably abort in
2300 if (compute_mode
== VOIDmode
)
2301 compute_mode
= mode
;
2303 size
= GET_MODE_BITSIZE (compute_mode
);
2305 /* Now convert to the best mode to use. Show we made a copy of OP0
2306 and hence we can clobber it (we cannot use a SUBREG to widen
2308 if (compute_mode
!= mode
)
2310 adjusted_op0
= op0
= convert_to_mode (compute_mode
, op0
, unsignedp
);
2311 can_clobber_op0
= 1;
2312 op1
= convert_to_mode (compute_mode
, op1
, unsignedp
);
2315 /* If we are computing the remainder and one of the operands is a volatile
2316 MEM, copy it into a register. */
2318 if (rem_flag
&& GET_CODE (op0
) == MEM
&& MEM_VOLATILE_P (op0
))
2319 adjusted_op0
= op0
= force_reg (compute_mode
, op0
), can_clobber_op0
= 1;
2320 if (rem_flag
&& GET_CODE (op1
) == MEM
&& MEM_VOLATILE_P (op1
))
2321 op1
= force_reg (compute_mode
, op1
);
2323 /* If we are computing the remainder, op0 will be needed later to calculate
2324 X - Y * (X / Y), therefore cannot be clobbered. */
2326 can_clobber_op0
= 0;
2328 if (target
== 0 || GET_MODE (target
) != compute_mode
)
2329 target
= gen_reg_rtx (compute_mode
);
2333 case TRUNC_MOD_EXPR
:
2334 case TRUNC_DIV_EXPR
:
2335 if (log
>= 0 && ! unsignedp
)
2337 if (! can_clobber_op0
)
2339 adjusted_op0
= copy_to_suggested_reg (adjusted_op0
, target
,
2341 /* Copy op0 to a reg, since emit_cmp_insn will call emit_queue
2342 which will screw up mem refs for autoincrements. */
2343 op0
= force_reg (compute_mode
, op0
);
2345 /* Here we need to add OP1-1 if OP0 is negative, 0 otherwise.
2346 This can be computed without jumps by arithmetically shifting
2347 OP0 right LOG-1 places and then shifting right logically
2348 SIZE-LOG bits. The resulting value is unconditionally added
2350 if (log
== 1 || BRANCH_COST
>= 3)
2352 rtx temp
= gen_reg_rtx (compute_mode
);
2353 temp
= copy_to_suggested_reg (adjusted_op0
, temp
, compute_mode
);
2354 temp
= expand_shift (RSHIFT_EXPR
, compute_mode
, temp
,
2355 build_int_2 (log
- 1, 0), NULL_RTX
, 0);
2356 temp
= expand_shift (RSHIFT_EXPR
, compute_mode
, temp
,
2357 build_int_2 (size
- log
, 0),
2359 expand_inc (adjusted_op0
, temp
);
2363 rtx label
= gen_label_rtx ();
2364 emit_cmp_insn (adjusted_op0
, const0_rtx
, GE
,
2365 NULL_RTX
, compute_mode
, 0, 0);
2366 emit_jump_insn (gen_bge (label
));
2367 expand_inc (adjusted_op0
, plus_constant (op1
, -1));
2370 mod_insn_no_good
= 1;
2374 case FLOOR_DIV_EXPR
:
2375 case FLOOR_MOD_EXPR
:
2376 if (log
< 0 && ! unsignedp
)
2378 rtx label
= gen_label_rtx ();
2379 if (! can_clobber_op0
)
2381 adjusted_op0
= copy_to_suggested_reg (adjusted_op0
, target
,
2383 /* Copy op0 to a reg, since emit_cmp_insn will call emit_queue
2384 which will screw up mem refs for autoincrements. */
2385 op0
= force_reg (compute_mode
, op0
);
2387 emit_cmp_insn (adjusted_op0
, const0_rtx
, GE
,
2388 NULL_RTX
, compute_mode
, 0, 0);
2389 emit_jump_insn (gen_bge (label
));
2390 expand_dec (adjusted_op0
, op1
);
2391 expand_inc (adjusted_op0
, const1_rtx
);
2393 mod_insn_no_good
= 1;
2399 if (! can_clobber_op0
)
2401 adjusted_op0
= copy_to_suggested_reg (adjusted_op0
, target
,
2403 /* Copy op0 to a reg, since emit_cmp_insn will call emit_queue
2404 which will screw up mem refs for autoincrements. */
2405 op0
= force_reg (compute_mode
, op0
);
2412 label
= gen_label_rtx ();
2413 emit_cmp_insn (adjusted_op0
, const0_rtx
, LE
,
2414 NULL_RTX
, compute_mode
, 0, 0);
2415 emit_jump_insn (gen_ble (label
));
2417 expand_inc (adjusted_op0
, op1
);
2418 expand_dec (adjusted_op0
, const1_rtx
);
2424 adjusted_op0
= expand_binop (compute_mode
, add_optab
,
2425 adjusted_op0
, plus_constant (op1
, -1),
2426 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
2428 mod_insn_no_good
= 1;
2431 case ROUND_DIV_EXPR
:
2432 case ROUND_MOD_EXPR
:
2433 if (! can_clobber_op0
)
2435 adjusted_op0
= copy_to_suggested_reg (adjusted_op0
, target
,
2437 /* Copy op0 to a reg, since emit_cmp_insn will call emit_queue
2438 which will screw up mem refs for autoincrements. */
2439 op0
= force_reg (compute_mode
, op0
);
2443 op1
= expand_shift (RSHIFT_EXPR
, compute_mode
, op1
,
2444 integer_one_node
, NULL_RTX
, 0);
2447 if (BRANCH_COST
>= 2)
2449 /* Negate OP1 if OP0 < 0. Do this by computing a temporary
2450 that has all bits equal to the sign bit and exclusive
2451 or-ing it with OP1. */
2452 rtx temp
= gen_reg_rtx (compute_mode
);
2453 temp
= copy_to_suggested_reg (adjusted_op0
, temp
, compute_mode
);
2454 temp
= expand_shift (RSHIFT_EXPR
, compute_mode
, temp
,
2455 build_int_2 (size
- 1, 0),
2457 op1
= expand_binop (compute_mode
, xor_optab
, op1
, temp
, op1
,
2458 unsignedp
, OPTAB_LIB_WIDEN
);
2462 rtx label
= gen_label_rtx ();
2463 emit_cmp_insn (adjusted_op0
, const0_rtx
, GE
, NULL_RTX
,
2464 compute_mode
, 0, 0);
2465 emit_jump_insn (gen_bge (label
));
2466 expand_unop (compute_mode
, neg_optab
, op1
, op1
, 0);
2470 expand_inc (adjusted_op0
, op1
);
2474 op1
= GEN_INT (((HOST_WIDE_INT
) 1 << log
) / 2);
2475 expand_inc (adjusted_op0
, op1
);
2477 mod_insn_no_good
= 1;
2481 if (rem_flag
&& !mod_insn_no_good
)
2483 /* Try to produce the remainder directly */
2485 result
= expand_binop (compute_mode
, and_optab
, adjusted_op0
,
2486 GEN_INT (((HOST_WIDE_INT
) 1 << log
) - 1),
2487 target
, 1, OPTAB_LIB_WIDEN
);
2490 /* See if we can do remainder without a library call. */
2491 result
= sign_expand_binop (mode
, umod_optab
, smod_optab
,
2492 adjusted_op0
, op1
, target
,
2493 unsignedp
, OPTAB_WIDEN
);
2496 /* No luck there. Can we do remainder and divide at once
2497 without a library call? */
2498 result
= gen_reg_rtx (compute_mode
);
2499 if (! expand_twoval_binop (unsignedp
2500 ? udivmod_optab
: sdivmod_optab
,
2502 NULL_RTX
, result
, unsignedp
))
2509 return gen_lowpart (mode
, result
);
2511 /* Produce the quotient. */
2513 result
= expand_shift (RSHIFT_EXPR
, compute_mode
, adjusted_op0
,
2514 build_int_2 (log
, 0), target
, unsignedp
);
2515 else if (rem_flag
&& !mod_insn_no_good
)
2516 /* If producing quotient in order to subtract for remainder,
2517 and a remainder subroutine would be ok,
2518 don't use a divide subroutine. */
2519 result
= sign_expand_binop (compute_mode
, udiv_optab
, sdiv_optab
,
2520 adjusted_op0
, op1
, NULL_RTX
, unsignedp
,
2524 /* Try a quotient insn, but not a library call. */
2525 result
= sign_expand_binop (compute_mode
, udiv_optab
, sdiv_optab
,
2527 rem_flag
? NULL_RTX
: target
,
2528 unsignedp
, OPTAB_WIDEN
);
2531 /* No luck there. Try a quotient-and-remainder insn,
2532 keeping the quotient alone. */
2533 result
= gen_reg_rtx (mode
);
2534 if (! expand_twoval_binop (unsignedp
? udivmod_optab
: sdivmod_optab
,
2536 result
, NULL_RTX
, unsignedp
))
2540 /* If still no luck, use a library call. */
2542 result
= sign_expand_binop (compute_mode
, udiv_optab
, sdiv_optab
,
2544 rem_flag
? NULL_RTX
: target
,
2545 unsignedp
, OPTAB_LIB_WIDEN
);
2548 /* If we really want the remainder, get it by subtraction. */
2552 /* No divide instruction either. Use library for remainder. */
2553 result
= sign_expand_binop (compute_mode
, umod_optab
, smod_optab
,
2555 unsignedp
, OPTAB_LIB_WIDEN
);
2558 /* We divided. Now finish doing X - Y * (X / Y). */
2559 result
= expand_mult (compute_mode
, result
, op1
, target
, unsignedp
);
2560 if (! result
) abort ();
2561 result
= expand_binop (compute_mode
, sub_optab
, op0
,
2562 result
, target
, unsignedp
, OPTAB_LIB_WIDEN
);
2569 return gen_lowpart (mode
, result
);
2572 /* Return a tree node with data type TYPE, describing the value of X.
2573 Usually this is an RTL_EXPR, if there is no obvious better choice.
2574 X may be an expression, however we only support those expressions
2575 generated by loop.c. */
2584 switch (GET_CODE (x
))
2587 t
= build_int_2 (INTVAL (x
),
2588 ! TREE_UNSIGNED (type
) && INTVAL (x
) >= 0 ? 0 : -1);
2589 TREE_TYPE (t
) = type
;
2593 if (GET_MODE (x
) == VOIDmode
)
2595 t
= build_int_2 (CONST_DOUBLE_LOW (x
), CONST_DOUBLE_HIGH (x
));
2596 TREE_TYPE (t
) = type
;
2602 REAL_VALUE_FROM_CONST_DOUBLE (d
, x
);
2603 t
= build_real (type
, d
);
2609 return fold (build (PLUS_EXPR
, type
, make_tree (type
, XEXP (x
, 0)),
2610 make_tree (type
, XEXP (x
, 1))));
2613 return fold (build (MINUS_EXPR
, type
, make_tree (type
, XEXP (x
, 0)),
2614 make_tree (type
, XEXP (x
, 1))));
2617 return fold (build1 (NEGATE_EXPR
, type
, make_tree (type
, XEXP (x
, 0))));
2620 return fold (build (MULT_EXPR
, type
, make_tree (type
, XEXP (x
, 0)),
2621 make_tree (type
, XEXP (x
, 1))));
2624 return fold (build (LSHIFT_EXPR
, type
, make_tree (type
, XEXP (x
, 0)),
2625 make_tree (type
, XEXP (x
, 1))));
2628 return fold (convert (type
,
2629 build (RSHIFT_EXPR
, unsigned_type (type
),
2630 make_tree (unsigned_type (type
),
2632 make_tree (type
, XEXP (x
, 1)))));
2635 return fold (convert (type
,
2636 build (RSHIFT_EXPR
, signed_type (type
),
2637 make_tree (signed_type (type
), XEXP (x
, 0)),
2638 make_tree (type
, XEXP (x
, 1)))));
2641 if (TREE_CODE (type
) != REAL_TYPE
)
2642 t
= signed_type (type
);
2646 return fold (convert (type
,
2647 build (TRUNC_DIV_EXPR
, t
,
2648 make_tree (t
, XEXP (x
, 0)),
2649 make_tree (t
, XEXP (x
, 1)))));
2651 t
= unsigned_type (type
);
2652 return fold (convert (type
,
2653 build (TRUNC_DIV_EXPR
, t
,
2654 make_tree (t
, XEXP (x
, 0)),
2655 make_tree (t
, XEXP (x
, 1)))));
2657 t
= make_node (RTL_EXPR
);
2658 TREE_TYPE (t
) = type
;
2659 RTL_EXPR_RTL (t
) = x
;
2660 /* There are no insns to be output
2661 when this rtl_expr is used. */
2662 RTL_EXPR_SEQUENCE (t
) = 0;
2667 /* Return an rtx representing the value of X * MULT + ADD.
2668 TARGET is a suggestion for where to store the result (an rtx).
2669 MODE is the machine mode for the computation.
2670 X and MULT must have mode MODE. ADD may have a different mode.
2671 So can X (defaults to same as MODE).
2672 UNSIGNEDP is non-zero to do unsigned multiplication.
2673 This may emit insns. */
2676 expand_mult_add (x
, target
, mult
, add
, mode
, unsignedp
)
2677 rtx x
, target
, mult
, add
;
2678 enum machine_mode mode
;
2681 tree type
= type_for_mode (mode
, unsignedp
);
2682 tree add_type
= (GET_MODE (add
) == VOIDmode
2683 ? type
: type_for_mode (GET_MODE (add
), unsignedp
));
2684 tree result
= fold (build (PLUS_EXPR
, type
,
2685 fold (build (MULT_EXPR
, type
,
2686 make_tree (type
, x
),
2687 make_tree (type
, mult
))),
2688 make_tree (add_type
, add
)));
2690 return expand_expr (result
, target
, VOIDmode
, 0);
2693 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
2694 and returning TARGET.
2696 If TARGET is 0, a pseudo-register or constant is returned. */
2699 expand_and (op0
, op1
, target
)
2700 rtx op0
, op1
, target
;
2702 enum machine_mode mode
= VOIDmode
;
2705 if (GET_MODE (op0
) != VOIDmode
)
2706 mode
= GET_MODE (op0
);
2707 else if (GET_MODE (op1
) != VOIDmode
)
2708 mode
= GET_MODE (op1
);
2710 if (mode
!= VOIDmode
)
2711 tem
= expand_binop (mode
, and_optab
, op0
, op1
, target
, 0, OPTAB_LIB_WIDEN
);
2712 else if (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) == CONST_INT
)
2713 tem
= GEN_INT (INTVAL (op0
) & INTVAL (op1
));
2719 else if (tem
!= target
)
2720 emit_move_insn (target
, tem
);
2724 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
2725 and storing in TARGET. Normally return TARGET.
2726 Return 0 if that cannot be done.
2728 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
2729 it is VOIDmode, they cannot both be CONST_INT.
2731 UNSIGNEDP is for the case where we have to widen the operands
2732 to perform the operation. It says to use zero-extension.
2734 NORMALIZEP is 1 if we should convert the result to be either zero
2735 or one. Normalize is -1 if we should convert the result to be
2736 either zero or -1. If NORMALIZEP is zero, the result will be left
2737 "raw" out of the scc insn. */
2740 emit_store_flag (target
, code
, op0
, op1
, mode
, unsignedp
, normalizep
)
2744 enum machine_mode mode
;
2749 enum insn_code icode
;
2750 enum machine_mode compare_mode
;
2751 enum machine_mode target_mode
= GET_MODE (target
);
2754 rtx pattern
, comparison
;
2756 if (mode
== VOIDmode
)
2757 mode
= GET_MODE (op0
);
2759 /* If one operand is constant, make it the second one. Only do this
2760 if the other operand is not constant as well. */
2762 if ((CONSTANT_P (op0
) && ! CONSTANT_P (op1
))
2763 || (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) != CONST_INT
))
2768 code
= swap_condition (code
);
2771 /* For some comparisons with 1 and -1, we can convert this to
2772 comparisons with zero. This will often produce more opportunities for
2773 store-flag insns. */
2778 if (op1
== const1_rtx
)
2779 op1
= const0_rtx
, code
= LE
;
2782 if (op1
== constm1_rtx
)
2783 op1
= const0_rtx
, code
= LT
;
2786 if (op1
== const1_rtx
)
2787 op1
= const0_rtx
, code
= GT
;
2790 if (op1
== constm1_rtx
)
2791 op1
= const0_rtx
, code
= GE
;
2794 if (op1
== const1_rtx
)
2795 op1
= const0_rtx
, code
= NE
;
2798 if (op1
== const1_rtx
)
2799 op1
= const0_rtx
, code
= EQ
;
2803 /* From now on, we won't change CODE, so set ICODE now. */
2804 icode
= setcc_gen_code
[(int) code
];
2806 /* If this is A < 0 or A >= 0, we can do this by taking the ones
2807 complement of A (for GE) and shifting the sign bit to the low bit. */
2808 if (op1
== const0_rtx
&& (code
== LT
|| code
== GE
)
2809 && GET_MODE_CLASS (mode
) == MODE_INT
2810 && (normalizep
|| STORE_FLAG_VALUE
== 1
2811 || (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2812 && (STORE_FLAG_VALUE
2813 == (HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (mode
) - 1)))))
2817 /* If the result is to be wider than OP0, it is best to convert it
2818 first. If it is to be narrower, it is *incorrect* to convert it
2820 if (GET_MODE_SIZE (target_mode
) > GET_MODE_SIZE (mode
))
2822 op0
= protect_from_queue (op0
, 0);
2823 op0
= convert_to_mode (target_mode
, op0
, 0);
2827 if (target_mode
!= mode
)
2831 op0
= expand_unop (mode
, one_cmpl_optab
, op0
, subtarget
, 0);
2833 if (normalizep
|| STORE_FLAG_VALUE
== 1)
2834 /* If we are supposed to produce a 0/1 value, we want to do
2835 a logical shift from the sign bit to the low-order bit; for
2836 a -1/0 value, we do an arithmetic shift. */
2837 op0
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2838 size_int (GET_MODE_BITSIZE (mode
) - 1),
2839 subtarget
, normalizep
!= -1);
2841 if (mode
!= target_mode
)
2842 op0
= convert_to_mode (target_mode
, op0
, 0);
2847 if (icode
!= CODE_FOR_nothing
)
2849 /* We think we may be able to do this with a scc insn. Emit the
2850 comparison and then the scc insn.
2852 compare_from_rtx may call emit_queue, which would be deleted below
2853 if the scc insn fails. So call it ourselves before setting LAST. */
2856 last
= get_last_insn ();
2859 = compare_from_rtx (op0
, op1
, code
, unsignedp
, mode
, NULL_RTX
, 0);
2860 if (GET_CODE (comparison
) == CONST_INT
)
2861 return (comparison
== const0_rtx
? const0_rtx
2862 : normalizep
== 1 ? const1_rtx
2863 : normalizep
== -1 ? constm1_rtx
2866 /* If the code of COMPARISON doesn't match CODE, something is
2867 wrong; we can no longer be sure that we have the operation.
2868 We could handle this case, but it should not happen. */
2870 if (GET_CODE (comparison
) != code
)
2873 /* Get a reference to the target in the proper mode for this insn. */
2874 compare_mode
= insn_operand_mode
[(int) icode
][0];
2876 if (preserve_subexpressions_p ()
2877 || ! (*insn_operand_predicate
[(int) icode
][0]) (subtarget
, compare_mode
))
2878 subtarget
= gen_reg_rtx (compare_mode
);
2880 pattern
= GEN_FCN (icode
) (subtarget
);
2883 emit_insn (pattern
);
2885 /* If we are converting to a wider mode, first convert to
2886 TARGET_MODE, then normalize. This produces better combining
2887 opportunities on machines that have a SIGN_EXTRACT when we are
2888 testing a single bit. This mostly benefits the 68k.
2890 If STORE_FLAG_VALUE does not have the sign bit set when
2891 interpreted in COMPARE_MODE, we can do this conversion as
2892 unsigned, which is usually more efficient. */
2893 if (GET_MODE_SIZE (target_mode
) > GET_MODE_SIZE (compare_mode
))
2895 convert_move (target
, subtarget
,
2896 (GET_MODE_BITSIZE (compare_mode
)
2897 <= HOST_BITS_PER_WIDE_INT
)
2898 && 0 == (STORE_FLAG_VALUE
2899 & ((HOST_WIDE_INT
) 1
2900 << (GET_MODE_BITSIZE (compare_mode
) -1))));
2902 compare_mode
= target_mode
;
2907 /* If we want to keep subexpressions around, don't reuse our
2910 if (preserve_subexpressions_p ())
2913 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
2914 we don't have to do anything. */
2915 if (normalizep
== 0 || normalizep
== STORE_FLAG_VALUE
)
2917 else if (normalizep
== - STORE_FLAG_VALUE
)
2918 op0
= expand_unop (compare_mode
, neg_optab
, op0
, subtarget
, 0);
2920 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
2921 makes it hard to use a value of just the sign bit due to
2922 ANSI integer constant typing rules. */
2923 else if (GET_MODE_BITSIZE (compare_mode
) <= HOST_BITS_PER_WIDE_INT
2924 && (STORE_FLAG_VALUE
2925 & ((HOST_WIDE_INT
) 1
2926 << (GET_MODE_BITSIZE (compare_mode
) - 1))))
2927 op0
= expand_shift (RSHIFT_EXPR
, compare_mode
, op0
,
2928 size_int (GET_MODE_BITSIZE (compare_mode
) - 1),
2929 subtarget
, normalizep
== 1);
2930 else if (STORE_FLAG_VALUE
& 1)
2932 op0
= expand_and (op0
, const1_rtx
, subtarget
);
2933 if (normalizep
== -1)
2934 op0
= expand_unop (compare_mode
, neg_optab
, op0
, op0
, 0);
2939 /* If we were converting to a smaller mode, do the
2941 if (target_mode
!= compare_mode
)
2943 convert_move (target
, op0
);
2952 delete_insns_since (last
);
2954 subtarget
= target_mode
== mode
? target
: 0;
2956 /* If we reached here, we can't do this with a scc insn. However, there
2957 are some comparisons that can be done directly. For example, if
2958 this is an equality comparison of integers, we can try to exclusive-or
2959 (or subtract) the two operands and use a recursive call to try the
2960 comparison with zero. Don't do any of these cases if branches are
2963 if (BRANCH_COST
>= 0
2964 && GET_MODE_CLASS (mode
) == MODE_INT
&& (code
== EQ
|| code
== NE
)
2965 && op1
!= const0_rtx
)
2967 tem
= expand_binop (mode
, xor_optab
, op0
, op1
, subtarget
, 1,
2971 tem
= expand_binop (mode
, sub_optab
, op0
, op1
, subtarget
, 1,
2974 tem
= emit_store_flag (target
, code
, tem
, const0_rtx
,
2975 mode
, unsignedp
, normalizep
);
2977 delete_insns_since (last
);
2981 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
2982 the constant zero. Reject all other comparisons at this point. Only
2983 do LE and GT if branches are expensive since they are expensive on
2984 2-operand machines. */
2986 if (BRANCH_COST
== 0
2987 || GET_MODE_CLASS (mode
) != MODE_INT
|| op1
!= const0_rtx
2988 || (code
!= EQ
&& code
!= NE
2989 && (BRANCH_COST
<= 1 || (code
!= LE
&& code
!= GT
))))
2992 /* See what we need to return. We can only return a 1, -1, or the
2995 if (normalizep
== 0)
2997 if (STORE_FLAG_VALUE
== 1 || STORE_FLAG_VALUE
== -1)
2998 normalizep
= STORE_FLAG_VALUE
;
3000 else if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
3001 && (STORE_FLAG_VALUE
3002 == (HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (mode
) - 1)))
3008 /* Try to put the result of the comparison in the sign bit. Assume we can't
3009 do the necessary operation below. */
3013 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
3014 the sign bit set. */
3018 /* This is destructive, so SUBTARGET can't be OP0. */
3019 if (rtx_equal_p (subtarget
, op0
))
3022 tem
= expand_binop (mode
, sub_optab
, op0
, const1_rtx
, subtarget
, 0,
3025 tem
= expand_binop (mode
, ior_optab
, op0
, tem
, subtarget
, 0,
3029 /* To see if A > 0, compute (((signed) A) << BITS) - A, where BITS is the
3030 number of bits in the mode of OP0, minus one. */
3034 if (rtx_equal_p (subtarget
, op0
))
3037 tem
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
3038 size_int (GET_MODE_BITSIZE (mode
) - 1),
3040 tem
= expand_binop (mode
, sub_optab
, tem
, op0
, subtarget
, 0,
3044 if (code
== EQ
|| code
== NE
)
3046 /* For EQ or NE, one way to do the comparison is to apply an operation
3047 that converts the operand into a positive number if it is non-zero
3048 or zero if it was originally zero. Then, for EQ, we subtract 1 and
3049 for NE we negate. This puts the result in the sign bit. Then we
3050 normalize with a shift, if needed.
3052 Two operations that can do the above actions are ABS and FFS, so try
3053 them. If that doesn't work, and MODE is smaller than a full word,
3054 we can use zero-extension to the wider mode (an unsigned conversion)
3055 as the operation. */
3057 if (abs_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3058 tem
= expand_unop (mode
, abs_optab
, op0
, subtarget
, 1);
3059 else if (ffs_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3060 tem
= expand_unop (mode
, ffs_optab
, op0
, subtarget
, 1);
3061 else if (GET_MODE_SIZE (mode
) < UNITS_PER_WORD
)
3064 op0
= protect_from_queue (op0
, 0);
3065 tem
= convert_to_mode (mode
, op0
, 1);
3071 tem
= expand_binop (mode
, sub_optab
, tem
, const1_rtx
, subtarget
,
3074 tem
= expand_unop (mode
, neg_optab
, tem
, subtarget
, 0);
3077 /* If we couldn't do it that way, for NE we can "or" the two's complement
3078 of the value with itself. For EQ, we take the one's complement of
3079 that "or", which is an extra insn, so we only handle EQ if branches
3082 if (tem
== 0 && (code
== NE
|| BRANCH_COST
> 1))
3084 if (rtx_equal_p (subtarget
, op0
))
3087 tem
= expand_unop (mode
, neg_optab
, op0
, subtarget
, 0);
3088 tem
= expand_binop (mode
, ior_optab
, tem
, op0
, subtarget
, 0,
3091 if (tem
&& code
== EQ
)
3092 tem
= expand_unop (mode
, one_cmpl_optab
, tem
, subtarget
, 0);
3096 if (tem
&& normalizep
)
3097 tem
= expand_shift (RSHIFT_EXPR
, mode
, tem
,
3098 size_int (GET_MODE_BITSIZE (mode
) - 1),
3099 tem
, normalizep
== 1);
3101 if (tem
&& GET_MODE (tem
) != target_mode
)
3103 convert_move (target
, tem
, 0);
3108 delete_insns_since (last
);
3112 emit_jump_insn ((*bcc_gen_fctn
[(int) code
]) (label
));
3113 emit_move_insn (target
, const1_rtx
);