]> gcc.gnu.org Git - gcc.git/blame - gcc/expmed.c
* cfgloop.c (flow_loops_cfg_dump): Use bb->index, not i.
[gcc.git] / gcc / expmed.c
CommitLineData
44037a66
TG
1/* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
ef58a523 3 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
c4f2c499 4 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
44037a66 5
1322177d 6This file is part of GCC.
44037a66 7
1322177d
LB
8GCC is free software; you can redistribute it and/or modify it under
9the terms of the GNU General Public License as published by the Free
10Software Foundation; either version 2, or (at your option) any later
11version.
44037a66 12
1322177d
LB
13GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14WARRANTY; without even the implied warranty of MERCHANTABILITY or
15FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16for more details.
44037a66
TG
17
18You should have received a copy of the GNU General Public License
1322177d
LB
19along with GCC; see the file COPYING. If not, write to the Free
20Software Foundation, 59 Temple Place - Suite 330, Boston, MA
2102111-1307, USA. */
44037a66
TG
22
23
24#include "config.h"
670ee920 25#include "system.h"
01198c2f 26#include "toplev.h"
44037a66
TG
27#include "rtl.h"
28#include "tree.h"
6baf1cc8 29#include "tm_p.h"
44037a66 30#include "flags.h"
44037a66
TG
31#include "insn-config.h"
32#include "expr.h"
e78d8e51 33#include "optabs.h"
44037a66
TG
34#include "real.h"
35#include "recog.h"
b0c48229 36#include "langhooks.h"
44037a66 37
770ae6cc
RK
38static void store_fixed_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
39 unsigned HOST_WIDE_INT,
04050c69 40 unsigned HOST_WIDE_INT, rtx));
770ae6cc 41static void store_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
04050c69 42 unsigned HOST_WIDE_INT, rtx));
770ae6cc
RK
43static rtx extract_fixed_bit_field PARAMS ((enum machine_mode, rtx,
44 unsigned HOST_WIDE_INT,
45 unsigned HOST_WIDE_INT,
46 unsigned HOST_WIDE_INT,
04050c69 47 rtx, int));
711d877c
KG
48static rtx mask_rtx PARAMS ((enum machine_mode, int,
49 int, int));
50static rtx lshift_value PARAMS ((enum machine_mode, rtx,
51 int, int));
770ae6cc 52static rtx extract_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
04050c69 53 unsigned HOST_WIDE_INT, int));
711d877c
KG
54static void do_cmp_and_jump PARAMS ((rtx, rtx, enum rtx_code,
55 enum machine_mode, rtx));
44037a66 56
44037a66 57/* Non-zero means divides or modulus operations are relatively cheap for
c410d49e 58 powers of two, so don't use branches; emit the operation instead.
44037a66
TG
59 Usually, this will mean that the MD file will emit non-branch
60 sequences. */
61
62static int sdiv_pow2_cheap, smod_pow2_cheap;
63
c7e33f89 64#ifndef SLOW_UNALIGNED_ACCESS
e1565e65 65#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
c7e33f89
RS
66#endif
67
e49a094d
RS
68/* For compilers that support multiple targets with different word sizes,
69 MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD. An example
70 is the H8/300(H) compiler. */
71
72#ifndef MAX_BITS_PER_WORD
73#define MAX_BITS_PER_WORD BITS_PER_WORD
74#endif
75
d523b40e
RH
76/* Reduce conditional compilation elsewhere. */
77#ifndef HAVE_insv
a242b083 78#define HAVE_insv 0
d523b40e
RH
79#define CODE_FOR_insv CODE_FOR_nothing
80#define gen_insv(a,b,c,d) NULL_RTX
81#endif
82#ifndef HAVE_extv
a242b083 83#define HAVE_extv 0
d523b40e
RH
84#define CODE_FOR_extv CODE_FOR_nothing
85#define gen_extv(a,b,c,d) NULL_RTX
86#endif
87#ifndef HAVE_extzv
a242b083 88#define HAVE_extzv 0
d523b40e
RH
89#define CODE_FOR_extzv CODE_FOR_nothing
90#define gen_extzv(a,b,c,d) NULL_RTX
91#endif
92
c5c76735
JL
93/* Cost of various pieces of RTL. Note that some of these are indexed by
94 shift count and some by mode. */
819126a6 95static int add_cost, negate_cost, zero_cost;
e49a094d
RS
96static int shift_cost[MAX_BITS_PER_WORD];
97static int shiftadd_cost[MAX_BITS_PER_WORD];
98static int shiftsub_cost[MAX_BITS_PER_WORD];
71af73bb
TG
99static int mul_cost[NUM_MACHINE_MODES];
100static int div_cost[NUM_MACHINE_MODES];
101static int mul_widen_cost[NUM_MACHINE_MODES];
102static int mul_highpart_cost[NUM_MACHINE_MODES];
44037a66 103
44037a66
TG
104void
105init_expmed ()
106{
44037a66
TG
107 /* This is "some random pseudo register" for purposes of calling recog
108 to see what insns exist. */
38a448ca 109 rtx reg = gen_rtx_REG (word_mode, 10000);
b385aeda 110 rtx shift_insn, shiftadd_insn, shiftsub_insn;
b1ec3c92 111 int dummy;
7963ac37 112 int m;
71af73bb 113 enum machine_mode mode, wider_mode;
44037a66 114
b385aeda
RK
115 start_sequence ();
116
c5c76735 117 reg = gen_rtx_REG (word_mode, 10000);
9ec36da5 118
172a1cb0 119 zero_cost = rtx_cost (const0_rtx, 0);
38a448ca
RH
120 add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);
121
122 shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
123 gen_rtx_ASHIFT (word_mode, reg,
124 const0_rtx)));
125
126 shiftadd_insn
127 = emit_insn (gen_rtx_SET (VOIDmode, reg,
128 gen_rtx_PLUS (word_mode,
129 gen_rtx_MULT (word_mode,
130 reg, const0_rtx),
131 reg)));
132
133 shiftsub_insn
134 = emit_insn (gen_rtx_SET (VOIDmode, reg,
135 gen_rtx_MINUS (word_mode,
136 gen_rtx_MULT (word_mode,
137 reg, const0_rtx),
138 reg)));
7963ac37
RK
139
140 init_recog ();
b385aeda
RK
141
142 shift_cost[0] = 0;
143 shiftadd_cost[0] = shiftsub_cost[0] = add_cost;
144
55a6ba9f 145 for (m = 1; m < MAX_BITS_PER_WORD; m++)
7963ac37 146 {
b7c89afe 147 rtx c_int = GEN_INT ((HOST_WIDE_INT) 1 << m);
7963ac37 148 shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;
b385aeda
RK
149
150 XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
151 if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
152 shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);
153
b7c89afe 154 XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1) = c_int;
b385aeda 155 if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
dac57de0 156 shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);
b385aeda 157
b7c89afe 158 XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1) = c_int;
b385aeda 159 if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
dac57de0 160 shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
7963ac37
RK
161 }
162
38a448ca 163 negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);
44037a66 164
44037a66 165 sdiv_pow2_cheap
38a448ca 166 = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
b385aeda 167 <= 2 * add_cost);
44037a66 168 smod_pow2_cheap
38a448ca 169 = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
b385aeda 170 <= 2 * add_cost);
44037a66 171
71af73bb
TG
172 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
173 mode != VOIDmode;
174 mode = GET_MODE_WIDER_MODE (mode))
175 {
38a448ca
RH
176 reg = gen_rtx_REG (mode, 10000);
177 div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
178 mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
71af73bb
TG
179 wider_mode = GET_MODE_WIDER_MODE (mode);
180 if (wider_mode != VOIDmode)
181 {
182 mul_widen_cost[(int) wider_mode]
38a448ca
RH
183 = rtx_cost (gen_rtx_MULT (wider_mode,
184 gen_rtx_ZERO_EXTEND (wider_mode, reg),
185 gen_rtx_ZERO_EXTEND (wider_mode, reg)),
71af73bb
TG
186 SET);
187 mul_highpart_cost[(int) mode]
38a448ca
RH
188 = rtx_cost (gen_rtx_TRUNCATE
189 (mode,
c5c76735
JL
190 gen_rtx_LSHIFTRT (wider_mode,
191 gen_rtx_MULT (wider_mode,
192 gen_rtx_ZERO_EXTEND
193 (wider_mode, reg),
194 gen_rtx_ZERO_EXTEND
195 (wider_mode, reg)),
196 GEN_INT (GET_MODE_BITSIZE (mode)))),
71af73bb
TG
197 SET);
198 }
199 }
200
b385aeda 201 end_sequence ();
44037a66
TG
202}
203
204/* Return an rtx representing minus the value of X.
205 MODE is the intended mode of the result,
206 useful if X is a CONST_INT. */
207
208rtx
209negate_rtx (mode, x)
210 enum machine_mode mode;
211 rtx x;
212{
a39a7484
RK
213 rtx result = simplify_unary_operation (NEG, mode, x, mode);
214
fdb5537f 215 if (result == 0)
a39a7484
RK
216 result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
217
218 return result;
44037a66 219}
da920570
ZW
220
221/* Report on the availability of insv/extv/extzv and the desired mode
222 of each of their operands. Returns MAX_MACHINE_MODE if HAVE_foo
223 is false; else the mode of the specified operand. If OPNO is -1,
224 all the caller cares about is whether the insn is available. */
225enum machine_mode
226mode_for_extraction (pattern, opno)
227 enum extraction_pattern pattern;
228 int opno;
229{
230 const struct insn_data *data;
231
232 switch (pattern)
233 {
234 case EP_insv:
da920570
ZW
235 if (HAVE_insv)
236 {
237 data = &insn_data[CODE_FOR_insv];
238 break;
239 }
da920570
ZW
240 return MAX_MACHINE_MODE;
241
242 case EP_extv:
da920570
ZW
243 if (HAVE_extv)
244 {
245 data = &insn_data[CODE_FOR_extv];
246 break;
247 }
da920570
ZW
248 return MAX_MACHINE_MODE;
249
250 case EP_extzv:
da920570
ZW
251 if (HAVE_extzv)
252 {
253 data = &insn_data[CODE_FOR_extzv];
254 break;
255 }
da920570 256 return MAX_MACHINE_MODE;
a242b083
ZW
257
258 default:
259 abort ();
da920570
ZW
260 }
261
262 if (opno == -1)
263 return VOIDmode;
264
265 /* Everyone who uses this function used to follow it with
266 if (result == VOIDmode) result = word_mode; */
267 if (data->operand[opno].mode == VOIDmode)
268 return word_mode;
269 return data->operand[opno].mode;
270}
271
44037a66
TG
272\f
273/* Generate code to store value from rtx VALUE
274 into a bit-field within structure STR_RTX
275 containing BITSIZE bits starting at bit BITNUM.
276 FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
0e805f81 277 ALIGN is the alignment that STR_RTX is known to have.
44037a66
TG
278 TOTAL_SIZE is the size of the structure in bytes, or -1 if varying. */
279
280/* ??? Note that there are two different ideas here for how
281 to determine the size to count bits within, for a register.
282 One is BITS_PER_WORD, and the other is the size of operand 3
0d8e55d8
JL
283 of the insv pattern.
284
285 If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD
286 else, we use the mode of operand 3. */
44037a66
TG
287
288rtx
04050c69 289store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, total_size)
44037a66 290 rtx str_rtx;
770ae6cc
RK
291 unsigned HOST_WIDE_INT bitsize;
292 unsigned HOST_WIDE_INT bitnum;
44037a66
TG
293 enum machine_mode fieldmode;
294 rtx value;
770ae6cc 295 HOST_WIDE_INT total_size;
44037a66 296{
770ae6cc
RK
297 unsigned int unit
298 = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
299 unsigned HOST_WIDE_INT offset = bitnum / unit;
300 unsigned HOST_WIDE_INT bitpos = bitnum % unit;
b3694847 301 rtx op0 = str_rtx;
420e7dfa 302 int byte_offset;
da920570 303
a242b083 304 enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
44037a66 305
44037a66
TG
306 /* Discount the part of the structure before the desired byte.
307 We need to know how many bytes are safe to reference after it. */
308 if (total_size >= 0)
309 total_size -= (bitpos / BIGGEST_ALIGNMENT
310 * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
311
312 while (GET_CODE (op0) == SUBREG)
313 {
314 /* The following line once was done only if WORDS_BIG_ENDIAN,
315 but I think that is a mistake. WORDS_BIG_ENDIAN is
316 meaningful at a much higher level; when structures are copied
317 between memory and regs, the higher-numbered regs
318 always get higher addresses. */
ddef6bc7 319 offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
44037a66
TG
320 /* We used to adjust BITPOS here, but now we do the whole adjustment
321 right after the loop. */
322 op0 = SUBREG_REG (op0);
323 }
324
44037a66
TG
325 value = protect_from_queue (value, 0);
326
327 if (flag_force_mem)
32b069d3
RH
328 {
329 int old_generating_concat_p = generating_concat_p;
330 generating_concat_p = 0;
331 value = force_not_mem (value);
332 generating_concat_p = old_generating_concat_p;
333 }
44037a66 334
308ecea0
RH
335 /* If the target is a register, overwriting the entire object, or storing
336 a full-word or multi-word field can be done with just a SUBREG.
337
338 If the target is memory, storing any naturally aligned field can be
339 done with a simple store. For targets that support fast unaligned
340 memory, any naturally sized, unit aligned field can be done directly. */
c410d49e 341
420e7dfa
DN
342 byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
343 + (offset * UNITS_PER_WORD);
344
57bfa49a 345 if (bitpos == 0
f21bdd05 346 && bitsize == GET_MODE_BITSIZE (fieldmode)
ea9ea008 347 && (GET_CODE (op0) != MEM
420e7dfa 348 ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
ea9ea008 349 || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
420e7dfa 350 && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
04050c69 351 : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
ea9ea008 352 || (offset * BITS_PER_UNIT % bitsize == 0
04050c69 353 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
44037a66 354 {
44037a66 355 if (GET_MODE (op0) != fieldmode)
c7e33f89 356 {
d006aa54
RH
357 if (GET_CODE (op0) == SUBREG)
358 {
359 if (GET_MODE (SUBREG_REG (op0)) == fieldmode
360 || GET_MODE_CLASS (fieldmode) == MODE_INT
361 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
362 op0 = SUBREG_REG (op0);
363 else
364 /* Else we've got some float mode source being extracted into
365 a different float mode destination -- this combination of
366 subregs results in Severe Tire Damage. */
367 abort ();
368 }
c7e33f89 369 if (GET_CODE (op0) == REG)
420e7dfa 370 op0 = gen_rtx_SUBREG (fieldmode, op0, byte_offset);
c7e33f89 371 else
f4ef873c 372 op0 = adjust_address (op0, fieldmode, offset);
c7e33f89 373 }
44037a66
TG
374 emit_move_insn (op0, value);
375 return value;
376 }
377
a8ca7756
JW
378 /* Make sure we are playing with integral modes. Pun with subregs
379 if we aren't. This must come after the entire register case above,
380 since that case is valid for any mode. The following cases are only
381 valid for integral modes. */
382 {
383 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
384 if (imode != GET_MODE (op0))
385 {
386 if (GET_CODE (op0) == MEM)
f4ef873c 387 op0 = adjust_address (op0, imode, 0);
a8ca7756
JW
388 else if (imode != BLKmode)
389 op0 = gen_lowpart (imode, op0);
390 else
391 abort ();
392 }
393 }
394
4e9bb42b
AH
395 /* We may be accessing data outside the field, which means
396 we can alias adjacent data. */
397 if (GET_CODE (op0) == MEM)
398 {
399 op0 = shallow_copy_rtx (op0);
400 set_mem_alias_set (op0, 0);
401 set_mem_expr (op0, 0);
402 }
403
57bfa49a
RZ
404 /* If OP0 is a register, BITPOS must count within a word.
405 But as we have it, it counts within whatever size OP0 now has.
406 On a bigendian machine, these are not the same, so convert. */
407 if (BYTES_BIG_ENDIAN
408 && GET_CODE (op0) != MEM
409 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
410 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
411
44037a66
TG
412 /* Storing an lsb-aligned field in a register
413 can be done with a movestrict instruction. */
414
415 if (GET_CODE (op0) != MEM
f76b9db2 416 && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
44037a66 417 && bitsize == GET_MODE_BITSIZE (fieldmode)
5e4900c7
JW
418 && (movstrict_optab->handlers[(int) fieldmode].insn_code
419 != CODE_FOR_nothing))
44037a66 420 {
5e4900c7
JW
421 int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
422
44037a66
TG
423 /* Get appropriate low part of the value being stored. */
424 if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
425 value = gen_lowpart (fieldmode, value);
426 else if (!(GET_CODE (value) == SYMBOL_REF
427 || GET_CODE (value) == LABEL_REF
428 || GET_CODE (value) == CONST))
429 value = convert_to_mode (fieldmode, value, 0);
430
5e4900c7
JW
431 if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
432 value = copy_to_mode_reg (fieldmode, value);
433
434 if (GET_CODE (op0) == SUBREG)
44037a66 435 {
5e4900c7
JW
436 if (GET_MODE (SUBREG_REG (op0)) == fieldmode
437 || GET_MODE_CLASS (fieldmode) == MODE_INT
438 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
439 op0 = SUBREG_REG (op0);
440 else
441 /* Else we've got some float mode source being extracted into
442 a different float mode destination -- this combination of
443 subregs results in Severe Tire Damage. */
444 abort ();
445 }
470032d7 446
5e4900c7 447 emit_insn (GEN_FCN (icode)
ddef6bc7
JJ
448 (gen_rtx_SUBREG (fieldmode, op0,
449 (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
450 + (offset * UNITS_PER_WORD)),
451 value));
470032d7 452
44037a66
TG
453 return value;
454 }
455
456 /* Handle fields bigger than a word. */
457
458 if (bitsize > BITS_PER_WORD)
459 {
460 /* Here we transfer the words of the field
461 in the order least significant first.
462 This is because the most significant word is the one which may
ad83e87b
PB
463 be less than full.
464 However, only do that if the value is not BLKmode. */
465
770ae6cc
RK
466 unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
467 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
468 unsigned int i;
44037a66
TG
469
470 /* This is the mode we must force value to, so that there will be enough
471 subwords to extract. Note that fieldmode will often (always?) be
472 VOIDmode, because that is what store_field uses to indicate that this
473 is a bit field, but passing VOIDmode to operand_subword_force will
474 result in an abort. */
df7978d9 475 fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);
44037a66
TG
476
477 for (i = 0; i < nwords; i++)
478 {
ad83e87b
PB
479 /* If I is 0, use the low-order word in both field and target;
480 if I is 1, use the next to lowest word; and so on. */
770ae6cc
RK
481 unsigned int wordnum = (backwards ? nwords - i - 1 : i);
482 unsigned int bit_offset = (backwards
04050c69
RK
483 ? MAX ((int) bitsize - ((int) i + 1)
484 * BITS_PER_WORD,
485 0)
486 : (int) i * BITS_PER_WORD);
770ae6cc 487
44037a66
TG
488 store_bit_field (op0, MIN (BITS_PER_WORD,
489 bitsize - i * BITS_PER_WORD),
490 bitnum + bit_offset, word_mode,
b3487765
RS
491 operand_subword_force (value, wordnum,
492 (GET_MODE (value) == VOIDmode
493 ? fieldmode
494 : GET_MODE (value))),
04050c69 495 total_size);
44037a66
TG
496 }
497 return value;
498 }
499
500 /* From here on we can assume that the field to be stored in is
501 a full-word (whatever type that is), since it is shorter than a word. */
502
503 /* OFFSET is the number of words or bytes (UNIT says which)
504 from STR_RTX to the first word or byte containing part of the field. */
505
470032d7 506 if (GET_CODE (op0) != MEM)
44037a66
TG
507 {
508 if (offset != 0
509 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
470032d7
RH
510 {
511 if (GET_CODE (op0) != REG)
7be7a07d
JW
512 {
513 /* Since this is a destination (lvalue), we can't copy it to a
514 pseudo. We can trivially remove a SUBREG that does not
515 change the size of the operand. Such a SUBREG may have been
516 added above. Otherwise, abort. */
517 if (GET_CODE (op0) == SUBREG
518 && (GET_MODE_SIZE (GET_MODE (op0))
519 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
520 op0 = SUBREG_REG (op0);
521 else
522 abort ();
523 }
470032d7 524 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
ddef6bc7 525 op0, (offset * UNITS_PER_WORD));
470032d7 526 }
44037a66
TG
527 offset = 0;
528 }
529 else
04050c69 530 op0 = protect_from_queue (op0, 1);
44037a66 531
2305bcad
JW
532 /* If VALUE is a floating-point mode, access it as an integer of the
533 corresponding size. This can occur on a machine with 64 bit registers
534 that uses SFmode for float. This can also occur for unaligned float
535 structure fields. */
32b069d3
RH
536 if (GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
537 && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
538 value = gen_lowpart (word_mode, value);
2305bcad 539
44037a66
TG
540 /* Now OFFSET is nonzero only if OP0 is memory
541 and is therefore always measured in bytes. */
542
a242b083 543 if (HAVE_insv
1d269b0c 544 && GET_MODE (value) != BLKmode
44037a66
TG
545 && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
546 /* Ensure insv's size is wide enough for this field. */
a242b083 547 && (GET_MODE_BITSIZE (op_mode) >= bitsize)
39e0911f 548 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
a242b083 549 && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
44037a66
TG
550 {
551 int xbitpos = bitpos;
552 rtx value1;
553 rtx xop0 = op0;
554 rtx last = get_last_insn ();
555 rtx pat;
d523b40e 556 enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
44037a66 557 int save_volatile_ok = volatile_ok;
0d8e55d8 558
44037a66
TG
559 volatile_ok = 1;
560
4dbebeb2
JW
561 /* If this machine's insv can only insert into a register, copy OP0
562 into a register and save it back later. */
563 /* This used to check flag_force_mem, but that was a serious
564 de-optimization now that flag_force_mem is enabled by -O2. */
44037a66 565 if (GET_CODE (op0) == MEM
a995e389 566 && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
4dbebeb2 567 (op0, VOIDmode)))
44037a66
TG
568 {
569 rtx tempreg;
570 enum machine_mode bestmode;
571
572 /* Get the mode to use for inserting into this field. If OP0 is
573 BLKmode, get the smallest mode consistent with the alignment. If
574 OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
575 mode. Otherwise, use the smallest mode containing the field. */
576
577 if (GET_MODE (op0) == BLKmode
578 || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
579 bestmode
04050c69 580 = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
717702e6 581 MEM_VOLATILE_P (op0));
44037a66
TG
582 else
583 bestmode = GET_MODE (op0);
584
bd5d175a 585 if (bestmode == VOIDmode
04050c69
RK
586 || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
587 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
44037a66
TG
588 goto insv_loses;
589
c410d49e 590 /* Adjust address to point to the containing unit of that mode.
04050c69 591 Compute offset as multiple of this unit, counting in bytes. */
44037a66 592 unit = GET_MODE_BITSIZE (bestmode);
44037a66
TG
593 offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
594 bitpos = bitnum % unit;
f4ef873c 595 op0 = adjust_address (op0, bestmode, offset);
44037a66 596
0e805f81
RK
597 /* Fetch that unit, store the bitfield in it, then store
598 the unit. */
44037a66
TG
599 tempreg = copy_to_reg (op0);
600 store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
04050c69 601 total_size);
44037a66
TG
602 emit_move_insn (op0, tempreg);
603 return value;
604 }
605 volatile_ok = save_volatile_ok;
606
607 /* Add OFFSET into OP0's address. */
608 if (GET_CODE (xop0) == MEM)
f4ef873c 609 xop0 = adjust_address (xop0, byte_mode, offset);
44037a66
TG
610
611 /* If xop0 is a register, we need it in MAXMODE
612 to make it acceptable to the format of insv. */
613 if (GET_CODE (xop0) == SUBREG)
bac7cdfd
DE
614 /* We can't just change the mode, because this might clobber op0,
615 and we will need the original value of op0 if insv fails. */
ddef6bc7 616 xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
44037a66 617 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
38a448ca 618 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
44037a66
TG
619
620 /* On big-endian machines, we count bits from the most significant.
621 If the bit field insn does not, we must invert. */
622
f76b9db2
ILT
623 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
624 xbitpos = unit - bitsize - xbitpos;
625
44037a66
TG
626 /* We have been counting XBITPOS within UNIT.
627 Count instead within the size of the register. */
f76b9db2 628 if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
44037a66 629 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
f76b9db2 630
44037a66
TG
631 unit = GET_MODE_BITSIZE (maxmode);
632
633 /* Convert VALUE to maxmode (which insv insn wants) in VALUE1. */
634 value1 = value;
635 if (GET_MODE (value) != maxmode)
636 {
637 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
638 {
639 /* Optimization: Don't bother really extending VALUE
f5df292e
RS
640 if it has all the bits we will actually use. However,
641 if we must narrow it, be sure we do it correctly. */
44037a66 642
f5df292e 643 if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
c410d49e
EC
644 {
645 rtx tmp;
646
647 tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
648 if (! tmp)
649 tmp = simplify_gen_subreg (maxmode,
650 force_reg (GET_MODE (value),
651 value1),
652 GET_MODE (value), 0);
653 value1 = tmp;
654 }
f5df292e
RS
655 else
656 value1 = gen_lowpart (maxmode, value1);
44037a66 657 }
69107307 658 else if (GET_CODE (value) == CONST_INT)
2496c7bd 659 value1 = gen_int_mode (INTVAL (value), maxmode);
44037a66
TG
660 else if (!CONSTANT_P (value))
661 /* Parse phase is supposed to make VALUE's data type
662 match that of the component reference, which is a type
663 at least as wide as the field; so VALUE should have
664 a mode that corresponds to that type. */
665 abort ();
666 }
667
668 /* If this machine's insv insists on a register,
669 get VALUE1 into a register. */
a995e389 670 if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
44037a66
TG
671 (value1, maxmode)))
672 value1 = force_reg (maxmode, value1);
673
b1ec3c92 674 pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
44037a66
TG
675 if (pat)
676 emit_insn (pat);
677 else
678 {
679 delete_insns_since (last);
04050c69 680 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
44037a66
TG
681 }
682 }
683 else
684 insv_loses:
44037a66 685 /* Insv is not available; store using shifts and boolean ops. */
04050c69 686 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
44037a66
TG
687 return value;
688}
689\f
690/* Use shifts and boolean operations to store VALUE
691 into a bit field of width BITSIZE
692 in a memory location specified by OP0 except offset by OFFSET bytes.
693 (OFFSET must be 0 if OP0 is a register.)
694 The field starts at position BITPOS within the byte.
695 (If OP0 is a register, it may be a full word or a narrower mode,
696 but BITPOS still counts within a full word,
697 which is significant on bigendian machines.)
44037a66
TG
698
699 Note that protect_from_queue has already been done on OP0 and VALUE. */
700
701static void
04050c69 702store_fixed_bit_field (op0, offset, bitsize, bitpos, value)
b3694847 703 rtx op0;
770ae6cc 704 unsigned HOST_WIDE_INT offset, bitsize, bitpos;
b3694847 705 rtx value;
44037a66 706{
b3694847 707 enum machine_mode mode;
770ae6cc 708 unsigned int total_bits = BITS_PER_WORD;
44037a66
TG
709 rtx subtarget, temp;
710 int all_zero = 0;
711 int all_one = 0;
712
44037a66
TG
713 /* There is a case not handled here:
714 a structure with a known alignment of just a halfword
715 and a field split across two aligned halfwords within the structure.
716 Or likewise a structure with a known alignment of just a byte
717 and a field split across two bytes.
718 Such cases are not supposed to be able to occur. */
719
720 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
721 {
722 if (offset != 0)
723 abort ();
724 /* Special treatment for a bit field split across two registers. */
725 if (bitsize + bitpos > BITS_PER_WORD)
726 {
04050c69 727 store_split_bit_field (op0, bitsize, bitpos, value);
44037a66
TG
728 return;
729 }
730 }
731 else
732 {
733 /* Get the proper mode to use for this field. We want a mode that
734 includes the entire field. If such a mode would be larger than
c410d49e 735 a word, we won't be doing the extraction the normal way.
053a35af 736 We don't want a mode bigger than the destination. */
44037a66 737
053a35af
AH
738 mode = GET_MODE (op0);
739 if (GET_MODE_BITSIZE (mode) == 0
740 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
741 mode = word_mode;
44037a66 742 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
04050c69 743 MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
44037a66
TG
744
745 if (mode == VOIDmode)
746 {
747 /* The only way this should occur is if the field spans word
748 boundaries. */
04050c69
RK
749 store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
750 value);
44037a66
TG
751 return;
752 }
753
754 total_bits = GET_MODE_BITSIZE (mode);
755
3bd98790 756 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
38e01259 757 be in the range 0 to total_bits-1, and put any excess bytes in
3bd98790
JW
758 OFFSET. */
759 if (bitpos >= total_bits)
760 {
761 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
762 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
763 * BITS_PER_UNIT);
764 }
765
44037a66
TG
766 /* Get ref to an aligned byte, halfword, or word containing the field.
767 Adjust BITPOS to be position within a word,
768 and OFFSET to be the offset of that word.
769 Then alter OP0 to refer to that word. */
770 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
771 offset -= (offset % (total_bits / BITS_PER_UNIT));
f4ef873c 772 op0 = adjust_address (op0, mode, offset);
44037a66
TG
773 }
774
775 mode = GET_MODE (op0);
776
777 /* Now MODE is either some integral mode for a MEM as OP0,
778 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
779 The bit field is contained entirely within OP0.
780 BITPOS is the starting bit number within OP0.
781 (OP0's mode may actually be narrower than MODE.) */
782
f76b9db2
ILT
783 if (BYTES_BIG_ENDIAN)
784 /* BITPOS is the distance between our msb
785 and that of the containing datum.
786 Convert it to the distance from the lsb. */
787 bitpos = total_bits - bitsize - bitpos;
44037a66 788
44037a66
TG
789 /* Now BITPOS is always the distance between our lsb
790 and that of OP0. */
791
792 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
793 we must first convert its mode to MODE. */
794
795 if (GET_CODE (value) == CONST_INT)
796 {
b3694847 797 HOST_WIDE_INT v = INTVAL (value);
44037a66 798
b1ec3c92
CH
799 if (bitsize < HOST_BITS_PER_WIDE_INT)
800 v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
44037a66
TG
801
802 if (v == 0)
803 all_zero = 1;
b1ec3c92
CH
804 else if ((bitsize < HOST_BITS_PER_WIDE_INT
805 && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
806 || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
44037a66
TG
807 all_one = 1;
808
809 value = lshift_value (mode, value, bitpos, bitsize);
810 }
811 else
812 {
813 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
814 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
815
816 if (GET_MODE (value) != mode)
817 {
44037a66
TG
818 if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
819 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
820 value = gen_lowpart (mode, value);
821 else
822 value = convert_to_mode (mode, value, 1);
823 }
824
825 if (must_and)
826 value = expand_binop (mode, and_optab, value,
827 mask_rtx (mode, 0, bitsize, 0),
b1ec3c92 828 NULL_RTX, 1, OPTAB_LIB_WIDEN);
44037a66
TG
829 if (bitpos > 0)
830 value = expand_shift (LSHIFT_EXPR, mode, value,
b1ec3c92 831 build_int_2 (bitpos, 0), NULL_RTX, 1);
44037a66
TG
832 }
833
834 /* Now clear the chosen bits in OP0,
835 except that if VALUE is -1 we need not bother. */
836
837 subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;
838
839 if (! all_one)
840 {
841 temp = expand_binop (mode, and_optab, op0,
842 mask_rtx (mode, bitpos, bitsize, 1),
843 subtarget, 1, OPTAB_LIB_WIDEN);
844 subtarget = temp;
845 }
846 else
847 temp = op0;
848
849 /* Now logical-or VALUE into OP0, unless it is zero. */
850
851 if (! all_zero)
852 temp = expand_binop (mode, ior_optab, temp, value,
853 subtarget, 1, OPTAB_LIB_WIDEN);
854 if (op0 != temp)
855 emit_move_insn (op0, temp);
856}
857\f
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */
static void
store_split_bit_field (op0, bitsize, bitpos, value)
     rtx op0;
     unsigned HOST_WIDE_INT bitsize, bitpos;
     rtx value;
{
  unsigned int unit;
  /* Number of bits of VALUE already stored.  */
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  /* Store the field one unit-sized (at most word-sized) piece at a time.  */
  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      /* OFFSET: which unit the next piece lands in; THISPOS: bit position
	 of the piece within that unit.  */
      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
	{
	  int total_bits;

	  /* We must do an endian conversion exactly the same way as it is
	     done in extract_bit_field, so that the two calls to
	     extract_fixed_bit_field will have comparable arguments.  */
	  if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
	    total_bits = BITS_PER_WORD;
	  else
	    total_bits = GET_MODE_BITSIZE (GET_MODE (value));

	  /* Fetch successively less significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> (bitsize - bitsdone - thissize))
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    /* The args are chosen so that the last part includes the
	       lsb.  Give extract_bit_field the value it needs (with
	       endianness compensation) to fetch the piece we want.  */
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    total_bits - bitsize + bitsdone,
					    NULL_RTX, 1);
	}
      else
	{
	  /* Fetch successively more significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    bitsdone, NULL_RTX, 1);
	}

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
	 store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
			     thispos, part);
      bitsdone += thissize;
    }
}
982\f
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
		   target, mode, tmode, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     HOST_WIDE_INT total_size;
{
  /* For MEM, address in byte units; for registers, in word units.  */
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  /* Remember the caller's TARGET so we can return it unchanged if the
     extraction ended up there (or in a subreg of it).  */
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;

  /* Strip SUBREGs, folding their byte offset into OFFSET/BITPOS.  */
  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_BYTE (op0) / UNITS_PER_WORD;

      inner_size = MIN (inner_size, BITS_PER_WORD);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
	{
	  bitpos += inner_size - outer_size;
	  if (bitpos > unit)
	    {
	      offset += (bitpos / unit);
	      bitpos %= unit;
	    }
	}

      op0 = SUBREG_REG (op0);
    }

  if (GET_CODE (op0) == REG
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = adjust_address (op0, imode, 0);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
		+ (offset * UNITS_PER_WORD);

  mode1 = (VECTOR_MODE_P (tmode)
	   ? mode
	   : mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0));

  if (((GET_CODE (op0) != MEM
	&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op0)))
	&& GET_MODE_SIZE (mode1) != 0
	&& byte_offset % GET_MODE_SIZE (mode1) == 0)
       || (GET_CODE (op0) == MEM
	   && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
	       || (offset * BITS_PER_UNIT % bitsize == 0
		   && MEM_ALIGN (op0) % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	   && bitpos % BITS_PER_WORD == 0)
	  || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
	      /* ??? The big endian test here is wrong.  This is correct
		 if the value is in a register, and if mode_for_size is not
		 the same mode as op0.  This causes us to get unnecessarily
		 inefficient code from the Thumb port when -mbig-endian.  */
	      && (BYTES_BIG_ENDIAN
		  ? bitpos + bitsize == BITS_PER_WORD
		  : bitpos == 0))))
    {
      if (mode1 != GET_MODE (op0))
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == mode1
		  || GET_MODE_CLASS (mode1) == MODE_INT
		  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		goto no_subreg_mode_swap;
	    }
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (mode1, op0, byte_offset);
	  else
	    op0 = adjust_address (op0, mode1, offset);
	}
      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || GET_CODE (target) != REG)
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  /* Word number in TARGET to use.  */
	  unsigned int wordnum
	    = (WORDS_BIG_ENDIAN
	       ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
	       : i);
	  /* Offset from start of field in OP0.  */
	  unsigned int bit_offset = (WORDS_BIG_ENDIAN
				     ? MAX (0, ((int) bitsize - ((int) i + 1)
						* (int) BITS_PER_WORD))
				     : (int) i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
					   bitsize - i * BITS_PER_WORD),
				 bitnum + bit_offset, 1, target_part, mode,
				 word_mode, total_size);

	  if (target_part == 0)
	    abort ();

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}

      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      unsigned int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		emit_move_insn
		  (operand_subword (target,
				    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
				    1, VOIDmode),
		   const0_rtx);
	    }
	  return target;
	}

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			     NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			   NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();	/* Should probably push op0 out to memory and then
		   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    op0 = copy_to_reg (op0);
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
      /* Try the machine's extzv (zero-extending extract) pattern first;
	 fall back to shifts and masks if it cannot be used.  */
      if (HAVE_extzv
	  && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
	{
	  unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0;
	  rtx xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

	  if (GET_CODE (xop0) == MEM)
	    {
	      int save_volatile_ok = volatile_ok;
	      volatile_ok = 1;

	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      MEM_ALIGN (xop0), maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
			  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
		    goto extzv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = adjust_address (xop0, bestmode, xoffset);

		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = adjust_address (xop0, byte_mode, xoffset);

	      volatile_ok = save_volatile_ok;
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extzv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extzv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* Now convert from counting within UNIT to counting in MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extzv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extzv (protect_from_queue (xtarget, 1),
			   xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 1);
	    }
	}
      else
      extzv_loses:
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 1);
    }
  else
    {
      /* Signed field: try the machine's extv (sign-extending extract);
	 fall back to shifts and masks if it cannot be used.  */
      if (HAVE_extv
	  && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0, xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);

	  if (GET_CODE (xop0) == MEM)
	    {
	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      MEM_ALIGN (xop0), maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
			  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
		    goto extv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = adjust_address (xop0, bestmode, xoffset);

		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = adjust_address (xop0, byte_mode, xoffset);
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* XBITPOS counts within a size of UNIT.
	     Adjust to count within a size of MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extv (protect_from_queue (xtarget, 1),
			  xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 0);
	    }
	}
      else
      extv_loses:
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 0);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
	 integer mode of that size and then access it as a floating-point
	 value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) != MODE_INT
	  && GET_MODE_CLASS (tmode) != MODE_PARTIAL_INT)
	{
	  target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
						   MODE_INT, 0),
				    target, unsignedp);
	  return gen_lowpart (tmode, target);
	}
      else
	return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
1553\f
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
   it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
   (If OP0 is a register, it may be narrower than a full word,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
			 target, unsignedp)
     enum machine_mode tmode;
     rtx op0, target;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     int unsignedp;
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  tree amount = build_int_2 (bitpos, 0);
	  /* Maybe propagate the target for the shift.  */
	  /* But not if we will return it--could confuse integrate.c.  */
	  rtx subtarget = (target != 0 && GET_CODE (target) == REG
			   && !REG_FUNCTION_VALUE_P (target)
			   ? target : 0);
	  if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
	= build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
		       && ! REG_FUNCTION_VALUE_P (target)
		       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
		       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
		       target, 0);
}
1695\f
1696/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1697 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1698 complement of that if COMPLEMENT. The mask is truncated if
77295dec
DE
1699 necessary to the width of mode MODE. The mask is zero-extended if
1700 BITSIZE+BITPOS is too small for MODE. */
44037a66
TG
1701
1702static rtx
1703mask_rtx (mode, bitpos, bitsize, complement)
1704 enum machine_mode mode;
1705 int bitpos, bitsize, complement;
1706{
b1ec3c92 1707 HOST_WIDE_INT masklow, maskhigh;
44037a66 1708
b1ec3c92
CH
1709 if (bitpos < HOST_BITS_PER_WIDE_INT)
1710 masklow = (HOST_WIDE_INT) -1 << bitpos;
44037a66
TG
1711 else
1712 masklow = 0;
1713
b1ec3c92
CH
1714 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1715 masklow &= ((unsigned HOST_WIDE_INT) -1
1716 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
c410d49e 1717
b1ec3c92 1718 if (bitpos <= HOST_BITS_PER_WIDE_INT)
44037a66
TG
1719 maskhigh = -1;
1720 else
b1ec3c92 1721 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
44037a66 1722
b1ec3c92
CH
1723 if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1724 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1725 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
44037a66
TG
1726 else
1727 maskhigh = 0;
1728
1729 if (complement)
1730 {
1731 maskhigh = ~maskhigh;
1732 masklow = ~masklow;
1733 }
1734
1735 return immed_double_const (masklow, maskhigh, mode);
1736}
1737
1738/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1739 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1740
1741static rtx
1742lshift_value (mode, value, bitpos, bitsize)
1743 enum machine_mode mode;
1744 rtx value;
1745 int bitpos, bitsize;
1746{
b1ec3c92
CH
1747 unsigned HOST_WIDE_INT v = INTVAL (value);
1748 HOST_WIDE_INT low, high;
44037a66 1749
b1ec3c92
CH
1750 if (bitsize < HOST_BITS_PER_WIDE_INT)
1751 v &= ~((HOST_WIDE_INT) -1 << bitsize);
44037a66 1752
b1ec3c92 1753 if (bitpos < HOST_BITS_PER_WIDE_INT)
44037a66
TG
1754 {
1755 low = v << bitpos;
b1ec3c92 1756 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
44037a66
TG
1757 }
1758 else
1759 {
1760 low = 0;
b1ec3c92 1761 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
44037a66
TG
1762 }
1763
1764 return immed_double_const (low, high, mode);
1765}
1766\f
1767/* Extract a bit field that is split across two words
1768 and return an RTX for the result.
1769
1770 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1771 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
04050c69 1772 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
44037a66
TG
1773
1774static rtx
04050c69 1775extract_split_bit_field (op0, bitsize, bitpos, unsignedp)
44037a66 1776 rtx op0;
770ae6cc
RK
1777 unsigned HOST_WIDE_INT bitsize, bitpos;
1778 int unsignedp;
44037a66 1779{
770ae6cc
RK
1780 unsigned int unit;
1781 unsigned int bitsdone = 0;
c16ddde3 1782 rtx result = NULL_RTX;
06c94bce 1783 int first = 1;
44037a66 1784
4ee16841
DE
1785 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1786 much at a time. */
1787 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1788 unit = BITS_PER_WORD;
1789 else
04050c69 1790 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
4ee16841 1791
06c94bce
RS
1792 while (bitsdone < bitsize)
1793 {
770ae6cc 1794 unsigned HOST_WIDE_INT thissize;
06c94bce 1795 rtx part, word;
770ae6cc
RK
1796 unsigned HOST_WIDE_INT thispos;
1797 unsigned HOST_WIDE_INT offset;
06c94bce
RS
1798
1799 offset = (bitpos + bitsdone) / unit;
1800 thispos = (bitpos + bitsdone) % unit;
1801
0eb61c19
DE
1802 /* THISSIZE must not overrun a word boundary. Otherwise,
1803 extract_fixed_bit_field will call us again, and we will mutually
1804 recurse forever. */
1805 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1806 thissize = MIN (thissize, unit - thispos);
06c94bce
RS
1807
1808 /* If OP0 is a register, then handle OFFSET here.
5f57dff0
JW
1809
1810 When handling multiword bitfields, extract_bit_field may pass
1811 down a word_mode SUBREG of a larger REG for a bitfield that actually
1812 crosses a word boundary. Thus, for a SUBREG, we must find
1813 the current word starting from the base register. */
1814 if (GET_CODE (op0) == SUBREG)
1815 {
ddef6bc7
JJ
1816 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1817 word = operand_subword_force (SUBREG_REG (op0), word_offset,
5f57dff0
JW
1818 GET_MODE (SUBREG_REG (op0)));
1819 offset = 0;
1820 }
1821 else if (GET_CODE (op0) == REG)
06c94bce
RS
1822 {
1823 word = operand_subword_force (op0, offset, GET_MODE (op0));
1824 offset = 0;
1825 }
1826 else
1827 word = op0;
1828
06c94bce 1829 /* Extract the parts in bit-counting order,
0eb61c19
DE
1830 whose meaning is determined by BYTES_PER_UNIT.
1831 OFFSET is in UNITs, and UNIT is in bits.
1832 extract_fixed_bit_field wants offset in bytes. */
1833 part = extract_fixed_bit_field (word_mode, word,
1834 offset * unit / BITS_PER_UNIT,
04050c69 1835 thissize, thispos, 0, 1);
06c94bce 1836 bitsdone += thissize;
44037a66 1837
06c94bce 1838 /* Shift this part into place for the result. */
f76b9db2
ILT
1839 if (BYTES_BIG_ENDIAN)
1840 {
1841 if (bitsize != bitsdone)
1842 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1843 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1844 }
1845 else
1846 {
1847 if (bitsdone != thissize)
1848 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1849 build_int_2 (bitsdone - thissize, 0), 0, 1);
1850 }
44037a66 1851
06c94bce
RS
1852 if (first)
1853 result = part;
1854 else
1855 /* Combine the parts with bitwise or. This works
1856 because we extracted each part as an unsigned bit field. */
1857 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1858 OPTAB_LIB_WIDEN);
1859
1860 first = 0;
1861 }
44037a66
TG
1862
1863 /* Unsigned bit field: we are done. */
1864 if (unsignedp)
1865 return result;
1866 /* Signed bit field: sign-extend with two arithmetic shifts. */
1867 result = expand_shift (LSHIFT_EXPR, word_mode, result,
b1ec3c92
CH
1868 build_int_2 (BITS_PER_WORD - bitsize, 0),
1869 NULL_RTX, 0);
44037a66 1870 return expand_shift (RSHIFT_EXPR, word_mode, result,
b1ec3c92 1871 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
44037a66
TG
1872}
1873\f
1874/* Add INC into TARGET. */
1875
1876void
1877expand_inc (target, inc)
1878 rtx target, inc;
1879{
1880 rtx value = expand_binop (GET_MODE (target), add_optab,
1881 target, inc,
1882 target, 0, OPTAB_LIB_WIDEN);
1883 if (value != target)
1884 emit_move_insn (target, value);
1885}
1886
1887/* Subtract DEC from TARGET. */
1888
1889void
1890expand_dec (target, dec)
1891 rtx target, dec;
1892{
1893 rtx value = expand_binop (GET_MODE (target), sub_optab,
1894 target, dec,
1895 target, 0, OPTAB_LIB_WIDEN);
1896 if (value != target)
1897 emit_move_insn (target, value);
1898}
1899\f
1900/* Output a shift instruction for expression code CODE,
1901 with SHIFTED being the rtx for the value to shift,
1902 and AMOUNT the tree for the amount to shift by.
1903 Store the result in the rtx TARGET, if that is convenient.
1904 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1905 Return the rtx for where the value is. */
1906
1907rtx
1908expand_shift (code, mode, shifted, amount, target, unsignedp)
1909 enum tree_code code;
b3694847 1910 enum machine_mode mode;
44037a66
TG
1911 rtx shifted;
1912 tree amount;
b3694847 1913 rtx target;
44037a66
TG
1914 int unsignedp;
1915{
b3694847
SS
1916 rtx op1, temp = 0;
1917 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1918 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
44037a66
TG
1919 int try;
1920
1921 /* Previously detected shift-counts computed by NEGATE_EXPR
1922 and shifted in the other direction; but that does not work
1923 on all machines. */
1924
b1ec3c92 1925 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
44037a66 1926
1433f0f9 1927#ifdef SHIFT_COUNT_TRUNCATED
166cdf4a
RH
1928 if (SHIFT_COUNT_TRUNCATED)
1929 {
1930 if (GET_CODE (op1) == CONST_INT
c84e2712
KG
1931 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1932 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
166cdf4a
RH
1933 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1934 % GET_MODE_BITSIZE (mode));
1935 else if (GET_CODE (op1) == SUBREG
4e07d762 1936 && subreg_lowpart_p (op1))
166cdf4a
RH
1937 op1 = SUBREG_REG (op1);
1938 }
2ab0a5c4
TG
1939#endif
1940
44037a66
TG
1941 if (op1 == const0_rtx)
1942 return shifted;
1943
1944 for (try = 0; temp == 0 && try < 3; try++)
1945 {
1946 enum optab_methods methods;
1947
1948 if (try == 0)
1949 methods = OPTAB_DIRECT;
1950 else if (try == 1)
1951 methods = OPTAB_WIDEN;
1952 else
1953 methods = OPTAB_LIB_WIDEN;
1954
1955 if (rotate)
1956 {
1957 /* Widening does not work for rotation. */
1958 if (methods == OPTAB_WIDEN)
1959 continue;
1960 else if (methods == OPTAB_LIB_WIDEN)
cbec710e 1961 {
39e71615 1962 /* If we have been unable to open-code this by a rotation,
cbec710e
RK
1963 do it as the IOR of two shifts. I.e., to rotate A
1964 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1965 where C is the bitsize of A.
1966
1967 It is theoretically possible that the target machine might
1968 not be able to perform either shift and hence we would
1969 be making two libcalls rather than just the one for the
1970 shift (similarly if IOR could not be done). We will allow
1971 this extremely unlikely lossage to avoid complicating the
1972 code below. */
1973
39e71615
RK
1974 rtx subtarget = target == shifted ? 0 : target;
1975 rtx temp1;
1976 tree type = TREE_TYPE (amount);
1977 tree new_amount = make_tree (type, op1);
1978 tree other_amount
1979 = fold (build (MINUS_EXPR, type,
1980 convert (type,
1981 build_int_2 (GET_MODE_BITSIZE (mode),
1982 0)),
1983 amount));
1984
1985 shifted = force_reg (mode, shifted);
1986
1987 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1988 mode, shifted, new_amount, subtarget, 1);
1989 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1990 mode, shifted, other_amount, 0, 1);
1991 return expand_binop (mode, ior_optab, temp, temp1, target,
1992 unsignedp, methods);
cbec710e 1993 }
44037a66
TG
1994
1995 temp = expand_binop (mode,
1996 left ? rotl_optab : rotr_optab,
1997 shifted, op1, target, unsignedp, methods);
cbec710e
RK
1998
1999 /* If we don't have the rotate, but we are rotating by a constant
2000 that is in range, try a rotate in the opposite direction. */
2001
2002 if (temp == 0 && GET_CODE (op1) == CONST_INT
da920570
ZW
2003 && INTVAL (op1) > 0
2004 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
cbec710e
RK
2005 temp = expand_binop (mode,
2006 left ? rotr_optab : rotl_optab,
c410d49e 2007 shifted,
cbec710e
RK
2008 GEN_INT (GET_MODE_BITSIZE (mode)
2009 - INTVAL (op1)),
2010 target, unsignedp, methods);
44037a66
TG
2011 }
2012 else if (unsignedp)
a34958c9
RK
2013 temp = expand_binop (mode,
2014 left ? ashl_optab : lshr_optab,
2015 shifted, op1, target, unsignedp, methods);
44037a66
TG
2016
2017 /* Do arithmetic shifts.
2018 Also, if we are going to widen the operand, we can just as well
2019 use an arithmetic right-shift instead of a logical one. */
2020 if (temp == 0 && ! rotate
2021 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2022 {
2023 enum optab_methods methods1 = methods;
2024
2025 /* If trying to widen a log shift to an arithmetic shift,
2026 don't accept an arithmetic shift of the same size. */
2027 if (unsignedp)
2028 methods1 = OPTAB_MUST_WIDEN;
2029
2030 /* Arithmetic shift */
2031
2032 temp = expand_binop (mode,
2033 left ? ashl_optab : ashr_optab,
2034 shifted, op1, target, unsignedp, methods1);
2035 }
2036
711a5e64 2037 /* We used to try extzv here for logical right shifts, but that was
c410d49e 2038 only useful for one machine, the VAX, and caused poor code
711a5e64
RK
2039 generation there for lshrdi3, so the code was deleted and a
2040 define_expand for lshrsi3 was added to vax.md. */
44037a66
TG
2041 }
2042
2043 if (temp == 0)
2044 abort ();
2045 return temp;
2046}
2047\f
b385aeda 2048enum alg_code { alg_zero, alg_m, alg_shift,
b2fb324c 2049 alg_add_t_m2, alg_sub_t_m2,
7963ac37
RK
2050 alg_add_factor, alg_sub_factor,
2051 alg_add_t2_m, alg_sub_t2_m,
b385aeda 2052 alg_add, alg_subtract, alg_factor, alg_shiftop };
44037a66
TG
2053
2054/* This structure records a sequence of operations.
2055 `ops' is the number of operations recorded.
2056 `cost' is their total cost.
2057 The operations are stored in `op' and the corresponding
b385aeda
RK
2058 logarithms of the integer coefficients in `log'.
2059
44037a66 2060 These are the operations:
b385aeda
RK
2061 alg_zero total := 0;
2062 alg_m total := multiplicand;
b2fb324c 2063 alg_shift total := total * coeff
7963ac37
RK
2064 alg_add_t_m2 total := total + multiplicand * coeff;
2065 alg_sub_t_m2 total := total - multiplicand * coeff;
2066 alg_add_factor total := total * coeff + total;
2067 alg_sub_factor total := total * coeff - total;
2068 alg_add_t2_m total := total * coeff + multiplicand;
2069 alg_sub_t2_m total := total * coeff - multiplicand;
b385aeda
RK
2070
2071 The first operand must be either alg_zero or alg_m. */
44037a66 2072
44037a66
TG
2073struct algorithm
2074{
7963ac37
RK
2075 short cost;
2076 short ops;
b385aeda
RK
2077 /* The size of the OP and LOG fields are not directly related to the
2078 word size, but the worst-case algorithms will be if we have few
2079 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2080 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2081 in total wordsize operations. */
44037a66 2082 enum alg_code op[MAX_BITS_PER_WORD];
b385aeda 2083 char log[MAX_BITS_PER_WORD];
44037a66
TG
2084};
2085
711d877c
KG
2086static void synth_mult PARAMS ((struct algorithm *,
2087 unsigned HOST_WIDE_INT,
2088 int));
2089static unsigned HOST_WIDE_INT choose_multiplier PARAMS ((unsigned HOST_WIDE_INT,
2090 int, int,
2091 unsigned HOST_WIDE_INT *,
2092 int *, int *));
2093static unsigned HOST_WIDE_INT invert_mod2n PARAMS ((unsigned HOST_WIDE_INT,
2094 int));
44037a66 2095/* Compute and return the best algorithm for multiplying by T.
7963ac37
RK
2096 The algorithm must cost less than cost_limit
2097 If retval.cost >= COST_LIMIT, no algorithm was found and all
2098 other field of the returned struct are undefined. */
44037a66 2099
819126a6
RK
2100static void
2101synth_mult (alg_out, t, cost_limit)
2102 struct algorithm *alg_out;
b1ec3c92 2103 unsigned HOST_WIDE_INT t;
7963ac37 2104 int cost_limit;
44037a66 2105{
b2fb324c 2106 int m;
52786026 2107 struct algorithm *alg_in, *best_alg;
c84e2712 2108 int cost;
b2fb324c 2109 unsigned HOST_WIDE_INT q;
44037a66 2110
7963ac37
RK
2111 /* Indicate that no algorithm is yet found. If no algorithm
2112 is found, this value will be returned and indicate failure. */
819126a6 2113 alg_out->cost = cost_limit;
44037a66 2114
b2fb324c 2115 if (cost_limit <= 0)
819126a6 2116 return;
44037a66 2117
b385aeda
RK
2118 /* t == 1 can be done in zero cost. */
2119 if (t == 1)
b2fb324c 2120 {
819126a6
RK
2121 alg_out->ops = 1;
2122 alg_out->cost = 0;
2123 alg_out->op[0] = alg_m;
2124 return;
b2fb324c
RK
2125 }
2126
b385aeda
RK
2127 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2128 fail now. */
819126a6 2129 if (t == 0)
b385aeda
RK
2130 {
2131 if (zero_cost >= cost_limit)
819126a6 2132 return;
b385aeda
RK
2133 else
2134 {
819126a6
RK
2135 alg_out->ops = 1;
2136 alg_out->cost = zero_cost;
2137 alg_out->op[0] = alg_zero;
2138 return;
b385aeda
RK
2139 }
2140 }
2141
52786026
RK
2142 /* We'll be needing a couple extra algorithm structures now. */
2143
2144 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2145 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2146
b385aeda
RK
2147 /* If we have a group of zero bits at the low-order part of T, try
2148 multiplying by the remaining bits and then doing a shift. */
2149
b2fb324c 2150 if ((t & 1) == 0)
44037a66 2151 {
b2fb324c 2152 m = floor_log2 (t & -t); /* m = number of low zero bits */
02a65aef 2153 if (m < BITS_PER_WORD)
44037a66 2154 {
02a65aef
R
2155 q = t >> m;
2156 cost = shift_cost[m];
2157 synth_mult (alg_in, q, cost_limit - cost);
2158
2159 cost += alg_in->cost;
2160 if (cost < cost_limit)
2161 {
2162 struct algorithm *x;
2163 x = alg_in, alg_in = best_alg, best_alg = x;
2164 best_alg->log[best_alg->ops] = m;
2165 best_alg->op[best_alg->ops] = alg_shift;
2166 cost_limit = cost;
2167 }
819126a6
RK
2168 }
2169 }
2170
2171 /* If we have an odd number, add or subtract one. */
2172 if ((t & 1) != 0)
2173 {
2174 unsigned HOST_WIDE_INT w;
2175
2176 for (w = 1; (w & t) != 0; w <<= 1)
2177 ;
31031edd 2178 /* If T was -1, then W will be zero after the loop. This is another
c410d49e 2179 case where T ends with ...111. Handling this with (T + 1) and
31031edd
JL
2180 subtract 1 produces slightly better code and results in algorithm
2181 selection much faster than treating it like the ...0111 case
2182 below. */
2183 if (w == 0
2184 || (w > 2
2185 /* Reject the case where t is 3.
2186 Thus we prefer addition in that case. */
2187 && t != 3))
819126a6
RK
2188 {
2189 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2190
2191 cost = add_cost;
2192 synth_mult (alg_in, t + 1, cost_limit - cost);
b2fb324c
RK
2193
2194 cost += alg_in->cost;
819126a6 2195 if (cost < cost_limit)
44037a66 2196 {
b2fb324c
RK
2197 struct algorithm *x;
2198 x = alg_in, alg_in = best_alg, best_alg = x;
819126a6
RK
2199 best_alg->log[best_alg->ops] = 0;
2200 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2201 cost_limit = cost;
44037a66 2202 }
44037a66 2203 }
819126a6
RK
2204 else
2205 {
2206 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
44037a66 2207
819126a6
RK
2208 cost = add_cost;
2209 synth_mult (alg_in, t - 1, cost_limit - cost);
2210
2211 cost += alg_in->cost;
2212 if (cost < cost_limit)
2213 {
2214 struct algorithm *x;
2215 x = alg_in, alg_in = best_alg, best_alg = x;
2216 best_alg->log[best_alg->ops] = 0;
2217 best_alg->op[best_alg->ops] = alg_add_t_m2;
2218 cost_limit = cost;
2219 }
2220 }
2221 }
63610db9 2222
44037a66 2223 /* Look for factors of t of the form
7963ac37 2224 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
44037a66 2225 If we find such a factor, we can multiply by t using an algorithm that
7963ac37 2226 multiplies by q, shift the result by m and add/subtract it to itself.
44037a66 2227
7963ac37
RK
2228 We search for large factors first and loop down, even if large factors
2229 are less probable than small; if we find a large factor we will find a
2230 good sequence quickly, and therefore be able to prune (by decreasing
2231 COST_LIMIT) the search. */
2232
2233 for (m = floor_log2 (t - 1); m >= 2; m--)
44037a66 2234 {
7963ac37 2235 unsigned HOST_WIDE_INT d;
44037a66 2236
7963ac37 2237 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
02a65aef 2238 if (t % d == 0 && t > d && m < BITS_PER_WORD)
44037a66 2239 {
b385aeda 2240 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
819126a6 2241 synth_mult (alg_in, t / d, cost_limit - cost);
44037a66 2242
7963ac37 2243 cost += alg_in->cost;
819126a6 2244 if (cost < cost_limit)
44037a66 2245 {
7963ac37
RK
2246 struct algorithm *x;
2247 x = alg_in, alg_in = best_alg, best_alg = x;
b385aeda 2248 best_alg->log[best_alg->ops] = m;
819126a6
RK
2249 best_alg->op[best_alg->ops] = alg_add_factor;
2250 cost_limit = cost;
44037a66 2251 }
c0b262c1
TG
2252 /* Other factors will have been taken care of in the recursion. */
2253 break;
44037a66
TG
2254 }
2255
7963ac37 2256 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
02a65aef 2257 if (t % d == 0 && t > d && m < BITS_PER_WORD)
44037a66 2258 {
b385aeda 2259 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
819126a6 2260 synth_mult (alg_in, t / d, cost_limit - cost);
44037a66 2261
7963ac37 2262 cost += alg_in->cost;
819126a6 2263 if (cost < cost_limit)
44037a66 2264 {
7963ac37
RK
2265 struct algorithm *x;
2266 x = alg_in, alg_in = best_alg, best_alg = x;
b385aeda 2267 best_alg->log[best_alg->ops] = m;
819126a6
RK
2268 best_alg->op[best_alg->ops] = alg_sub_factor;
2269 cost_limit = cost;
44037a66 2270 }
c0b262c1 2271 break;
44037a66
TG
2272 }
2273 }
2274
7963ac37
RK
2275 /* Try shift-and-add (load effective address) instructions,
2276 i.e. do a*3, a*5, a*9. */
2277 if ((t & 1) != 0)
2278 {
7963ac37
RK
2279 q = t - 1;
2280 q = q & -q;
2281 m = exact_log2 (q);
02a65aef 2282 if (m >= 0 && m < BITS_PER_WORD)
b385aeda 2283 {
5eebe2eb 2284 cost = shiftadd_cost[m];
819126a6 2285 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
5eebe2eb
RK
2286
2287 cost += alg_in->cost;
819126a6 2288 if (cost < cost_limit)
5eebe2eb
RK
2289 {
2290 struct algorithm *x;
2291 x = alg_in, alg_in = best_alg, best_alg = x;
2292 best_alg->log[best_alg->ops] = m;
819126a6
RK
2293 best_alg->op[best_alg->ops] = alg_add_t2_m;
2294 cost_limit = cost;
5eebe2eb 2295 }
7963ac37 2296 }
44037a66 2297
7963ac37
RK
2298 q = t + 1;
2299 q = q & -q;
2300 m = exact_log2 (q);
02a65aef 2301 if (m >= 0 && m < BITS_PER_WORD)
b385aeda 2302 {
5eebe2eb 2303 cost = shiftsub_cost[m];
819126a6 2304 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
5eebe2eb
RK
2305
2306 cost += alg_in->cost;
819126a6 2307 if (cost < cost_limit)
5eebe2eb
RK
2308 {
2309 struct algorithm *x;
2310 x = alg_in, alg_in = best_alg, best_alg = x;
2311 best_alg->log[best_alg->ops] = m;
819126a6
RK
2312 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2313 cost_limit = cost;
5eebe2eb 2314 }
7963ac37
RK
2315 }
2316 }
44037a66 2317
819126a6
RK
2318 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2319 we have not found any algorithm. */
2320 if (cost_limit == alg_out->cost)
2321 return;
2322
52786026
RK
2323 /* If we are getting a too long sequence for `struct algorithm'
2324 to record, make this search fail. */
2325 if (best_alg->ops == MAX_BITS_PER_WORD)
2326 return;
2327
819126a6
RK
2328 /* Copy the algorithm from temporary space to the space at alg_out.
2329 We avoid using structure assignment because the majority of
2330 best_alg is normally undefined, and this is a critical function. */
2331 alg_out->ops = best_alg->ops + 1;
2332 alg_out->cost = cost_limit;
4e135bdd
KG
2333 memcpy (alg_out->op, best_alg->op,
2334 alg_out->ops * sizeof *alg_out->op);
2335 memcpy (alg_out->log, best_alg->log,
2336 alg_out->ops * sizeof *alg_out->log);
44037a66
TG
2337}
2338\f
2339/* Perform a multiplication and return an rtx for the result.
2340 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2341 TARGET is a suggestion for where to store the result (an rtx).
2342
2343 We check specially for a constant integer as OP1.
2344 If you want this check for OP0 as well, then before calling
2345 you should swap the two operands if OP0 would be constant. */
2346
2347rtx
2348expand_mult (mode, op0, op1, target, unsignedp)
2349 enum machine_mode mode;
b3694847 2350 rtx op0, op1, target;
44037a66
TG
2351 int unsignedp;
2352{
2353 rtx const_op1 = op1;
2354
ceb1d268
JW
2355 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2356 less than or equal in size to `unsigned int' this doesn't matter.
2357 If the mode is larger than `unsigned int', then synth_mult works only
2358 if the constant value exactly fits in an `unsigned int' without any
2359 truncation. This means that multiplying by negative values does
2360 not work; results are off by 2^32 on a 32 bit machine. */
2361
44037a66
TG
2362 /* If we are multiplying in DImode, it may still be a win
2363 to try to work with shifts and adds. */
2364 if (GET_CODE (op1) == CONST_DOUBLE
2365 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
ceb1d268
JW
2366 && HOST_BITS_PER_INT >= BITS_PER_WORD
2367 && CONST_DOUBLE_HIGH (op1) == 0)
2368 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2369 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2370 && GET_CODE (op1) == CONST_INT
2371 && INTVAL (op1) < 0)
2372 const_op1 = 0;
44037a66 2373
66c1f88e
RS
2374 /* We used to test optimize here, on the grounds that it's better to
2375 produce a smaller program when -O is not used.
2376 But this causes such a terrible slowdown sometimes
2377 that it seems better to use synth_mult always. */
b385aeda 2378
91ce572a
CC
2379 if (const_op1 && GET_CODE (const_op1) == CONST_INT
2380 && (unsignedp || ! flag_trapv))
44037a66
TG
2381 {
2382 struct algorithm alg;
55c2d311 2383 struct algorithm alg2;
7963ac37 2384 HOST_WIDE_INT val = INTVAL (op1);
b385aeda
RK
2385 HOST_WIDE_INT val_so_far;
2386 rtx insn;
819126a6 2387 int mult_cost;
55c2d311 2388 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
44037a66 2389
b972dd02
JH
2390 /* op0 must be register to make mult_cost match the precomputed
2391 shiftadd_cost array. */
2392 op0 = force_reg (mode, op0);
2393
55c2d311
TG
2394 /* Try to do the computation three ways: multiply by the negative of OP1
2395 and then negate, do the multiplication directly, or do multiplication
2396 by OP1 - 1. */
44037a66 2397
38a448ca 2398 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
c0b262c1 2399 mult_cost = MIN (12 * add_cost, mult_cost);
819126a6
RK
2400
2401 synth_mult (&alg, val, mult_cost);
ceb1d268
JW
2402
2403 /* This works only if the inverted value actually fits in an
2404 `unsigned int' */
2405 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2406 {
2407 synth_mult (&alg2, - val,
2408 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2409 if (alg2.cost + negate_cost < alg.cost)
2410 alg = alg2, variant = negate_variant;
2411 }
44037a66 2412
55c2d311 2413 /* This proves very useful for division-by-constant. */
98310eaa
RK
2414 synth_mult (&alg2, val - 1,
2415 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
55c2d311
TG
2416 if (alg2.cost + add_cost < alg.cost)
2417 alg = alg2, variant = add_variant;
44037a66 2418
7963ac37 2419 if (alg.cost < mult_cost)
44037a66 2420 {
b2fb324c 2421 /* We found something cheaper than a multiply insn. */
7963ac37 2422 int opno;
44037a66 2423 rtx accum, tem;
23cb454a 2424 enum machine_mode nmode;
44037a66
TG
2425
2426 op0 = protect_from_queue (op0, 0);
2427
2428 /* Avoid referencing memory over and over.
2429 For speed, but also for correctness when mem is volatile. */
2430 if (GET_CODE (op0) == MEM)
2431 op0 = force_reg (mode, op0);
2432
b385aeda
RK
2433 /* ACCUM starts out either as OP0 or as a zero, depending on
2434 the first operation. */
2435
2436 if (alg.op[0] == alg_zero)
44037a66 2437 {
b385aeda
RK
2438 accum = copy_to_mode_reg (mode, const0_rtx);
2439 val_so_far = 0;
2440 }
2441 else if (alg.op[0] == alg_m)
2442 {
819126a6 2443 accum = copy_to_mode_reg (mode, op0);
b385aeda 2444 val_so_far = 1;
44037a66 2445 }
b385aeda
RK
2446 else
2447 abort ();
7963ac37
RK
2448
2449 for (opno = 1; opno < alg.ops; opno++)
44037a66 2450 {
b385aeda 2451 int log = alg.log[opno];
c0a08574
RK
2452 int preserve = preserve_subexpressions_p ();
2453 rtx shift_subtarget = preserve ? 0 : accum;
98310eaa
RK
2454 rtx add_target
2455 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
9ec36da5
JL
2456 && ! preserve)
2457 ? target : 0;
c0a08574 2458 rtx accum_target = preserve ? 0 : accum;
c410d49e 2459
44037a66
TG
2460 switch (alg.op[opno])
2461 {
b2fb324c
RK
2462 case alg_shift:
2463 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2464 build_int_2 (log, 0), NULL_RTX, 0);
b385aeda 2465 val_so_far <<= log;
b2fb324c
RK
2466 break;
2467
7963ac37 2468 case alg_add_t_m2:
b385aeda
RK
2469 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2470 build_int_2 (log, 0), NULL_RTX, 0);
38a448ca 2471 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
c5c76735
JL
2472 add_target
2473 ? add_target : accum_target);
b385aeda 2474 val_so_far += (HOST_WIDE_INT) 1 << log;
44037a66
TG
2475 break;
2476
7963ac37 2477 case alg_sub_t_m2:
b385aeda
RK
2478 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2479 build_int_2 (log, 0), NULL_RTX, 0);
38a448ca 2480 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
c5c76735
JL
2481 add_target
2482 ? add_target : accum_target);
b385aeda 2483 val_so_far -= (HOST_WIDE_INT) 1 << log;
7963ac37 2484 break;
44037a66 2485
7963ac37
RK
2486 case alg_add_t2_m:
2487 accum = expand_shift (LSHIFT_EXPR, mode, accum,
c0a08574
RK
2488 build_int_2 (log, 0), shift_subtarget,
2489 0);
38a448ca 2490 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
c5c76735
JL
2491 add_target
2492 ? add_target : accum_target);
b385aeda 2493 val_so_far = (val_so_far << log) + 1;
44037a66
TG
2494 break;
2495
7963ac37
RK
2496 case alg_sub_t2_m:
2497 accum = expand_shift (LSHIFT_EXPR, mode, accum,
c0a08574
RK
2498 build_int_2 (log, 0), shift_subtarget,
2499 0);
38a448ca 2500 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
c5c76735
JL
2501 add_target
2502 ? add_target : accum_target);
b385aeda 2503 val_so_far = (val_so_far << log) - 1;
7963ac37
RK
2504 break;
2505
2506 case alg_add_factor:
44037a66 2507 tem = expand_shift (LSHIFT_EXPR, mode, accum,
b1ec3c92 2508 build_int_2 (log, 0), NULL_RTX, 0);
38a448ca 2509 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
c5c76735
JL
2510 add_target
2511 ? add_target : accum_target);
b385aeda 2512 val_so_far += val_so_far << log;
7963ac37 2513 break;
44037a66 2514
7963ac37
RK
2515 case alg_sub_factor:
2516 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2517 build_int_2 (log, 0), NULL_RTX, 0);
38a448ca 2518 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
c0a08574
RK
2519 (add_target ? add_target
2520 : preserve ? 0 : tem));
b385aeda 2521 val_so_far = (val_so_far << log) - val_so_far;
7963ac37 2522 break;
44037a66 2523
b385aeda 2524 default:
6d649d26 2525 abort ();
b385aeda 2526 }
44037a66 2527
b385aeda 2528 /* Write a REG_EQUAL note on the last insn so that we can cse
23cb454a
RK
2529 multiplication sequences. Note that if ACCUM is a SUBREG,
2530 we've set the inner register and must properly indicate
2531 that. */
2532
2533 tem = op0, nmode = mode;
2534 if (GET_CODE (accum) == SUBREG)
2535 {
2536 nmode = GET_MODE (SUBREG_REG (accum));
2537 tem = gen_lowpart (nmode, op0);
2538 }
44037a66 2539
b385aeda 2540 insn = get_last_insn ();
c410d49e 2541 set_unique_reg_note (insn,
7e5bda2c 2542 REG_EQUAL,
23cb454a 2543 gen_rtx_MULT (nmode, tem,
7e5bda2c 2544 GEN_INT (val_so_far)));
b385aeda 2545 }
44037a66 2546
55c2d311 2547 if (variant == negate_variant)
44037a66 2548 {
b385aeda
RK
2549 val_so_far = - val_so_far;
2550 accum = expand_unop (mode, neg_optab, accum, target, 0);
44037a66 2551 }
55c2d311
TG
2552 else if (variant == add_variant)
2553 {
2554 val_so_far = val_so_far + 1;
38a448ca 2555 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
55c2d311 2556 }
44037a66 2557
b385aeda
RK
2558 if (val != val_so_far)
2559 abort ();
2560
2561 return accum;
44037a66
TG
2562 }
2563 }
2564
819126a6
RK
2565 /* This used to use umul_optab if unsigned, but for non-widening multiply
2566 there is no difference between signed and unsigned. */
c410d49e 2567 op0 = expand_binop (mode,
91ce572a
CC
2568 ! unsignedp
2569 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
2570 ? smulv_optab : smul_optab,
44037a66
TG
2571 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2572 if (op0 == 0)
2573 abort ();
2574 return op0;
2575}
2576\f
55c2d311
TG
2577/* Return the smallest n such that 2**n >= X. */
2578
2579int
2580ceil_log2 (x)
2581 unsigned HOST_WIDE_INT x;
2582{
2583 return floor_log2 (x - 1) + 1;
2584}
2585
/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.

   The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
   needed precision is in PRECISION (should be <= N).

   PRECISION should be as small as possible so this function can choose
   multiplier more freely.

   The rounded-up logarithm of D is placed in *LGUP_PTR.  A shift count that
   is to be used for a final right shift is placed in *POST_SHIFT_PTR.

   Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
   where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */

static
unsigned HOST_WIDE_INT
choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
     unsigned HOST_WIDE_INT d;
     int n;
     int precision;
     unsigned HOST_WIDE_INT *multiplier_ptr;
     int *post_shift_ptr;
     int *lgup_ptr;
{
  /* The two candidate multipliers mlow and mhigh are kept as double-word
     values in (hi, lo) pairs, since 2^(N + lgup) can need up to
     2 * HOST_BITS_PER_WIDE_INT bits.  */
  HOST_WIDE_INT mhigh_hi, mlow_hi;
  unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
  int lgup, post_shift;
  int pow, pow2;
  unsigned HOST_WIDE_INT nl, dummy1;
  HOST_WIDE_INT nh, dummy2;

  /* lgup = ceil(log2(divisor)); */
  lgup = ceil_log2 (d);

  /* A divisor needing more than N bits cannot be handled at this width.  */
  if (lgup > n)
    abort ();

  pow = n + lgup;
  pow2 = n + lgup - precision;

  if (pow == 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* We could handle this with some effort, but this case is much better
	 handled directly with a scc insn, so rely on caller using that.  */
      abort ();
    }

  /* mlow = 2^(N + lgup)/d */
  if (pow >= HOST_BITS_PER_WIDE_INT)
    {
      nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
      nl = 0;
    }
  else
    {
      nh = 0;
      nl = (unsigned HOST_WIDE_INT) 1 << pow;
    }
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
			&mlow_lo, &mlow_hi, &dummy1, &dummy2);

  /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d
     i.e. the dividend used for mlow with one extra bit set at
     position pow2.  */
  if (pow2 >= HOST_BITS_PER_WIDE_INT)
    nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
  else
    nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
			&mhigh_lo, &mhigh_hi, &dummy1, &dummy2);

  /* Consistency checks: the multipliers must fit in N + 1 bits, and the
     open interval (mlow, mhigh] must be non-empty.  */
  if (mhigh_hi && nh - d >= d)
    abort ();
  if (mhigh_hi > 1 || mlow_hi > 1)
    abort ();
  /* assert that mlow < mhigh.  */
  if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
    abort ();

  /* If precision == N, then mlow, mhigh exceed 2^N
     (but they do not exceed 2^(N+1)).  */

  /* Reduce to lowest terms: halve both candidates (and the post shift)
     while the halved values still differ, i.e. while some multiplier in
     the shrunken interval still exists.  */
  for (post_shift = lgup; post_shift > 0; post_shift--)
    {
      unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
      unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
      if (ml_lo >= mh_lo)
	break;

      mlow_hi = 0;
      mlow_lo = ml_lo;
      mhigh_hi = 0;
      mhigh_lo = mh_lo;
    }

  *post_shift_ptr = post_shift;
  *lgup_ptr = lgup;
  if (n < HOST_BITS_PER_WIDE_INT)
    {
      /* Return the low N bits of mhigh; the return value is its N+1st
	 (most significant) bit.  */
      unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
      *multiplier_ptr = mhigh_lo & mask;
      return mhigh_lo >= mask;
    }
  else
    {
      *multiplier_ptr = mhigh_lo;
      return mhigh_hi;
    }
}
2696
2697/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2698 congruent to 1 (mod 2**N). */
2699
2700static unsigned HOST_WIDE_INT
2701invert_mod2n (x, n)
2702 unsigned HOST_WIDE_INT x;
2703 int n;
2704{
0f41302f 2705 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
55c2d311
TG
2706
2707 /* The algorithm notes that the choice y = x satisfies
2708 x*y == 1 mod 2^3, since x is assumed odd.
2709 Each iteration doubles the number of bits of significance in y. */
2710
2711 unsigned HOST_WIDE_INT mask;
2712 unsigned HOST_WIDE_INT y = x;
2713 int nbit = 3;
2714
2715 mask = (n == HOST_BITS_PER_WIDE_INT
2716 ? ~(unsigned HOST_WIDE_INT) 0
2717 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2718
2719 while (nbit < n)
2720 {
2721 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2722 nbit *= 2;
2723 }
2724 return y;
2725}
2726
2727/* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
2728 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
2729 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2730 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
2731 become signed.
2732
2733 The result is put in TARGET if that is convenient.
2734
2735 MODE is the mode of operation. */
2736
2737rtx
2738expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2739 enum machine_mode mode;
b3694847 2740 rtx adj_operand, op0, op1, target;
55c2d311
TG
2741 int unsignedp;
2742{
2743 rtx tem;
2744 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2745
2746 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2747 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2748 NULL_RTX, 0);
22273300 2749 tem = expand_and (mode, tem, op1, NULL_RTX);
38a448ca
RH
2750 adj_operand
2751 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2752 adj_operand);
55c2d311
TG
2753
2754 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2755 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2756 NULL_RTX, 0);
22273300 2757 tem = expand_and (mode, tem, op0, NULL_RTX);
38a448ca
RH
2758 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2759 target);
55c2d311
TG
2760
2761 return target;
2762}
2763
/* Emit code to multiply OP0 and CNST1, putting the high half of the result
   in TARGET if that is convenient, and return where the result is.  If the
   operation can not be performed, 0 is returned.

   MODE is the mode of operation and result.

   UNSIGNEDP nonzero means unsigned multiply.

   MAX_COST is the total allowed cost for the expanded RTL.

   Strategies are tried in increasing order of expected cost, each gated
   by the cost tables computed in init_expmed; the first one whose
   estimated cost fits under MAX_COST and whose expansion succeeds wins.  */

rtx
expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
     enum machine_mode mode;
     rtx op0, target;
     unsigned HOST_WIDE_INT cnst1;
     int unsignedp;
     int max_cost;
{
  enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
  optab mul_highpart_optab;
  optab moptab;
  rtx tem;
  int size = GET_MODE_BITSIZE (mode);
  rtx op1, wide_op1;

  /* We can't support modes wider than HOST_BITS_PER_WIDE_INT.  */
  if (size > HOST_BITS_PER_WIDE_INT)
    abort ();

  op1 = gen_int_mode (cnst1, mode);

  /* CNST1 as a WIDER_MODE constant: zero-extended if unsigned,
     sign-extended otherwise.  */
  wide_op1
    = immed_double_const (cnst1,
			  (unsignedp
			   ? (HOST_WIDE_INT) 0
			   : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
			  wider_mode);

  /* expand_mult handles constant multiplication of word_mode
     or narrower.  It does a poor job for large modes.  */
  if (size < BITS_PER_WORD
      && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
    {
      /* We have to do this, since expand_binop doesn't do conversion for
	 multiply.  Maybe change expand_binop to handle widening multiply?  */
      op0 = convert_to_mode (wider_mode, op0, unsignedp);

      /* We know that this can't have signed overflow, so pretend this is
         an unsigned multiply.  */
      tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, 0);
      /* The high half of the wide product is the desired result.  */
      tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
			  build_int_2 (size, 0), NULL_RTX, 1);
      return convert_modes (mode, wider_mode, tem, unsignedp);
    }

  if (target == 0)
    target = gen_reg_rtx (mode);

  /* Firstly, try using a multiplication insn that only generates the needed
     high part of the product, and in the sign flavor of unsignedp.  */
  if (mul_highpart_cost[(int) mode] < max_cost)
    {
      mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
      target = expand_binop (mode, mul_highpart_optab,
			     op0, op1, target, unsignedp, OPTAB_DIRECT);
      if (target)
	return target;
    }

  /* Secondly, same as above, but use sign flavor opposite of unsignedp.
     Need to adjust the result after the multiplication.  */
  if (size - 1 < BITS_PER_WORD
      && (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost
	  < max_cost))
    {
      mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
      target = expand_binop (mode, mul_highpart_optab,
			     op0, op1, target, unsignedp, OPTAB_DIRECT);
      if (target)
	/* We used the wrong signedness.  Adjust the result.  */
	return expand_mult_highpart_adjust (mode, target, op0,
					    op1, target, unsignedp);
    }

  /* Try widening multiplication.  */
  moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
  if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
      && mul_widen_cost[(int) wider_mode] < max_cost)
    {
      op1 = force_reg (mode, op1);
      goto try;
    }

  /* Try widening the mode and perform a non-widening multiplication.  */
  moptab = smul_optab;
  if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
    {
      /* Use the wide-mode constant directly as the second operand.  */
      op1 = wide_op1;
      goto try;
    }

  /* Try widening multiplication of opposite signedness, and adjust.  */
  moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
  if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_widen_cost[(int) wider_mode]
	  + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
    {
      rtx regop1 = force_reg (mode, op1);
      tem = expand_binop (wider_mode, moptab, op0, regop1,
			  NULL_RTX, ! unsignedp, OPTAB_WIDEN);
      if (tem != 0)
	{
	  /* Extract the high half of the just generated product.  */
	  tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
			      build_int_2 (size, 0), NULL_RTX, 1);
	  tem = convert_modes (mode, wider_mode, tem, unsignedp);
	  /* We used the wrong signedness.  Adjust the result.  */
	  return expand_mult_highpart_adjust (mode, tem, op0, op1,
					      target, unsignedp);
	}
    }

  /* Every strategy was too costly or failed to expand.  */
  return 0;

 try:
  /* Shared tail for the two "goto try" strategies above: do the wide
     multiply, then pull out the high half.
     Pass NULL_RTX as target since TARGET has wrong mode.  */
  tem = expand_binop (wider_mode, moptab, op0, op1,
		      NULL_RTX, unsignedp, OPTAB_WIDEN);
  if (tem == 0)
    return 0;

  /* Extract the high half of the just generated product.  */
  if (mode == word_mode)
    {
      return gen_highpart (mode, tem);
    }
  else
    {
      tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
			  build_int_2 (size, 0), NULL_RTX, 1);
      return convert_modes (mode, wider_mode, tem, unsignedp);
    }
}
2910\f
44037a66
TG
2911/* Emit the code to divide OP0 by OP1, putting the result in TARGET
2912 if that is convenient, and returning where the result is.
2913 You may request either the quotient or the remainder as the result;
2914 specify REM_FLAG nonzero to get the remainder.
2915
2916 CODE is the expression code for which kind of division this is;
2917 it controls how rounding is done. MODE is the machine mode to use.
2918 UNSIGNEDP nonzero means do unsigned division. */
2919
2920/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2921 and then correct it by or'ing in missing high bits
2922 if result of ANDI is nonzero.
2923 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2924 This could optimize to a bfexts instruction.
2925 But C doesn't use these operations, so their optimizations are
2926 left for later. */
5353610b
R
2927/* ??? For modulo, we don't actually need the highpart of the first product,
2928 the low part will do nicely. And for small divisors, the second multiply
2929 can also be a low-part only multiply or even be completely left out.
2930 E.g. to calculate the remainder of a division by 3 with a 32 bit
2931 multiply, multiply with 0x55555556 and extract the upper two bits;
2932 the result is exact for inputs up to 0x1fffffff.
2933 The input range can be reduced by using cross-sum rules.
2934 For odd divisors >= 3, the following table gives right shift counts
 2935 so that if a number is shifted by an integer multiple of the given
2936 amount, the remainder stays the same:
2937 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2938 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2939 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2940 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2941 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2942
2943 Cross-sum rules for even numbers can be derived by leaving as many bits
2944 to the right alone as the divisor has zeros to the right.
2945 E.g. if x is an unsigned 32 bit number:
2946 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
2947 */
44037a66 2948
55c2d311
TG
2949#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
2950
44037a66
TG
2951rtx
2952expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2953 int rem_flag;
2954 enum tree_code code;
2955 enum machine_mode mode;
b3694847 2956 rtx op0, op1, target;
44037a66
TG
2957 int unsignedp;
2958{
44037a66 2959 enum machine_mode compute_mode;
b3694847 2960 rtx tquotient;
55c2d311
TG
2961 rtx quotient = 0, remainder = 0;
2962 rtx last;
2c414fba 2963 int size;
4e430df8 2964 rtx insn, set;
44037a66 2965 optab optab1, optab2;
55c2d311 2966 int op1_is_constant, op1_is_pow2;
71af73bb 2967 int max_cost, extra_cost;
9ec36da5 2968 static HOST_WIDE_INT last_div_const = 0;
55c2d311
TG
2969
2970 op1_is_constant = GET_CODE (op1) == CONST_INT;
9176af2f
TG
2971 op1_is_pow2 = (op1_is_constant
2972 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
db7cafb0 2973 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
55c2d311
TG
2974
2975 /*
2976 This is the structure of expand_divmod:
2977
2978 First comes code to fix up the operands so we can perform the operations
2979 correctly and efficiently.
2980
2981 Second comes a switch statement with code specific for each rounding mode.
2982 For some special operands this code emits all RTL for the desired
69f61901 2983 operation, for other cases, it generates only a quotient and stores it in
55c2d311
TG
2984 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2985 to indicate that it has not done anything.
2986
69f61901
RK
2987 Last comes code that finishes the operation. If QUOTIENT is set and
2988 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2989 QUOTIENT is not set, it is computed using trunc rounding.
44037a66 2990
55c2d311
TG
2991 We try to generate special code for division and remainder when OP1 is a
2992 constant. If |OP1| = 2**n we can use shifts and some other fast
2993 operations. For other values of OP1, we compute a carefully selected
2994 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2995 by m.
2996
2997 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2998 half of the product. Different strategies for generating the product are
2999 implemented in expand_mult_highpart.
3000
3001 If what we actually want is the remainder, we generate that by another
3002 by-constant multiplication and a subtraction. */
3003
3004 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3d32ffd1
TW
3005 code below will malfunction if we are, so check here and handle
3006 the special case if so. */
3007 if (op1 == const1_rtx)
3008 return rem_flag ? const0_rtx : op0;
3009
91ce572a
CC
3010 /* When dividing by -1, we could get an overflow.
3011 negv_optab can handle overflows. */
3012 if (! unsignedp && op1 == constm1_rtx)
3013 {
3014 if (rem_flag)
3015 return const0_rtx;
3016 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3017 ? negv_optab : neg_optab, op0, target, 0);
3018 }
3019
bc1c7e93
RK
3020 if (target
3021 /* Don't use the function value register as a target
3022 since we have to read it as well as write it,
3023 and function-inlining gets confused by this. */
3024 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3025 /* Don't clobber an operand while doing a multi-step calculation. */
515dfc7a 3026 || ((rem_flag || op1_is_constant)
bc1c7e93
RK
3027 && (reg_mentioned_p (target, op0)
3028 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
3029 || reg_mentioned_p (target, op1)
3030 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
44037a66
TG
3031 target = 0;
3032
44037a66
TG
3033 /* Get the mode in which to perform this computation. Normally it will
3034 be MODE, but sometimes we can't do the desired operation in MODE.
3035 If so, pick a wider mode in which we can do the operation. Convert
3036 to that mode at the start to avoid repeated conversions.
3037
3038 First see what operations we need. These depend on the expression
3039 we are evaluating. (We assume that divxx3 insns exist under the
3040 same conditions that modxx3 insns and that these insns don't normally
3041 fail. If these assumptions are not correct, we may generate less
3042 efficient code in some cases.)
3043
3044 Then see if we find a mode in which we can open-code that operation
3045 (either a division, modulus, or shift). Finally, check for the smallest
3046 mode for which we can do the operation with a library call. */
3047
55c2d311
TG
3048 /* We might want to refine this now that we have division-by-constant
3049 optimization. Since expand_mult_highpart tries so many variants, it is
3050 not straightforward to generalize this. Maybe we should make an array
3051 of possible modes in init_expmed? Save this for GCC 2.7. */
3052
3053 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
44037a66 3054 : (unsignedp ? udiv_optab : sdiv_optab));
55c2d311 3055 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
44037a66
TG
3056
3057 for (compute_mode = mode; compute_mode != VOIDmode;
3058 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3059 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
3060 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3061 break;
3062
3063 if (compute_mode == VOIDmode)
3064 for (compute_mode = mode; compute_mode != VOIDmode;
3065 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3066 if (optab1->handlers[(int) compute_mode].libfunc
3067 || optab2->handlers[(int) compute_mode].libfunc)
3068 break;
3069
bc1c7e93
RK
3070 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3071 in expand_binop. */
44037a66
TG
3072 if (compute_mode == VOIDmode)
3073 compute_mode = mode;
3074
55c2d311
TG
3075 if (target && GET_MODE (target) == compute_mode)
3076 tquotient = target;
3077 else
3078 tquotient = gen_reg_rtx (compute_mode);
2c414fba 3079
55c2d311
TG
3080 size = GET_MODE_BITSIZE (compute_mode);
3081#if 0
3082 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
71af73bb
TG
3083 (mode), and thereby get better code when OP1 is a constant. Do that
3084 later. It will require going over all usages of SIZE below. */
55c2d311
TG
3085 size = GET_MODE_BITSIZE (mode);
3086#endif
bc1c7e93 3087
9ec36da5
JL
3088 /* Only deduct something for a REM if the last divide done was
3089 for a different constant. Then set the constant of the last
3090 divide. */
71af73bb 3091 max_cost = div_cost[(int) compute_mode]
9ec36da5
JL
3092 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3093 && INTVAL (op1) == last_div_const)
3094 ? mul_cost[(int) compute_mode] + add_cost : 0);
3095
3096 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
71af73bb 3097
55c2d311 3098 /* Now convert to the best mode to use. */
44037a66
TG
3099 if (compute_mode != mode)
3100 {
55c2d311 3101 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
81722fa9 3102 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
e13a25d5 3103
e9a25f70
JL
3104 /* convert_modes may have placed op1 into a register, so we
3105 must recompute the following. */
e13a25d5
DM
3106 op1_is_constant = GET_CODE (op1) == CONST_INT;
3107 op1_is_pow2 = (op1_is_constant
3108 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3109 || (! unsignedp
e9a25f70 3110 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
44037a66
TG
3111 }
3112
55c2d311 3113 /* If one of the operands is a volatile MEM, copy it into a register. */
c2a47e48 3114
55c2d311
TG
3115 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3116 op0 = force_reg (compute_mode, op0);
3117 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
c2a47e48
RK
3118 op1 = force_reg (compute_mode, op1);
3119
ab0b6581
TG
3120 /* If we need the remainder or if OP1 is constant, we need to
3121 put OP0 in a register in case it has any queued subexpressions. */
3122 if (rem_flag || op1_is_constant)
3123 op0 = force_reg (compute_mode, op0);
bc1c7e93 3124
55c2d311 3125 last = get_last_insn ();
44037a66 3126
9faa82d8 3127 /* Promote floor rounding to trunc rounding for unsigned operations. */
55c2d311 3128 if (unsignedp)
44037a66 3129 {
55c2d311
TG
3130 if (code == FLOOR_DIV_EXPR)
3131 code = TRUNC_DIV_EXPR;
3132 if (code == FLOOR_MOD_EXPR)
3133 code = TRUNC_MOD_EXPR;
db7cafb0
JL
3134 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3135 code = TRUNC_DIV_EXPR;
55c2d311 3136 }
bc1c7e93 3137
55c2d311
TG
3138 if (op1 != const0_rtx)
3139 switch (code)
3140 {
3141 case TRUNC_MOD_EXPR:
3142 case TRUNC_DIV_EXPR:
34f016ed 3143 if (op1_is_constant)
55c2d311 3144 {
d8f1376c 3145 if (unsignedp)
55c2d311
TG
3146 {
3147 unsigned HOST_WIDE_INT mh, ml;
3148 int pre_shift, post_shift;
3149 int dummy;
3150 unsigned HOST_WIDE_INT d = INTVAL (op1);
3151
3152 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3153 {
3154 pre_shift = floor_log2 (d);
3155 if (rem_flag)
3156 {
db3cf6fb
MS
3157 remainder
3158 = expand_binop (compute_mode, and_optab, op0,
3159 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3160 remainder, 1,
3161 OPTAB_LIB_WIDEN);
55c2d311 3162 if (remainder)
c8dbc8ca 3163 return gen_lowpart (mode, remainder);
55c2d311
TG
3164 }
3165 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3166 build_int_2 (pre_shift, 0),
3167 tquotient, 1);
3168 }
34f016ed 3169 else if (size <= HOST_BITS_PER_WIDE_INT)
55c2d311 3170 {
dc1d6150 3171 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
55c2d311 3172 {
dc1d6150
TG
3173 /* Most significant bit of divisor is set; emit an scc
3174 insn. */
3175 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3176 compute_mode, 1, 1);
3177 if (quotient == 0)
55c2d311 3178 goto fail1;
55c2d311
TG
3179 }
3180 else
3181 {
dc1d6150
TG
3182 /* Find a suitable multiplier and right shift count
3183 instead of multiplying with D. */
3184
3185 mh = choose_multiplier (d, size, size,
3186 &ml, &post_shift, &dummy);
3187
3188 /* If the suggested multiplier is more than SIZE bits,
3189 we can do better for even divisors, using an
3190 initial right shift. */
3191 if (mh != 0 && (d & 1) == 0)
3192 {
3193 pre_shift = floor_log2 (d & -d);
3194 mh = choose_multiplier (d >> pre_shift, size,
3195 size - pre_shift,
3196 &ml, &post_shift, &dummy);
3197 if (mh)
3198 abort ();
3199 }
3200 else
3201 pre_shift = 0;
3202
3203 if (mh != 0)
3204 {
3205 rtx t1, t2, t3, t4;
3206
02a65aef
R
3207 if (post_shift - 1 >= BITS_PER_WORD)
3208 goto fail1;
3209
dc1d6150
TG
3210 extra_cost = (shift_cost[post_shift - 1]
3211 + shift_cost[1] + 2 * add_cost);
3212 t1 = expand_mult_highpart (compute_mode, op0, ml,
3213 NULL_RTX, 1,
3214 max_cost - extra_cost);
3215 if (t1 == 0)
3216 goto fail1;
38a448ca
RH
3217 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3218 op0, t1),
dc1d6150
TG
3219 NULL_RTX);
3220 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3221 build_int_2 (1, 0), NULL_RTX,1);
38a448ca
RH
3222 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3223 t1, t3),
dc1d6150 3224 NULL_RTX);
db3cf6fb
MS
3225 quotient
3226 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3227 build_int_2 (post_shift - 1, 0),
3228 tquotient, 1);
dc1d6150
TG
3229 }
3230 else
3231 {
3232 rtx t1, t2;
3233
02a65aef
R
3234 if (pre_shift >= BITS_PER_WORD
3235 || post_shift >= BITS_PER_WORD)
3236 goto fail1;
3237
dc1d6150
TG
3238 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3239 build_int_2 (pre_shift, 0),
3240 NULL_RTX, 1);
3241 extra_cost = (shift_cost[pre_shift]
3242 + shift_cost[post_shift]);
3243 t2 = expand_mult_highpart (compute_mode, t1, ml,
3244 NULL_RTX, 1,
3245 max_cost - extra_cost);
3246 if (t2 == 0)
3247 goto fail1;
db3cf6fb
MS
3248 quotient
3249 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3250 build_int_2 (post_shift, 0),
3251 tquotient, 1);
dc1d6150 3252 }
55c2d311
TG
3253 }
3254 }
34f016ed
TG
3255 else /* Too wide mode to use tricky code */
3256 break;
55c2d311
TG
3257
3258 insn = get_last_insn ();
4e430df8
RK
3259 if (insn != last
3260 && (set = single_set (insn)) != 0
3261 && SET_DEST (set) == quotient)
c410d49e 3262 set_unique_reg_note (insn,
7e5bda2c
AM
3263 REG_EQUAL,
3264 gen_rtx_UDIV (compute_mode, op0, op1));
55c2d311
TG
3265 }
3266 else /* TRUNC_DIV, signed */
3267 {
3268 unsigned HOST_WIDE_INT ml;
3269 int lgup, post_shift;
3270 HOST_WIDE_INT d = INTVAL (op1);
3271 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3272
3273 /* n rem d = n rem -d */
3274 if (rem_flag && d < 0)
3275 {
3276 d = abs_d;
2496c7bd 3277 op1 = gen_int_mode (abs_d, compute_mode);
55c2d311
TG
3278 }
3279
3280 if (d == 1)
3281 quotient = op0;
3282 else if (d == -1)
3283 quotient = expand_unop (compute_mode, neg_optab, op0,
3284 tquotient, 0);
f737b132
RK
3285 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3286 {
3287 /* This case is not handled correctly below. */
3288 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3289 compute_mode, 1, 1);
3290 if (quotient == 0)
3291 goto fail1;
3292 }
55c2d311 3293 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
a8c7e72d
RH
3294 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap)
3295 /* ??? The cheap metric is computed only for
3296 word_mode. If this operation is wider, this may
3297 not be so. Assume true if the optab has an
3298 expander for this mode. */
3299 && (((rem_flag ? smod_optab : sdiv_optab)
3300 ->handlers[(int) compute_mode].insn_code
3301 != CODE_FOR_nothing)
3302 || (sdivmod_optab->handlers[(int) compute_mode]
3303 .insn_code != CODE_FOR_nothing)))
55c2d311
TG
3304 ;
3305 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3306 {
3307 lgup = floor_log2 (abs_d);
1cf7847f 3308 if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
55c2d311
TG
3309 {
3310 rtx label = gen_label_rtx ();
3311 rtx t1;
3312
3313 t1 = copy_to_mode_reg (compute_mode, op0);
f5963e61
JL
3314 do_cmp_and_jump (t1, const0_rtx, GE,
3315 compute_mode, label);
2496c7bd
LB
3316 expand_inc (t1, gen_int_mode (abs_d - 1,
3317 compute_mode));
55c2d311
TG
3318 emit_label (label);
3319 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3320 build_int_2 (lgup, 0),
3321 tquotient, 0);
3322 }
3323 else
3324 {
3325 rtx t1, t2, t3;
3326 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3327 build_int_2 (size - 1, 0),
3328 NULL_RTX, 0);
3329 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3330 build_int_2 (size - lgup, 0),
3331 NULL_RTX, 1);
38a448ca
RH
3332 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3333 op0, t2),
55c2d311
TG
3334 NULL_RTX);
3335 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3336 build_int_2 (lgup, 0),
3337 tquotient, 0);
3338 }
3339
e8031612
RK
3340 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3341 the quotient. */
55c2d311
TG
3342 if (d < 0)
3343 {
3344 insn = get_last_insn ();
4e430df8
RK
3345 if (insn != last
3346 && (set = single_set (insn)) != 0
c8e7fe58
DE
3347 && SET_DEST (set) == quotient
3348 && abs_d < ((unsigned HOST_WIDE_INT) 1
3349 << (HOST_BITS_PER_WIDE_INT - 1)))
c410d49e 3350 set_unique_reg_note (insn,
7e5bda2c
AM
3351 REG_EQUAL,
3352 gen_rtx_DIV (compute_mode,
3353 op0,
69107307
AO
3354 GEN_INT
3355 (trunc_int_for_mode
3356 (abs_d,
3357 compute_mode))));
55c2d311
TG
3358
3359 quotient = expand_unop (compute_mode, neg_optab,
3360 quotient, quotient, 0);
3361 }
3362 }
34f016ed 3363 else if (size <= HOST_BITS_PER_WIDE_INT)
55c2d311
TG
3364 {
3365 choose_multiplier (abs_d, size, size - 1,
3366 &ml, &post_shift, &lgup);
3367 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3368 {
3369 rtx t1, t2, t3;
3370
02a65aef
R
3371 if (post_shift >= BITS_PER_WORD
3372 || size - 1 >= BITS_PER_WORD)
3373 goto fail1;
3374
71af73bb
TG
3375 extra_cost = (shift_cost[post_shift]
3376 + shift_cost[size - 1] + add_cost);
55c2d311 3377 t1 = expand_mult_highpart (compute_mode, op0, ml,
71af73bb
TG
3378 NULL_RTX, 0,
3379 max_cost - extra_cost);
55c2d311
TG
3380 if (t1 == 0)
3381 goto fail1;
3382 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3383 build_int_2 (post_shift, 0), NULL_RTX, 0);
3384 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3385 build_int_2 (size - 1, 0), NULL_RTX, 0);
3386 if (d < 0)
c5c76735
JL
3387 quotient
3388 = force_operand (gen_rtx_MINUS (compute_mode,
3389 t3, t2),
3390 tquotient);
55c2d311 3391 else
c5c76735
JL
3392 quotient
3393 = force_operand (gen_rtx_MINUS (compute_mode,
3394 t2, t3),
3395 tquotient);
55c2d311
TG
3396 }
3397 else
3398 {
3399 rtx t1, t2, t3, t4;
3400
02a65aef
R
3401 if (post_shift >= BITS_PER_WORD
3402 || size - 1 >= BITS_PER_WORD)
3403 goto fail1;
3404
55c2d311 3405 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
71af73bb
TG
3406 extra_cost = (shift_cost[post_shift]
3407 + shift_cost[size - 1] + 2 * add_cost);
55c2d311 3408 t1 = expand_mult_highpart (compute_mode, op0, ml,
71af73bb
TG
3409 NULL_RTX, 0,
3410 max_cost - extra_cost);
55c2d311
TG
3411 if (t1 == 0)
3412 goto fail1;
c5c76735
JL
3413 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3414 t1, op0),
55c2d311
TG
3415 NULL_RTX);
3416 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
c5c76735
JL
3417 build_int_2 (post_shift, 0),
3418 NULL_RTX, 0);
55c2d311 3419 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
c5c76735
JL
3420 build_int_2 (size - 1, 0),
3421 NULL_RTX, 0);
55c2d311 3422 if (d < 0)
c5c76735
JL
3423 quotient
3424 = force_operand (gen_rtx_MINUS (compute_mode,
3425 t4, t3),
3426 tquotient);
55c2d311 3427 else
c5c76735
JL
3428 quotient
3429 = force_operand (gen_rtx_MINUS (compute_mode,
3430 t3, t4),
3431 tquotient);
55c2d311
TG
3432 }
3433 }
34f016ed
TG
3434 else /* Too wide mode to use tricky code */
3435 break;
55c2d311 3436
4e430df8
RK
3437 insn = get_last_insn ();
3438 if (insn != last
3439 && (set = single_set (insn)) != 0
3440 && SET_DEST (set) == quotient)
c410d49e 3441 set_unique_reg_note (insn,
7e5bda2c
AM
3442 REG_EQUAL,
3443 gen_rtx_DIV (compute_mode, op0, op1));
55c2d311
TG
3444 }
3445 break;
3446 }
3447 fail1:
3448 delete_insns_since (last);
3449 break;
44037a66 3450
55c2d311
TG
3451 case FLOOR_DIV_EXPR:
3452 case FLOOR_MOD_EXPR:
3453 /* We will come here only for signed operations. */
3454 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3455 {
3456 unsigned HOST_WIDE_INT mh, ml;
3457 int pre_shift, lgup, post_shift;
3458 HOST_WIDE_INT d = INTVAL (op1);
3459
3460 if (d > 0)
3461 {
3462 /* We could just as easily deal with negative constants here,
3463 but it does not seem worth the trouble for GCC 2.6. */
3464 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3465 {
3466 pre_shift = floor_log2 (d);
3467 if (rem_flag)
3468 {
3469 remainder = expand_binop (compute_mode, and_optab, op0,
3470 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3471 remainder, 0, OPTAB_LIB_WIDEN);
3472 if (remainder)
c8dbc8ca 3473 return gen_lowpart (mode, remainder);
55c2d311
TG
3474 }
3475 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3476 build_int_2 (pre_shift, 0),
3477 tquotient, 0);
3478 }
3479 else
3480 {
3481 rtx t1, t2, t3, t4;
3482
3483 mh = choose_multiplier (d, size, size - 1,
3484 &ml, &post_shift, &lgup);
3485 if (mh)
3486 abort ();
3487
02a65aef
R
3488 if (post_shift < BITS_PER_WORD
3489 && size - 1 < BITS_PER_WORD)
55c2d311 3490 {
02a65aef
R
3491 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3492 build_int_2 (size - 1, 0),
3493 NULL_RTX, 0);
3494 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3495 NULL_RTX, 0, OPTAB_WIDEN);
3496 extra_cost = (shift_cost[post_shift]
3497 + shift_cost[size - 1] + 2 * add_cost);
3498 t3 = expand_mult_highpart (compute_mode, t2, ml,
3499 NULL_RTX, 1,
3500 max_cost - extra_cost);
3501 if (t3 != 0)
3502 {
3503 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3504 build_int_2 (post_shift, 0),
3505 NULL_RTX, 1);
3506 quotient = expand_binop (compute_mode, xor_optab,
3507 t4, t1, tquotient, 0,
3508 OPTAB_WIDEN);
3509 }
55c2d311
TG
3510 }
3511 }
3512 }
3513 else
3514 {
3515 rtx nsign, t1, t2, t3, t4;
38a448ca
RH
3516 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3517 op0, constm1_rtx), NULL_RTX);
55c2d311
TG
3518 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3519 0, OPTAB_WIDEN);
3520 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3521 build_int_2 (size - 1, 0), NULL_RTX, 0);
38a448ca 3522 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
55c2d311
TG
3523 NULL_RTX);
3524 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3525 NULL_RTX, 0);
3526 if (t4)
3527 {
3528 rtx t5;
3529 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3530 NULL_RTX, 0);
38a448ca
RH
3531 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3532 t4, t5),
55c2d311
TG
3533 tquotient);
3534 }
3535 }
3536 }
3537
3538 if (quotient != 0)
3539 break;
3540 delete_insns_since (last);
3541
3542 /* Try using an instruction that produces both the quotient and
3543 remainder, using truncation. We can easily compensate the quotient
3544 or remainder to get floor rounding, once we have the remainder.
3545 Notice that we compute also the final remainder value here,
3546 and return the result right away. */
a45cf58c 3547 if (target == 0 || GET_MODE (target) != compute_mode)
55c2d311 3548 target = gen_reg_rtx (compute_mode);
668443c9 3549
55c2d311
TG
3550 if (rem_flag)
3551 {
668443c9
RK
3552 remainder
3553 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
55c2d311
TG
3554 quotient = gen_reg_rtx (compute_mode);
3555 }
3556 else
3557 {
668443c9
RK
3558 quotient
3559 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
55c2d311
TG
3560 remainder = gen_reg_rtx (compute_mode);
3561 }
3562
3563 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3564 quotient, remainder, 0))
3565 {
3566 /* This could be computed with a branch-less sequence.
3567 Save that for later. */
3568 rtx tem;
3569 rtx label = gen_label_rtx ();
f5963e61 3570 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
55c2d311
TG
3571 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3572 NULL_RTX, 0, OPTAB_WIDEN);
f5963e61 3573 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
55c2d311
TG
3574 expand_dec (quotient, const1_rtx);
3575 expand_inc (remainder, op1);
3576 emit_label (label);
c8dbc8ca 3577 return gen_lowpart (mode, rem_flag ? remainder : quotient);
55c2d311
TG
3578 }
3579
3580 /* No luck with division elimination or divmod. Have to do it
3581 by conditionally adjusting op0 *and* the result. */
44037a66 3582 {
55c2d311
TG
3583 rtx label1, label2, label3, label4, label5;
3584 rtx adjusted_op0;
3585 rtx tem;
3586
3587 quotient = gen_reg_rtx (compute_mode);
3588 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3589 label1 = gen_label_rtx ();
3590 label2 = gen_label_rtx ();
3591 label3 = gen_label_rtx ();
3592 label4 = gen_label_rtx ();
3593 label5 = gen_label_rtx ();
f5963e61
JL
3594 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3595 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
55c2d311
TG
3596 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3597 quotient, 0, OPTAB_LIB_WIDEN);
3598 if (tem != quotient)
3599 emit_move_insn (quotient, tem);
3600 emit_jump_insn (gen_jump (label5));
3601 emit_barrier ();
3602 emit_label (label1);
44037a66 3603 expand_inc (adjusted_op0, const1_rtx);
55c2d311
TG
3604 emit_jump_insn (gen_jump (label4));
3605 emit_barrier ();
3606 emit_label (label2);
f5963e61 3607 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
55c2d311
TG
3608 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3609 quotient, 0, OPTAB_LIB_WIDEN);
3610 if (tem != quotient)
3611 emit_move_insn (quotient, tem);
3612 emit_jump_insn (gen_jump (label5));
3613 emit_barrier ();
3614 emit_label (label3);
3615 expand_dec (adjusted_op0, const1_rtx);
3616 emit_label (label4);
3617 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3618 quotient, 0, OPTAB_LIB_WIDEN);
3619 if (tem != quotient)
3620 emit_move_insn (quotient, tem);
3621 expand_dec (quotient, const1_rtx);
3622 emit_label (label5);
44037a66 3623 }
55c2d311 3624 break;
44037a66 3625
55c2d311
TG
3626 case CEIL_DIV_EXPR:
3627 case CEIL_MOD_EXPR:
3628 if (unsignedp)
3629 {
9176af2f
TG
3630 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3631 {
3632 rtx t1, t2, t3;
3633 unsigned HOST_WIDE_INT d = INTVAL (op1);
3634 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3635 build_int_2 (floor_log2 (d), 0),
412381d9 3636 tquotient, 1);
9176af2f
TG
3637 t2 = expand_binop (compute_mode, and_optab, op0,
3638 GEN_INT (d - 1),
3639 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3640 t3 = gen_reg_rtx (compute_mode);
3641 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3642 compute_mode, 1, 1);
412381d9
TG
3643 if (t3 == 0)
3644 {
3645 rtx lab;
3646 lab = gen_label_rtx ();
f5963e61 3647 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
412381d9
TG
3648 expand_inc (t1, const1_rtx);
3649 emit_label (lab);
3650 quotient = t1;
3651 }
3652 else
38a448ca
RH
3653 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3654 t1, t3),
412381d9 3655 tquotient);
9176af2f
TG
3656 break;
3657 }
55c2d311
TG
3658
3659 /* Try using an instruction that produces both the quotient and
3660 remainder, using truncation. We can easily compensate the
3661 quotient or remainder to get ceiling rounding, once we have the
3662 remainder. Notice that we compute also the final remainder
3663 value here, and return the result right away. */
a45cf58c 3664 if (target == 0 || GET_MODE (target) != compute_mode)
55c2d311 3665 target = gen_reg_rtx (compute_mode);
668443c9 3666
55c2d311
TG
3667 if (rem_flag)
3668 {
668443c9
RK
3669 remainder = (GET_CODE (target) == REG
3670 ? target : gen_reg_rtx (compute_mode));
55c2d311
TG
3671 quotient = gen_reg_rtx (compute_mode);
3672 }
3673 else
3674 {
668443c9
RK
3675 quotient = (GET_CODE (target) == REG
3676 ? target : gen_reg_rtx (compute_mode));
55c2d311
TG
3677 remainder = gen_reg_rtx (compute_mode);
3678 }
3679
3680 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3681 remainder, 1))
3682 {
3683 /* This could be computed with a branch-less sequence.
3684 Save that for later. */
3685 rtx label = gen_label_rtx ();
f5963e61
JL
3686 do_cmp_and_jump (remainder, const0_rtx, EQ,
3687 compute_mode, label);
55c2d311
TG
3688 expand_inc (quotient, const1_rtx);
3689 expand_dec (remainder, op1);
3690 emit_label (label);
c8dbc8ca 3691 return gen_lowpart (mode, rem_flag ? remainder : quotient);
55c2d311
TG
3692 }
3693
3694 /* No luck with division elimination or divmod. Have to do it
3695 by conditionally adjusting op0 *and* the result. */
44037a66 3696 {
55c2d311
TG
3697 rtx label1, label2;
3698 rtx adjusted_op0, tem;
3699
3700 quotient = gen_reg_rtx (compute_mode);
3701 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3702 label1 = gen_label_rtx ();
3703 label2 = gen_label_rtx ();
f5963e61
JL
3704 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3705 compute_mode, label1);
55c2d311
TG
3706 emit_move_insn (quotient, const0_rtx);
3707 emit_jump_insn (gen_jump (label2));
3708 emit_barrier ();
3709 emit_label (label1);
3710 expand_dec (adjusted_op0, const1_rtx);
3711 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3712 quotient, 1, OPTAB_LIB_WIDEN);
3713 if (tem != quotient)
3714 emit_move_insn (quotient, tem);
3715 expand_inc (quotient, const1_rtx);
3716 emit_label (label2);
44037a66 3717 }
55c2d311
TG
3718 }
3719 else /* signed */
3720 {
73f27728
RK
3721 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3722 && INTVAL (op1) >= 0)
3723 {
3724 /* This is extremely similar to the code for the unsigned case
3725 above. For 2.7 we should merge these variants, but for
3726 2.6.1 I don't want to touch the code for unsigned since that
3727 get used in C. The signed case will only be used by other
3728 languages (Ada). */
3729
3730 rtx t1, t2, t3;
3731 unsigned HOST_WIDE_INT d = INTVAL (op1);
3732 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3733 build_int_2 (floor_log2 (d), 0),
3734 tquotient, 0);
3735 t2 = expand_binop (compute_mode, and_optab, op0,
3736 GEN_INT (d - 1),
3737 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3738 t3 = gen_reg_rtx (compute_mode);
3739 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3740 compute_mode, 1, 1);
3741 if (t3 == 0)
3742 {
3743 rtx lab;
3744 lab = gen_label_rtx ();
f5963e61 3745 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
73f27728
RK
3746 expand_inc (t1, const1_rtx);
3747 emit_label (lab);
3748 quotient = t1;
3749 }
3750 else
38a448ca
RH
3751 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3752 t1, t3),
73f27728
RK
3753 tquotient);
3754 break;
3755 }
3756
55c2d311
TG
3757 /* Try using an instruction that produces both the quotient and
3758 remainder, using truncation. We can easily compensate the
3759 quotient or remainder to get ceiling rounding, once we have the
3760 remainder. Notice that we compute also the final remainder
3761 value here, and return the result right away. */
a45cf58c 3762 if (target == 0 || GET_MODE (target) != compute_mode)
55c2d311
TG
3763 target = gen_reg_rtx (compute_mode);
3764 if (rem_flag)
3765 {
668443c9
RK
3766 remainder= (GET_CODE (target) == REG
3767 ? target : gen_reg_rtx (compute_mode));
55c2d311
TG
3768 quotient = gen_reg_rtx (compute_mode);
3769 }
3770 else
3771 {
668443c9
RK
3772 quotient = (GET_CODE (target) == REG
3773 ? target : gen_reg_rtx (compute_mode));
55c2d311
TG
3774 remainder = gen_reg_rtx (compute_mode);
3775 }
3776
3777 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3778 remainder, 0))
3779 {
3780 /* This could be computed with a branch-less sequence.
3781 Save that for later. */
3782 rtx tem;
3783 rtx label = gen_label_rtx ();
f5963e61
JL
3784 do_cmp_and_jump (remainder, const0_rtx, EQ,
3785 compute_mode, label);
55c2d311
TG
3786 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3787 NULL_RTX, 0, OPTAB_WIDEN);
f5963e61 3788 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
55c2d311
TG
3789 expand_inc (quotient, const1_rtx);
3790 expand_dec (remainder, op1);
3791 emit_label (label);
c8dbc8ca 3792 return gen_lowpart (mode, rem_flag ? remainder : quotient);
55c2d311
TG
3793 }
3794
3795 /* No luck with division elimination or divmod. Have to do it
3796 by conditionally adjusting op0 *and* the result. */
44037a66 3797 {
55c2d311
TG
3798 rtx label1, label2, label3, label4, label5;
3799 rtx adjusted_op0;
3800 rtx tem;
3801
3802 quotient = gen_reg_rtx (compute_mode);
3803 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3804 label1 = gen_label_rtx ();
3805 label2 = gen_label_rtx ();
3806 label3 = gen_label_rtx ();
3807 label4 = gen_label_rtx ();
3808 label5 = gen_label_rtx ();
f5963e61
JL
3809 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3810 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3811 compute_mode, label1);
55c2d311
TG
3812 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3813 quotient, 0, OPTAB_LIB_WIDEN);
3814 if (tem != quotient)
3815 emit_move_insn (quotient, tem);
3816 emit_jump_insn (gen_jump (label5));
3817 emit_barrier ();
3818 emit_label (label1);
3819 expand_dec (adjusted_op0, const1_rtx);
3820 emit_jump_insn (gen_jump (label4));
3821 emit_barrier ();
3822 emit_label (label2);
f5963e61
JL
3823 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3824 compute_mode, label3);
55c2d311
TG
3825 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3826 quotient, 0, OPTAB_LIB_WIDEN);
3827 if (tem != quotient)
3828 emit_move_insn (quotient, tem);
3829 emit_jump_insn (gen_jump (label5));
3830 emit_barrier ();
3831 emit_label (label3);
3832 expand_inc (adjusted_op0, const1_rtx);
3833 emit_label (label4);
3834 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3835 quotient, 0, OPTAB_LIB_WIDEN);
3836 if (tem != quotient)
3837 emit_move_insn (quotient, tem);
3838 expand_inc (quotient, const1_rtx);
3839 emit_label (label5);
44037a66 3840 }
55c2d311
TG
3841 }
3842 break;
bc1c7e93 3843
55c2d311
TG
3844 case EXACT_DIV_EXPR:
3845 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3846 {
3847 HOST_WIDE_INT d = INTVAL (op1);
3848 unsigned HOST_WIDE_INT ml;
91ce572a 3849 int pre_shift;
55c2d311
TG
3850 rtx t1;
3851
91ce572a
CC
3852 pre_shift = floor_log2 (d & -d);
3853 ml = invert_mod2n (d >> pre_shift, size);
3854 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3855 build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
69107307 3856 quotient = expand_mult (compute_mode, t1,
2496c7bd 3857 gen_int_mode (ml, compute_mode),
69107307 3858 NULL_RTX, 0);
55c2d311
TG
3859
3860 insn = get_last_insn ();
7e5bda2c
AM
3861 set_unique_reg_note (insn,
3862 REG_EQUAL,
3863 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3864 compute_mode,
3865 op0, op1));
55c2d311
TG
3866 }
3867 break;
3868
3869 case ROUND_DIV_EXPR:
3870 case ROUND_MOD_EXPR:
69f61901
RK
3871 if (unsignedp)
3872 {
3873 rtx tem;
3874 rtx label;
3875 label = gen_label_rtx ();
3876 quotient = gen_reg_rtx (compute_mode);
3877 remainder = gen_reg_rtx (compute_mode);
3878 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3879 {
3880 rtx tem;
3881 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3882 quotient, 1, OPTAB_LIB_WIDEN);
3883 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3884 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3885 remainder, 1, OPTAB_LIB_WIDEN);
3886 }
3887 tem = plus_constant (op1, -1);
3888 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3889 build_int_2 (1, 0), NULL_RTX, 1);
f5963e61 3890 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
69f61901
RK
3891 expand_inc (quotient, const1_rtx);
3892 expand_dec (remainder, op1);
3893 emit_label (label);
3894 }
3895 else
3896 {
3897 rtx abs_rem, abs_op1, tem, mask;
3898 rtx label;
3899 label = gen_label_rtx ();
3900 quotient = gen_reg_rtx (compute_mode);
3901 remainder = gen_reg_rtx (compute_mode);
3902 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3903 {
3904 rtx tem;
3905 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3906 quotient, 0, OPTAB_LIB_WIDEN);
3907 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3908 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3909 remainder, 0, OPTAB_LIB_WIDEN);
3910 }
91ce572a
CC
3911 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
3912 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
69f61901
RK
3913 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3914 build_int_2 (1, 0), NULL_RTX, 1);
f5963e61 3915 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
69f61901
RK
3916 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3917 NULL_RTX, 0, OPTAB_WIDEN);
3918 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3919 build_int_2 (size - 1, 0), NULL_RTX, 0);
3920 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3921 NULL_RTX, 0, OPTAB_WIDEN);
3922 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3923 NULL_RTX, 0, OPTAB_WIDEN);
3924 expand_inc (quotient, tem);
3925 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3926 NULL_RTX, 0, OPTAB_WIDEN);
3927 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3928 NULL_RTX, 0, OPTAB_WIDEN);
3929 expand_dec (remainder, tem);
3930 emit_label (label);
3931 }
3932 return gen_lowpart (mode, rem_flag ? remainder : quotient);
c410d49e 3933
e9a25f70
JL
3934 default:
3935 abort ();
55c2d311 3936 }
44037a66 3937
55c2d311 3938 if (quotient == 0)
44037a66 3939 {
a45cf58c
RK
3940 if (target && GET_MODE (target) != compute_mode)
3941 target = 0;
3942
55c2d311 3943 if (rem_flag)
44037a66 3944 {
32fdf36b 3945 /* Try to produce the remainder without producing the quotient.
d6a7951f
JM
3946 If we seem to have a divmod pattern that does not require widening,
 3947 don't try widening here. We should really have a WIDEN argument
32fdf36b
TG
3948 to expand_twoval_binop, since what we'd really like to do here is
3949 1) try a mod insn in compute_mode
3950 2) try a divmod insn in compute_mode
3951 3) try a div insn in compute_mode and multiply-subtract to get
3952 remainder
3953 4) try the same things with widening allowed. */
3954 remainder
3955 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3956 op0, op1, target,
3957 unsignedp,
3958 ((optab2->handlers[(int) compute_mode].insn_code
3959 != CODE_FOR_nothing)
3960 ? OPTAB_DIRECT : OPTAB_WIDEN));
55c2d311 3961 if (remainder == 0)
44037a66
TG
3962 {
3963 /* No luck there. Can we do remainder and divide at once
3964 without a library call? */
55c2d311
TG
3965 remainder = gen_reg_rtx (compute_mode);
3966 if (! expand_twoval_binop ((unsignedp
3967 ? udivmod_optab
3968 : sdivmod_optab),
3969 op0, op1,
3970 NULL_RTX, remainder, unsignedp))
3971 remainder = 0;
44037a66 3972 }
55c2d311
TG
3973
3974 if (remainder)
3975 return gen_lowpart (mode, remainder);
44037a66 3976 }
44037a66 3977
dc38b292
RK
3978 /* Produce the quotient. Try a quotient insn, but not a library call.
3979 If we have a divmod in this mode, use it in preference to widening
3980 the div (for this test we assume it will not fail). Note that optab2
3981 is set to the one of the two optabs that the call below will use. */
3982 quotient
3983 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3984 op0, op1, rem_flag ? NULL_RTX : target,
3985 unsignedp,
3986 ((optab2->handlers[(int) compute_mode].insn_code
3987 != CODE_FOR_nothing)
3988 ? OPTAB_DIRECT : OPTAB_WIDEN));
3989
55c2d311 3990 if (quotient == 0)
44037a66
TG
3991 {
3992 /* No luck there. Try a quotient-and-remainder insn,
3993 keeping the quotient alone. */
55c2d311 3994 quotient = gen_reg_rtx (compute_mode);
44037a66 3995 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
55c2d311
TG
3996 op0, op1,
3997 quotient, NULL_RTX, unsignedp))
3998 {
3999 quotient = 0;
4000 if (! rem_flag)
4001 /* Still no luck. If we are not computing the remainder,
4002 use a library call for the quotient. */
4003 quotient = sign_expand_binop (compute_mode,
4004 udiv_optab, sdiv_optab,
4005 op0, op1, target,
4006 unsignedp, OPTAB_LIB_WIDEN);
4007 }
44037a66 4008 }
44037a66
TG
4009 }
4010
44037a66
TG
4011 if (rem_flag)
4012 {
a45cf58c
RK
4013 if (target && GET_MODE (target) != compute_mode)
4014 target = 0;
4015
55c2d311 4016 if (quotient == 0)
44037a66 4017 /* No divide instruction either. Use library for remainder. */
55c2d311
TG
4018 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4019 op0, op1, target,
4020 unsignedp, OPTAB_LIB_WIDEN);
44037a66
TG
4021 else
4022 {
4023 /* We divided. Now finish doing X - Y * (X / Y). */
55c2d311
TG
4024 remainder = expand_mult (compute_mode, quotient, op1,
4025 NULL_RTX, unsignedp);
4026 remainder = expand_binop (compute_mode, sub_optab, op0,
4027 remainder, target, unsignedp,
4028 OPTAB_LIB_WIDEN);
44037a66
TG
4029 }
4030 }
4031
55c2d311 4032 return gen_lowpart (mode, rem_flag ? remainder : quotient);
44037a66
TG
4033}
4034\f
4035/* Return a tree node with data type TYPE, describing the value of X.
4036 Usually this is an RTL_EXPR, if there is no obvious better choice.
4037 X may be an expression, however we only support those expressions
6d2f8887 4038 generated by loop.c. */
44037a66
TG
4039
4040tree
4041make_tree (type, x)
4042 tree type;
4043 rtx x;
4044{
4045 tree t;
4046
4047 switch (GET_CODE (x))
4048 {
4049 case CONST_INT:
4050 t = build_int_2 (INTVAL (x),
172f4957
DM
4051 (TREE_UNSIGNED (type)
4052 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
4053 || INTVAL (x) >= 0 ? 0 : -1);
44037a66
TG
4054 TREE_TYPE (t) = type;
4055 return t;
4056
4057 case CONST_DOUBLE:
4058 if (GET_MODE (x) == VOIDmode)
4059 {
4060 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4061 TREE_TYPE (t) = type;
4062 }
4063 else
4064 {
4065 REAL_VALUE_TYPE d;
4066
4067 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4068 t = build_real (type, d);
4069 }
4070
4071 return t;
69ef87e2
AH
4072
4073 case CONST_VECTOR:
4074 {
4075 int i, units;
4076 rtx elt;
4077 tree t = NULL_TREE;
4078
4079 units = CONST_VECTOR_NUNITS (x);
4080
4081 /* Build a tree with vector elements. */
4082 for (i = units - 1; i >= 0; --i)
4083 {
4084 elt = CONST_VECTOR_ELT (x, i);
4085 t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4086 }
c410d49e 4087
69ef87e2
AH
4088 return build_vector (type, t);
4089 }
4090
44037a66
TG
4091 case PLUS:
4092 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4093 make_tree (type, XEXP (x, 1))));
c410d49e 4094
44037a66
TG
4095 case MINUS:
4096 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4097 make_tree (type, XEXP (x, 1))));
c410d49e 4098
44037a66
TG
4099 case NEG:
4100 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4101
4102 case MULT:
4103 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4104 make_tree (type, XEXP (x, 1))));
c410d49e 4105
44037a66
TG
4106 case ASHIFT:
4107 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4108 make_tree (type, XEXP (x, 1))));
c410d49e 4109
44037a66 4110 case LSHIFTRT:
ceef8ce4 4111 t = (*lang_hooks.types.unsigned_type) (type);
44037a66 4112 return fold (convert (type,
ceef8ce4
NB
4113 build (RSHIFT_EXPR, t,
4114 make_tree (t, XEXP (x, 0)),
44037a66 4115 make_tree (type, XEXP (x, 1)))));
c410d49e 4116
44037a66 4117 case ASHIFTRT:
ceef8ce4 4118 t = (*lang_hooks.types.signed_type) (type);
44037a66 4119 return fold (convert (type,
ceef8ce4
NB
4120 build (RSHIFT_EXPR, t,
4121 make_tree (t, XEXP (x, 0)),
44037a66 4122 make_tree (type, XEXP (x, 1)))));
c410d49e 4123
44037a66
TG
4124 case DIV:
4125 if (TREE_CODE (type) != REAL_TYPE)
ceef8ce4 4126 t = (*lang_hooks.types.signed_type) (type);
44037a66
TG
4127 else
4128 t = type;
4129
4130 return fold (convert (type,
4131 build (TRUNC_DIV_EXPR, t,
4132 make_tree (t, XEXP (x, 0)),
4133 make_tree (t, XEXP (x, 1)))));
4134 case UDIV:
ceef8ce4 4135 t = (*lang_hooks.types.unsigned_type) (type);
44037a66
TG
4136 return fold (convert (type,
4137 build (TRUNC_DIV_EXPR, t,
4138 make_tree (t, XEXP (x, 0)),
4139 make_tree (t, XEXP (x, 1)))));
4140 default:
4141 t = make_node (RTL_EXPR);
4142 TREE_TYPE (t) = type;
d1608933
RK
4143
4144#ifdef POINTERS_EXTEND_UNSIGNED
4145 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4146 ptr_mode. So convert. */
4147 if (POINTER_TYPE_P (type) && GET_MODE (x) != TYPE_MODE (type))
4148 x = convert_memory_address (TYPE_MODE (type), x);
4149#endif
4150
44037a66
TG
4151 RTL_EXPR_RTL (t) = x;
4152 /* There are no insns to be output
4153 when this rtl_expr is used. */
4154 RTL_EXPR_SEQUENCE (t) = 0;
4155 return t;
4156 }
4157}
4158
4159/* Return an rtx representing the value of X * MULT + ADD.
4160 TARGET is a suggestion for where to store the result (an rtx).
4161 MODE is the machine mode for the computation.
4162 X and MULT must have mode MODE. ADD may have a different mode.
4163 So can X (defaults to same as MODE).
4164 UNSIGNEDP is non-zero to do unsigned multiplication.
4165 This may emit insns. */
4166
4167rtx
4168expand_mult_add (x, target, mult, add, mode, unsignedp)
4169 rtx x, target, mult, add;
4170 enum machine_mode mode;
4171 int unsignedp;
4172{
b0c48229 4173 tree type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
44037a66 4174 tree add_type = (GET_MODE (add) == VOIDmode
b0c48229
NB
4175 ? type: (*lang_hooks.types.type_for_mode) (GET_MODE (add),
4176 unsignedp));
44037a66
TG
4177 tree result = fold (build (PLUS_EXPR, type,
4178 fold (build (MULT_EXPR, type,
4179 make_tree (type, x),
4180 make_tree (type, mult))),
4181 make_tree (add_type, add)));
4182
4183 return expand_expr (result, target, VOIDmode, 0);
4184}
4185\f
4186/* Compute the logical-and of OP0 and OP1, storing it in TARGET
4187 and returning TARGET.
4188
4189 If TARGET is 0, a pseudo-register or constant is returned. */
4190
4191rtx
22273300
JJ
4192expand_and (mode, op0, op1, target)
4193 enum machine_mode mode;
44037a66
TG
4194 rtx op0, op1, target;
4195{
22273300 4196 rtx tem = 0;
44037a66 4197
22273300
JJ
4198 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
4199 tem = simplify_binary_operation (AND, mode, op0, op1);
4200 if (tem == 0)
44037a66 4201 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
44037a66
TG
4202
4203 if (target == 0)
4204 target = tem;
4205 else if (tem != target)
4206 emit_move_insn (target, tem);
4207 return target;
4208}
4209\f
4210/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4211 and storing in TARGET. Normally return TARGET.
4212 Return 0 if that cannot be done.
4213
4214 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
c410d49e 4215 it is VOIDmode, they cannot both be CONST_INT.
44037a66
TG
4216
4217 UNSIGNEDP is for the case where we have to widen the operands
4218 to perform the operation. It says to use zero-extension.
4219
4220 NORMALIZEP is 1 if we should convert the result to be either zero
373e7d69 4221 or one. NORMALIZEP is -1 if we should convert the result to be
44037a66
TG
4222 either zero or -1. If NORMALIZEP is zero, the result will be left
4223 "raw" out of the scc insn. */
4224
4225rtx
4226emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4227 rtx target;
4228 enum rtx_code code;
4229 rtx op0, op1;
4230 enum machine_mode mode;
4231 int unsignedp;
4232 int normalizep;
4233{
4234 rtx subtarget;
4235 enum insn_code icode;
4236 enum machine_mode compare_mode;
4237 enum machine_mode target_mode = GET_MODE (target);
4238 rtx tem;
db2f8a07 4239 rtx last = get_last_insn ();
44037a66
TG
4240 rtx pattern, comparison;
4241
20d32cc2
ZW
4242 /* ??? Ok to do this and then fail? */
4243 op0 = protect_from_queue (op0, 0);
4244 op1 = protect_from_queue (op1, 0);
4245
b30f05db
BS
4246 if (unsignedp)
4247 code = unsigned_condition (code);
4248
c2615a67
RK
4249 /* If one operand is constant, make it the second one. Only do this
4250 if the other operand is not constant as well. */
4251
8c9864f3 4252 if (swap_commutative_operands_p (op0, op1))
c2615a67
RK
4253 {
4254 tem = op0;
4255 op0 = op1;
4256 op1 = tem;
4257 code = swap_condition (code);
4258 }
4259
6405e07b
DE
4260 if (mode == VOIDmode)
4261 mode = GET_MODE (op0);
4262
c410d49e 4263 /* For some comparisons with 1 and -1, we can convert this to
44037a66 4264 comparisons with zero. This will often produce more opportunities for
0f41302f 4265 store-flag insns. */
44037a66
TG
4266
4267 switch (code)
4268 {
4269 case LT:
4270 if (op1 == const1_rtx)
4271 op1 = const0_rtx, code = LE;
4272 break;
4273 case LE:
4274 if (op1 == constm1_rtx)
4275 op1 = const0_rtx, code = LT;
4276 break;
4277 case GE:
4278 if (op1 == const1_rtx)
4279 op1 = const0_rtx, code = GT;
4280 break;
4281 case GT:
4282 if (op1 == constm1_rtx)
4283 op1 = const0_rtx, code = GE;
4284 break;
4285 case GEU:
4286 if (op1 == const1_rtx)
4287 op1 = const0_rtx, code = NE;
4288 break;
4289 case LTU:
4290 if (op1 == const1_rtx)
4291 op1 = const0_rtx, code = EQ;
4292 break;
e9a25f70
JL
4293 default:
4294 break;
44037a66
TG
4295 }
4296
6912b84b
RK
4297 /* If we are comparing a double-word integer with zero, we can convert
4298 the comparison into one involving a single word. */
4299 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4300 && GET_MODE_CLASS (mode) == MODE_INT
170c56da
JJ
4301 && op1 == const0_rtx
4302 && (GET_CODE (op0) != MEM || ! MEM_VOLATILE_P (op0)))
6912b84b
RK
4303 {
4304 if (code == EQ || code == NE)
4305 {
4306 /* Do a logical OR of the two words and compare the result. */
4307 rtx op0h = gen_highpart (word_mode, op0);
4308 rtx op0l = gen_lowpart (word_mode, op0);
4309 rtx op0both = expand_binop (word_mode, ior_optab, op0h, op0l,
4310 NULL_RTX, unsignedp, OPTAB_DIRECT);
4311 if (op0both != 0)
4312 return emit_store_flag (target, code, op0both, op1, word_mode,
4313 unsignedp, normalizep);
4314 }
4315 else if (code == LT || code == GE)
4316 /* If testing the sign bit, can just test on high word. */
4317 return emit_store_flag (target, code, gen_highpart (word_mode, op0),
4318 op1, word_mode, unsignedp, normalizep);
4319 }
4320
44037a66
TG
4321 /* From now on, we won't change CODE, so set ICODE now. */
4322 icode = setcc_gen_code[(int) code];
4323
4324 /* If this is A < 0 or A >= 0, we can do this by taking the ones
4325 complement of A (for GE) and shifting the sign bit to the low bit. */
4326 if (op1 == const0_rtx && (code == LT || code == GE)
4327 && GET_MODE_CLASS (mode) == MODE_INT
4328 && (normalizep || STORE_FLAG_VALUE == 1
b1ec3c92 4329 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
12dd565f 4330 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
b1ec3c92 4331 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
44037a66 4332 {
8deb7047 4333 subtarget = target;
44037a66
TG
4334
4335 /* If the result is to be wider than OP0, it is best to convert it
4336 first. If it is to be narrower, it is *incorrect* to convert it
4337 first. */
4338 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4339 {
b3d4e1b2 4340 op0 = protect_from_queue (op0, 0);
81722fa9 4341 op0 = convert_modes (target_mode, mode, op0, 0);
44037a66
TG
4342 mode = target_mode;
4343 }
4344
4345 if (target_mode != mode)
4346 subtarget = 0;
4347
4348 if (code == GE)
1d6eaf3d
RK
4349 op0 = expand_unop (mode, one_cmpl_optab, op0,
4350 ((STORE_FLAG_VALUE == 1 || normalizep)
4351 ? 0 : subtarget), 0);
44037a66 4352
1d6eaf3d 4353 if (STORE_FLAG_VALUE == 1 || normalizep)
44037a66
TG
4354 /* If we are supposed to produce a 0/1 value, we want to do
4355 a logical shift from the sign bit to the low-order bit; for
4356 a -1/0 value, we do an arithmetic shift. */
4357 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4358 size_int (GET_MODE_BITSIZE (mode) - 1),
4359 subtarget, normalizep != -1);
4360
4361 if (mode != target_mode)
c2ec26b8 4362 op0 = convert_modes (target_mode, mode, op0, 0);
44037a66
TG
4363
4364 return op0;
4365 }
4366
4367 if (icode != CODE_FOR_nothing)
4368 {
a995e389
RH
4369 insn_operand_predicate_fn pred;
4370
44037a66
TG
4371 /* We think we may be able to do this with a scc insn. Emit the
4372 comparison and then the scc insn.
4373
4374 compare_from_rtx may call emit_queue, which would be deleted below
3bdf5848
BS
4375 if the scc insn fails. So call it ourselves before setting LAST.
4376 Likewise for do_pending_stack_adjust. */
44037a66
TG
4377
4378 emit_queue ();
3bdf5848 4379 do_pending_stack_adjust ();
44037a66
TG
4380 last = get_last_insn ();
4381
b1ec3c92 4382 comparison
d43e0b7d 4383 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
44037a66
TG
4384 if (GET_CODE (comparison) == CONST_INT)
4385 return (comparison == const0_rtx ? const0_rtx
4386 : normalizep == 1 ? const1_rtx
4387 : normalizep == -1 ? constm1_rtx
4388 : const_true_rtx);
4389
8f08e8c0
JL
4390 /* The code of COMPARISON may not match CODE if compare_from_rtx
4391 decided to swap its operands and reverse the original code.
c2615a67 4392
8f08e8c0
JL
4393 We know that compare_from_rtx returns either a CONST_INT or
4394 a new comparison code, so it is safe to just extract the
4395 code from COMPARISON. */
4396 code = GET_CODE (comparison);
8deb7047 4397
44037a66 4398 /* Get a reference to the target in the proper mode for this insn. */
a995e389 4399 compare_mode = insn_data[(int) icode].operand[0].mode;
44037a66 4400 subtarget = target;
a995e389 4401 pred = insn_data[(int) icode].operand[0].predicate;
44037a66 4402 if (preserve_subexpressions_p ()
a995e389 4403 || ! (*pred) (subtarget, compare_mode))
44037a66
TG
4404 subtarget = gen_reg_rtx (compare_mode);
4405
4406 pattern = GEN_FCN (icode) (subtarget);
4407 if (pattern)
4408 {
4409 emit_insn (pattern);
4410
4411 /* If we are converting to a wider mode, first convert to
4412 TARGET_MODE, then normalize. This produces better combining
4413 opportunities on machines that have a SIGN_EXTRACT when we are
4414 testing a single bit. This mostly benefits the 68k.
4415
4416 If STORE_FLAG_VALUE does not have the sign bit set when
4417 interpreted in COMPARE_MODE, we can do this conversion as
4418 unsigned, which is usually more efficient. */
4419 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4420 {
4421 convert_move (target, subtarget,
4422 (GET_MODE_BITSIZE (compare_mode)
b1ec3c92 4423 <= HOST_BITS_PER_WIDE_INT)
44037a66 4424 && 0 == (STORE_FLAG_VALUE
b1ec3c92
CH
4425 & ((HOST_WIDE_INT) 1
4426 << (GET_MODE_BITSIZE (compare_mode) -1))));
44037a66
TG
4427 op0 = target;
4428 compare_mode = target_mode;
4429 }
4430 else
4431 op0 = subtarget;
4432
4b980e20
RK
4433 /* If we want to keep subexpressions around, don't reuse our
4434 last target. */
4435
4436 if (preserve_subexpressions_p ())
4437 subtarget = 0;
4438
44037a66
TG
4439 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4440 we don't have to do anything. */
4441 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4442 ;
c5c76735
JL
4443 /* STORE_FLAG_VALUE might be the most negative number, so write
4444 the comparison this way to avoid a compiler-time warning. */
4445 else if (- normalizep == STORE_FLAG_VALUE)
44037a66
TG
4446 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4447
4448 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4449 makes it hard to use a value of just the sign bit due to
4450 ANSI integer constant typing rules. */
b1ec3c92 4451 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
44037a66 4452 && (STORE_FLAG_VALUE
b1ec3c92
CH
4453 & ((HOST_WIDE_INT) 1
4454 << (GET_MODE_BITSIZE (compare_mode) - 1))))
44037a66
TG
4455 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4456 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4457 subtarget, normalizep == 1);
4458 else if (STORE_FLAG_VALUE & 1)
4459 {
22273300 4460 op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
44037a66
TG
4461 if (normalizep == -1)
4462 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4463 }
4464 else
4465 abort ();
4466
c410d49e 4467 /* If we were converting to a smaller mode, do the
44037a66
TG
4468 conversion now. */
4469 if (target_mode != compare_mode)
4470 {
522ae84c 4471 convert_move (target, op0, 0);
44037a66
TG
4472 return target;
4473 }
4474 else
4475 return op0;
4476 }
4477 }
4478
db2f8a07 4479 delete_insns_since (last);
44037a66 4480
91e66235
MM
4481 /* If expensive optimizations, use different pseudo registers for each
4482 insn, instead of reusing the same pseudo. This leads to better CSE,
4483 but slows down the compiler, since there are more pseudos */
4484 subtarget = (!flag_expensive_optimizations
4485 && (target_mode == mode)) ? target : NULL_RTX;
44037a66
TG
4486
4487 /* If we reached here, we can't do this with a scc insn. However, there
4488 are some comparisons that can be done directly. For example, if
4489 this is an equality comparison of integers, we can try to exclusive-or
4490 (or subtract) the two operands and use a recursive call to try the
4491 comparison with zero. Don't do any of these cases if branches are
4492 very cheap. */
4493
c8c1bde3 4494 if (BRANCH_COST > 0
44037a66
TG
4495 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4496 && op1 != const0_rtx)
4497 {
4498 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4499 OPTAB_WIDEN);
4500
4501 if (tem == 0)
4502 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4503 OPTAB_WIDEN);
4504 if (tem != 0)
4505 tem = emit_store_flag (target, code, tem, const0_rtx,
4506 mode, unsignedp, normalizep);
4507 if (tem == 0)
4508 delete_insns_since (last);
4509 return tem;
4510 }
4511
c410d49e 4512 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
44037a66
TG
4513 the constant zero. Reject all other comparisons at this point. Only
4514 do LE and GT if branches are expensive since they are expensive on
4515 2-operand machines. */
4516
4517 if (BRANCH_COST == 0
4518 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4519 || (code != EQ && code != NE
4520 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4521 return 0;
4522
4523 /* See what we need to return. We can only return a 1, -1, or the
4524 sign bit. */
4525
4526 if (normalizep == 0)
4527 {
4528 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4529 normalizep = STORE_FLAG_VALUE;
4530
b1ec3c92 4531 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
12dd565f 4532 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
c84e2712 4533 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
44037a66
TG
4534 ;
4535 else
4536 return 0;
4537 }
4538
4539 /* Try to put the result of the comparison in the sign bit. Assume we can't
4540 do the necessary operation below. */
4541
4542 tem = 0;
4543
4544 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4545 the sign bit set. */
4546
4547 if (code == LE)
4548 {
4549 /* This is destructive, so SUBTARGET can't be OP0. */
4550 if (rtx_equal_p (subtarget, op0))
4551 subtarget = 0;
4552
4553 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4554 OPTAB_WIDEN);
4555 if (tem)
4556 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4557 OPTAB_WIDEN);
4558 }
4559
4560 /* To see if A > 0, compute (((signed) A) << BITS) - A, where BITS is the
4561 number of bits in the mode of OP0, minus one. */
4562
4563 if (code == GT)
4564 {
4565 if (rtx_equal_p (subtarget, op0))
4566 subtarget = 0;
4567
4568 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4569 size_int (GET_MODE_BITSIZE (mode) - 1),
4570 subtarget, 0);
4571 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4572 OPTAB_WIDEN);
4573 }
c410d49e 4574
44037a66
TG
4575 if (code == EQ || code == NE)
4576 {
4577 /* For EQ or NE, one way to do the comparison is to apply an operation
4578 that converts the operand into a positive number if it is non-zero
4579 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4580 for NE we negate. This puts the result in the sign bit. Then we
c410d49e 4581 normalize with a shift, if needed.
44037a66
TG
4582
4583 Two operations that can do the above actions are ABS and FFS, so try
4584 them. If that doesn't work, and MODE is smaller than a full word,
36d747f6 4585 we can use zero-extension to the wider mode (an unsigned conversion)
44037a66
TG
4586 as the operation. */
4587
c410d49e
EC
4588 /* Note that ABS doesn't yield a positive number for INT_MIN, but
4589 that is compensated by the subsequent overflow when subtracting
30f7a378 4590 one / negating. */
91ce572a 4591
44037a66
TG
4592 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4593 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4594 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4595 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4596 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4597 {
b3d4e1b2 4598 op0 = protect_from_queue (op0, 0);
c2ec26b8 4599 tem = convert_modes (word_mode, mode, op0, 1);
81722fa9 4600 mode = word_mode;
44037a66
TG
4601 }
4602
4603 if (tem != 0)
4604 {
4605 if (code == EQ)
4606 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4607 0, OPTAB_WIDEN);
4608 else
4609 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4610 }
4611
4612 /* If we couldn't do it that way, for NE we can "or" the two's complement
4613 of the value with itself. For EQ, we take the one's complement of
4614 that "or", which is an extra insn, so we only handle EQ if branches
4615 are expensive. */
4616
4617 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4618 {
36d747f6
RS
4619 if (rtx_equal_p (subtarget, op0))
4620 subtarget = 0;
4621
44037a66
TG
4622 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4623 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4624 OPTAB_WIDEN);
4625
4626 if (tem && code == EQ)
4627 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4628 }
4629 }
4630
4631 if (tem && normalizep)
4632 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4633 size_int (GET_MODE_BITSIZE (mode) - 1),
91e66235 4634 subtarget, normalizep == 1);
44037a66 4635
91e66235 4636 if (tem)
44037a66 4637 {
91e66235
MM
4638 if (GET_MODE (tem) != target_mode)
4639 {
4640 convert_move (target, tem, 0);
4641 tem = target;
4642 }
4643 else if (!subtarget)
4644 {
4645 emit_move_insn (target, tem);
4646 tem = target;
4647 }
44037a66 4648 }
91e66235 4649 else
44037a66
TG
4650 delete_insns_since (last);
4651
4652 return tem;
4653}
04a8ee2f
TG
4654
4655/* Like emit_store_flag, but always succeeds. */
4656
4657rtx
4658emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4659 rtx target;
4660 enum rtx_code code;
4661 rtx op0, op1;
4662 enum machine_mode mode;
4663 int unsignedp;
4664 int normalizep;
4665{
4666 rtx tem, label;
4667
4668 /* First see if emit_store_flag can do the job. */
4669 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4670 if (tem != 0)
4671 return tem;
4672
4673 if (normalizep == 0)
4674 normalizep = 1;
4675
4676 /* If this failed, we have to do this with set/compare/jump/set code. */
4677
4678 if (GET_CODE (target) != REG
4679 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4680 target = gen_reg_rtx (GET_MODE (target));
4681
e4565aff 4682 emit_move_insn (target, const1_rtx);
04a8ee2f 4683 label = gen_label_rtx ();
d43e0b7d 4684 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
b30f05db 4685 NULL_RTX, label);
04a8ee2f 4686
e4565aff 4687 emit_move_insn (target, const0_rtx);
44037a66
TG
4688 emit_label (label);
4689
4690 return target;
4691}
f5963e61
JL
4692\f
4693/* Perform possibly multi-word comparison and conditional jump to LABEL
4694 if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE
4695
4696 The algorithm is based on the code in expr.c:do_jump.
4697
4698 Note that this does not perform a general comparison. Only variants
4699 generated within expmed.c are correctly handled, others abort (but could
4700 be handled if needed). */
4701
4702static void
4703do_cmp_and_jump (arg1, arg2, op, mode, label)
4704 rtx arg1, arg2, label;
b30f05db
BS
4705 enum rtx_code op;
4706 enum machine_mode mode;
f5963e61
JL
4707{
4708 /* If this mode is an integer too wide to compare properly,
4709 compare word by word. Rely on cse to optimize constant cases. */
4710
1eb8759b
RH
4711 if (GET_MODE_CLASS (mode) == MODE_INT
4712 && ! can_compare_p (op, mode, ccp_jump))
f5963e61
JL
4713 {
4714 rtx label2 = gen_label_rtx ();
4715
4716 switch (op)
4717 {
4718 case LTU:
4719 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4720 break;
4721
4722 case LEU:
4723 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4724 break;
4725
4726 case LT:
4727 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4728 break;
4729
4730 case GT:
4731 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4732 break;
4733
4734 case GE:
4735 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4736 break;
4737
4738 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4739 that's the only equality operations we do */
4740 case EQ:
4741 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
c4f2c499 4742 abort ();
f5963e61
JL
4743 do_jump_by_parts_equality_rtx (arg1, label2, label);
4744 break;
4745
4746 case NE:
4747 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
c4f2c499 4748 abort ();
f5963e61
JL
4749 do_jump_by_parts_equality_rtx (arg1, label, label2);
4750 break;
4751
4752 default:
c4f2c499 4753 abort ();
f5963e61
JL
4754 }
4755
4756 emit_label (label2);
4757 }
4758 else
d43e0b7d 4759 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);
f5963e61 4760}
This page took 1.841434 seconds and 5 git commands to generate.