/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
   Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This definition file is free software; you can redistribute it
   and/or modify it under the terms of the GNU General Public
   License as published by the Free Software Foundation; either
   version 2, or (at your option) any later version.

   This definition file is distributed in the hope that it will be
   useful, but WITHOUT ANY WARRANTY; without even the implied
   warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   See the GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  */

#ifndef SI_TYPE_SIZE
#define SI_TYPE_SIZE 32
#endif

#define __BITS4 (SI_TYPE_SIZE / 4)
#define __ll_B (1L << (SI_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((USItype) (t) % __ll_B)
#define __ll_highpart(t) ((USItype) (t) / __ll_B)

/* Define auxiliary asm macros.

   1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand)
   multiplies two USItype integers MULTIPLIER and MULTIPLICAND,
   and generates a two-part USItype product in HIGH_PROD and
   LOW_PROD.

   2) __umulsidi3(a,b) multiplies two USItype integers A and B,
   and returns a UDItype product.  This is just a variant of umul_ppmm.

   3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
   denominator) divides a two-word unsigned integer, composed by the
   integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and
   places the quotient in QUOTIENT and the remainder in REMAINDER.
   HIGH_NUMERATOR must be less than DENOMINATOR for correct operation.
   If the macro in addition requires the most significant bit of
   DENOMINATOR to be 1, the pre-processor symbol UDIV_NEEDS_NORMALIZATION
   is defined to 1.

   4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
   denominator).  Like udiv_qrnnd but the numbers are signed.  The
   quotient is rounded towards 0.

   5) count_leading_zeros(count, x) counts the number of zero-bits from
   the msb to the first non-zero bit.  This is the number of steps X
   needs to be shifted left to set the msb.  Undefined for X == 0.

   6) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
   high_addend_2, low_addend_2) adds two two-word unsigned integers,
   composed by HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and
   LOW_ADDEND_2 respectively.  The result is placed in HIGH_SUM and
   LOW_SUM.  Overflow (i.e. carry out) is not stored anywhere, and is
   lost.

   7) sub_ddmmss(high_difference, low_difference, high_minuend,
   low_minuend, high_subtrahend, low_subtrahend) subtracts two
   two-word unsigned integers, composed by HIGH_MINUEND and
   LOW_MINUEND, and HIGH_SUBTRAHEND and LOW_SUBTRAHEND
   respectively.  The result is placed in HIGH_DIFFERENCE and
   LOW_DIFFERENCE.  Overflow (i.e. carry out) is not stored anywhere,
   and is lost.

   If any of these macros are left undefined for a particular CPU,
   C macros are used.  */

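/* As an illustration only (a minimal sketch, not part of the interface
   itself, and assuming the includer defines USItype and UDItype as
   32-bit and 64-bit unsigned types), a caller typically splits a
   double-word value into single words and lets the macros do the rest.
   The function names below are purely hypothetical.  */
#if 0
static UDItype
example_mul_32x32 (USItype a, USItype b)
{
  USItype hi, lo;
  umul_ppmm (hi, lo, a, b);	/* hi:lo = a * b */
  return ((UDItype) hi << 32) | lo;
}

static USItype
example_div_64by32 (USItype nh, USItype nl, USItype d, USItype *rem)
{
  USItype q, r;
  /* Preconditions from the comment above: nh < d, and if
     UDIV_NEEDS_NORMALIZATION is 1, d must have its msb set.  */
  udiv_qrnnd (q, r, nh, nl, d);
  *rem = r;
  return q;
}
#endif
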
/* The CPUs come in alphabetical order below.

   Please add support for more CPUs here, or improve the current support
   for the CPUs below!
   (E.g. WE32100, IBM360.)  */

#if defined (__GNUC__) && !defined (NO_ASM)

/* We sometimes need to clobber "cc" with gcc2, but that would not be
   understood by gcc1.  Use cpp to avoid major code duplication.  */
#if __GNUC__ < 2
#define __CLOBBER_CC
#define __AND_CLOBBER_CC
#else /* __GNUC__ >= 2 */
#define __CLOBBER_CC : "cc"
#define __AND_CLOBBER_CC , "cc"
#endif /* __GNUC__ < 2 */
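/* A minimal usage sketch (not from the original header; the macro name
   and instruction are purely illustrative): the two macros are pasted
   onto the end of an asm's operand list, so gcc2 sees a condition-code
   clobber while gcc1, which has no clobber syntax, sees nothing extra.  */
#if 0
#define example_add(res, a, b) \
  __asm__ ("add %0,%1,%2" \
	   : "=r" (res) \
	   : "r" (a), "r" (b) __CLOBBER_CC)
#endif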

#if defined (__a29k__) || defined (_AM29K)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add %1,%4,%5
	addc %0,%2,%3" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "%r" ((USItype)(ah)), \
	     "rI" ((USItype)(bh)), \
	     "%r" ((USItype)(al)), \
	     "rI" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub %1,%4,%5
	subc %0,%2,%3" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "r" ((USItype)(ah)), \
	     "rI" ((USItype)(bh)), \
	     "r" ((USItype)(al)), \
	     "rI" ((USItype)(bl)))
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("multiplu %0,%1,%2" \
	     : "=r" ((USItype)(xl)) \
	     : "r" (__m0), \
	       "r" (__m1)); \
    __asm__ ("multmu %0,%1,%2" \
	     : "=r" ((USItype)(xh)) \
	     : "r" (__m0), \
	       "r" (__m1)); \
  } while (0)
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("dividu %0,%3,%4" \
	   : "=r" ((USItype)(q)), \
	     "=q" ((USItype)(r)) \
	   : "1" ((USItype)(n1)), \
	     "r" ((USItype)(n0)), \
	     "r" ((USItype)(d)))
#define count_leading_zeros(count, x) \
    __asm__ ("clz %0,%1" \
	     : "=r" ((USItype)(count)) \
	     : "r" ((USItype)(x)))
#endif /* __a29k__ */

#if defined (__arm__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("adds %1, %4, %5
	adc %0, %2, %3" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "%r" ((USItype)(ah)), \
	     "rI" ((USItype)(bh)), \
	     "%r" ((USItype)(al)), \
	     "rI" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subs %1, %4, %5
	sbc %0, %2, %3" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "r" ((USItype)(ah)), \
	     "rI" ((USItype)(bh)), \
	     "r" ((USItype)(al)), \
	     "rI" ((USItype)(bl)))
#define umul_ppmm(xh, xl, a, b) \
{register USItype __t0, __t1, __t2; \
  __asm__ ("%@ Inlined umul_ppmm
	mov	%2, %5, lsr #16
	mov	%0, %6, lsr #16
	bic	%3, %5, %2, lsl #16
	bic	%4, %6, %0, lsl #16
	mul	%1, %3, %4
	mul	%4, %2, %4
	mul	%3, %0, %3
	mul	%0, %2, %0
	adds	%3, %4, %3
	addcs	%0, %0, #65536
	adds	%1, %1, %3, lsl #16
	adc	%0, %0, %3, lsr #16" \
	   : "=&r" ((USItype)(xh)), \
	     "=r" ((USItype)(xl)), \
	     "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \
	   : "r" ((USItype)(a)), \
	     "r" ((USItype)(b)));}
#define UMUL_TIME 20
#define UDIV_TIME 100
#endif /* __arm__ */

#if defined (__clipper__)
#define umul_ppmm(w1, w0, u, v) \
  ({union {UDItype __ll; \
	   struct {USItype __l, __h;} __i; \
	  } __xx; \
  __asm__ ("mulwux %2,%0" \
	   : "=r" (__xx.__ll) \
	   : "%0" ((USItype)(u)), \
	     "r" ((USItype)(v))); \
  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
#define smul_ppmm(w1, w0, u, v) \
  ({union {DItype __ll; \
	   struct {SItype __l, __h;} __i; \
	  } __xx; \
  __asm__ ("mulwx %2,%0" \
	   : "=r" (__xx.__ll) \
	   : "%0" ((SItype)(u)), \
	     "r" ((SItype)(v))); \
  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
#define __umulsidi3(u, v) \
  ({UDItype __w; \
    __asm__ ("mulwux %2,%0" \
	     : "=r" (__w) \
	     : "%0" ((USItype)(u)), \
	       "r" ((USItype)(v))); \
    __w; })
#endif /* __clipper__ */

#if defined (__gmicro__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add.w %5,%1
	addx %3,%0" \
	   : "=g" ((USItype)(sh)), \
	     "=&g" ((USItype)(sl)) \
	   : "%0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "%1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub.w %5,%1
	subx %3,%0" \
	   : "=g" ((USItype)(sh)), \
	     "=&g" ((USItype)(sl)) \
	   : "0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#define umul_ppmm(ph, pl, m0, m1) \
  __asm__ ("mulx %3,%0,%1" \
	   : "=g" ((USItype)(ph)), \
	     "=r" ((USItype)(pl)) \
	   : "%0" ((USItype)(m0)), \
	     "g" ((USItype)(m1)))
#define udiv_qrnnd(q, r, nh, nl, d) \
  __asm__ ("divx %4,%0,%1" \
	   : "=g" ((USItype)(q)), \
	     "=r" ((USItype)(r)) \
	   : "1" ((USItype)(nh)), \
	     "0" ((USItype)(nl)), \
	     "g" ((USItype)(d)))
#define count_leading_zeros(count, x) \
  __asm__ ("bsch/1 %1,%0" \
	   : "=g" (count) \
	   : "g" ((USItype)(x)), \
	     "0" ((USItype)0))
#endif

#if defined (__hppa)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add %4,%5,%1
	addc %2,%3,%0" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "%rM" ((USItype)(ah)), \
	     "rM" ((USItype)(bh)), \
	     "%rM" ((USItype)(al)), \
	     "rM" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub %4,%5,%1
	subb %2,%3,%0" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "rM" ((USItype)(ah)), \
	     "rM" ((USItype)(bh)), \
	     "rM" ((USItype)(al)), \
	     "rM" ((USItype)(bl)))
#if defined (_PA_RISC1_1)
#define umul_ppmm(w1, w0, u, v) \
  do { \
    union \
      { \
	UDItype __f; \
	struct {USItype __w1, __w0;} __w1w0; \
      } __t; \
    __asm__ ("xmpyu %1,%2,%0" \
	     : "=x" (__t.__f) \
	     : "x" ((USItype)(u)), \
	       "x" ((USItype)(v))); \
    (w1) = __t.__w1w0.__w1; \
    (w0) = __t.__w1w0.__w0; \
  } while (0)
#define UMUL_TIME 8
#else
#define UMUL_TIME 30
#endif
#define UDIV_TIME 40
#define count_leading_zeros(count, x) \
  do { \
    USItype __tmp; \
    __asm__ ( \
      "ldi 1,%0
	extru,= %1,15,16,%%r0	; Bits 31..16 zero?
	extru,tr %1,15,16,%1	; No.  Shift down, skip add.
	ldo 16(%0),%0		; Yes.  Perform add.
	extru,= %1,23,8,%%r0	; Bits 15..8 zero?
	extru,tr %1,23,8,%1	; No.  Shift down, skip add.
	ldo 8(%0),%0		; Yes.  Perform add.
	extru,= %1,27,4,%%r0	; Bits 7..4 zero?
	extru,tr %1,27,4,%1	; No.  Shift down, skip add.
	ldo 4(%0),%0		; Yes.  Perform add.
	extru,= %1,29,2,%%r0	; Bits 3..2 zero?
	extru,tr %1,29,2,%1	; No.  Shift down, skip add.
	ldo 2(%0),%0		; Yes.  Perform add.
	extru %1,30,1,%1	; Extract bit 1.
	sub %0,%1,%0		; Subtract it.
	" : "=r" (count), "=r" (__tmp) : "1" (x)); \
  } while (0)
#endif

#if defined (__i386__) || defined (__i486__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl %5,%1
	adcl %3,%0" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "%0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "%1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl %5,%1
	sbbl %3,%0" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mull %3" \
	   : "=a" ((USItype)(w0)), \
	     "=d" ((USItype)(w1)) \
	   : "%0" ((USItype)(u)), \
	     "rm" ((USItype)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divl %4" \
	   : "=a" ((USItype)(q)), \
	     "=d" ((USItype)(r)) \
	   : "0" ((USItype)(n0)), \
	     "1" ((USItype)(n1)), \
	     "rm" ((USItype)(d)))
#define count_leading_zeros(count, x) \
  do { \
    USItype __cbtmp; \
    __asm__ ("bsrl %1,%0" \
	     : "=r" (__cbtmp) : "rm" ((USItype)(x))); \
    (count) = __cbtmp ^ 31; \
  } while (0)
#define UMUL_TIME 40
#define UDIV_TIME 40
#endif /* 80x86 */
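/* Illustrative sketch only (not part of the header, function name is
   hypothetical): bsrl yields the bit index of the most significant set
   bit, so the xor with 31 above turns that index into a leading-zero
   count, e.g. x = 0x00010000 -> bsrl gives 16 -> 16 ^ 31 = 15 leading
   zeros.  A cross-check against the portable definition:  */
#if 0
static int
example_check_clz (USItype x)	/* x must be non-zero */
{
  USItype asm_count, ref = 0, t = x;
  count_leading_zeros (asm_count, x);
  while (!(t & ((USItype) 1 << (SI_TYPE_SIZE - 1))))
    ref++, t <<= 1;
  return asm_count == ref;
}
#endif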

#if defined (__i860__)
#if 0
/* Make sure these patterns really improve the code before
   switching them on.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    union \
      { \
	DItype __ll; \
	struct {USItype __l, __h;} __i; \
      } __a, __b, __s; \
    __a.__i.__l = (al); \
    __a.__i.__h = (ah); \
    __b.__i.__l = (bl); \
    __b.__i.__h = (bh); \
    __asm__ ("fiadd.dd %1,%2,%0" \
	     : "=f" (__s.__ll) \
	     : "%f" (__a.__ll), "f" (__b.__ll)); \
    (sh) = __s.__i.__h; \
    (sl) = __s.__i.__l; \
  } while (0)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    union \
      { \
	DItype __ll; \
	struct {USItype __l, __h;} __i; \
      } __a, __b, __s; \
    __a.__i.__l = (al); \
    __a.__i.__h = (ah); \
    __b.__i.__l = (bl); \
    __b.__i.__h = (bh); \
    __asm__ ("fisub.dd %1,%2,%0" \
	     : "=f" (__s.__ll) \
	     : "%f" (__a.__ll), "f" (__b.__ll)); \
    (sh) = __s.__i.__h; \
    (sl) = __s.__i.__l; \
  } while (0)
#endif
#endif /* __i860__ */

#if defined (__i960__)
#define umul_ppmm(w1, w0, u, v) \
  ({union {UDItype __ll; \
	   struct {USItype __l, __h;} __i; \
	  } __xx; \
  __asm__ ("emul %2,%1,%0" \
	   : "=d" (__xx.__ll) \
	   : "%dI" ((USItype)(u)), \
	     "dI" ((USItype)(v))); \
  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
#define __umulsidi3(u, v) \
  ({UDItype __w; \
    __asm__ ("emul %2,%1,%0" \
	     : "=d" (__w) \
	     : "%dI" ((USItype)(u)), \
	       "dI" ((USItype)(v))); \
    __w; })
#endif /* __i960__ */

#if defined (__mc68000__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add%.l %5,%1
	addx%.l %3,%0" \
	   : "=d" ((USItype)(sh)), \
	     "=&d" ((USItype)(sl)) \
	   : "%0" ((USItype)(ah)), \
	     "d" ((USItype)(bh)), \
	     "%1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub%.l %5,%1
	subx%.l %3,%0" \
	   : "=d" ((USItype)(sh)), \
	     "=&d" ((USItype)(sl)) \
	   : "0" ((USItype)(ah)), \
	     "d" ((USItype)(bh)), \
	     "1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#if defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mulu%.l %3,%1:%0" \
	   : "=d" ((USItype)(w0)), \
	     "=d" ((USItype)(w1)) \
	   : "%0" ((USItype)(u)), \
	     "dmi" ((USItype)(v)))
#define UMUL_TIME 45
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divu%.l %4,%1:%0" \
	   : "=d" ((USItype)(q)), \
	     "=d" ((USItype)(r)) \
	   : "0" ((USItype)(n0)), \
	     "1" ((USItype)(n1)), \
	     "dmi" ((USItype)(d)))
#define UDIV_TIME 90
#define sdiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divs%.l %4,%1:%0" \
	   : "=d" ((USItype)(q)), \
	     "=d" ((USItype)(r)) \
	   : "0" ((USItype)(n0)), \
	     "1" ((USItype)(n1)), \
	     "dmi" ((USItype)(d)))
#define count_leading_zeros(count, x) \
  __asm__ ("bfffo %1{%b2:%b2},%0" \
	   : "=d" ((USItype)(count)) \
	   : "od" ((USItype)(x)), "n" (0))
#else /* not mc68020 */
/* %/ inserts REGISTER_PREFIX.  */
#define umul_ppmm(xh, xl, a, b) \
  __asm__ ("| Inlined umul_ppmm
	move%.l %2,%/d0
	move%.l %3,%/d1
	move%.l %/d0,%/d2
	swap %/d0
	move%.l %/d1,%/d3
	swap %/d1
	move%.w %/d2,%/d4
	mulu %/d3,%/d4
	mulu %/d1,%/d2
	mulu %/d0,%/d3
	mulu %/d0,%/d1
	move%.l %/d4,%/d0
	eor%.w %/d0,%/d0
	swap %/d0
	add%.l %/d0,%/d2
	add%.l %/d3,%/d2
	jcc 1f
	add%.l #65536,%/d1
1:	swap %/d2
	moveq #0,%/d0
	move%.w %/d2,%/d0
	move%.w %/d4,%/d2
	move%.l %/d2,%1
	add%.l %/d1,%/d0
	move%.l %/d0,%0" \
	   : "=g" ((USItype)(xh)), \
	     "=g" ((USItype)(xl)) \
	   : "g" ((USItype)(a)), \
	     "g" ((USItype)(b)) \
	   : "d0", "d1", "d2", "d3", "d4")
#define UMUL_TIME 100
#define UDIV_TIME 400
#endif /* not mc68020 */
#endif /* mc68000 */

#if defined (__m88000__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addu.co %1,%r4,%r5
	addu.ci %0,%r2,%r3" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "%rJ" ((USItype)(ah)), \
	     "rJ" ((USItype)(bh)), \
	     "%rJ" ((USItype)(al)), \
	     "rJ" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subu.co %1,%r4,%r5
	subu.ci %0,%r2,%r3" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "rJ" ((USItype)(ah)), \
	     "rJ" ((USItype)(bh)), \
	     "rJ" ((USItype)(al)), \
	     "rJ" ((USItype)(bl)))
#define count_leading_zeros(count, x) \
  do { \
    USItype __cbtmp; \
    __asm__ ("ff1 %0,%1" \
	     : "=r" (__cbtmp) \
	     : "r" ((USItype)(x))); \
    (count) = __cbtmp ^ 31; \
  } while (0)
#if defined (__mc88110__)
#define umul_ppmm(wh, wl, u, v) \
  do { \
    union {UDItype __ll; \
	   struct {USItype __h, __l;} __i; \
	  } __xx; \
    __asm__ ("mulu.d %0,%1,%2" \
	     : "=r" (__xx.__ll) \
	     : "r" ((USItype)(u)), \
	       "r" ((USItype)(v))); \
    (wh) = __xx.__i.__h; \
    (wl) = __xx.__i.__l; \
  } while (0)
#define udiv_qrnnd(q, r, n1, n0, d) \
  ({union {UDItype __ll; \
	   struct {USItype __h, __l;} __i; \
	  } __xx; \
  USItype __q; \
  __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
  __asm__ ("divu.d %0,%1,%2" \
	   : "=r" (__q) \
	   : "r" (__xx.__ll), \
	     "r" ((USItype)(d))); \
  (r) = (n0) - __q * (d); (q) = __q; })
#define UMUL_TIME 5
#define UDIV_TIME 25
#else
#define UMUL_TIME 17
#define UDIV_TIME 150
#endif /* __mc88110__ */
#endif /* __m88000__ */

#if defined (__mips__)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("multu %2,%3
	mflo %0
	mfhi %1" \
	   : "=d" ((USItype)(w0)), \
	     "=d" ((USItype)(w1)) \
	   : "d" ((USItype)(u)), \
	     "d" ((USItype)(v)))
#define UMUL_TIME 10
#define UDIV_TIME 100
#endif /* __mips__ */

#if defined (__ns32000__)
#define umul_ppmm(w1, w0, u, v) \
  ({union {UDItype __ll; \
	   struct {USItype __l, __h;} __i; \
	  } __xx; \
  __asm__ ("meid %2,%0" \
	   : "=g" (__xx.__ll) \
	   : "%0" ((USItype)(u)), \
	     "g" ((USItype)(v))); \
  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
#define __umulsidi3(u, v) \
  ({UDItype __w; \
    __asm__ ("meid %2,%0" \
	     : "=g" (__w) \
	     : "%0" ((USItype)(u)), \
	       "g" ((USItype)(v))); \
    __w; })
#define udiv_qrnnd(q, r, n1, n0, d) \
  ({union {UDItype __ll; \
	   struct {USItype __l, __h;} __i; \
	  } __xx; \
  __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
  __asm__ ("deid %2,%0" \
	   : "=g" (__xx.__ll) \
	   : "0" (__xx.__ll), \
	     "g" ((USItype)(d))); \
  (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
#endif /* __ns32000__ */

#if (defined (_ARCH_PPC) || defined (_IBMR2)) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    if (__builtin_constant_p (bh) && (bh) == 0) \
      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
	       : "=r" ((USItype)(sh)), \
		 "=&r" ((USItype)(sl)) \
	       : "%r" ((USItype)(ah)), \
		 "%r" ((USItype)(al)), \
		 "rI" ((USItype)(bl))); \
    else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
	       : "=r" ((USItype)(sh)), \
		 "=&r" ((USItype)(sl)) \
	       : "%r" ((USItype)(ah)), \
		 "%r" ((USItype)(al)), \
		 "rI" ((USItype)(bl))); \
    else \
      __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
	       : "=r" ((USItype)(sh)), \
		 "=&r" ((USItype)(sl)) \
	       : "%r" ((USItype)(ah)), \
		 "r" ((USItype)(bh)), \
		 "%r" ((USItype)(al)), \
		 "rI" ((USItype)(bl))); \
  } while (0)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    if (__builtin_constant_p (ah) && (ah) == 0) \
      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
	       : "=r" ((USItype)(sh)), \
		 "=&r" ((USItype)(sl)) \
	       : "r" ((USItype)(bh)), \
		 "rI" ((USItype)(al)), \
		 "r" ((USItype)(bl))); \
    else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0) \
      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
	       : "=r" ((USItype)(sh)), \
		 "=&r" ((USItype)(sl)) \
	       : "r" ((USItype)(bh)), \
		 "rI" ((USItype)(al)), \
		 "r" ((USItype)(bl))); \
    else if (__builtin_constant_p (bh) && (bh) == 0) \
      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
	       : "=r" ((USItype)(sh)), \
		 "=&r" ((USItype)(sl)) \
	       : "r" ((USItype)(ah)), \
		 "rI" ((USItype)(al)), \
		 "r" ((USItype)(bl))); \
    else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
	       : "=r" ((USItype)(sh)), \
		 "=&r" ((USItype)(sl)) \
	       : "r" ((USItype)(ah)), \
		 "rI" ((USItype)(al)), \
		 "r" ((USItype)(bl))); \
    else \
      __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
	       : "=r" ((USItype)(sh)), \
		 "=&r" ((USItype)(sl)) \
	       : "r" ((USItype)(ah)), \
		 "r" ((USItype)(bh)), \
		 "rI" ((USItype)(al)), \
		 "r" ((USItype)(bl))); \
  } while (0)
#define count_leading_zeros(count, x) \
  __asm__ ("{cntlz|cntlzw} %0,%1" \
	   : "=r" ((USItype)(count)) \
	   : "r" ((USItype)(x)))
#if defined (_ARCH_PPC)
#define umul_ppmm(ph, pl, m0, m1) \
  do { \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("mulhwu %0,%1,%2" \
	     : "=r" ((USItype) ph) \
	     : "%r" (__m0), \
	       "r" (__m1)); \
    (pl) = __m0 * __m1; \
  } while (0)
#define UMUL_TIME 15
#define smul_ppmm(ph, pl, m0, m1) \
  do { \
    SItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("mulhw %0,%1,%2" \
	     : "=r" ((SItype) ph) \
	     : "%r" (__m0), \
	       "r" (__m1)); \
    (pl) = __m0 * __m1; \
  } while (0)
#define SMUL_TIME 14
#define UDIV_TIME 120
#else
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("mul %0,%2,%3" \
	     : "=r" ((USItype)(xh)), \
	       "=q" ((USItype)(xl)) \
	     : "r" (__m0), \
	       "r" (__m1)); \
    (xh) += ((((SItype) __m0 >> 31) & __m1) \
	     + (((SItype) __m1 >> 31) & __m0)); \
  } while (0)
#define UMUL_TIME 8
#define smul_ppmm(xh, xl, m0, m1) \
  __asm__ ("mul %0,%2,%3" \
	   : "=r" ((SItype)(xh)), \
	     "=q" ((SItype)(xl)) \
	   : "r" (m0), \
	     "r" (m1))
#define SMUL_TIME 4
#define sdiv_qrnnd(q, r, nh, nl, d) \
  __asm__ ("div %0,%2,%4" \
	   : "=r" ((SItype)(q)), "=q" ((SItype)(r)) \
	   : "r" ((SItype)(nh)), "1" ((SItype)(nl)), "r" ((SItype)(d)))
#define UDIV_TIME 100
#endif
#endif /* Power architecture variants.  */
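/* Illustrative note (not from the original header): the
   __builtin_constant_p tests above let a high word that is known at
   compile time to be all zeros or all ones use the shorter carry-only
   forms.  Roughly, for the addition case:
     bh == 0   ->  sh = ah + 0  + carry   (addze)
     bh == ~0  ->  sh = ah + -1 + carry   (addme)
     otherwise ->  sh = ah + bh + carry   (adde).  */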

#if defined (__pyr__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addw %5,%1
	addwc %3,%0" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "%0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "%1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subw %5,%1
	subwb %3,%0" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
/* This insn doesn't work on ancient pyramids.  */
#define umul_ppmm(w1, w0, u, v) \
  ({union { \
	UDItype __ll; \
	struct {USItype __h, __l;} __i; \
     } __xx; \
  __xx.__i.__l = u; \
  __asm__ ("uemul %3,%0" \
	   : "=r" (__xx.__i.__h), \
	     "=r" (__xx.__i.__l) \
	   : "1" (__xx.__i.__l), \
	     "g" ((USItype)(v))); \
  (w1) = __xx.__i.__h; \
  (w0) = __xx.__i.__l;})
#endif /* __pyr__ */

#if defined (__ibm032__) /* RT/ROMP */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("a %1,%5
	ae %0,%3" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "%0" ((USItype)(ah)), \
	     "r" ((USItype)(bh)), \
	     "%1" ((USItype)(al)), \
	     "r" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("s %1,%5
	se %0,%3" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "0" ((USItype)(ah)), \
	     "r" ((USItype)(bh)), \
	     "1" ((USItype)(al)), \
	     "r" ((USItype)(bl)))
#define umul_ppmm(ph, pl, m0, m1) \
  do { \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ( \
       "s r2,r2
	mts r10,%2
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	cas %0,r2,r0
	mfs r10,%1" \
	     : "=r" ((USItype)(ph)), \
	       "=r" ((USItype)(pl)) \
	     : "%r" (__m0), \
	       "r" (__m1) \
	     : "r2"); \
    (ph) += ((((SItype) __m0 >> 31) & __m1) \
	     + (((SItype) __m1 >> 31) & __m0)); \
  } while (0)
#define UMUL_TIME 20
#define UDIV_TIME 200
#define count_leading_zeros(count, x) \
  do { \
    if ((x) >= 0x10000) \
      __asm__ ("clz %0,%1" \
	       : "=r" ((USItype)(count)) \
	       : "r" ((USItype)(x) >> 16)); \
    else \
      { \
	__asm__ ("clz %0,%1" \
		 : "=r" ((USItype)(count)) \
		 : "r" ((USItype)(x))); \
	(count) += 16; \
      } \
  } while (0)
#endif

#if defined (__sparc__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addcc %r4,%5,%1
	addx %r2,%3,%0" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "%rJ" ((USItype)(ah)), \
	     "rI" ((USItype)(bh)), \
	     "%rJ" ((USItype)(al)), \
	     "rI" ((USItype)(bl)) \
	   __CLOBBER_CC)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subcc %r4,%5,%1
	subx %r2,%3,%0" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "rJ" ((USItype)(ah)), \
	     "rI" ((USItype)(bh)), \
	     "rJ" ((USItype)(al)), \
	     "rI" ((USItype)(bl)) \
	   __CLOBBER_CC)
#if defined (__sparc_v8__)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("umul %2,%3,%1;rd %%y,%0" \
	   : "=r" ((USItype)(w1)), \
	     "=r" ((USItype)(w0)) \
	   : "r" ((USItype)(u)), \
	     "r" ((USItype)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
	   : "=&r" ((USItype)(q)), \
	     "=&r" ((USItype)(r)) \
	   : "r" ((USItype)(n1)), \
	     "r" ((USItype)(n0)), \
	     "r" ((USItype)(d)))
#else
#if defined (__sparclite__)
/* This has hardware multiply but not divide.  It also has two additional
   instructions scan (ffs from high bit) and divscc.  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("umul %2,%3,%1;rd %%y,%0" \
	   : "=r" ((USItype)(w1)), \
	     "=r" ((USItype)(w0)) \
	   : "r" ((USItype)(u)), \
	     "r" ((USItype)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("! Inlined udiv_qrnnd
	wr %%g0,%2,%%y	! Not a delayed write for sparclite
	tst %%g0
	divscc %3,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%%g1
	divscc %%g1,%4,%0
	rd %%y,%1
	bl,a 1f
	add %1,%4,%1
1:	! End of inline udiv_qrnnd" \
	   : "=r" ((USItype)(q)), \
	     "=r" ((USItype)(r)) \
	   : "r" ((USItype)(n1)), \
	     "r" ((USItype)(n0)), \
	     "rI" ((USItype)(d)) \
	   : "%g1" __AND_CLOBBER_CC)
#define UDIV_TIME 37
#define count_leading_zeros(count, x) \
  __asm__ ("scan %1,0,%0" \
	   : "=r" ((USItype)(count)) \
	   : "r" ((USItype)(x)))
#else
/* SPARC without integer multiplication and divide instructions.
   (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("! Inlined umul_ppmm
	wr %%g0,%2,%%y	! SPARC has 0-3 delay insn after a wr
	sra %3,31,%%g2	! Don't move this insn
	and %2,%%g2,%%g2	! Don't move this insn
	andcc %%g0,0,%%g1	! Don't move this insn
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,0,%%g1
	add %%g1,%%g2,%0
	rd %%y,%1" \
	   : "=r" ((USItype)(w1)), \
	     "=r" ((USItype)(w0)) \
	   : "%rI" ((USItype)(u)), \
	     "r" ((USItype)(v)) \
	   : "%g1", "%g2" __AND_CLOBBER_CC)
#define UMUL_TIME 39		/* 39 instructions */
/* It's quite necessary to add this much assembler for the sparc.
   The default udiv_qrnnd (in C) is more than 10 times slower!  */
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("! Inlined udiv_qrnnd
	mov 32,%%g1
	subcc %1,%2,%%g0
1:	bcs 5f
	 addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb
	sub %1,%2,%1	! this kills msb of n
	addx %1,%1,%1	! so this can't give carry
	subcc %%g1,1,%%g1
2:	bne 1b
	 subcc %1,%2,%%g0
	bcs 3f
	 addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb
	b 3f
	 sub %1,%2,%1	! this kills msb of n
4:	sub %1,%2,%1
5:	addxcc %1,%1,%1
	bcc 2b
	 subcc %%g1,1,%%g1
! Got carry from n.  Subtract next step to cancel this carry.
	bne 4b
	 addcc %0,%0,%0	! shift n1n0 and a 0-bit in lsb
	sub %1,%2,%1
3:	xnor %0,0,%0
	! End of inline udiv_qrnnd" \
	   : "=&r" ((USItype)(q)), \
	     "=&r" ((USItype)(r)) \
	   : "r" ((USItype)(d)), \
	     "1" ((USItype)(n1)), \
	     "0" ((USItype)(n0)) : "%g1" __AND_CLOBBER_CC)
#define UDIV_TIME (3+7*32)	/* 7 instructions/iteration. 32 iterations. */
#endif /* __sparclite__ */
#endif /* __sparc_v8__ */
#endif /* __sparc__ */

#if defined (__vax__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl2 %5,%1
	adwc %3,%0" \
	   : "=g" ((USItype)(sh)), \
	     "=&g" ((USItype)(sl)) \
	   : "%0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "%1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl2 %5,%1
	sbwc %3,%0" \
	   : "=g" ((USItype)(sh)), \
	     "=&g" ((USItype)(sl)) \
	   : "0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    union { \
	UDItype __ll; \
	struct {USItype __l, __h;} __i; \
      } __xx; \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("emul %1,%2,$0,%0" \
	     : "=r" (__xx.__ll) \
	     : "g" (__m0), \
	       "g" (__m1)); \
    (xh) = __xx.__i.__h; \
    (xl) = __xx.__i.__l; \
    (xh) += ((((SItype) __m0 >> 31) & __m1) \
	     + (((SItype) __m1 >> 31) & __m0)); \
  } while (0)
#define sdiv_qrnnd(q, r, n1, n0, d) \
  do { \
    union {DItype __ll; \
	   struct {SItype __l, __h;} __i; \
	  } __xx; \
    __xx.__i.__h = n1; __xx.__i.__l = n0; \
    __asm__ ("ediv %3,%2,%0,%1" \
	     : "=g" (q), "=g" (r) \
	     : "g" (__xx.__ll), "g" (d)); \
  } while (0)
#endif /* __vax__ */

#endif /* __GNUC__ */

/* If this machine has no inline assembler, use C macros.  */

#if !defined (add_ssaaaa)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    USItype __x; \
    __x = (al) + (bl); \
    (sh) = (ah) + (bh) + (__x < (al)); \
    (sl) = __x; \
  } while (0)
#endif
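/* Illustrative note (not from the original header): (__x < (al)) is the
   carry out of the low-word addition.  The low words are added modulo
   2^SI_TYPE_SIZE, so the sum wrapped around exactly when the result is
   smaller than an addend.  Worked example with 32-bit words:
   al = 0xFFFFFFFF, bl = 2 -> __x = 1, and 1 < 0xFFFFFFFF, so a carry of
   1 is folded into the high-word sum.  */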

#if !defined (sub_ddmmss)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    USItype __x; \
    __x = (al) - (bl); \
    (sh) = (ah) - (bh) - (__x > (al)); \
    (sl) = __x; \
  } while (0)
#endif

#if !defined (umul_ppmm)
#define umul_ppmm(w1, w0, u, v) \
  do { \
    USItype __x0, __x1, __x2, __x3; \
    USItype __ul, __vl, __uh, __vh; \
\
    __ul = __ll_lowpart (u); \
    __uh = __ll_highpart (u); \
    __vl = __ll_lowpart (v); \
    __vh = __ll_highpart (v); \
\
    __x0 = (USItype) __ul * __vl; \
    __x1 = (USItype) __ul * __vh; \
    __x2 = (USItype) __uh * __vl; \
    __x3 = (USItype) __uh * __vh; \
\
    __x1 += __ll_highpart (__x0);/* this can't give carry */ \
    __x1 += __x2;		/* but this indeed can */ \
    if (__x1 < __x2)		/* did we get it? */ \
      __x3 += __ll_B;		/* yes, add it in the proper pos. */ \
\
    (w1) = __x3 + __ll_highpart (__x1); \
    (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
  } while (0)
#endif
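/* Illustrative sketch only (not part of the header, function name is
   hypothetical): the macro above is the schoolbook half-word algorithm;
   assuming the includer provides a 64-bit UDItype, it can be
   cross-checked against a native wide multiplication like this:  */
#if 0
static int
example_check_umul (USItype u, USItype v)
{
  USItype hi, lo;
  UDItype ref = (UDItype) u * v;
  umul_ppmm (hi, lo, u, v);
  return hi == (USItype) (ref >> 32) && lo == (USItype) ref;
}
#endif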

#if !defined (__umulsidi3)
#define __umulsidi3(u, v) \
  ({DIunion __w; \
    umul_ppmm (__w.s.high, __w.s.low, u, v); \
    __w.ll; })
#endif
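/* DIunion is not defined in this file; the includer (libgcc2.c in GCC)
   supplies it.  A minimal sketch of the shape this macro relies on,
   assuming little-endian word order (the real definition switches the
   member order based on word endianness):  */
#if 0
struct DIstruct {SItype low, high;};
typedef union
{
  struct DIstruct s;
  DItype ll;
} DIunion;
#endif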

/* Define this unconditionally, so it can be used for debugging.  */
#define __udiv_qrnnd_c(q, r, n1, n0, d) \
  do { \
    USItype __d1, __d0, __q1, __q0; \
    USItype __r1, __r0, __m; \
    __d1 = __ll_highpart (d); \
    __d0 = __ll_lowpart (d); \
\
    __r1 = (n1) % __d1; \
    __q1 = (n1) / __d1; \
    __m = (USItype) __q1 * __d0; \
    __r1 = __r1 * __ll_B | __ll_highpart (n0); \
    if (__r1 < __m) \
      { \
	__q1--, __r1 += (d); \
	if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
	  if (__r1 < __m) \
	    __q1--, __r1 += (d); \
      } \
    __r1 -= __m; \
\
    __r0 = __r1 % __d1; \
    __q0 = __r1 / __d1; \
    __m = (USItype) __q0 * __d0; \
    __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
    if (__r0 < __m) \
      { \
	__q0--, __r0 += (d); \
	if (__r0 >= (d)) \
	  if (__r0 < __m) \
	    __q0--, __r0 += (d); \
      } \
    __r0 -= __m; \
\
    (q) = (USItype) __q1 * __ll_B | __q0; \
    (r) = __r0; \
  } while (0)
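/* __udiv_qrnnd_c expects a normalized divisor (msb set), which is why
   UDIV_NEEDS_NORMALIZATION is set to 1 below when it is used.  A sketch
   of how a caller can normalize first (illustration only; the function
   name is hypothetical and 32-bit USItype is assumed):  */
#if 0
static USItype
example_div_norm (USItype nh, USItype nl, USItype d, USItype *rem)
{
  USItype q, r, shift;
  /* Precondition: nh < d and d != 0.  */
  count_leading_zeros (shift, d);
  if (shift != 0)
    {
      d <<= shift;
      nh = (nh << shift) | (nl >> (SI_TYPE_SIZE - shift));
      nl <<= shift;
    }
  __udiv_qrnnd_c (q, r, nh, nl, d);
  *rem = r >> shift;		/* undo the scaling of the remainder */
  return q;			/* the quotient is unaffected by scaling */
}
#endif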

/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
   __udiv_w_sdiv (defined in libgcc or elsewhere).  */
#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
#define udiv_qrnnd(q, r, nh, nl, d) \
  do { \
    USItype __r; \
    (q) = __udiv_w_sdiv (&__r, nh, nl, d); \
    (r) = __r; \
  } while (0)
#endif
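/* From the call above, __udiv_w_sdiv returns the quotient and stores the
   remainder through its first argument; its prototype is presumably
   along the lines of
     USItype __udiv_w_sdiv (USItype *rp, USItype nh, USItype nl, USItype d);
   (see libgcc for the actual definition).  */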

/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c.  */
#if !defined (udiv_qrnnd)
#define UDIV_NEEDS_NORMALIZATION 1
#define udiv_qrnnd __udiv_qrnnd_c
#endif

#if !defined (count_leading_zeros)
extern const UQItype __clz_tab[];
#define count_leading_zeros(count, x) \
  do { \
    USItype __xr = (x); \
    USItype __a; \
\
    if (SI_TYPE_SIZE <= 32) \
      { \
	__a = __xr < (1<<2*__BITS4) \
	  ? (__xr < (1<<__BITS4) ? 0 : __BITS4) \
	  : (__xr < (1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
      } \
    else \
      { \
	for (__a = SI_TYPE_SIZE - 8; __a > 0; __a -= 8) \
	  if (((__xr >> __a) & 0xff) != 0) \
	    break; \
      } \
\
    (count) = SI_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
  } while (0)
#endif
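/* __clz_tab itself is defined elsewhere (libgcc2.c); for the formula
   above to yield the leading-zero count it must map a value to one plus
   the position of its highest set bit: __clz_tab[0] = 0, __clz_tab[1] = 1,
   __clz_tab[2..3] = 2, ... , __clz_tab[128..255] = 8.  A sketch of an
   equivalent table generator (illustration only, hypothetical names):  */
#if 0
static UQItype example_clz_tab[256];

static void
example_init_clz_tab (void)
{
  int i, j;
  example_clz_tab[0] = 0;
  for (j = 1; j <= 8; j++)
    for (i = 1 << (j - 1); i < (1 << j); i++)
      example_clz_tab[i] = j;
}
#endif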

#ifndef UDIV_NEEDS_NORMALIZATION
#define UDIV_NEEDS_NORMALIZATION 0
#endif