/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
   Copyright (C) 1991, 1992, 1994, 1995 Free Software Foundation, Inc.

   This definition file is free software; you can redistribute it
   and/or modify it under the terms of the GNU General Public
   License as published by the Free Software Foundation; either
   version 2, or (at your option) any later version.

   This definition file is distributed in the hope that it will be
   useful, but WITHOUT ANY WARRANTY; without even the implied
   warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   See the GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#ifndef SI_TYPE_SIZE
#define SI_TYPE_SIZE 32
#endif

#define __BITS4 (SI_TYPE_SIZE / 4)
#define __ll_B (1L << (SI_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((USItype) (t) % __ll_B)
#define __ll_highpart(t) ((USItype) (t) / __ll_B)

/* Define auxiliary asm macros.

   1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand)
   multiplies two USItype integers MULTIPLIER and MULTIPLICAND,
   and generates a two-part USItype product in HIGH_PROD and
   LOW_PROD.

   2) __umulsidi3(a,b) multiplies two USItype integers A and B,
   and returns a UDItype product.  This is just a variant of umul_ppmm.

   3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
   denominator) divides a two-word unsigned integer, composed of the
   integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and
   places the quotient in QUOTIENT and the remainder in REMAINDER.
   HIGH_NUMERATOR must be less than DENOMINATOR for correct operation.
   If, in addition, the macro requires the most significant bit of
   DENOMINATOR to be 1, the pre-processor symbol
   UDIV_NEEDS_NORMALIZATION is defined to 1.

   4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
   denominator).  Like udiv_qrnnd but the numbers are signed.  The
   quotient is rounded towards 0.

   5) count_leading_zeros(count, x) counts the number of zero-bits from
   the msb to the first non-zero bit.  This is the number of steps X
   needs to be shifted left to set the msb.  Undefined for X == 0.

   6) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
   high_addend_2, low_addend_2) adds two two-word unsigned integers,
   composed of HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and
   LOW_ADDEND_2 respectively.  The result is placed in HIGH_SUM and
   LOW_SUM.  Overflow (i.e. carry out) is not stored anywhere, and is
   lost.

   7) sub_ddmmss(high_difference, low_difference, high_minuend,
   low_minuend, high_subtrahend, low_subtrahend) subtracts two
   two-word unsigned integers, composed of HIGH_MINUEND and
   LOW_MINUEND, and HIGH_SUBTRAHEND and LOW_SUBTRAHEND respectively.
   The result is placed in HIGH_DIFFERENCE and LOW_DIFFERENCE.
   Overflow (i.e. borrow out) is not stored anywhere, and is lost.

   If any of these macros is left undefined for a particular CPU,
   C macros are used.  */

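/* Illustrative usage sketch -- not part of the original header.  It shows
   how a caller might combine the macros documented above; the function
   name is hypothetical, and the block is disabled so it cannot affect
   compilation.  It assumes USItype is the 32-bit word type declared by
   the file that includes longlong.h.  */
#if 0
static USItype
example_muldiv (USItype a, USItype b, USItype d, USItype *remainder)
{
  USItype ph, pl;             /* high and low words of the product */
  USItype q, r;

  umul_ppmm (ph, pl, a, b);   /* (ph,pl) = a * b as a two-word value */
  /* udiv_qrnnd requires ph < d, and a normalized divisor when
     UDIV_NEEDS_NORMALIZATION is nonzero; assume both hold here.  */
  udiv_qrnnd (q, r, ph, pl, d);
  *remainder = r;
  return q;
}
#endif
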
/* The CPUs come in alphabetical order below.

   Please add support for more CPUs here, or improve the current support
   for the CPUs below!
   (E.g. WE32100, IBM360.)  */

#if defined (__GNUC__) && !defined (NO_ASM)

/* We sometimes need to clobber "cc" with gcc2, but that would not be
   understood by gcc1.  Use cpp to avoid major code duplication.  */
#if __GNUC__ < 2
#define __CLOBBER_CC
#define __AND_CLOBBER_CC
#else /* __GNUC__ >= 2 */
#define __CLOBBER_CC : "cc"
#define __AND_CLOBBER_CC , "cc"
#endif /* __GNUC__ < 2 */

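/* Illustrative sketch -- not part of the original header.  With the macros
   above, one asm statement can serve both compilers: under gcc >= 2 the
   trailing __CLOBBER_CC expands to `: "cc"', declaring the condition codes
   clobbered, and under gcc1 it expands to nothing.  The function name is
   hypothetical, the i386 asm is chosen purely for illustration, and the
   block is disabled.  */
#if 0
static USItype
example_add_clobbers_cc (USItype a, USItype b)
{
  USItype s;
  __asm__ ("addl %2,%0"
           : "=r" (s)
           : "0" (a), "r" (b)
           __CLOBBER_CC);
  return s;
}
#endif
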
f71c71f1 90#if defined (__a29k__) || defined (_AM29K)
e1a79915
RS
91#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
92 __asm__ ("add %1,%4,%5
93 addc %0,%2,%3" \
e6c5404d
TG
94 : "=r" ((USItype)(sh)), \
95 "=&r" ((USItype)(sl)) \
96 : "%r" ((USItype)(ah)), \
97 "rI" ((USItype)(bh)), \
98 "%r" ((USItype)(al)), \
99 "rI" ((USItype)(bl)))
e1a79915
RS
100#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
101 __asm__ ("sub %1,%4,%5
102 subc %0,%2,%3" \
e6c5404d
TG
103 : "=r" ((USItype)(sh)), \
104 "=&r" ((USItype)(sl)) \
105 : "r" ((USItype)(ah)), \
106 "rI" ((USItype)(bh)), \
107 "r" ((USItype)(al)), \
108 "rI" ((USItype)(bl)))
e1a79915
RS
109#define umul_ppmm(xh, xl, m0, m1) \
110 do { \
e6c5404d 111 USItype __m0 = (m0), __m1 = (m1); \
d83dd29a 112 __asm__ ("multiplu %0,%1,%2" \
e6c5404d 113 : "=r" ((USItype)(xl)) \
d83dd29a
TG
114 : "r" (__m0), \
115 "r" (__m1)); \
116 __asm__ ("multmu %0,%1,%2" \
e6c5404d 117 : "=r" ((USItype)(xh)) \
d83dd29a
TG
118 : "r" (__m0), \
119 "r" (__m1)); \
e1a79915
RS
120 } while (0)
121#define udiv_qrnnd(q, r, n1, n0, d) \
122 __asm__ ("dividu %0,%3,%4" \
e6c5404d
TG
123 : "=r" ((USItype)(q)), \
124 "=q" ((USItype)(r)) \
125 : "1" ((USItype)(n1)), \
126 "r" ((USItype)(n0)), \
127 "r" ((USItype)(d)))
e1a79915 128#define count_leading_zeros(count, x) \
d83dd29a 129 __asm__ ("clz %0,%1" \
e6c5404d
TG
130 : "=r" ((USItype)(count)) \
131 : "r" ((USItype)(x)))
e1a79915
RS
132#endif /* __a29k__ */
133
134#if defined (__arm__)
135#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
ac1e676a
RE
136 __asm__ ("adds %1, %4, %5
137 adc %0, %2, %3" \
e6c5404d
TG
138 : "=r" ((USItype)(sh)), \
139 "=&r" ((USItype)(sl)) \
140 : "%r" ((USItype)(ah)), \
141 "rI" ((USItype)(bh)), \
142 "%r" ((USItype)(al)), \
143 "rI" ((USItype)(bl)))
e1a79915 144#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
ac1e676a
RE
145 __asm__ ("subs %1, %4, %5
146 sbc %0, %2, %3" \
e6c5404d
TG
147 : "=r" ((USItype)(sh)), \
148 "=&r" ((USItype)(sl)) \
149 : "r" ((USItype)(ah)), \
150 "rI" ((USItype)(bh)), \
151 "r" ((USItype)(al)), \
152 "rI" ((USItype)(bl)))
f71c71f1 153#define umul_ppmm(xh, xl, a, b) \
6a04a634 154{register USItype __t0, __t1, __t2; \
ac1e676a 155 __asm__ ("%@ Inlined umul_ppmm
6a04a634
RE
156 mov %2, %5, lsr #16
157 mov %0, %6, lsr #16
158 bic %3, %5, %2, lsl #16
159 bic %4, %6, %0, lsl #16
160 mul %1, %3, %4
161 mul %4, %2, %4
162 mul %3, %0, %3
163 mul %0, %2, %0
164 adds %3, %4, %3
ac1e676a 165 addcs %0, %0, #65536
6a04a634
RE
166 adds %1, %1, %3, lsl #16
167 adc %0, %0, %3, lsr #16" \
f71c71f1 168 : "=&r" ((USItype)(xh)), \
6a04a634
RE
169 "=r" ((USItype)(xl)), \
170 "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \
f71c71f1 171 : "r" ((USItype)(a)), \
6a04a634 172 "r" ((USItype)(b)));}
f71c71f1
TG
173#define UMUL_TIME 20
174#define UDIV_TIME 100
e1a79915
RS
175#endif /* __arm__ */
176
f71c71f1
TG
177#if defined (__clipper__)
178#define umul_ppmm(w1, w0, u, v) \
179 ({union {UDItype __ll; \
180 struct {USItype __l, __h;} __i; \
181 } __xx; \
182 __asm__ ("mulwux %2,%0" \
183 : "=r" (__xx.__ll) \
184 : "%0" ((USItype)(u)), \
185 "r" ((USItype)(v))); \
186 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
187#define smul_ppmm(w1, w0, u, v) \
188 ({union {DItype __ll; \
189 struct {SItype __l, __h;} __i; \
190 } __xx; \
191 __asm__ ("mulwx %2,%0" \
192 : "=r" (__xx.__ll) \
193 : "%0" ((SItype)(u)), \
194 "r" ((SItype)(v))); \
195 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
196#define __umulsidi3(u, v) \
197 ({UDItype __w; \
198 __asm__ ("mulwux %2,%0" \
199 : "=r" (__w) \
200 : "%0" ((USItype)(u)), \
201 "r" ((USItype)(v))); \
202 __w; })
203#endif /* __clipper__ */
204
e1a79915
RS
205#if defined (__gmicro__)
206#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
207 __asm__ ("add.w %5,%1
208 addx %3,%0" \
e6c5404d
TG
209 : "=g" ((USItype)(sh)), \
210 "=&g" ((USItype)(sl)) \
211 : "%0" ((USItype)(ah)), \
212 "g" ((USItype)(bh)), \
213 "%1" ((USItype)(al)), \
214 "g" ((USItype)(bl)))
e1a79915
RS
215#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
216 __asm__ ("sub.w %5,%1
217 subx %3,%0" \
e6c5404d
TG
218 : "=g" ((USItype)(sh)), \
219 "=&g" ((USItype)(sl)) \
220 : "0" ((USItype)(ah)), \
221 "g" ((USItype)(bh)), \
222 "1" ((USItype)(al)), \
223 "g" ((USItype)(bl)))
e1a79915
RS
224#define umul_ppmm(ph, pl, m0, m1) \
225 __asm__ ("mulx %3,%0,%1" \
e6c5404d
TG
226 : "=g" ((USItype)(ph)), \
227 "=r" ((USItype)(pl)) \
228 : "%0" ((USItype)(m0)), \
229 "g" ((USItype)(m1)))
e1a79915
RS
230#define udiv_qrnnd(q, r, nh, nl, d) \
231 __asm__ ("divx %4,%0,%1" \
e6c5404d
TG
232 : "=g" ((USItype)(q)), \
233 "=r" ((USItype)(r)) \
234 : "1" ((USItype)(nh)), \
235 "0" ((USItype)(nl)), \
236 "g" ((USItype)(d)))
e1a79915
RS
237#define count_leading_zeros(count, x) \
238 __asm__ ("bsch/1 %1,%0" \
d83dd29a 239 : "=g" (count) \
e6c5404d
TG
240 : "g" ((USItype)(x)), \
241 "0" ((USItype)0))
e1a79915
RS
242#endif
243
244#if defined (__hppa)
245#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
246 __asm__ ("add %4,%5,%1
247 addc %2,%3,%0" \
e6c5404d
TG
248 : "=r" ((USItype)(sh)), \
249 "=&r" ((USItype)(sl)) \
250 : "%rM" ((USItype)(ah)), \
251 "rM" ((USItype)(bh)), \
252 "%rM" ((USItype)(al)), \
253 "rM" ((USItype)(bl)))
e1a79915 254#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
7691f559
TG
255 __asm__ ("sub %4,%5,%1
256 subb %2,%3,%0" \
e6c5404d
TG
257 : "=r" ((USItype)(sh)), \
258 "=&r" ((USItype)(sl)) \
259 : "rM" ((USItype)(ah)), \
260 "rM" ((USItype)(bh)), \
261 "rM" ((USItype)(al)), \
262 "rM" ((USItype)(bl)))
d83dd29a
TG
263#if defined (_PA_RISC1_1)
264#define umul_ppmm(w1, w0, u, v) \
265 do { \
266 union \
267 { \
e6c5404d
TG
268 UDItype __f; \
269 struct {USItype __w1, __w0;} __w1w0; \
d83dd29a
TG
270 } __t; \
271 __asm__ ("xmpyu %1,%2,%0" \
272 : "=x" (__t.__f) \
e6c5404d
TG
273 : "x" ((USItype)(u)), \
274 "x" ((USItype)(v))); \
d83dd29a
TG
275 (w1) = __t.__w1w0.__w1; \
276 (w0) = __t.__w1w0.__w0; \
277 } while (0)
278#define UMUL_TIME 8
279#else
280#define UMUL_TIME 30
281#endif
282#define UDIV_TIME 40
f71c71f1
TG
283#define count_leading_zeros(count, x) \
284 do { \
285 USItype __tmp; \
286 __asm__ ( \
287 "ldi 1,%0
288 extru,= %1,15,16,%%r0 ; Bits 31..16 zero?
289 extru,tr %1,15,16,%1 ; No. Shift down, skip add.
290 ldo 16(%0),%0 ; Yes. Perform add.
291 extru,= %1,23,8,%%r0 ; Bits 15..8 zero?
292 extru,tr %1,23,8,%1 ; No. Shift down, skip add.
293 ldo 8(%0),%0 ; Yes. Perform add.
294 extru,= %1,27,4,%%r0 ; Bits 7..4 zero?
295 extru,tr %1,27,4,%1 ; No. Shift down, skip add.
296 ldo 4(%0),%0 ; Yes. Perform add.
297 extru,= %1,29,2,%%r0 ; Bits 3..2 zero?
298 extru,tr %1,29,2,%1 ; No. Shift down, skip add.
299 ldo 2(%0),%0 ; Yes. Perform add.
300 extru %1,30,1,%1 ; Extract bit 1.
301 sub %0,%1,%0 ; Subtract it.
302 " : "=r" (count), "=r" (__tmp) : "1" (x)); \
303 } while (0)
e1a79915
RS
304#endif
305
306#if defined (__i386__) || defined (__i486__)
307#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
308 __asm__ ("addl %5,%1
309 adcl %3,%0" \
e6c5404d
TG
310 : "=r" ((USItype)(sh)), \
311 "=&r" ((USItype)(sl)) \
312 : "%0" ((USItype)(ah)), \
313 "g" ((USItype)(bh)), \
314 "%1" ((USItype)(al)), \
315 "g" ((USItype)(bl)))
e1a79915
RS
316#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
317 __asm__ ("subl %5,%1
318 sbbl %3,%0" \
e6c5404d
TG
319 : "=r" ((USItype)(sh)), \
320 "=&r" ((USItype)(sl)) \
321 : "0" ((USItype)(ah)), \
322 "g" ((USItype)(bh)), \
323 "1" ((USItype)(al)), \
324 "g" ((USItype)(bl)))
e1a79915
RS
325#define umul_ppmm(w1, w0, u, v) \
326 __asm__ ("mull %3" \
e6c5404d
TG
327 : "=a" ((USItype)(w0)), \
328 "=d" ((USItype)(w1)) \
329 : "%0" ((USItype)(u)), \
330 "rm" ((USItype)(v)))
e1a79915
RS
331#define udiv_qrnnd(q, r, n1, n0, d) \
332 __asm__ ("divl %4" \
e6c5404d
TG
333 : "=a" ((USItype)(q)), \
334 "=d" ((USItype)(r)) \
335 : "0" ((USItype)(n0)), \
336 "1" ((USItype)(n1)), \
337 "rm" ((USItype)(d)))
e1a79915
RS
338#define count_leading_zeros(count, x) \
339 do { \
e6c5404d 340 USItype __cbtmp; \
e1a79915 341 __asm__ ("bsrl %1,%0" \
e6c5404d 342 : "=r" (__cbtmp) : "rm" ((USItype)(x))); \
e1a79915
RS
343 (count) = __cbtmp ^ 31; \
344 } while (0)
d83dd29a
TG
345#define UMUL_TIME 40
346#define UDIV_TIME 40
e1a79915
RS
347#endif /* 80x86 */
348
349#if defined (__i860__)
350#if 0
351/* Make sure these patterns really improve the code before
352 switching them on. */
353#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
354 do { \
355 union \
356 { \
e6c5404d
TG
357 DItype __ll; \
358 struct {USItype __l, __h;} __i; \
e1a79915 359 } __a, __b, __s; \
e6c5404d
TG
360 __a.__i.__l = (al); \
361 __a.__i.__h = (ah); \
362 __b.__i.__l = (bl); \
363 __b.__i.__h = (bh); \
e1a79915 364 __asm__ ("fiadd.dd %1,%2,%0" \
e6c5404d
TG
365 : "=f" (__s.__ll) \
366 : "%f" (__a.__ll), "f" (__b.__ll)); \
367 (sh) = __s.__i.__h; \
368 (sl) = __s.__i.__l; \
e1a79915
RS
369 } while (0)
370#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
371 do { \
372 union \
373 { \
e6c5404d
TG
374 DItype __ll; \
375 struct {USItype __l, __h;} __i; \
e1a79915 376 } __a, __b, __s; \
e6c5404d
TG
377 __a.__i.__l = (al); \
378 __a.__i.__h = (ah); \
379 __b.__i.__l = (bl); \
380 __b.__i.__h = (bh); \
e1a79915 381 __asm__ ("fisub.dd %1,%2,%0" \
e6c5404d
TG
382 : "=f" (__s.__ll) \
383 : "%f" (__a.__ll), "f" (__b.__ll)); \
384 (sh) = __s.__i.__h; \
385 (sl) = __s.__i.__l; \
e1a79915
RS
386 } while (0)
387#endif
388#endif /* __i860__ */
389
f71c71f1
TG
390#if defined (__i960__)
391#define umul_ppmm(w1, w0, u, v) \
392 ({union {UDItype __ll; \
393 struct {USItype __l, __h;} __i; \
394 } __xx; \
395 __asm__ ("emul %2,%1,%0" \
396 : "=d" (__xx.__ll) \
397 : "%dI" ((USItype)(u)), \
398 "dI" ((USItype)(v))); \
399 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
400#define __umulsidi3(u, v) \
401 ({UDItype __w; \
402 __asm__ ("emul %2,%1,%0" \
403 : "=d" (__w) \
404 : "%dI" ((USItype)(u)), \
405 "dI" ((USItype)(v))); \
406 __w; })
407#endif /* __i960__ */
e1a79915
RS
408
409#if defined (__mc68000__)
410#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
411 __asm__ ("add%.l %5,%1
412 addx%.l %3,%0" \
e6c5404d
TG
413 : "=d" ((USItype)(sh)), \
414 "=&d" ((USItype)(sl)) \
415 : "%0" ((USItype)(ah)), \
416 "d" ((USItype)(bh)), \
417 "%1" ((USItype)(al)), \
418 "g" ((USItype)(bl)))
e1a79915
RS
419#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
420 __asm__ ("sub%.l %5,%1
421 subx%.l %3,%0" \
e6c5404d
TG
422 : "=d" ((USItype)(sh)), \
423 "=&d" ((USItype)(sl)) \
424 : "0" ((USItype)(ah)), \
425 "d" ((USItype)(bh)), \
426 "1" ((USItype)(al)), \
427 "g" ((USItype)(bl)))
e1a79915
RS
428#if defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)
429#define umul_ppmm(w1, w0, u, v) \
430 __asm__ ("mulu%.l %3,%1:%0" \
e6c5404d
TG
431 : "=d" ((USItype)(w0)), \
432 "=d" ((USItype)(w1)) \
433 : "%0" ((USItype)(u)), \
434 "dmi" ((USItype)(v)))
d83dd29a 435#define UMUL_TIME 45
e1a79915
RS
436#define udiv_qrnnd(q, r, n1, n0, d) \
437 __asm__ ("divu%.l %4,%1:%0" \
e6c5404d
TG
438 : "=d" ((USItype)(q)), \
439 "=d" ((USItype)(r)) \
440 : "0" ((USItype)(n0)), \
441 "1" ((USItype)(n1)), \
442 "dmi" ((USItype)(d)))
d83dd29a
TG
443#define UDIV_TIME 90
444#define sdiv_qrnnd(q, r, n1, n0, d) \
445 __asm__ ("divs%.l %4,%1:%0" \
e6c5404d
TG
446 : "=d" ((USItype)(q)), \
447 "=d" ((USItype)(r)) \
448 : "0" ((USItype)(n0)), \
449 "1" ((USItype)(n1)), \
450 "dmi" ((USItype)(d)))
e1a79915
RS
451#define count_leading_zeros(count, x) \
452 __asm__ ("bfffo %1{%b2:%b2},%0" \
e6c5404d
TG
453 : "=d" ((USItype)(count)) \
454 : "od" ((USItype)(x)), "n" (0))
e1a79915 455#else /* not mc68020 */
b24b1d78 456/* %/ inserts REGISTER_PREFIX, %# inserts IMMEDIATE_PREFIX. */
e1a79915
RS
457#define umul_ppmm(xh, xl, a, b) \
458 __asm__ ("| Inlined umul_ppmm
f71c71f1
TG
459 move%.l %2,%/d0
460 move%.l %3,%/d1
461 move%.l %/d0,%/d2
bb66e0f6 462 swap %/d0
f71c71f1 463 move%.l %/d1,%/d3
bb66e0f6 464 swap %/d1
f71c71f1 465 move%.w %/d2,%/d4
bb66e0f6
ILT
466 mulu %/d3,%/d4
467 mulu %/d1,%/d2
468 mulu %/d0,%/d3
469 mulu %/d0,%/d1
f71c71f1
TG
470 move%.l %/d4,%/d0
471 eor%.w %/d0,%/d0
bb66e0f6 472 swap %/d0
f71c71f1
TG
473 add%.l %/d0,%/d2
474 add%.l %/d3,%/d2
e1a79915 475 jcc 1f
b24b1d78 476 add%.l %#65536,%/d1
bb66e0f6 4771: swap %/d2
b24b1d78 478 moveq %#0,%/d0
f71c71f1
TG
479 move%.w %/d2,%/d0
480 move%.w %/d4,%/d2
481 move%.l %/d2,%1
482 add%.l %/d1,%/d0
483 move%.l %/d0,%0" \
e6c5404d
TG
484 : "=g" ((USItype)(xh)), \
485 "=g" ((USItype)(xl)) \
486 : "g" ((USItype)(a)), \
487 "g" ((USItype)(b)) \
d83dd29a
TG
488 : "d0", "d1", "d2", "d3", "d4")
489#define UMUL_TIME 100
490#define UDIV_TIME 400
e1a79915
RS
491#endif /* not mc68020 */
492#endif /* mc68000 */
493
494#if defined (__m88000__)
495#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
496 __asm__ ("addu.co %1,%r4,%r5
497 addu.ci %0,%r2,%r3" \
e6c5404d
TG
498 : "=r" ((USItype)(sh)), \
499 "=&r" ((USItype)(sl)) \
500 : "%rJ" ((USItype)(ah)), \
501 "rJ" ((USItype)(bh)), \
502 "%rJ" ((USItype)(al)), \
503 "rJ" ((USItype)(bl)))
e1a79915
RS
504#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
505 __asm__ ("subu.co %1,%r4,%r5
506 subu.ci %0,%r2,%r3" \
e6c5404d
TG
507 : "=r" ((USItype)(sh)), \
508 "=&r" ((USItype)(sl)) \
509 : "rJ" ((USItype)(ah)), \
510 "rJ" ((USItype)(bh)), \
511 "rJ" ((USItype)(al)), \
512 "rJ" ((USItype)(bl)))
e1a79915
RS
513#define count_leading_zeros(count, x) \
514 do { \
e6c5404d 515 USItype __cbtmp; \
e1a79915 516 __asm__ ("ff1 %0,%1" \
d83dd29a 517 : "=r" (__cbtmp) \
e6c5404d 518 : "r" ((USItype)(x))); \
e1a79915
RS
519 (count) = __cbtmp ^ 31; \
520 } while (0)
d83dd29a 521#if defined (__mc88110__)
f71c71f1
TG
522#define umul_ppmm(wh, wl, u, v) \
523 do { \
524 union {UDItype __ll; \
525 struct {USItype __h, __l;} __i; \
526 } __xx; \
527 __asm__ ("mulu.d %0,%1,%2" \
528 : "=r" (__xx.__ll) \
529 : "r" ((USItype)(u)), \
530 "r" ((USItype)(v))); \
531 (wh) = __xx.__i.__h; \
532 (wl) = __xx.__i.__l; \
533 } while (0)
d83dd29a 534#define udiv_qrnnd(q, r, n1, n0, d) \
f71c71f1
TG
535 ({union {UDItype __ll; \
536 struct {USItype __h, __l;} __i; \
537 } __xx; \
538 USItype __q; \
539 __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
540 __asm__ ("divu.d %0,%1,%2" \
541 : "=r" (__q) \
542 : "r" (__xx.__ll), \
543 "r" ((USItype)(d))); \
544 (r) = (n0) - __q * (d); (q) = __q; })
545#define UMUL_TIME 5
546#define UDIV_TIME 25
547#else
548#define UMUL_TIME 17
549#define UDIV_TIME 150
550#endif /* __mc88110__ */
e1a79915
RS
551#endif /* __m88000__ */
552
553#if defined (__mips__)
d28e1517
TG
554#define umul_ppmm(w1, w0, u, v) \
555 __asm__ ("multu %2,%3
556 mflo %0
e1a79915 557 mfhi %1" \
e6c5404d
TG
558 : "=d" ((USItype)(w0)), \
559 "=d" ((USItype)(w1)) \
560 : "d" ((USItype)(u)), \
561 "d" ((USItype)(v)))
f71c71f1 562#define UMUL_TIME 10
e1a79915
RS
563#define UDIV_TIME 100
564#endif /* __mips__ */
565
566#if defined (__ns32000__)
f71c71f1
TG
567#define umul_ppmm(w1, w0, u, v) \
568 ({union {UDItype __ll; \
569 struct {USItype __l, __h;} __i; \
570 } __xx; \
571 __asm__ ("meid %2,%0" \
572 : "=g" (__xx.__ll) \
573 : "%0" ((USItype)(u)), \
574 "g" ((USItype)(v))); \
575 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
e1a79915 576#define __umulsidi3(u, v) \
e6c5404d 577 ({UDItype __w; \
d83dd29a
TG
578 __asm__ ("meid %2,%0" \
579 : "=g" (__w) \
e6c5404d
TG
580 : "%0" ((USItype)(u)), \
581 "g" ((USItype)(v))); \
d83dd29a 582 __w; })
f71c71f1
TG
583#define udiv_qrnnd(q, r, n1, n0, d) \
584 ({union {UDItype __ll; \
585 struct {USItype __l, __h;} __i; \
586 } __xx; \
587 __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
588 __asm__ ("deid %2,%0" \
589 : "=g" (__xx.__ll) \
590 : "0" (__xx.__ll), \
591 "g" ((USItype)(d))); \
592 (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
e1a79915
RS
593#endif /* __ns32000__ */
594
325877e1 595#if (defined (_ARCH_PPC) || defined (_IBMR2)) && W_TYPE_SIZE == 32
f71c71f1
TG
596#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
597 do { \
598 if (__builtin_constant_p (bh) && (bh) == 0) \
599 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
600 : "=r" ((USItype)(sh)), \
601 "=&r" ((USItype)(sl)) \
602 : "%r" ((USItype)(ah)), \
603 "%r" ((USItype)(al)), \
604 "rI" ((USItype)(bl))); \
605 else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
606 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
607 : "=r" ((USItype)(sh)), \
608 "=&r" ((USItype)(sl)) \
609 : "%r" ((USItype)(ah)), \
610 "%r" ((USItype)(al)), \
611 "rI" ((USItype)(bl))); \
612 else \
613 __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
614 : "=r" ((USItype)(sh)), \
615 "=&r" ((USItype)(sl)) \
616 : "%r" ((USItype)(ah)), \
617 "r" ((USItype)(bh)), \
618 "%r" ((USItype)(al)), \
619 "rI" ((USItype)(bl))); \
620 } while (0)
621#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
622 do { \
623 if (__builtin_constant_p (ah) && (ah) == 0) \
325877e1 624 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
f71c71f1
TG
625 : "=r" ((USItype)(sh)), \
626 "=&r" ((USItype)(sl)) \
627 : "r" ((USItype)(bh)), \
628 "rI" ((USItype)(al)), \
629 "r" ((USItype)(bl))); \
630 else if (__builtin_constant_p (ah) && (ah) ==~(USItype) 0) \
325877e1 631 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
f71c71f1
TG
632 : "=r" ((USItype)(sh)), \
633 "=&r" ((USItype)(sl)) \
634 : "r" ((USItype)(bh)), \
635 "rI" ((USItype)(al)), \
636 "r" ((USItype)(bl))); \
637 else if (__builtin_constant_p (bh) && (bh) == 0) \
638 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
639 : "=r" ((USItype)(sh)), \
640 "=&r" ((USItype)(sl)) \
641 : "r" ((USItype)(ah)), \
642 "rI" ((USItype)(al)), \
643 "r" ((USItype)(bl))); \
644 else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
645 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
646 : "=r" ((USItype)(sh)), \
647 "=&r" ((USItype)(sl)) \
648 : "r" ((USItype)(ah)), \
649 "rI" ((USItype)(al)), \
650 "r" ((USItype)(bl))); \
651 else \
652 __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
653 : "=r" ((USItype)(sh)), \
654 "=&r" ((USItype)(sl)) \
655 : "r" ((USItype)(ah)), \
656 "r" ((USItype)(bh)), \
657 "rI" ((USItype)(al)), \
658 "r" ((USItype)(bl))); \
659 } while (0)
660#define count_leading_zeros(count, x) \
661 __asm__ ("{cntlz|cntlzw} %0,%1" \
662 : "=r" ((USItype)(count)) \
663 : "r" ((USItype)(x)))
325877e1 664#if defined (_ARCH_PPC)
f71c71f1
TG
665#define umul_ppmm(ph, pl, m0, m1) \
666 do { \
667 USItype __m0 = (m0), __m1 = (m1); \
668 __asm__ ("mulhwu %0,%1,%2" \
669 : "=r" ((USItype) ph) \
670 : "%r" (__m0), \
671 "r" (__m1)); \
672 (pl) = __m0 * __m1; \
673 } while (0)
674#define UMUL_TIME 15
675#define smul_ppmm(ph, pl, m0, m1) \
676 do { \
677 SItype __m0 = (m0), __m1 = (m1); \
678 __asm__ ("mulhw %0,%1,%2" \
679 : "=r" ((SItype) ph) \
680 : "%r" (__m0), \
681 "r" (__m1)); \
682 (pl) = __m0 * __m1; \
683 } while (0)
684#define SMUL_TIME 14
685#define UDIV_TIME 120
686#else
687#define umul_ppmm(xh, xl, m0, m1) \
688 do { \
689 USItype __m0 = (m0), __m1 = (m1); \
690 __asm__ ("mul %0,%2,%3" \
691 : "=r" ((USItype)(xh)), \
692 "=q" ((USItype)(xl)) \
693 : "r" (__m0), \
694 "r" (__m1)); \
695 (xh) += ((((SItype) __m0 >> 31) & __m1) \
696 + (((SItype) __m1 >> 31) & __m0)); \
697 } while (0)
698#define UMUL_TIME 8
699#define smul_ppmm(xh, xl, m0, m1) \
700 __asm__ ("mul %0,%2,%3" \
701 : "=r" ((SItype)(xh)), \
702 "=q" ((SItype)(xl)) \
703 : "r" (m0), \
704 "r" (m1))
705#define SMUL_TIME 4
706#define sdiv_qrnnd(q, r, nh, nl, d) \
707 __asm__ ("div %0,%2,%4" \
708 : "=r" ((SItype)(q)), "=q" ((SItype)(r)) \
709 : "r" ((SItype)(nh)), "1" ((SItype)(nl)), "r" ((SItype)(d)))
710#define UDIV_TIME 100
711#endif
712#endif /* Power architecture variants. */
713
e1a79915
RS
714#if defined (__pyr__)
715#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
716 __asm__ ("addw %5,%1
717 addwc %3,%0" \
e6c5404d
TG
718 : "=r" ((USItype)(sh)), \
719 "=&r" ((USItype)(sl)) \
720 : "%0" ((USItype)(ah)), \
721 "g" ((USItype)(bh)), \
722 "%1" ((USItype)(al)), \
723 "g" ((USItype)(bl)))
e1a79915
RS
724#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
725 __asm__ ("subw %5,%1
726 subwb %3,%0" \
e6c5404d
TG
727 : "=r" ((USItype)(sh)), \
728 "=&r" ((USItype)(sl)) \
729 : "0" ((USItype)(ah)), \
730 "g" ((USItype)(bh)), \
731 "1" ((USItype)(al)), \
732 "g" ((USItype)(bl)))
f4432e94 733/* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP. */
e1a79915 734#define umul_ppmm(w1, w0, u, v) \
f4432e94
RK
735 ({union {UDItype __ll; \
736 struct {USItype __h, __l;} __i; \
737 } __xx; \
738 __asm__ ("movw %1,%R0
739 uemul %2,%0" \
740 : "=&r" (__xx.__ll) \
741 : "g" ((USItype) (u)), \
f71c71f1 742 "g" ((USItype)(v))); \
f4432e94 743 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
e1a79915
RS
744#endif /* __pyr__ */
745
746#if defined (__ibm032__) /* RT/ROMP */
747#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
748 __asm__ ("a %1,%5
749 ae %0,%3" \
e6c5404d
TG
750 : "=r" ((USItype)(sh)), \
751 "=&r" ((USItype)(sl)) \
752 : "%0" ((USItype)(ah)), \
753 "r" ((USItype)(bh)), \
754 "%1" ((USItype)(al)), \
755 "r" ((USItype)(bl)))
e1a79915
RS
756#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
757 __asm__ ("s %1,%5
758 se %0,%3" \
e6c5404d
TG
759 : "=r" ((USItype)(sh)), \
760 "=&r" ((USItype)(sl)) \
761 : "0" ((USItype)(ah)), \
762 "r" ((USItype)(bh)), \
763 "1" ((USItype)(al)), \
764 "r" ((USItype)(bl)))
e1a79915
RS
765#define umul_ppmm(ph, pl, m0, m1) \
766 do { \
e6c5404d 767 USItype __m0 = (m0), __m1 = (m1); \
e1a79915
RS
768 __asm__ ( \
769 "s r2,r2
770 mts r10,%2
771 m r2,%3
772 m r2,%3
773 m r2,%3
774 m r2,%3
775 m r2,%3
776 m r2,%3
777 m r2,%3
778 m r2,%3
779 m r2,%3
780 m r2,%3
781 m r2,%3
782 m r2,%3
783 m r2,%3
784 m r2,%3
785 m r2,%3
786 m r2,%3
787 cas %0,r2,r0
788 mfs r10,%1" \
e6c5404d
TG
789 : "=r" ((USItype)(ph)), \
790 "=r" ((USItype)(pl)) \
d83dd29a
TG
791 : "%r" (__m0), \
792 "r" (__m1) \
793 : "r2"); \
e6c5404d
TG
794 (ph) += ((((SItype) __m0 >> 31) & __m1) \
795 + (((SItype) __m1 >> 31) & __m0)); \
e1a79915 796 } while (0)
d83dd29a
TG
797#define UMUL_TIME 20
798#define UDIV_TIME 200
e1a79915
RS
799#define count_leading_zeros(count, x) \
800 do { \
801 if ((x) >= 0x10000) \
802 __asm__ ("clz %0,%1" \
e6c5404d
TG
803 : "=r" ((USItype)(count)) \
804 : "r" ((USItype)(x) >> 16)); \
e1a79915
RS
805 else \
806 { \
807 __asm__ ("clz %0,%1" \
e6c5404d
TG
808 : "=r" ((USItype)(count)) \
809 : "r" ((USItype)(x))); \
e1a79915
RS
810 (count) += 16; \
811 } \
812 } while (0)
813#endif
814
815#if defined (__sparc__)
816#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
f71c71f1
TG
817 __asm__ ("addcc %r4,%5,%1
818 addx %r2,%3,%0" \
e6c5404d
TG
819 : "=r" ((USItype)(sh)), \
820 "=&r" ((USItype)(sl)) \
f71c71f1 821 : "%rJ" ((USItype)(ah)), \
e6c5404d 822 "rI" ((USItype)(bh)), \
f71c71f1 823 "%rJ" ((USItype)(al)), \
e6c5404d 824 "rI" ((USItype)(bl)) \
d83dd29a 825 __CLOBBER_CC)
e1a79915 826#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
f71c71f1
TG
827 __asm__ ("subcc %r4,%5,%1
828 subx %r2,%3,%0" \
e6c5404d
TG
829 : "=r" ((USItype)(sh)), \
830 "=&r" ((USItype)(sl)) \
f71c71f1 831 : "rJ" ((USItype)(ah)), \
e6c5404d 832 "rI" ((USItype)(bh)), \
f71c71f1 833 "rJ" ((USItype)(al)), \
e6c5404d 834 "rI" ((USItype)(bl)) \
d83dd29a 835 __CLOBBER_CC)
d9d3acb6 836#if defined (__sparc_v8__)
e1a79915
RS
837#define umul_ppmm(w1, w0, u, v) \
838 __asm__ ("umul %2,%3,%1;rd %%y,%0" \
e6c5404d
TG
839 : "=r" ((USItype)(w1)), \
840 "=r" ((USItype)(w0)) \
841 : "r" ((USItype)(u)), \
842 "r" ((USItype)(v)))
e1a79915
RS
843#define udiv_qrnnd(q, r, n1, n0, d) \
844 __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
e6c5404d
TG
845 : "=&r" ((USItype)(q)), \
846 "=&r" ((USItype)(r)) \
847 : "r" ((USItype)(n1)), \
848 "r" ((USItype)(n0)), \
849 "r" ((USItype)(d)))
e1a79915 850#else
d9d3acb6
JW
851#if defined (__sparclite__)
852/* This has hardware multiply but not divide. It also has two additional
853 instructions scan (ffs from high bit) and divscc. */
854#define umul_ppmm(w1, w0, u, v) \
855 __asm__ ("umul %2,%3,%1;rd %%y,%0" \
abe1fe6c
TG
856 : "=r" ((USItype)(w1)), \
857 "=r" ((USItype)(w0)) \
858 : "r" ((USItype)(u)), \
859 "r" ((USItype)(v)))
d9d3acb6
JW
860#define udiv_qrnnd(q, r, n1, n0, d) \
861 __asm__ ("! Inlined udiv_qrnnd
862 wr %%g0,%2,%%y ! Not a delayed write for sparclite
863 tst %%g0
864 divscc %3,%4,%%g1
865 divscc %%g1,%4,%%g1
866 divscc %%g1,%4,%%g1
867 divscc %%g1,%4,%%g1
868 divscc %%g1,%4,%%g1
869 divscc %%g1,%4,%%g1
870 divscc %%g1,%4,%%g1
871 divscc %%g1,%4,%%g1
872 divscc %%g1,%4,%%g1
873 divscc %%g1,%4,%%g1
874 divscc %%g1,%4,%%g1
875 divscc %%g1,%4,%%g1
876 divscc %%g1,%4,%%g1
877 divscc %%g1,%4,%%g1
878 divscc %%g1,%4,%%g1
879 divscc %%g1,%4,%%g1
880 divscc %%g1,%4,%%g1
881 divscc %%g1,%4,%%g1
882 divscc %%g1,%4,%%g1
883 divscc %%g1,%4,%%g1
884 divscc %%g1,%4,%%g1
885 divscc %%g1,%4,%%g1
886 divscc %%g1,%4,%%g1
887 divscc %%g1,%4,%%g1
888 divscc %%g1,%4,%%g1
889 divscc %%g1,%4,%%g1
890 divscc %%g1,%4,%%g1
891 divscc %%g1,%4,%%g1
892 divscc %%g1,%4,%%g1
893 divscc %%g1,%4,%%g1
894 divscc %%g1,%4,%%g1
895 divscc %%g1,%4,%0
896 rd %%y,%1
897 bl,a 1f
898 add %1,%4,%1
8991: ! End of inline udiv_qrnnd" \
abe1fe6c
TG
900 : "=r" ((USItype)(q)), \
901 "=r" ((USItype)(r)) \
902 : "r" ((USItype)(n1)), \
903 "r" ((USItype)(n0)), \
904 "rI" ((USItype)(d)) \
210d97af 905 : "%g1" __AND_CLOBBER_CC)
d9d3acb6
JW
906#define UDIV_TIME 37
907#define count_leading_zeros(count, x) \
908 __asm__ ("scan %1,0,%0" \
abe1fe6c
TG
909 : "=r" ((USItype)(x)) \
910 : "r" ((USItype)(count)))
d9d3acb6 911#else
e1a79915
RS
912/* SPARC without integer multiplication and divide instructions.
913 (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
914#define umul_ppmm(w1, w0, u, v) \
915 __asm__ ("! Inlined umul_ppmm
916 wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr
917 sra %3,31,%%g2 ! Don't move this insn
918 and %2,%%g2,%%g2 ! Don't move this insn
919 andcc %%g0,0,%%g1 ! Don't move this insn
920 mulscc %%g1,%3,%%g1
921 mulscc %%g1,%3,%%g1
922 mulscc %%g1,%3,%%g1
923 mulscc %%g1,%3,%%g1
924 mulscc %%g1,%3,%%g1
925 mulscc %%g1,%3,%%g1
926 mulscc %%g1,%3,%%g1
927 mulscc %%g1,%3,%%g1
928 mulscc %%g1,%3,%%g1
929 mulscc %%g1,%3,%%g1
930 mulscc %%g1,%3,%%g1
931 mulscc %%g1,%3,%%g1
932 mulscc %%g1,%3,%%g1
933 mulscc %%g1,%3,%%g1
934 mulscc %%g1,%3,%%g1
935 mulscc %%g1,%3,%%g1
936 mulscc %%g1,%3,%%g1
937 mulscc %%g1,%3,%%g1
938 mulscc %%g1,%3,%%g1
939 mulscc %%g1,%3,%%g1
940 mulscc %%g1,%3,%%g1
941 mulscc %%g1,%3,%%g1
942 mulscc %%g1,%3,%%g1
943 mulscc %%g1,%3,%%g1
944 mulscc %%g1,%3,%%g1
945 mulscc %%g1,%3,%%g1
946 mulscc %%g1,%3,%%g1
947 mulscc %%g1,%3,%%g1
948 mulscc %%g1,%3,%%g1
949 mulscc %%g1,%3,%%g1
950 mulscc %%g1,%3,%%g1
951 mulscc %%g1,%3,%%g1
952 mulscc %%g1,0,%%g1
953 add %%g1,%%g2,%0
954 rd %%y,%1" \
e6c5404d
TG
955 : "=r" ((USItype)(w1)), \
956 "=r" ((USItype)(w0)) \
957 : "%rI" ((USItype)(u)), \
958 "r" ((USItype)(v)) \
d83dd29a 959 : "%g1", "%g2" __AND_CLOBBER_CC)
e1a79915
RS
960#define UMUL_TIME 39 /* 39 instructions */
961/* It's quite necessary to add this much assembler for the sparc.
962 The default udiv_qrnnd (in C) is more than 10 times slower! */
963#define udiv_qrnnd(q, r, n1, n0, d) \
964 __asm__ ("! Inlined udiv_qrnnd
965 mov 32,%%g1
966 subcc %1,%2,%%g0
9671: bcs 5f
968 addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
969 sub %1,%2,%1 ! this kills msb of n
970 addx %1,%1,%1 ! so this can't give carry
971 subcc %%g1,1,%%g1
9722: bne 1b
973 subcc %1,%2,%%g0
974 bcs 3f
975 addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
976 b 3f
977 sub %1,%2,%1 ! this kills msb of n
9784: sub %1,%2,%1
9795: addxcc %1,%1,%1
980 bcc 2b
981 subcc %%g1,1,%%g1
982! Got carry from n. Subtract next step to cancel this carry.
983 bne 4b
984 addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb
985 sub %1,%2,%1
9863: xnor %0,0,%0
987 ! End of inline udiv_qrnnd" \
e6c5404d
TG
988 : "=&r" ((USItype)(q)), \
989 "=&r" ((USItype)(r)) \
990 : "r" ((USItype)(d)), \
991 "1" ((USItype)(n1)), \
992 "0" ((USItype)(n0)) : "%g1" __AND_CLOBBER_CC)
e1a79915 993#define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */
d9d3acb6
JW
994#endif /* __sparclite__ */
995#endif /* __sparc_v8__ */
d83dd29a 996#endif /* __sparc__ */
e1a79915
RS
997
998#if defined (__vax__)
999#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1000 __asm__ ("addl2 %5,%1
1001 adwc %3,%0" \
e6c5404d
TG
1002 : "=g" ((USItype)(sh)), \
1003 "=&g" ((USItype)(sl)) \
1004 : "%0" ((USItype)(ah)), \
1005 "g" ((USItype)(bh)), \
1006 "%1" ((USItype)(al)), \
1007 "g" ((USItype)(bl)))
e1a79915
RS
1008#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1009 __asm__ ("subl2 %5,%1
1010 sbwc %3,%0" \
e6c5404d
TG
1011 : "=g" ((USItype)(sh)), \
1012 "=&g" ((USItype)(sl)) \
1013 : "0" ((USItype)(ah)), \
1014 "g" ((USItype)(bh)), \
1015 "1" ((USItype)(al)), \
1016 "g" ((USItype)(bl)))
e1a79915
RS
1017#define umul_ppmm(xh, xl, m0, m1) \
1018 do { \
e6c5404d
TG
1019 union { \
1020 UDItype __ll; \
1021 struct {USItype __l, __h;} __i; \
1022 } __xx; \
1023 USItype __m0 = (m0), __m1 = (m1); \
e1a79915 1024 __asm__ ("emul %1,%2,$0,%0" \
e6c5404d 1025 : "=r" (__xx.__ll) \
d83dd29a
TG
1026 : "g" (__m0), \
1027 "g" (__m1)); \
e6c5404d
TG
1028 (xh) = __xx.__i.__h; \
1029 (xl) = __xx.__i.__l; \
1030 (xh) += ((((SItype) __m0 >> 31) & __m1) \
1031 + (((SItype) __m1 >> 31) & __m0)); \
e1a79915 1032 } while (0)
f71c71f1
TG
1033#define sdiv_qrnnd(q, r, n1, n0, d) \
1034 do { \
1035 union {DItype __ll; \
1036 struct {SItype __l, __h;} __i; \
1037 } __xx; \
1038 __xx.__i.__h = n1; __xx.__i.__l = n0; \
1039 __asm__ ("ediv %3,%2,%0,%1" \
1040 : "=g" (q), "=g" (r) \
451c2f82 1041 : "g" (__xx.__ll), "g" (d)); \
f71c71f1 1042 } while (0)
e1a79915
RS
1043#endif /* __vax__ */

#endif /* __GNUC__ */

/* If this machine has no inline assembler, use C macros.  */

#if !defined (add_ssaaaa)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    USItype __x; \
    __x = (al) + (bl); \
    (sh) = (ah) + (bh) + (__x < (al)); \
    (sl) = __x; \
  } while (0)
#endif

#if !defined (sub_ddmmss)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    USItype __x; \
    __x = (al) - (bl); \
    (sh) = (ah) - (bh) - (__x > (al)); \
    (sl) = __x; \
  } while (0)
#endif
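
/* Illustrative sketch -- not part of the original header.  The C fallbacks
   above detect the carry (or borrow) by comparing the low-word result with
   an operand: for unsigned words, (al) + (bl) wrapped around exactly when
   the sum is smaller than (al).  The disabled check below assumes
   SI_TYPE_SIZE == 32 and that UDItype is a genuine 64-bit type, and
   compares the macros against native double-word arithmetic.  */
#if 0
static int
example_check_add_sub (USItype ah, USItype al, USItype bh, USItype bl)
{
  USItype sh, sl, dh, dl;
  UDItype a = ((UDItype) ah << 32) | al;
  UDItype b = ((UDItype) bh << 32) | bl;

  add_ssaaaa (sh, sl, ah, al, bh, bl);  /* (sh,sl) = a + b mod 2**64 */
  sub_ddmmss (dh, dl, ah, al, bh, bl);  /* (dh,dl) = a - b mod 2**64 */

  return (((UDItype) sh << 32) | sl) == a + b
         && (((UDItype) dh << 32) | dl) == a - b;
}
#endif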

#if !defined (umul_ppmm)
#define umul_ppmm(w1, w0, u, v) \
  do { \
    USItype __x0, __x1, __x2, __x3; \
    USItype __ul, __vl, __uh, __vh; \
 \
    __ul = __ll_lowpart (u); \
    __uh = __ll_highpart (u); \
    __vl = __ll_lowpart (v); \
    __vh = __ll_highpart (v); \
 \
    __x0 = (USItype) __ul * __vl; \
    __x1 = (USItype) __ul * __vh; \
    __x2 = (USItype) __uh * __vl; \
    __x3 = (USItype) __uh * __vh; \
 \
    __x1 += __ll_highpart (__x0);  /* this can't give carry */ \
    __x1 += __x2;                  /* but this indeed can */ \
    if (__x1 < __x2)               /* did we get it? */ \
      __x3 += __ll_B;              /* yes, add it in the proper pos. */ \
 \
    (w1) = __x3 + __ll_highpart (__x1); \
    (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
  } while (0)
#endif
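
/* Illustrative sketch -- not part of the original header.  The generic
   umul_ppmm above splits each word into halves, u = __uh*2**16 + __ul and
   v = __vh*2**16 + __vl (for SI_TYPE_SIZE == 32), so that
   u*v = __x3*2**32 + (__x1 + __x2)*2**16 + __x0, folding the one possible
   carry from __x1 + __x2 into the high word.  The disabled check below
   assumes UDItype is a genuine 64-bit type.  */
#if 0
static int
example_check_umul_ppmm (USItype u, USItype v)
{
  USItype hi, lo;

  umul_ppmm (hi, lo, u, v);
  return (((UDItype) hi << 32) | lo) == (UDItype) u * v;
}
#endif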

#if !defined (__umulsidi3)
#define __umulsidi3(u, v) \
  ({DIunion __w; \
    umul_ppmm (__w.s.high, __w.s.low, u, v); \
    __w.ll; })
#endif

/* Define this unconditionally, so it can be used for debugging.  */
#define __udiv_qrnnd_c(q, r, n1, n0, d) \
  do { \
    USItype __d1, __d0, __q1, __q0; \
    USItype __r1, __r0, __m; \
    __d1 = __ll_highpart (d); \
    __d0 = __ll_lowpart (d); \
 \
    __r1 = (n1) % __d1; \
    __q1 = (n1) / __d1; \
    __m = (USItype) __q1 * __d0; \
    __r1 = __r1 * __ll_B | __ll_highpart (n0); \
    if (__r1 < __m) \
      { \
        __q1--, __r1 += (d); \
        if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */ \
          if (__r1 < __m) \
            __q1--, __r1 += (d); \
      } \
    __r1 -= __m; \
 \
    __r0 = __r1 % __d1; \
    __q0 = __r1 / __d1; \
    __m = (USItype) __q0 * __d0; \
    __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
    if (__r0 < __m) \
      { \
        __q0--, __r0 += (d); \
        if (__r0 >= (d)) \
          if (__r0 < __m) \
            __q0--, __r0 += (d); \
      } \
    __r0 -= __m; \
 \
    (q) = (USItype) __q1 * __ll_B | __q0; \
    (r) = __r0; \
  } while (0)
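
/* Illustrative sketch -- not part of the original header.  __udiv_qrnnd_c
   (and therefore udiv_qrnnd when it is defined from it below) needs a
   normalized divisor, i.e. one with its most significant bit set, which is
   why UDIV_NEEDS_NORMALIZATION is then defined to 1.  A caller typically
   shifts divisor and numerator left by count_leading_zeros of the divisor
   and shifts the remainder back.  The helper below is hypothetical and
   disabled; it assumes a 32-bit USItype and n1 < d on entry.  */
#if 0
static USItype
example_udiv_normalized (USItype *rem, USItype n1, USItype n0, USItype d)
{
  USItype q, r, lz;

  count_leading_zeros (lz, d);
  if (lz != 0)
    {
      /* Normalize the divisor and shift the two-word numerator to match.  */
      n1 = (n1 << lz) | (n0 >> (SI_TYPE_SIZE - lz));
      n0 <<= lz;
      d <<= lz;
    }
  udiv_qrnnd (q, r, n1, n0, d);
  *rem = r >> lz;   /* undo the normalization of the remainder */
  return q;
}
#endif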

/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
   __udiv_w_sdiv (defined in libgcc or elsewhere).  */
#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
#define udiv_qrnnd(q, r, nh, nl, d) \
  do { \
    USItype __r; \
    (q) = __udiv_w_sdiv (&__r, nh, nl, d); \
    (r) = __r; \
  } while (0)
#endif

/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c.  */
#if !defined (udiv_qrnnd)
#define UDIV_NEEDS_NORMALIZATION 1
#define udiv_qrnnd __udiv_qrnnd_c
#endif

#if !defined (count_leading_zeros)
extern const UQItype __clz_tab[];
#define count_leading_zeros(count, x) \
  do { \
    USItype __xr = (x); \
    USItype __a; \
 \
    if (SI_TYPE_SIZE <= 32) \
      { \
        __a = __xr < (1<<2*__BITS4) \
          ? (__xr < (1<<__BITS4) ? 0 : __BITS4) \
          : (__xr < (1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
      } \
    else \
      { \
        for (__a = SI_TYPE_SIZE - 8; __a > 0; __a -= 8) \
          if (((__xr >> __a) & 0xff) != 0) \
            break; \
      } \
 \
    (count) = SI_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
  } while (0)
#endif
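
/* Illustrative sketch -- not part of the original header.  The fallback
   above first narrows __xr down to the chunk holding its most significant
   set bit, then finishes with the external __clz_tab lookup table, whose
   entry i appears to hold the bit-width of i; SI_TYPE_SIZE minus that
   width (plus the chunk offset __a) gives the leading-zero count.  The
   disabled check below compares the macro against a naive loop; x must be
   nonzero.  */
#if 0
static int
example_check_clz (USItype x)
{
  USItype count, naive = 0;
  USItype bit = (USItype) 1 << (SI_TYPE_SIZE - 1);

  while ((x & bit) == 0)
    {
      naive++;
      bit >>= 1;
    }
  count_leading_zeros (count, x);
  return count == naive;
}
#endif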

#ifndef UDIV_NEEDS_NORMALIZATION
#define UDIV_NEEDS_NORMALIZATION 0
#endif