/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
   Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This definition file is free software; you can redistribute it
   and/or modify it under the terms of the GNU General Public
   License as published by the Free Software Foundation; either
   version 2, or (at your option) any later version.

   This definition file is distributed in the hope that it will be
   useful, but WITHOUT ANY WARRANTY; without even the implied
   warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   See the GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#ifndef SI_TYPE_SIZE
#define SI_TYPE_SIZE 32
#endif

#define __BITS4 (SI_TYPE_SIZE / 4)
#define __ll_B (1L << (SI_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((USItype) (t) % __ll_B)
#define __ll_highpart(t) ((USItype) (t) / __ll_B)
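/* Illustrative note (not part of the original interface): with
   SI_TYPE_SIZE == 32, __ll_B is 1 << 16, so for example
   __ll_highpart (0x12345678) == 0x1234 and
   __ll_lowpart (0x12345678) == 0x5678.  The generic C macros further
   below work one such half-word at a time.  */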

/* Define auxiliary asm macros.

   1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand)
   multiplies two USItype integers MULTIPLIER and MULTIPLICAND,
   and generates a two-part USItype product in HIGH_PROD and
   LOW_PROD.

   2) __umulsidi3(a,b) multiplies two USItype integers A and B,
   and returns a UDItype product.  This is just a variant of umul_ppmm.

   3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
   denominator) divides a two-word unsigned integer, composed of the
   integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and
   places the quotient in QUOTIENT and the remainder in REMAINDER.
   HIGH_NUMERATOR must be less than DENOMINATOR for correct operation.
   If the implementation in addition requires the most significant bit
   of DENOMINATOR to be 1, the pre-processor symbol
   UDIV_NEEDS_NORMALIZATION is defined to 1.

   4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
   denominator).  Like udiv_qrnnd but the numbers are signed.  The
   quotient is rounded towards 0.

   5) count_leading_zeros(count, x) counts the number of zero-bits from
   the msb to the first non-zero bit.  This is the number of steps X
   needs to be shifted left to set the msb.  Undefined for X == 0.

   6) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
   high_addend_2, low_addend_2) adds two two-word unsigned integers,
   composed of HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and
   LOW_ADDEND_2 respectively.  The result is placed in HIGH_SUM and
   LOW_SUM.  Overflow (i.e. carry out) is not stored anywhere, and is
   lost.

   7) sub_ddmmss(high_difference, low_difference, high_minuend,
   low_minuend, high_subtrahend, low_subtrahend) subtracts two
   two-word unsigned integers, composed of HIGH_MINUEND and
   LOW_MINUEND, and HIGH_SUBTRAHEND and LOW_SUBTRAHEND
   respectively.  The result is placed in HIGH_DIFFERENCE and
   LOW_DIFFERENCE.  Overflow (i.e. carry out) is not stored anywhere,
   and is lost.

   If any of these macros are left undefined for a particular CPU,
   C macros are used.  */
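/* Illustrative sketch (not part of the original file): how a caller
   typically composes the macros documented above.  The function names
   are hypothetical and SI_TYPE_SIZE == 32 is assumed; the block is
   disabled so it has no effect on compilation.  */
#if 0
static UDItype
example_umulsidi3 (USItype a, USItype b)
{
  USItype hi, lo;
  umul_ppmm (hi, lo, a, b);	/* hi:lo = a * b */
  return ((UDItype) hi << 32) | lo;
}

static USItype
example_udiv (USItype n1, USItype n0, USItype d, USItype *rem)
{
  USItype q, r;
  /* Precondition from the description above: n1 < d (and a normalized
     d if UDIV_NEEDS_NORMALIZATION is 1).  */
  udiv_qrnnd (q, r, n1, n0, d);
  *rem = r;
  return q;
}
#endif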

/* The CPUs come in alphabetical order below.

   Please add support for more CPUs here, or improve the current support
   for the CPUs below!
   (E.g. WE32100, IBM360.)  */

#if defined (__GNUC__) && !defined (NO_ASM)

/* We sometimes need to clobber "cc" with gcc2, but that would not be
   understood by gcc1.  Use cpp to avoid major code duplication.  */
#if __GNUC__ < 2
#define __CLOBBER_CC
#define __AND_CLOBBER_CC
#else /* __GNUC__ >= 2 */
#define __CLOBBER_CC : "cc"
#define __AND_CLOBBER_CC , "cc"
#endif /* __GNUC__ < 2 */

#if defined (__a29k__) || defined (_AM29K)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add %1,%4,%5
        addc %0,%2,%3" \
         : "=r" ((USItype)(sh)), \
           "=&r" ((USItype)(sl)) \
         : "%r" ((USItype)(ah)), \
           "rI" ((USItype)(bh)), \
           "%r" ((USItype)(al)), \
           "rI" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub %1,%4,%5
        subc %0,%2,%3" \
         : "=r" ((USItype)(sh)), \
           "=&r" ((USItype)(sl)) \
         : "r" ((USItype)(ah)), \
           "rI" ((USItype)(bh)), \
           "r" ((USItype)(al)), \
           "rI" ((USItype)(bl)))
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("multiplu %0,%1,%2" \
         : "=r" ((USItype)(xl)) \
         : "r" (__m0), \
           "r" (__m1)); \
    __asm__ ("multmu %0,%1,%2" \
         : "=r" ((USItype)(xh)) \
         : "r" (__m0), \
           "r" (__m1)); \
  } while (0)
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("dividu %0,%3,%4" \
         : "=r" ((USItype)(q)), \
           "=q" ((USItype)(r)) \
         : "1" ((USItype)(n1)), \
           "r" ((USItype)(n0)), \
           "r" ((USItype)(d)))
#define count_leading_zeros(count, x) \
  __asm__ ("clz %0,%1" \
         : "=r" ((USItype)(count)) \
         : "r" ((USItype)(x)))
#endif /* __a29k__ */

#if defined (__arm__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("adds %1, %4, %5
        adc %0, %2, %3" \
         : "=r" ((USItype)(sh)), \
           "=&r" ((USItype)(sl)) \
         : "%r" ((USItype)(ah)), \
           "rI" ((USItype)(bh)), \
           "%r" ((USItype)(al)), \
           "rI" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subs %1, %4, %5
        sbc %0, %2, %3" \
         : "=r" ((USItype)(sh)), \
           "=&r" ((USItype)(sl)) \
         : "r" ((USItype)(ah)), \
           "rI" ((USItype)(bh)), \
           "r" ((USItype)(al)), \
           "rI" ((USItype)(bl)))
#define umul_ppmm(xh, xl, a, b) \
{register USItype __t0, __t1, __t2; \
  __asm__ ("%@ Inlined umul_ppmm
        mov %2, %5, lsr #16
        mov %0, %6, lsr #16
        bic %3, %5, %2, lsl #16
        bic %4, %6, %0, lsl #16
        mul %1, %3, %4
        mul %4, %2, %4
        mul %3, %0, %3
        mul %0, %2, %0
        adds %3, %4, %3
        addcs %0, %0, #65536
        adds %1, %1, %3, lsl #16
        adc %0, %0, %3, lsr #16" \
         : "=&r" ((USItype)(xh)), \
           "=r" ((USItype)(xl)), \
           "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \
         : "r" ((USItype)(a)), \
           "r" ((USItype)(b)));}
#define UMUL_TIME 20
#define UDIV_TIME 100
#endif /* __arm__ */

#if defined (__clipper__)
#define umul_ppmm(w1, w0, u, v) \
  ({union {UDItype __ll; \
           struct {USItype __l, __h;} __i; \
          } __xx; \
  __asm__ ("mulwux %2,%0" \
         : "=r" (__xx.__ll) \
         : "%0" ((USItype)(u)), \
           "r" ((USItype)(v))); \
  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
#define smul_ppmm(w1, w0, u, v) \
  ({union {DItype __ll; \
           struct {SItype __l, __h;} __i; \
          } __xx; \
  __asm__ ("mulwx %2,%0" \
         : "=r" (__xx.__ll) \
         : "%0" ((SItype)(u)), \
           "r" ((SItype)(v))); \
  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
#define __umulsidi3(u, v) \
  ({UDItype __w; \
  __asm__ ("mulwux %2,%0" \
         : "=r" (__w) \
         : "%0" ((USItype)(u)), \
           "r" ((USItype)(v))); \
  __w; })
#endif /* __clipper__ */

#if defined (__gmicro__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add.w %5,%1
        addx %3,%0" \
         : "=g" ((USItype)(sh)), \
           "=&g" ((USItype)(sl)) \
         : "%0" ((USItype)(ah)), \
           "g" ((USItype)(bh)), \
           "%1" ((USItype)(al)), \
           "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub.w %5,%1
        subx %3,%0" \
         : "=g" ((USItype)(sh)), \
           "=&g" ((USItype)(sl)) \
         : "0" ((USItype)(ah)), \
           "g" ((USItype)(bh)), \
           "1" ((USItype)(al)), \
           "g" ((USItype)(bl)))
#define umul_ppmm(ph, pl, m0, m1) \
  __asm__ ("mulx %3,%0,%1" \
         : "=g" ((USItype)(ph)), \
           "=r" ((USItype)(pl)) \
         : "%0" ((USItype)(m0)), \
           "g" ((USItype)(m1)))
#define udiv_qrnnd(q, r, nh, nl, d) \
  __asm__ ("divx %4,%0,%1" \
         : "=g" ((USItype)(q)), \
           "=r" ((USItype)(r)) \
         : "1" ((USItype)(nh)), \
           "0" ((USItype)(nl)), \
           "g" ((USItype)(d)))
#define count_leading_zeros(count, x) \
  __asm__ ("bsch/1 %1,%0" \
         : "=g" (count) \
         : "g" ((USItype)(x)), \
           "0" ((USItype)0))
#endif

#if defined (__hppa)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add %4,%5,%1
        addc %2,%3,%0" \
         : "=r" ((USItype)(sh)), \
           "=&r" ((USItype)(sl)) \
         : "%rM" ((USItype)(ah)), \
           "rM" ((USItype)(bh)), \
           "%rM" ((USItype)(al)), \
           "rM" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub %4,%5,%1
        subb %2,%3,%0" \
         : "=r" ((USItype)(sh)), \
           "=&r" ((USItype)(sl)) \
         : "rM" ((USItype)(ah)), \
           "rM" ((USItype)(bh)), \
           "rM" ((USItype)(al)), \
           "rM" ((USItype)(bl)))
#if defined (_PA_RISC1_1)
#define umul_ppmm(w1, w0, u, v) \
  do { \
    union \
      { \
        UDItype __f; \
        struct {USItype __w1, __w0;} __w1w0; \
      } __t; \
    __asm__ ("xmpyu %1,%2,%0" \
         : "=x" (__t.__f) \
         : "x" ((USItype)(u)), \
           "x" ((USItype)(v))); \
    (w1) = __t.__w1w0.__w1; \
    (w0) = __t.__w1w0.__w0; \
  } while (0)
#define UMUL_TIME 8
#else
#define UMUL_TIME 30
#endif
#define UDIV_TIME 40
#define count_leading_zeros(count, x) \
  do { \
    USItype __tmp; \
    __asm__ ( \
      "ldi 1,%0
        extru,= %1,15,16,%%r0 ; Bits 31..16 zero?
        extru,tr %1,15,16,%1 ; No.  Shift down, skip add.
        ldo 16(%0),%0 ; Yes.  Perform add.
        extru,= %1,23,8,%%r0 ; Bits 15..8 zero?
        extru,tr %1,23,8,%1 ; No.  Shift down, skip add.
        ldo 8(%0),%0 ; Yes.  Perform add.
        extru,= %1,27,4,%%r0 ; Bits 7..4 zero?
        extru,tr %1,27,4,%1 ; No.  Shift down, skip add.
        ldo 4(%0),%0 ; Yes.  Perform add.
        extru,= %1,29,2,%%r0 ; Bits 3..2 zero?
        extru,tr %1,29,2,%1 ; No.  Shift down, skip add.
        ldo 2(%0),%0 ; Yes.  Perform add.
        extru %1,30,1,%1 ; Extract bit 1.
        sub %0,%1,%0 ; Subtract it.
        " : "=r" (count), "=r" (__tmp) : "1" (x)); \
  } while (0)
#endif

#if defined (__i386__) || defined (__i486__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl %5,%1
        adcl %3,%0" \
         : "=r" ((USItype)(sh)), \
           "=&r" ((USItype)(sl)) \
         : "%0" ((USItype)(ah)), \
           "g" ((USItype)(bh)), \
           "%1" ((USItype)(al)), \
           "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl %5,%1
        sbbl %3,%0" \
         : "=r" ((USItype)(sh)), \
           "=&r" ((USItype)(sl)) \
         : "0" ((USItype)(ah)), \
           "g" ((USItype)(bh)), \
           "1" ((USItype)(al)), \
           "g" ((USItype)(bl)))
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mull %3" \
         : "=a" ((USItype)(w0)), \
           "=d" ((USItype)(w1)) \
         : "%0" ((USItype)(u)), \
           "rm" ((USItype)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divl %4" \
         : "=a" ((USItype)(q)), \
           "=d" ((USItype)(r)) \
         : "0" ((USItype)(n0)), \
           "1" ((USItype)(n1)), \
           "rm" ((USItype)(d)))
#define count_leading_zeros(count, x) \
  do { \
    USItype __cbtmp; \
    __asm__ ("bsrl %1,%0" \
         : "=r" (__cbtmp) : "rm" ((USItype)(x))); \
    (count) = __cbtmp ^ 31; \
  } while (0)
#define UMUL_TIME 40
#define UDIV_TIME 40
#endif /* 80x86 */

#if defined (__i860__)
#if 0
/* Make sure these patterns really improve the code before
   switching them on.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    union \
      { \
        DItype __ll; \
        struct {USItype __l, __h;} __i; \
      } __a, __b, __s; \
    __a.__i.__l = (al); \
    __a.__i.__h = (ah); \
    __b.__i.__l = (bl); \
    __b.__i.__h = (bh); \
    __asm__ ("fiadd.dd %1,%2,%0" \
         : "=f" (__s.__ll) \
         : "%f" (__a.__ll), "f" (__b.__ll)); \
    (sh) = __s.__i.__h; \
    (sl) = __s.__i.__l; \
  } while (0)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    union \
      { \
        DItype __ll; \
        struct {USItype __l, __h;} __i; \
      } __a, __b, __s; \
    __a.__i.__l = (al); \
    __a.__i.__h = (ah); \
    __b.__i.__l = (bl); \
    __b.__i.__h = (bh); \
    __asm__ ("fisub.dd %1,%2,%0" \
         : "=f" (__s.__ll) \
         : "%f" (__a.__ll), "f" (__b.__ll)); \
    (sh) = __s.__i.__h; \
    (sl) = __s.__i.__l; \
  } while (0)
#endif
#endif /* __i860__ */

#if defined (__i960__)
#define umul_ppmm(w1, w0, u, v) \
  ({union {UDItype __ll; \
           struct {USItype __l, __h;} __i; \
          } __xx; \
  __asm__ ("emul %2,%1,%0" \
         : "=d" (__xx.__ll) \
         : "%dI" ((USItype)(u)), \
           "dI" ((USItype)(v))); \
  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
#define __umulsidi3(u, v) \
  ({UDItype __w; \
  __asm__ ("emul %2,%1,%0" \
         : "=d" (__w) \
         : "%dI" ((USItype)(u)), \
           "dI" ((USItype)(v))); \
  __w; })
#endif /* __i960__ */

#if defined (__mc68000__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add%.l %5,%1
        addx%.l %3,%0" \
         : "=d" ((USItype)(sh)), \
           "=&d" ((USItype)(sl)) \
         : "%0" ((USItype)(ah)), \
           "d" ((USItype)(bh)), \
           "%1" ((USItype)(al)), \
           "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub%.l %5,%1
        subx%.l %3,%0" \
         : "=d" ((USItype)(sh)), \
           "=&d" ((USItype)(sl)) \
         : "0" ((USItype)(ah)), \
           "d" ((USItype)(bh)), \
           "1" ((USItype)(al)), \
           "g" ((USItype)(bl)))
#if defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mulu%.l %3,%1:%0" \
         : "=d" ((USItype)(w0)), \
           "=d" ((USItype)(w1)) \
         : "%0" ((USItype)(u)), \
           "dmi" ((USItype)(v)))
#define UMUL_TIME 45
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divu%.l %4,%1:%0" \
         : "=d" ((USItype)(q)), \
           "=d" ((USItype)(r)) \
         : "0" ((USItype)(n0)), \
           "1" ((USItype)(n1)), \
           "dmi" ((USItype)(d)))
#define UDIV_TIME 90
#define sdiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divs%.l %4,%1:%0" \
         : "=d" ((USItype)(q)), \
           "=d" ((USItype)(r)) \
         : "0" ((USItype)(n0)), \
           "1" ((USItype)(n1)), \
           "dmi" ((USItype)(d)))
#define count_leading_zeros(count, x) \
  __asm__ ("bfffo %1{%b2:%b2},%0" \
         : "=d" ((USItype)(count)) \
         : "od" ((USItype)(x)), "n" (0))
#else /* not mc68020 */
/* %/ inserts REGISTER_PREFIX, %# inserts IMMEDIATE_PREFIX.  */
#define umul_ppmm(xh, xl, a, b) \
  __asm__ ("| Inlined umul_ppmm
        move%.l %2,%/d0
        move%.l %3,%/d1
        move%.l %/d0,%/d2
        swap %/d0
        move%.l %/d1,%/d3
        swap %/d1
        move%.w %/d2,%/d4
        mulu %/d3,%/d4
        mulu %/d1,%/d2
        mulu %/d0,%/d3
        mulu %/d0,%/d1
        move%.l %/d4,%/d0
        eor%.w %/d0,%/d0
        swap %/d0
        add%.l %/d0,%/d2
        add%.l %/d3,%/d2
        jcc 1f
        add%.l %#65536,%/d1
1:      swap %/d2
        moveq %#0,%/d0
        move%.w %/d2,%/d0
        move%.w %/d4,%/d2
        move%.l %/d2,%1
        add%.l %/d1,%/d0
        move%.l %/d0,%0" \
         : "=g" ((USItype)(xh)), \
           "=g" ((USItype)(xl)) \
         : "g" ((USItype)(a)), \
           "g" ((USItype)(b)) \
         : "d0", "d1", "d2", "d3", "d4")
#define UMUL_TIME 100
#define UDIV_TIME 400
#endif /* not mc68020 */
#endif /* mc68000 */

#if defined (__m88000__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addu.co %1,%r4,%r5
        addu.ci %0,%r2,%r3" \
         : "=r" ((USItype)(sh)), \
           "=&r" ((USItype)(sl)) \
         : "%rJ" ((USItype)(ah)), \
           "rJ" ((USItype)(bh)), \
           "%rJ" ((USItype)(al)), \
           "rJ" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subu.co %1,%r4,%r5
        subu.ci %0,%r2,%r3" \
         : "=r" ((USItype)(sh)), \
           "=&r" ((USItype)(sl)) \
         : "rJ" ((USItype)(ah)), \
           "rJ" ((USItype)(bh)), \
           "rJ" ((USItype)(al)), \
           "rJ" ((USItype)(bl)))
#define count_leading_zeros(count, x) \
  do { \
    USItype __cbtmp; \
    __asm__ ("ff1 %0,%1" \
         : "=r" (__cbtmp) \
         : "r" ((USItype)(x))); \
    (count) = __cbtmp ^ 31; \
  } while (0)
#if defined (__mc88110__)
#define umul_ppmm(wh, wl, u, v) \
  do { \
    union {UDItype __ll; \
           struct {USItype __h, __l;} __i; \
          } __xx; \
    __asm__ ("mulu.d %0,%1,%2" \
         : "=r" (__xx.__ll) \
         : "r" ((USItype)(u)), \
           "r" ((USItype)(v))); \
    (wh) = __xx.__i.__h; \
    (wl) = __xx.__i.__l; \
  } while (0)
#define udiv_qrnnd(q, r, n1, n0, d) \
  ({union {UDItype __ll; \
           struct {USItype __h, __l;} __i; \
          } __xx; \
  USItype __q; \
  __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
  __asm__ ("divu.d %0,%1,%2" \
         : "=r" (__q) \
         : "r" (__xx.__ll), \
           "r" ((USItype)(d))); \
  (r) = (n0) - __q * (d); (q) = __q; })
#define UMUL_TIME 5
#define UDIV_TIME 25
#else
#define UMUL_TIME 17
#define UDIV_TIME 150
#endif /* __mc88110__ */
#endif /* __m88000__ */

#if defined (__mips__)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("multu %2,%3
        mflo %0
        mfhi %1" \
         : "=d" ((USItype)(w0)), \
           "=d" ((USItype)(w1)) \
         : "d" ((USItype)(u)), \
           "d" ((USItype)(v)))
#define UMUL_TIME 10
#define UDIV_TIME 100
#endif /* __mips__ */

#if defined (__ns32000__)
#define umul_ppmm(w1, w0, u, v) \
  ({union {UDItype __ll; \
           struct {USItype __l, __h;} __i; \
          } __xx; \
  __asm__ ("meid %2,%0" \
         : "=g" (__xx.__ll) \
         : "%0" ((USItype)(u)), \
           "g" ((USItype)(v))); \
  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
#define __umulsidi3(u, v) \
  ({UDItype __w; \
  __asm__ ("meid %2,%0" \
         : "=g" (__w) \
         : "%0" ((USItype)(u)), \
           "g" ((USItype)(v))); \
  __w; })
#define udiv_qrnnd(q, r, n1, n0, d) \
  ({union {UDItype __ll; \
           struct {USItype __l, __h;} __i; \
          } __xx; \
  __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
  __asm__ ("deid %2,%0" \
         : "=g" (__xx.__ll) \
         : "0" (__xx.__ll), \
           "g" ((USItype)(d))); \
  (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
#endif /* __ns32000__ */

#if (defined (_ARCH_PPC) || defined (_IBMR2)) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    if (__builtin_constant_p (bh) && (bh) == 0) \
      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
             : "=r" ((USItype)(sh)), \
               "=&r" ((USItype)(sl)) \
             : "%r" ((USItype)(ah)), \
               "%r" ((USItype)(al)), \
               "rI" ((USItype)(bl))); \
    else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
             : "=r" ((USItype)(sh)), \
               "=&r" ((USItype)(sl)) \
             : "%r" ((USItype)(ah)), \
               "%r" ((USItype)(al)), \
               "rI" ((USItype)(bl))); \
    else \
      __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
             : "=r" ((USItype)(sh)), \
               "=&r" ((USItype)(sl)) \
             : "%r" ((USItype)(ah)), \
               "r" ((USItype)(bh)), \
               "%r" ((USItype)(al)), \
               "rI" ((USItype)(bl))); \
  } while (0)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    if (__builtin_constant_p (ah) && (ah) == 0) \
      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
             : "=r" ((USItype)(sh)), \
               "=&r" ((USItype)(sl)) \
             : "r" ((USItype)(bh)), \
               "rI" ((USItype)(al)), \
               "r" ((USItype)(bl))); \
    else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0) \
      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
             : "=r" ((USItype)(sh)), \
               "=&r" ((USItype)(sl)) \
             : "r" ((USItype)(bh)), \
               "rI" ((USItype)(al)), \
               "r" ((USItype)(bl))); \
    else if (__builtin_constant_p (bh) && (bh) == 0) \
      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
             : "=r" ((USItype)(sh)), \
               "=&r" ((USItype)(sl)) \
             : "r" ((USItype)(ah)), \
               "rI" ((USItype)(al)), \
               "r" ((USItype)(bl))); \
    else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
             : "=r" ((USItype)(sh)), \
               "=&r" ((USItype)(sl)) \
             : "r" ((USItype)(ah)), \
               "rI" ((USItype)(al)), \
               "r" ((USItype)(bl))); \
    else \
      __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
             : "=r" ((USItype)(sh)), \
               "=&r" ((USItype)(sl)) \
             : "r" ((USItype)(ah)), \
               "r" ((USItype)(bh)), \
               "rI" ((USItype)(al)), \
               "r" ((USItype)(bl))); \
  } while (0)
#define count_leading_zeros(count, x) \
  __asm__ ("{cntlz|cntlzw} %0,%1" \
         : "=r" ((USItype)(count)) \
         : "r" ((USItype)(x)))
#if defined (_ARCH_PPC)
#define umul_ppmm(ph, pl, m0, m1) \
  do { \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("mulhwu %0,%1,%2" \
         : "=r" ((USItype) ph) \
         : "%r" (__m0), \
           "r" (__m1)); \
    (pl) = __m0 * __m1; \
  } while (0)
#define UMUL_TIME 15
#define smul_ppmm(ph, pl, m0, m1) \
  do { \
    SItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("mulhw %0,%1,%2" \
         : "=r" ((SItype) ph) \
         : "%r" (__m0), \
           "r" (__m1)); \
    (pl) = __m0 * __m1; \
  } while (0)
#define SMUL_TIME 14
#define UDIV_TIME 120
#else
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("mul %0,%2,%3" \
         : "=r" ((USItype)(xh)), \
           "=q" ((USItype)(xl)) \
         : "r" (__m0), \
           "r" (__m1)); \
    (xh) += ((((SItype) __m0 >> 31) & __m1) \
             + (((SItype) __m1 >> 31) & __m0)); \
  } while (0)
#define UMUL_TIME 8
#define smul_ppmm(xh, xl, m0, m1) \
  __asm__ ("mul %0,%2,%3" \
         : "=r" ((SItype)(xh)), \
           "=q" ((SItype)(xl)) \
         : "r" (m0), \
           "r" (m1))
#define SMUL_TIME 4
#define sdiv_qrnnd(q, r, nh, nl, d) \
  __asm__ ("div %0,%2,%4" \
         : "=r" ((SItype)(q)), "=q" ((SItype)(r)) \
         : "r" ((SItype)(nh)), "1" ((SItype)(nl)), "r" ((SItype)(d)))
#define UDIV_TIME 100
#endif
#endif /* Power architecture variants.  */

#if defined (__pyr__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addw %5,%1
        addwc %3,%0" \
         : "=r" ((USItype)(sh)), \
           "=&r" ((USItype)(sl)) \
         : "%0" ((USItype)(ah)), \
           "g" ((USItype)(bh)), \
           "%1" ((USItype)(al)), \
           "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subw %5,%1
        subwb %3,%0" \
         : "=r" ((USItype)(sh)), \
           "=&r" ((USItype)(sl)) \
         : "0" ((USItype)(ah)), \
           "g" ((USItype)(bh)), \
           "1" ((USItype)(al)), \
           "g" ((USItype)(bl)))
/* This insn doesn't work on ancient pyramids.  */
#define umul_ppmm(w1, w0, u, v) \
  ({union { \
        UDItype __ll; \
        struct {USItype __h, __l;} __i; \
      } __xx; \
  __xx.__i.__l = u; \
  __asm__ ("uemul %3,%0" \
         : "=r" (__xx.__i.__h), \
           "=r" (__xx.__i.__l) \
         : "1" (__xx.__i.__l), \
           "g" ((USItype)(v))); \
  (w1) = __xx.__i.__h; \
  (w0) = __xx.__i.__l;})
#endif /* __pyr__ */

#if defined (__ibm032__) /* RT/ROMP */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("a %1,%5
        ae %0,%3" \
         : "=r" ((USItype)(sh)), \
           "=&r" ((USItype)(sl)) \
         : "%0" ((USItype)(ah)), \
           "r" ((USItype)(bh)), \
           "%1" ((USItype)(al)), \
           "r" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("s %1,%5
        se %0,%3" \
         : "=r" ((USItype)(sh)), \
           "=&r" ((USItype)(sl)) \
         : "0" ((USItype)(ah)), \
           "r" ((USItype)(bh)), \
           "1" ((USItype)(al)), \
           "r" ((USItype)(bl)))
#define umul_ppmm(ph, pl, m0, m1) \
  do { \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ( \
      "s r2,r2
        mts r10,%2
        m r2,%3
        m r2,%3
        m r2,%3
        m r2,%3
        m r2,%3
        m r2,%3
        m r2,%3
        m r2,%3
        m r2,%3
        m r2,%3
        m r2,%3
        m r2,%3
        m r2,%3
        m r2,%3
        m r2,%3
        m r2,%3
        cas %0,r2,r0
        mfs r10,%1" \
         : "=r" ((USItype)(ph)), \
           "=r" ((USItype)(pl)) \
         : "%r" (__m0), \
           "r" (__m1) \
         : "r2"); \
    (ph) += ((((SItype) __m0 >> 31) & __m1) \
             + (((SItype) __m1 >> 31) & __m0)); \
  } while (0)
#define UMUL_TIME 20
#define UDIV_TIME 200
#define count_leading_zeros(count, x) \
  do { \
    if ((x) >= 0x10000) \
      __asm__ ("clz %0,%1" \
             : "=r" ((USItype)(count)) \
             : "r" ((USItype)(x) >> 16)); \
    else \
      { \
        __asm__ ("clz %0,%1" \
               : "=r" ((USItype)(count)) \
               : "r" ((USItype)(x))); \
        (count) += 16; \
      } \
  } while (0)
#endif

#if defined (__sparc__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addcc %r4,%5,%1
        addx %r2,%3,%0" \
         : "=r" ((USItype)(sh)), \
           "=&r" ((USItype)(sl)) \
         : "%rJ" ((USItype)(ah)), \
           "rI" ((USItype)(bh)), \
           "%rJ" ((USItype)(al)), \
           "rI" ((USItype)(bl)) \
         __CLOBBER_CC)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subcc %r4,%5,%1
        subx %r2,%3,%0" \
         : "=r" ((USItype)(sh)), \
           "=&r" ((USItype)(sl)) \
         : "rJ" ((USItype)(ah)), \
           "rI" ((USItype)(bh)), \
           "rJ" ((USItype)(al)), \
           "rI" ((USItype)(bl)) \
         __CLOBBER_CC)
#if defined (__sparc_v8__)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("umul %2,%3,%1;rd %%y,%0" \
         : "=r" ((USItype)(w1)), \
           "=r" ((USItype)(w0)) \
         : "r" ((USItype)(u)), \
           "r" ((USItype)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
         : "=&r" ((USItype)(q)), \
           "=&r" ((USItype)(r)) \
         : "r" ((USItype)(n1)), \
           "r" ((USItype)(n0)), \
           "r" ((USItype)(d)))
#else
#if defined (__sparclite__)
/* This has hardware multiply but not divide.  It also has two additional
   instructions scan (ffs from high bit) and divscc.  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("umul %2,%3,%1;rd %%y,%0" \
         : "=r" ((USItype)(w1)), \
           "=r" ((USItype)(w0)) \
         : "r" ((USItype)(u)), \
           "r" ((USItype)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("! Inlined udiv_qrnnd
        wr %%g0,%2,%%y ! Not a delayed write for sparclite
        tst %%g0
        divscc %3,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%%g1
        divscc %%g1,%4,%0
        rd %%y,%1
        bl,a 1f
        add %1,%4,%1
1:      ! End of inline udiv_qrnnd" \
         : "=r" ((USItype)(q)), \
           "=r" ((USItype)(r)) \
         : "r" ((USItype)(n1)), \
           "r" ((USItype)(n0)), \
           "rI" ((USItype)(d)) \
         : "%g1" __AND_CLOBBER_CC)
#define UDIV_TIME 37
#define count_leading_zeros(count, x) \
  __asm__ ("scan %1,0,%0" \
         : "=r" ((USItype)(x)) \
         : "r" ((USItype)(count)))
#else
/* SPARC without integer multiplication and divide instructions.
   (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("! Inlined umul_ppmm
        wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr
        sra %3,31,%%g2 ! Don't move this insn
        and %2,%%g2,%%g2 ! Don't move this insn
        andcc %%g0,0,%%g1 ! Don't move this insn
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,%3,%%g1
        mulscc %%g1,0,%%g1
        add %%g1,%%g2,%0
        rd %%y,%1" \
         : "=r" ((USItype)(w1)), \
           "=r" ((USItype)(w0)) \
         : "%rI" ((USItype)(u)), \
           "r" ((USItype)(v)) \
         : "%g1", "%g2" __AND_CLOBBER_CC)
#define UMUL_TIME 39		/* 39 instructions */
/* It's quite necessary to add this much assembler for the sparc.
   The default udiv_qrnnd (in C) is more than 10 times slower!  */
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("! Inlined udiv_qrnnd
        mov 32,%%g1
        subcc %1,%2,%%g0
1:      bcs 5f
        addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
        sub %1,%2,%1 ! this kills msb of n
        addx %1,%1,%1 ! so this can't give carry
        subcc %%g1,1,%%g1
2:      bne 1b
        subcc %1,%2,%%g0
        bcs 3f
        addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
        b 3f
        sub %1,%2,%1 ! this kills msb of n
4:      sub %1,%2,%1
5:      addxcc %1,%1,%1
        bcc 2b
        subcc %%g1,1,%%g1
! Got carry from n. Subtract next step to cancel this carry.
        bne 4b
        addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb
        sub %1,%2,%1
3:      xnor %0,0,%0
        ! End of inline udiv_qrnnd" \
         : "=&r" ((USItype)(q)), \
           "=&r" ((USItype)(r)) \
         : "r" ((USItype)(d)), \
           "1" ((USItype)(n1)), \
           "0" ((USItype)(n0)) : "%g1" __AND_CLOBBER_CC)
#define UDIV_TIME (3+7*32)	/* 7 instructions/iteration. 32 iterations. */
#endif /* __sparclite__ */
#endif /* __sparc_v8__ */
#endif /* __sparc__ */

#if defined (__vax__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl2 %5,%1
        adwc %3,%0" \
         : "=g" ((USItype)(sh)), \
           "=&g" ((USItype)(sl)) \
         : "%0" ((USItype)(ah)), \
           "g" ((USItype)(bh)), \
           "%1" ((USItype)(al)), \
           "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl2 %5,%1
        sbwc %3,%0" \
         : "=g" ((USItype)(sh)), \
           "=&g" ((USItype)(sl)) \
         : "0" ((USItype)(ah)), \
           "g" ((USItype)(bh)), \
           "1" ((USItype)(al)), \
           "g" ((USItype)(bl)))
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    union { \
        UDItype __ll; \
        struct {USItype __l, __h;} __i; \
      } __xx; \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("emul %1,%2,$0,%0" \
         : "=r" (__xx.__ll) \
         : "g" (__m0), \
           "g" (__m1)); \
    (xh) = __xx.__i.__h; \
    (xl) = __xx.__i.__l; \
    (xh) += ((((SItype) __m0 >> 31) & __m1) \
             + (((SItype) __m1 >> 31) & __m0)); \
  } while (0)
#define sdiv_qrnnd(q, r, n1, n0, d) \
  do { \
    union {DItype __ll; \
           struct {SItype __l, __h;} __i; \
          } __xx; \
    __xx.__i.__h = n1; __xx.__i.__l = n0; \
    __asm__ ("ediv %3,%2,%0,%1" \
         : "=g" (q), "=g" (r) \
         : "g" (__xx.__ll), "g" (d)); \
  } while (0)
#endif /* __vax__ */

#endif /* __GNUC__ */

/* If this machine has no inline assembler, use C macros.  */

#if !defined (add_ssaaaa)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    USItype __x; \
    __x = (al) + (bl); \
    (sh) = (ah) + (bh) + (__x < (al)); \
    (sl) = __x; \
  } while (0)
#endif

#if !defined (sub_ddmmss)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    USItype __x; \
    __x = (al) - (bl); \
    (sh) = (ah) - (bh) - (__x > (al)); \
    (sl) = __x; \
  } while (0)
#endif

#if !defined (umul_ppmm)
#define umul_ppmm(w1, w0, u, v) \
  do { \
    USItype __x0, __x1, __x2, __x3; \
    USItype __ul, __vl, __uh, __vh; \
    \
    __ul = __ll_lowpart (u); \
    __uh = __ll_highpart (u); \
    __vl = __ll_lowpart (v); \
    __vh = __ll_highpart (v); \
    \
    __x0 = (USItype) __ul * __vl; \
    __x1 = (USItype) __ul * __vh; \
    __x2 = (USItype) __uh * __vl; \
    __x3 = (USItype) __uh * __vh; \
    \
    __x1 += __ll_highpart (__x0); /* this can't give carry */ \
    __x1 += __x2; /* but this indeed can */ \
    if (__x1 < __x2) /* did we get it? */ \
      __x3 += __ll_B; /* yes, add it in the proper pos. */ \
    \
    (w1) = __x3 + __ll_highpart (__x1); \
    (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
  } while (0)
#endif
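/* Note on the generic umul_ppmm above: it is schoolbook multiplication in
   base __ll_B (1 << 16 for a 32-bit word).  Writing u = __uh * __ll_B + __ul
   and v = __vh * __ll_B + __vl gives
   u * v = __uh * __vh * __ll_B^2 + (__uh * __vl + __ul * __vh) * __ll_B
	   + __ul * __vl,
   which is exactly the sum the partial products __x0 .. __x3 accumulate;
   the explicit comparison handles the carry the middle terms can produce.  */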

#if !defined (__umulsidi3)
#define __umulsidi3(u, v) \
  ({DIunion __w; \
    umul_ppmm (__w.s.high, __w.s.low, u, v); \
    __w.ll; })
#endif

/* Define this unconditionally, so it can be used for debugging.  */
#define __udiv_qrnnd_c(q, r, n1, n0, d) \
  do { \
    USItype __d1, __d0, __q1, __q0; \
    USItype __r1, __r0, __m; \
    __d1 = __ll_highpart (d); \
    __d0 = __ll_lowpart (d); \
    \
    __r1 = (n1) % __d1; \
    __q1 = (n1) / __d1; \
    __m = (USItype) __q1 * __d0; \
    __r1 = __r1 * __ll_B | __ll_highpart (n0); \
    if (__r1 < __m) \
      { \
        __q1--, __r1 += (d); \
        if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */ \
          if (__r1 < __m) \
            __q1--, __r1 += (d); \
      } \
    __r1 -= __m; \
    \
    __r0 = __r1 % __d1; \
    __q0 = __r1 / __d1; \
    __m = (USItype) __q0 * __d0; \
    __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
    if (__r0 < __m) \
      { \
        __q0--, __r0 += (d); \
        if (__r0 >= (d)) \
          if (__r0 < __m) \
            __q0--, __r0 += (d); \
      } \
    __r0 -= __m; \
    \
    (q) = (USItype) __q1 * __ll_B | __q0; \
    (r) = __r0; \
  } while (0)
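/* Note on __udiv_qrnnd_c above: it is schoolbook division with two
   base-(1 << 16) digits per word.  Each quotient digit is estimated from
   the high half of the divisor (__d1) and corrected at most twice.  The
   estimate is only guaranteed to be that close when the divisor has its
   most significant bit set, which is why this fallback is paired with
   UDIV_NEEDS_NORMALIZATION == 1 below.  */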

/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
   __udiv_w_sdiv (defined in libgcc or elsewhere).  */
#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
#define udiv_qrnnd(q, r, nh, nl, d) \
  do { \
    USItype __r; \
    (q) = __udiv_w_sdiv (&__r, nh, nl, d); \
    (r) = __r; \
  } while (0)
#endif

/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c.  */
#if !defined (udiv_qrnnd)
#define UDIV_NEEDS_NORMALIZATION 1
#define udiv_qrnnd __udiv_qrnnd_c
#endif
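/* Illustrative sketch (not part of the original file): how a caller is
   expected to satisfy UDIV_NEEDS_NORMALIZATION before using udiv_qrnnd.
   The function name is hypothetical, d is assumed nonzero, and the block
   is disabled so it has no effect on compilation.  */
#if 0
static USItype
example_divide (USItype n1, USItype n0, USItype d, USItype *rem)
{
  USItype q, r, bm;

  /* Shift the divisor until its msb is set and shift the two-word
     numerator by the same amount, feeding the bits shifted out of n0
     into n1.  */
  count_leading_zeros (bm, d);
  if (bm != 0)
    {
      n1 = (n1 << bm) | (n0 >> (SI_TYPE_SIZE - bm));
      n0 = n0 << bm;
      d = d << bm;
    }
  udiv_qrnnd (q, r, n1, n0, d);
  *rem = r >> bm;		/* undo the normalization of the remainder */
  return q;
}
#endif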

#if !defined (count_leading_zeros)
extern const UQItype __clz_tab[];
#define count_leading_zeros(count, x) \
  do { \
    USItype __xr = (x); \
    USItype __a; \
    \
    if (SI_TYPE_SIZE <= 32) \
      { \
        __a = __xr < (1<<2*__BITS4) \
          ? (__xr < (1<<__BITS4) ? 0 : __BITS4) \
          : (__xr < (1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
      } \
    else \
      { \
        for (__a = SI_TYPE_SIZE - 8; __a > 0; __a -= 8) \
          if (((__xr >> __a) & 0xff) != 0) \
            break; \
      } \
    \
    (count) = SI_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
  } while (0)
#endif
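/* Note on the fallback above: __clz_tab is a 256-entry table supplied
   elsewhere (traditionally by libgcc); for a nonzero byte b it holds the
   index of b's highest set bit plus one.  The macro therefore locates the
   highest nonzero part of __xr first and then finishes the count with a
   single table lookup.  */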

#ifndef UDIV_NEEDS_NORMALIZATION
#define UDIV_NEEDS_NORMALIZATION 0
#endif