e1a79915 1/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
f71c71f1 2 Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
e1a79915
RS
3
4 This definition file is free software; you can redistribute it
5 and/or modify it under the terms of the GNU General Public
6 License as published by the Free Software Foundation; either
7 version 2, or (at your option) any later version.
8
9 This definition file is distributed in the hope that it will be
10 useful, but WITHOUT ANY WARRANTY; without even the implied
11 warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
12 See the GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
17
cf7d8fd0
TG
18#ifndef SI_TYPE_SIZE
19#define SI_TYPE_SIZE 32
e1a79915
RS
20#endif
21
cf7d8fd0
TG
22#define __BITS4 (SI_TYPE_SIZE / 4)
23#define __ll_B (1L << (SI_TYPE_SIZE / 2))
e6c5404d
TG
24#define __ll_lowpart(t) ((USItype) (t) % __ll_B)
25#define __ll_highpart(t) ((USItype) (t) / __ll_B)
e1a79915 26
2e8a6600 27/* Define auxiliary asm macros.
e1a79915
RS
28
29 1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand)
e6c5404d
TG
30 multiplies two USItype integers MULTIPLIER and MULTIPLICAND,
31 and generates a two-part USItype product in HIGH_PROD and
e1a79915
RS
32 LOW_PROD.
33
e6c5404d
TG
34 2) __umulsidi3(a,b) multiplies two USItype integers A and B,
35 and returns a UDItype product. This is just a variant of umul_ppmm.
e1a79915
RS
36
37 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
38 denominator) divides a two-word unsigned integer, composed by the
39 integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and
40 places the quotient in QUOTIENT and the remainder in REMAINDER.
41 HIGH_NUMERATOR must be less than DENOMINATOR for correct operation.
42 If, in addition, the macro requires the most significant bit of DENOMINATOR
43 to be 1, the pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1.
44
b6d2adc7
TG
45 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
46 denominator). Like udiv_qrnnd but the numbers are signed. The
47 quotient is rounded towards 0.
48
49 5) count_leading_zeros(count, x) counts the number of zero-bits from
e1a79915
RS
50 the msb to the first non-zero bit. This is the number of steps X
51 needs to be shifted left to set the msb. Undefined for X == 0.
52
b6d2adc7 53 6) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
e1a79915
RS
54 high_addend_2, low_addend_2) adds two two-word unsigned integers,
55 composed by HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and
56 LOW_ADDEND_2 respectively. The result is placed in HIGH_SUM and
57 LOW_SUM. Overflow (i.e. carry out) is not stored anywhere, and is
58 lost.
59
b6d2adc7 60 7) sub_ddmmss(high_difference, low_difference, high_minuend,
e1a79915
RS
61 low_minuend, high_subtrahend, low_subtrahend) subtracts two
62 two-word unsigned integers, composed by HIGH_MINUEND and
63 LOW_MINUEND, and HIGH_SUBTRAHEND and LOW_SUBTRAHEND
64 respectively. The result is placed in HIGH_DIFFERENCE and
65 LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
66 and is lost.
67
68 If any of these macros are left undefined for a particular CPU,
69 C macros are used. */
70
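/* Illustrative usage sketch, not part of the original header: it assumes
   USItype is a 32-bit unsigned type (as elsewhere in GCC) and that the
   macros described above are in scope; the function name is made up.
   Guarded with #if 0 so it has no effect on this file.  */
#if 0
static USItype
__longlong_example (USItype a, USItype b, USItype d)
{
  USItype hi, lo, q, r;

  umul_ppmm (hi, lo, a, b);		/* hi:lo = a * b */
  /* udiv_qrnnd requires hi < d; if UDIV_NEEDS_NORMALIZATION is 1, the
     divisor (and numerator) must also be shifted so d's msb is set.  */
  udiv_qrnnd (q, r, hi, lo, d);		/* q = hi:lo / d, r = hi:lo % d */
  return q;
}
#endif
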
71/* The CPUs come in alphabetical order below.
72
73 Please add support for more CPUs here, or improve the current support
74 for the CPUs below!
f71c71f1 75 (E.g. WE32100, IBM360.) */
d83dd29a 76
3a0d22dd
MM
77#if defined (__GNUC__) && !defined (NO_ASM)
78
d83dd29a
TG
79/* We sometimes need to clobber "cc" with gcc2, but that would not be
80 understood by gcc1. Use cpp to avoid major code duplication. */
81#if __GNUC__ < 2
82#define __CLOBBER_CC
83#define __AND_CLOBBER_CC
84#else /* __GNUC__ >= 2 */
85#define __CLOBBER_CC : "cc"
86#define __AND_CLOBBER_CC , "cc"
87#endif /* __GNUC__ < 2 */
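
/* Usage sketch (illustrative): an asm whose only clobber would be the
   condition codes ends its operand list with __CLOBBER_CC, while an asm
   that already names clobbers appends __AND_CLOBBER_CC, e.g.
	__asm__ ("..." : outputs : inputs __CLOBBER_CC);
	__asm__ ("..." : outputs : inputs : "%g1" __AND_CLOBBER_CC);
   With gcc1 both expand to nothing; with gcc2 they add the "cc" clobber.  */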
e1a79915 88
f71c71f1 89#if defined (__a29k__) || defined (_AM29K)
e1a79915
RS
90#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
91 __asm__ ("add %1,%4,%5
92 addc %0,%2,%3" \
e6c5404d
TG
93 : "=r" ((USItype)(sh)), \
94 "=&r" ((USItype)(sl)) \
95 : "%r" ((USItype)(ah)), \
96 "rI" ((USItype)(bh)), \
97 "%r" ((USItype)(al)), \
98 "rI" ((USItype)(bl)))
e1a79915
RS
99#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
100 __asm__ ("sub %1,%4,%5
101 subc %0,%2,%3" \
e6c5404d
TG
102 : "=r" ((USItype)(sh)), \
103 "=&r" ((USItype)(sl)) \
104 : "r" ((USItype)(ah)), \
105 "rI" ((USItype)(bh)), \
106 "r" ((USItype)(al)), \
107 "rI" ((USItype)(bl)))
e1a79915
RS
108#define umul_ppmm(xh, xl, m0, m1) \
109 do { \
e6c5404d 110 USItype __m0 = (m0), __m1 = (m1); \
d83dd29a 111 __asm__ ("multiplu %0,%1,%2" \
e6c5404d 112 : "=r" ((USItype)(xl)) \
d83dd29a
TG
113 : "r" (__m0), \
114 "r" (__m1)); \
115 __asm__ ("multmu %0,%1,%2" \
e6c5404d 116 : "=r" ((USItype)(xh)) \
d83dd29a
TG
117 : "r" (__m0), \
118 "r" (__m1)); \
e1a79915
RS
119 } while (0)
120#define udiv_qrnnd(q, r, n1, n0, d) \
121 __asm__ ("dividu %0,%3,%4" \
e6c5404d
TG
122 : "=r" ((USItype)(q)), \
123 "=q" ((USItype)(r)) \
124 : "1" ((USItype)(n1)), \
125 "r" ((USItype)(n0)), \
126 "r" ((USItype)(d)))
e1a79915 127#define count_leading_zeros(count, x) \
d83dd29a 128 __asm__ ("clz %0,%1" \
e6c5404d
TG
129 : "=r" ((USItype)(count)) \
130 : "r" ((USItype)(x)))
e1a79915
RS
131#endif /* __a29k__ */
132
133#if defined (__arm__)
134#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
ac1e676a
RE
135 __asm__ ("adds %1, %4, %5
136 adc %0, %2, %3" \
e6c5404d
TG
137 : "=r" ((USItype)(sh)), \
138 "=&r" ((USItype)(sl)) \
139 : "%r" ((USItype)(ah)), \
140 "rI" ((USItype)(bh)), \
141 "%r" ((USItype)(al)), \
142 "rI" ((USItype)(bl)))
e1a79915 143#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
ac1e676a
RE
144 __asm__ ("subs %1, %4, %5
145 sbc %0, %2, %3" \
e6c5404d
TG
146 : "=r" ((USItype)(sh)), \
147 "=&r" ((USItype)(sl)) \
148 : "r" ((USItype)(ah)), \
149 "rI" ((USItype)(bh)), \
150 "r" ((USItype)(al)), \
151 "rI" ((USItype)(bl)))
f71c71f1 152#define umul_ppmm(xh, xl, a, b) \
ac1e676a
RE
153 __asm__ ("%@ Inlined umul_ppmm
154 mov %|r0, %2, lsr #16
155 mov %|r2, %3, lsr #16
156 bic %|r1, %2, %|r0, lsl #16
157 bic %|r2, %3, %|r2, lsl #16
158 mul %1, %|r1, %|r2
159 mul %|r2, %|r0, %|r2
160 mul %|r1, %0, %|r1
161 mul %0, %|r0, %0
162 adds %|r1, %|r2, %|r1
163 addcs %0, %0, #65536
164 adds %1, %1, %|r1, lsl #16
165 adc %0, %0, %|r1, lsr #16" \
f71c71f1
TG
166 : "=&r" ((USItype)(xh)), \
167 "=r" ((USItype)(xl)) \
168 : "r" ((USItype)(a)), \
169 "r" ((USItype)(b)) \
170 : "r0", "r1", "r2")
171#define UMUL_TIME 20
172#define UDIV_TIME 100
e1a79915
RS
173#endif /* __arm__ */
174
f71c71f1
TG
175#if defined (__clipper__)
176#define umul_ppmm(w1, w0, u, v) \
177 ({union {UDItype __ll; \
178 struct {USItype __l, __h;} __i; \
179 } __xx; \
180 __asm__ ("mulwux %2,%0" \
181 : "=r" (__xx.__ll) \
182 : "%0" ((USItype)(u)), \
183 "r" ((USItype)(v))); \
184 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
185#define smul_ppmm(w1, w0, u, v) \
186 ({union {DItype __ll; \
187 struct {SItype __l, __h;} __i; \
188 } __xx; \
189 __asm__ ("mulwx %2,%0" \
190 : "=r" (__xx.__ll) \
191 : "%0" ((SItype)(u)), \
192 "r" ((SItype)(v))); \
193 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
194#define __umulsidi3(u, v) \
195 ({UDItype __w; \
196 __asm__ ("mulwux %2,%0" \
197 : "=r" (__w) \
198 : "%0" ((USItype)(u)), \
199 "r" ((USItype)(v))); \
200 __w; })
201#endif /* __clipper__ */
202
e1a79915
RS
203#if defined (__gmicro__)
204#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
205 __asm__ ("add.w %5,%1
206 addx %3,%0" \
e6c5404d
TG
207 : "=g" ((USItype)(sh)), \
208 "=&g" ((USItype)(sl)) \
209 : "%0" ((USItype)(ah)), \
210 "g" ((USItype)(bh)), \
211 "%1" ((USItype)(al)), \
212 "g" ((USItype)(bl)))
e1a79915
RS
213#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
214 __asm__ ("sub.w %5,%1
215 subx %3,%0" \
e6c5404d
TG
216 : "=g" ((USItype)(sh)), \
217 "=&g" ((USItype)(sl)) \
218 : "0" ((USItype)(ah)), \
219 "g" ((USItype)(bh)), \
220 "1" ((USItype)(al)), \
221 "g" ((USItype)(bl)))
e1a79915
RS
222#define umul_ppmm(ph, pl, m0, m1) \
223 __asm__ ("mulx %3,%0,%1" \
e6c5404d
TG
224 : "=g" ((USItype)(ph)), \
225 "=r" ((USItype)(pl)) \
226 : "%0" ((USItype)(m0)), \
227 "g" ((USItype)(m1)))
e1a79915
RS
228#define udiv_qrnnd(q, r, nh, nl, d) \
229 __asm__ ("divx %4,%0,%1" \
e6c5404d
TG
230 : "=g" ((USItype)(q)), \
231 "=r" ((USItype)(r)) \
232 : "1" ((USItype)(nh)), \
233 "0" ((USItype)(nl)), \
234 "g" ((USItype)(d)))
e1a79915
RS
235#define count_leading_zeros(count, x) \
236 __asm__ ("bsch/1 %1,%0" \
d83dd29a 237 : "=g" (count) \
e6c5404d
TG
238 : "g" ((USItype)(x)), \
239 "0" ((USItype)0))
e1a79915
RS
240#endif
241
242#if defined (__hppa)
243#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
244 __asm__ ("add %4,%5,%1
245 addc %2,%3,%0" \
e6c5404d
TG
246 : "=r" ((USItype)(sh)), \
247 "=&r" ((USItype)(sl)) \
248 : "%rM" ((USItype)(ah)), \
249 "rM" ((USItype)(bh)), \
250 "%rM" ((USItype)(al)), \
251 "rM" ((USItype)(bl)))
e1a79915 252#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
7691f559
TG
253 __asm__ ("sub %4,%5,%1
254 subb %2,%3,%0" \
e6c5404d
TG
255 : "=r" ((USItype)(sh)), \
256 "=&r" ((USItype)(sl)) \
257 : "rM" ((USItype)(ah)), \
258 "rM" ((USItype)(bh)), \
259 "rM" ((USItype)(al)), \
260 "rM" ((USItype)(bl)))
d83dd29a
TG
261#if defined (_PA_RISC1_1)
262#define umul_ppmm(w1, w0, u, v) \
263 do { \
264 union \
265 { \
e6c5404d
TG
266 UDItype __f; \
267 struct {USItype __w1, __w0;} __w1w0; \
d83dd29a
TG
268 } __t; \
269 __asm__ ("xmpyu %1,%2,%0" \
270 : "=x" (__t.__f) \
e6c5404d
TG
271 : "x" ((USItype)(u)), \
272 "x" ((USItype)(v))); \
d83dd29a
TG
273 (w1) = __t.__w1w0.__w1; \
274 (w0) = __t.__w1w0.__w0; \
275 } while (0)
276#define UMUL_TIME 8
277#else
278#define UMUL_TIME 30
279#endif
280#define UDIV_TIME 40
f71c71f1
TG
281#define count_leading_zeros(count, x) \
282 do { \
283 USItype __tmp; \
284 __asm__ ( \
285 "ldi 1,%0
286 extru,= %1,15,16,%%r0 ; Bits 31..16 zero?
287 extru,tr %1,15,16,%1 ; No. Shift down, skip add.
288 ldo 16(%0),%0 ; Yes. Perform add.
289 extru,= %1,23,8,%%r0 ; Bits 15..8 zero?
290 extru,tr %1,23,8,%1 ; No. Shift down, skip add.
291 ldo 8(%0),%0 ; Yes. Perform add.
292 extru,= %1,27,4,%%r0 ; Bits 7..4 zero?
293 extru,tr %1,27,4,%1 ; No. Shift down, skip add.
294 ldo 4(%0),%0 ; Yes. Perform add.
295 extru,= %1,29,2,%%r0 ; Bits 3..2 zero?
296 extru,tr %1,29,2,%1 ; No. Shift down, skip add.
297 ldo 2(%0),%0 ; Yes. Perform add.
298 extru %1,30,1,%1 ; Extract bit 1.
299 sub %0,%1,%0 ; Subtract it.
300 " : "=r" (count), "=r" (__tmp) : "1" (x)); \
301 } while (0)
e1a79915
RS
302#endif
303
304#if defined (__i386__) || defined (__i486__)
305#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
306 __asm__ ("addl %5,%1
307 adcl %3,%0" \
e6c5404d
TG
308 : "=r" ((USItype)(sh)), \
309 "=&r" ((USItype)(sl)) \
310 : "%0" ((USItype)(ah)), \
311 "g" ((USItype)(bh)), \
312 "%1" ((USItype)(al)), \
313 "g" ((USItype)(bl)))
e1a79915
RS
314#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
315 __asm__ ("subl %5,%1
316 sbbl %3,%0" \
e6c5404d
TG
317 : "=r" ((USItype)(sh)), \
318 "=&r" ((USItype)(sl)) \
319 : "0" ((USItype)(ah)), \
320 "g" ((USItype)(bh)), \
321 "1" ((USItype)(al)), \
322 "g" ((USItype)(bl)))
e1a79915
RS
323#define umul_ppmm(w1, w0, u, v) \
324 __asm__ ("mull %3" \
e6c5404d
TG
325 : "=a" ((USItype)(w0)), \
326 "=d" ((USItype)(w1)) \
327 : "%0" ((USItype)(u)), \
328 "rm" ((USItype)(v)))
e1a79915
RS
329#define udiv_qrnnd(q, r, n1, n0, d) \
330 __asm__ ("divl %4" \
e6c5404d
TG
331 : "=a" ((USItype)(q)), \
332 "=d" ((USItype)(r)) \
333 : "0" ((USItype)(n0)), \
334 "1" ((USItype)(n1)), \
335 "rm" ((USItype)(d)))
e1a79915
RS
336#define count_leading_zeros(count, x) \
337 do { \
e6c5404d 338 USItype __cbtmp; \
e1a79915 339 __asm__ ("bsrl %1,%0" \
e6c5404d 340 : "=r" (__cbtmp) : "rm" ((USItype)(x))); \
e1a79915
RS
341 (count) = __cbtmp ^ 31; \
342 } while (0)
d83dd29a
TG
343#define UMUL_TIME 40
344#define UDIV_TIME 40
e1a79915
RS
345#endif /* 80x86 */
346
347#if defined (__i860__)
348#if 0
349/* Make sure these patterns really improve the code before
350 switching them on. */
351#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
352 do { \
353 union \
354 { \
e6c5404d
TG
355 DItype __ll; \
356 struct {USItype __l, __h;} __i; \
e1a79915 357 } __a, __b, __s; \
e6c5404d
TG
358 __a.__i.__l = (al); \
359 __a.__i.__h = (ah); \
360 __b.__i.__l = (bl); \
361 __b.__i.__h = (bh); \
e1a79915 362 __asm__ ("fiadd.dd %1,%2,%0" \
e6c5404d
TG
363 : "=f" (__s.__ll) \
364 : "%f" (__a.__ll), "f" (__b.__ll)); \
365 (sh) = __s.__i.__h; \
366 (sl) = __s.__i.__l; \
e1a79915
RS
367 } while (0)
368#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
369 do { \
370 union \
371 { \
e6c5404d
TG
372 DItype __ll; \
373 struct {USItype __l, __h;} __i; \
e1a79915 374 } __a, __b, __s; \
e6c5404d
TG
375 __a.__i.__l = (al); \
376 __a.__i.__h = (ah); \
377 __b.__i.__l = (bl); \
378 __b.__i.__h = (bh); \
e1a79915 379 __asm__ ("fisub.dd %1,%2,%0" \
e6c5404d
TG
380 : "=f" (__s.__ll) \
381 : "%f" (__a.__ll), "f" (__b.__ll)); \
382 (sh) = __s.__i.__h; \
383 (sl) = __s.__i.__l; \
e1a79915
RS
384 } while (0)
385#endif
386#endif /* __i860__ */
387
f71c71f1
TG
388#if defined (__i960__)
389#define umul_ppmm(w1, w0, u, v) \
390 ({union {UDItype __ll; \
391 struct {USItype __l, __h;} __i; \
392 } __xx; \
393 __asm__ ("emul %2,%1,%0" \
394 : "=d" (__xx.__ll) \
395 : "%dI" ((USItype)(u)), \
396 "dI" ((USItype)(v))); \
397 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
398#define __umulsidi3(u, v) \
399 ({UDItype __w; \
400 __asm__ ("emul %2,%1,%0" \
401 : "=d" (__w) \
402 : "%dI" ((USItype)(u)), \
403 "dI" ((USItype)(v))); \
404 __w; })
405#endif /* __i960__ */
e1a79915
RS
406
407#if defined (__mc68000__)
408#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
409 __asm__ ("add%.l %5,%1
410 addx%.l %3,%0" \
e6c5404d
TG
411 : "=d" ((USItype)(sh)), \
412 "=&d" ((USItype)(sl)) \
413 : "%0" ((USItype)(ah)), \
414 "d" ((USItype)(bh)), \
415 "%1" ((USItype)(al)), \
416 "g" ((USItype)(bl)))
e1a79915
RS
417#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
418 __asm__ ("sub%.l %5,%1
419 subx%.l %3,%0" \
e6c5404d
TG
420 : "=d" ((USItype)(sh)), \
421 "=&d" ((USItype)(sl)) \
422 : "0" ((USItype)(ah)), \
423 "d" ((USItype)(bh)), \
424 "1" ((USItype)(al)), \
425 "g" ((USItype)(bl)))
e1a79915
RS
426#if defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)
427#define umul_ppmm(w1, w0, u, v) \
428 __asm__ ("mulu%.l %3,%1:%0" \
e6c5404d
TG
429 : "=d" ((USItype)(w0)), \
430 "=d" ((USItype)(w1)) \
431 : "%0" ((USItype)(u)), \
432 "dmi" ((USItype)(v)))
d83dd29a 433#define UMUL_TIME 45
e1a79915
RS
434#define udiv_qrnnd(q, r, n1, n0, d) \
435 __asm__ ("divu%.l %4,%1:%0" \
e6c5404d
TG
436 : "=d" ((USItype)(q)), \
437 "=d" ((USItype)(r)) \
438 : "0" ((USItype)(n0)), \
439 "1" ((USItype)(n1)), \
440 "dmi" ((USItype)(d)))
d83dd29a
TG
441#define UDIV_TIME 90
442#define sdiv_qrnnd(q, r, n1, n0, d) \
443 __asm__ ("divs%.l %4,%1:%0" \
e6c5404d
TG
444 : "=d" ((USItype)(q)), \
445 "=d" ((USItype)(r)) \
446 : "0" ((USItype)(n0)), \
447 "1" ((USItype)(n1)), \
448 "dmi" ((USItype)(d)))
e1a79915
RS
449#define count_leading_zeros(count, x) \
450 __asm__ ("bfffo %1{%b2:%b2},%0" \
e6c5404d
TG
451 : "=d" ((USItype)(count)) \
452 : "od" ((USItype)(x)), "n" (0))
e1a79915 453#else /* not mc68020 */
bb66e0f6 454/* %/ inserts REGISTER_PREFIX. */
e1a79915
RS
455#define umul_ppmm(xh, xl, a, b) \
456 __asm__ ("| Inlined umul_ppmm
f71c71f1
TG
457 move%.l %2,%/d0
458 move%.l %3,%/d1
459 move%.l %/d0,%/d2
bb66e0f6 460 swap %/d0
f71c71f1 461 move%.l %/d1,%/d3
bb66e0f6 462 swap %/d1
f71c71f1 463 move%.w %/d2,%/d4
bb66e0f6
ILT
464 mulu %/d3,%/d4
465 mulu %/d1,%/d2
466 mulu %/d0,%/d3
467 mulu %/d0,%/d1
f71c71f1
TG
468 move%.l %/d4,%/d0
469 eor%.w %/d0,%/d0
bb66e0f6 470 swap %/d0
f71c71f1
TG
471 add%.l %/d0,%/d2
472 add%.l %/d3,%/d2
e1a79915 473 jcc 1f
f71c71f1 474 add%.l #65536,%/d1
bb66e0f6
ILT
4751: swap %/d2
476 moveq #0,%/d0
f71c71f1
TG
477 move%.w %/d2,%/d0
478 move%.w %/d4,%/d2
479 move%.l %/d2,%1
480 add%.l %/d1,%/d0
481 move%.l %/d0,%0" \
e6c5404d
TG
482 : "=g" ((USItype)(xh)), \
483 "=g" ((USItype)(xl)) \
484 : "g" ((USItype)(a)), \
485 "g" ((USItype)(b)) \
d83dd29a
TG
486 : "d0", "d1", "d2", "d3", "d4")
487#define UMUL_TIME 100
488#define UDIV_TIME 400
e1a79915
RS
489#endif /* not mc68020 */
490#endif /* mc68000 */
491
492#if defined (__m88000__)
493#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
494 __asm__ ("addu.co %1,%r4,%r5
495 addu.ci %0,%r2,%r3" \
e6c5404d
TG
496 : "=r" ((USItype)(sh)), \
497 "=&r" ((USItype)(sl)) \
498 : "%rJ" ((USItype)(ah)), \
499 "rJ" ((USItype)(bh)), \
500 "%rJ" ((USItype)(al)), \
501 "rJ" ((USItype)(bl)))
e1a79915
RS
502#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
503 __asm__ ("subu.co %1,%r4,%r5
504 subu.ci %0,%r2,%r3" \
e6c5404d
TG
505 : "=r" ((USItype)(sh)), \
506 "=&r" ((USItype)(sl)) \
507 : "rJ" ((USItype)(ah)), \
508 "rJ" ((USItype)(bh)), \
509 "rJ" ((USItype)(al)), \
510 "rJ" ((USItype)(bl)))
e1a79915
RS
511#define count_leading_zeros(count, x) \
512 do { \
e6c5404d 513 USItype __cbtmp; \
e1a79915 514 __asm__ ("ff1 %0,%1" \
d83dd29a 515 : "=r" (__cbtmp) \
e6c5404d 516 : "r" ((USItype)(x))); \
e1a79915
RS
517 (count) = __cbtmp ^ 31; \
518 } while (0)
d83dd29a 519#if defined (__mc88110__)
f71c71f1
TG
520#define umul_ppmm(wh, wl, u, v) \
521 do { \
522 union {UDItype __ll; \
523 struct {USItype __h, __l;} __i; \
524 } __xx; \
525 __asm__ ("mulu.d %0,%1,%2" \
526 : "=r" (__xx.__ll) \
527 : "r" ((USItype)(u)), \
528 "r" ((USItype)(v))); \
529 (wh) = __xx.__i.__h; \
530 (wl) = __xx.__i.__l; \
531 } while (0)
d83dd29a 532#define udiv_qrnnd(q, r, n1, n0, d) \
f71c71f1
TG
533 ({union {UDItype __ll; \
534 struct {USItype __h, __l;} __i; \
535 } __xx; \
536 USItype __q; \
537 __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
538 __asm__ ("divu.d %0,%1,%2" \
539 : "=r" (__q) \
540 : "r" (__xx.__ll), \
541 "r" ((USItype)(d))); \
542 (r) = (n0) - __q * (d); (q) = __q; })
543#define UMUL_TIME 5
544#define UDIV_TIME 25
545#else
546#define UMUL_TIME 17
547#define UDIV_TIME 150
548#endif /* __mc88110__ */
e1a79915
RS
549#endif /* __m88000__ */
550
551#if defined (__mips__)
d28e1517
TG
552#define umul_ppmm(w1, w0, u, v) \
553 __asm__ ("multu %2,%3
554 mflo %0
e1a79915 555 mfhi %1" \
e6c5404d
TG
556 : "=d" ((USItype)(w0)), \
557 "=d" ((USItype)(w1)) \
558 : "d" ((USItype)(u)), \
559 "d" ((USItype)(v)))
f71c71f1 560#define UMUL_TIME 10
e1a79915
RS
561#define UDIV_TIME 100
562#endif /* __mips__ */
563
564#if defined (__ns32000__)
f71c71f1
TG
565#define umul_ppmm(w1, w0, u, v) \
566 ({union {UDItype __ll; \
567 struct {USItype __l, __h;} __i; \
568 } __xx; \
569 __asm__ ("meid %2,%0" \
570 : "=g" (__xx.__ll) \
571 : "%0" ((USItype)(u)), \
572 "g" ((USItype)(v))); \
573 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
e1a79915 574#define __umulsidi3(u, v) \
e6c5404d 575 ({UDItype __w; \
d83dd29a
TG
576 __asm__ ("meid %2,%0" \
577 : "=g" (__w) \
e6c5404d
TG
578 : "%0" ((USItype)(u)), \
579 "g" ((USItype)(v))); \
d83dd29a 580 __w; })
f71c71f1
TG
581#define udiv_qrnnd(q, r, n1, n0, d) \
582 ({union {UDItype __ll; \
583 struct {USItype __l, __h;} __i; \
584 } __xx; \
585 __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
586 __asm__ ("deid %2,%0" \
587 : "=g" (__xx.__ll) \
588 : "0" (__xx.__ll), \
589 "g" ((USItype)(d))); \
590 (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
e1a79915
RS
591#endif /* __ns32000__ */
592
325877e1 593#if (defined (_ARCH_PPC) || defined (_IBMR2)) && W_TYPE_SIZE == 32
f71c71f1
TG
594#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
595 do { \
596 if (__builtin_constant_p (bh) && (bh) == 0) \
597 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
598 : "=r" ((USItype)(sh)), \
599 "=&r" ((USItype)(sl)) \
600 : "%r" ((USItype)(ah)), \
601 "%r" ((USItype)(al)), \
602 "rI" ((USItype)(bl))); \
603 else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
604 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
605 : "=r" ((USItype)(sh)), \
606 "=&r" ((USItype)(sl)) \
607 : "%r" ((USItype)(ah)), \
608 "%r" ((USItype)(al)), \
609 "rI" ((USItype)(bl))); \
610 else \
611 __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
612 : "=r" ((USItype)(sh)), \
613 "=&r" ((USItype)(sl)) \
614 : "%r" ((USItype)(ah)), \
615 "r" ((USItype)(bh)), \
616 "%r" ((USItype)(al)), \
617 "rI" ((USItype)(bl))); \
618 } while (0)
619#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
620 do { \
621 if (__builtin_constant_p (ah) && (ah) == 0) \
325877e1 622 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
f71c71f1
TG
623 : "=r" ((USItype)(sh)), \
624 "=&r" ((USItype)(sl)) \
625 : "r" ((USItype)(bh)), \
626 "rI" ((USItype)(al)), \
627 "r" ((USItype)(bl))); \
628 else if (__builtin_constant_p (ah) && (ah) ==~(USItype) 0) \
325877e1 629 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
f71c71f1
TG
630 : "=r" ((USItype)(sh)), \
631 "=&r" ((USItype)(sl)) \
632 : "r" ((USItype)(bh)), \
633 "rI" ((USItype)(al)), \
634 "r" ((USItype)(bl))); \
635 else if (__builtin_constant_p (bh) && (bh) == 0) \
636 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
637 : "=r" ((USItype)(sh)), \
638 "=&r" ((USItype)(sl)) \
639 : "r" ((USItype)(ah)), \
640 "rI" ((USItype)(al)), \
641 "r" ((USItype)(bl))); \
642 else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
643 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
644 : "=r" ((USItype)(sh)), \
645 "=&r" ((USItype)(sl)) \
646 : "r" ((USItype)(ah)), \
647 "rI" ((USItype)(al)), \
648 "r" ((USItype)(bl))); \
649 else \
650 __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
651 : "=r" ((USItype)(sh)), \
652 "=&r" ((USItype)(sl)) \
653 : "r" ((USItype)(ah)), \
654 "r" ((USItype)(bh)), \
655 "rI" ((USItype)(al)), \
656 "r" ((USItype)(bl))); \
657 } while (0)
658#define count_leading_zeros(count, x) \
659 __asm__ ("{cntlz|cntlzw} %0,%1" \
660 : "=r" ((USItype)(count)) \
661 : "r" ((USItype)(x)))
325877e1 662#if defined (_ARCH_PPC)
f71c71f1
TG
663#define umul_ppmm(ph, pl, m0, m1) \
664 do { \
665 USItype __m0 = (m0), __m1 = (m1); \
666 __asm__ ("mulhwu %0,%1,%2" \
667 : "=r" ((USItype) ph) \
668 : "%r" (__m0), \
669 "r" (__m1)); \
670 (pl) = __m0 * __m1; \
671 } while (0)
672#define UMUL_TIME 15
673#define smul_ppmm(ph, pl, m0, m1) \
674 do { \
675 SItype __m0 = (m0), __m1 = (m1); \
676 __asm__ ("mulhw %0,%1,%2" \
677 : "=r" ((SItype) ph) \
678 : "%r" (__m0), \
679 "r" (__m1)); \
680 (pl) = __m0 * __m1; \
681 } while (0)
682#define SMUL_TIME 14
683#define UDIV_TIME 120
684#else
685#define umul_ppmm(xh, xl, m0, m1) \
686 do { \
687 USItype __m0 = (m0), __m1 = (m1); \
688 __asm__ ("mul %0,%2,%3" \
689 : "=r" ((USItype)(xh)), \
690 "=q" ((USItype)(xl)) \
691 : "r" (__m0), \
692 "r" (__m1)); \
693 (xh) += ((((SItype) __m0 >> 31) & __m1) \
694 + (((SItype) __m1 >> 31) & __m0)); \
695 } while (0)
696#define UMUL_TIME 8
697#define smul_ppmm(xh, xl, m0, m1) \
698 __asm__ ("mul %0,%2,%3" \
699 : "=r" ((SItype)(xh)), \
700 "=q" ((SItype)(xl)) \
701 : "r" (m0), \
702 "r" (m1))
703#define SMUL_TIME 4
704#define sdiv_qrnnd(q, r, nh, nl, d) \
705 __asm__ ("div %0,%2,%4" \
706 : "=r" ((SItype)(q)), "=q" ((SItype)(r)) \
707 : "r" ((SItype)(nh)), "1" ((SItype)(nl)), "r" ((SItype)(d)))
708#define UDIV_TIME 100
709#endif
710#endif /* Power architecture variants. */
711
e1a79915
RS
712#if defined (__pyr__)
713#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
714 __asm__ ("addw %5,%1
715 addwc %3,%0" \
e6c5404d
TG
716 : "=r" ((USItype)(sh)), \
717 "=&r" ((USItype)(sl)) \
718 : "%0" ((USItype)(ah)), \
719 "g" ((USItype)(bh)), \
720 "%1" ((USItype)(al)), \
721 "g" ((USItype)(bl)))
e1a79915
RS
722#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
723 __asm__ ("subw %5,%1
724 subwb %3,%0" \
e6c5404d
TG
725 : "=r" ((USItype)(sh)), \
726 "=&r" ((USItype)(sl)) \
727 : "0" ((USItype)(ah)), \
728 "g" ((USItype)(bh)), \
729 "1" ((USItype)(al)), \
730 "g" ((USItype)(bl)))
e1a79915
RS
731/* This insn doesn't work on ancient pyramids. */
732#define umul_ppmm(w1, w0, u, v) \
e6c5404d
TG
733 ({union { \
734 UDItype __ll; \
735 struct {USItype __h, __l;} __i; \
736 } __xx; \
737 __xx.__i.__l = u; \
d83dd29a 738 __asm__ ("uemul %3,%0" \
e6c5404d
TG
739 : "=r" (__xx.__i.__h), \
740 "=r" (__xx.__i.__l) \
741 : "1" (__xx.__i.__l), \
f71c71f1 742 "g" ((USItype)(v))); \
e6c5404d
TG
743 (w1) = __xx.__i.__h; \
744 (w0) = __xx.__i.__l;})
e1a79915
RS
745#endif /* __pyr__ */
746
747#if defined (__ibm032__) /* RT/ROMP */
748#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
749 __asm__ ("a %1,%5
750 ae %0,%3" \
e6c5404d
TG
751 : "=r" ((USItype)(sh)), \
752 "=&r" ((USItype)(sl)) \
753 : "%0" ((USItype)(ah)), \
754 "r" ((USItype)(bh)), \
755 "%1" ((USItype)(al)), \
756 "r" ((USItype)(bl)))
e1a79915
RS
757#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
758 __asm__ ("s %1,%5
759 se %0,%3" \
e6c5404d
TG
760 : "=r" ((USItype)(sh)), \
761 "=&r" ((USItype)(sl)) \
762 : "0" ((USItype)(ah)), \
763 "r" ((USItype)(bh)), \
764 "1" ((USItype)(al)), \
765 "r" ((USItype)(bl)))
e1a79915
RS
766#define umul_ppmm(ph, pl, m0, m1) \
767 do { \
e6c5404d 768 USItype __m0 = (m0), __m1 = (m1); \
e1a79915
RS
769 __asm__ ( \
770 "s r2,r2
771 mts r10,%2
772 m r2,%3
773 m r2,%3
774 m r2,%3
775 m r2,%3
776 m r2,%3
777 m r2,%3
778 m r2,%3
779 m r2,%3
780 m r2,%3
781 m r2,%3
782 m r2,%3
783 m r2,%3
784 m r2,%3
785 m r2,%3
786 m r2,%3
787 m r2,%3
788 cas %0,r2,r0
789 mfs r10,%1" \
e6c5404d
TG
790 : "=r" ((USItype)(ph)), \
791 "=r" ((USItype)(pl)) \
d83dd29a
TG
792 : "%r" (__m0), \
793 "r" (__m1) \
794 : "r2"); \
e6c5404d
TG
795 (ph) += ((((SItype) __m0 >> 31) & __m1) \
796 + (((SItype) __m1 >> 31) & __m0)); \
e1a79915 797 } while (0)
d83dd29a
TG
798#define UMUL_TIME 20
799#define UDIV_TIME 200
e1a79915
RS
800#define count_leading_zeros(count, x) \
801 do { \
802 if ((x) >= 0x10000) \
803 __asm__ ("clz %0,%1" \
e6c5404d
TG
804 : "=r" ((USItype)(count)) \
805 : "r" ((USItype)(x) >> 16)); \
e1a79915
RS
806 else \
807 { \
808 __asm__ ("clz %0,%1" \
e6c5404d
TG
809 : "=r" ((USItype)(count)) \
810 : "r" ((USItype)(x))); \
e1a79915
RS
811 (count) += 16; \
812 } \
813 } while (0)
814#endif
815
816#if defined (__sparc__)
817#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
f71c71f1
TG
818 __asm__ ("addcc %r4,%5,%1
819 addx %r2,%3,%0" \
e6c5404d
TG
820 : "=r" ((USItype)(sh)), \
821 "=&r" ((USItype)(sl)) \
f71c71f1 822 : "%rJ" ((USItype)(ah)), \
e6c5404d 823 "rI" ((USItype)(bh)), \
f71c71f1 824 "%rJ" ((USItype)(al)), \
e6c5404d 825 "rI" ((USItype)(bl)) \
d83dd29a 826 __CLOBBER_CC)
e1a79915 827#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
f71c71f1
TG
828 __asm__ ("subcc %r4,%5,%1
829 subx %r2,%3,%0" \
e6c5404d
TG
830 : "=r" ((USItype)(sh)), \
831 "=&r" ((USItype)(sl)) \
f71c71f1 832 : "rJ" ((USItype)(ah)), \
e6c5404d 833 "rI" ((USItype)(bh)), \
f71c71f1 834 "rJ" ((USItype)(al)), \
e6c5404d 835 "rI" ((USItype)(bl)) \
d83dd29a 836 __CLOBBER_CC)
d9d3acb6 837#if defined (__sparc_v8__)
e1a79915
RS
838#define umul_ppmm(w1, w0, u, v) \
839 __asm__ ("umul %2,%3,%1;rd %%y,%0" \
e6c5404d
TG
840 : "=r" ((USItype)(w1)), \
841 "=r" ((USItype)(w0)) \
842 : "r" ((USItype)(u)), \
843 "r" ((USItype)(v)))
e1a79915
RS
844#define udiv_qrnnd(q, r, n1, n0, d) \
845 __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
e6c5404d
TG
846 : "=&r" ((USItype)(q)), \
847 "=&r" ((USItype)(r)) \
848 : "r" ((USItype)(n1)), \
849 "r" ((USItype)(n0)), \
850 "r" ((USItype)(d)))
e1a79915 851#else
d9d3acb6
JW
852#if defined (__sparclite__)
853/* This has hardware multiply but not divide. It also has two additional
854 instructions scan (ffs from high bit) and divscc. */
855#define umul_ppmm(w1, w0, u, v) \
856 __asm__ ("umul %2,%3,%1;rd %%y,%0" \
abe1fe6c
TG
857 : "=r" ((USItype)(w1)), \
858 "=r" ((USItype)(w0)) \
859 : "r" ((USItype)(u)), \
860 "r" ((USItype)(v)))
d9d3acb6
JW
861#define udiv_qrnnd(q, r, n1, n0, d) \
862 __asm__ ("! Inlined udiv_qrnnd
863 wr %%g0,%2,%%y ! Not a delayed write for sparclite
864 tst %%g0
865 divscc %3,%4,%%g1
866 divscc %%g1,%4,%%g1
867 divscc %%g1,%4,%%g1
868 divscc %%g1,%4,%%g1
869 divscc %%g1,%4,%%g1
870 divscc %%g1,%4,%%g1
871 divscc %%g1,%4,%%g1
872 divscc %%g1,%4,%%g1
873 divscc %%g1,%4,%%g1
874 divscc %%g1,%4,%%g1
875 divscc %%g1,%4,%%g1
876 divscc %%g1,%4,%%g1
877 divscc %%g1,%4,%%g1
878 divscc %%g1,%4,%%g1
879 divscc %%g1,%4,%%g1
880 divscc %%g1,%4,%%g1
881 divscc %%g1,%4,%%g1
882 divscc %%g1,%4,%%g1
883 divscc %%g1,%4,%%g1
884 divscc %%g1,%4,%%g1
885 divscc %%g1,%4,%%g1
886 divscc %%g1,%4,%%g1
887 divscc %%g1,%4,%%g1
888 divscc %%g1,%4,%%g1
889 divscc %%g1,%4,%%g1
890 divscc %%g1,%4,%%g1
891 divscc %%g1,%4,%%g1
892 divscc %%g1,%4,%%g1
893 divscc %%g1,%4,%%g1
894 divscc %%g1,%4,%%g1
895 divscc %%g1,%4,%%g1
896 divscc %%g1,%4,%0
897 rd %%y,%1
898 bl,a 1f
899 add %1,%4,%1
9001: ! End of inline udiv_qrnnd" \
abe1fe6c
TG
901 : "=r" ((USItype)(q)), \
902 "=r" ((USItype)(r)) \
903 : "r" ((USItype)(n1)), \
904 "r" ((USItype)(n0)), \
905 "rI" ((USItype)(d)) \
210d97af 906 : "%g1" __AND_CLOBBER_CC)
d9d3acb6
JW
907#define UDIV_TIME 37
908#define count_leading_zeros(count, x) \
909 __asm__ ("scan %1,0,%0" \
abe1fe6c
TG
910 : "=r" ((USItype)(x)) \
911 : "r" ((USItype)(count)))
d9d3acb6 912#else
e1a79915
RS
913/* SPARC without integer multiplication and divide instructions.
914 (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
915#define umul_ppmm(w1, w0, u, v) \
916 __asm__ ("! Inlined umul_ppmm
917 wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr
918 sra %3,31,%%g2 ! Don't move this insn
919 and %2,%%g2,%%g2 ! Don't move this insn
920 andcc %%g0,0,%%g1 ! Don't move this insn
921 mulscc %%g1,%3,%%g1
922 mulscc %%g1,%3,%%g1
923 mulscc %%g1,%3,%%g1
924 mulscc %%g1,%3,%%g1
925 mulscc %%g1,%3,%%g1
926 mulscc %%g1,%3,%%g1
927 mulscc %%g1,%3,%%g1
928 mulscc %%g1,%3,%%g1
929 mulscc %%g1,%3,%%g1
930 mulscc %%g1,%3,%%g1
931 mulscc %%g1,%3,%%g1
932 mulscc %%g1,%3,%%g1
933 mulscc %%g1,%3,%%g1
934 mulscc %%g1,%3,%%g1
935 mulscc %%g1,%3,%%g1
936 mulscc %%g1,%3,%%g1
937 mulscc %%g1,%3,%%g1
938 mulscc %%g1,%3,%%g1
939 mulscc %%g1,%3,%%g1
940 mulscc %%g1,%3,%%g1
941 mulscc %%g1,%3,%%g1
942 mulscc %%g1,%3,%%g1
943 mulscc %%g1,%3,%%g1
944 mulscc %%g1,%3,%%g1
945 mulscc %%g1,%3,%%g1
946 mulscc %%g1,%3,%%g1
947 mulscc %%g1,%3,%%g1
948 mulscc %%g1,%3,%%g1
949 mulscc %%g1,%3,%%g1
950 mulscc %%g1,%3,%%g1
951 mulscc %%g1,%3,%%g1
952 mulscc %%g1,%3,%%g1
953 mulscc %%g1,0,%%g1
954 add %%g1,%%g2,%0
955 rd %%y,%1" \
e6c5404d
TG
956 : "=r" ((USItype)(w1)), \
957 "=r" ((USItype)(w0)) \
958 : "%rI" ((USItype)(u)), \
959 "r" ((USItype)(v)) \
d83dd29a 960 : "%g1", "%g2" __AND_CLOBBER_CC)
e1a79915
RS
961#define UMUL_TIME 39 /* 39 instructions */
962/* It's quite necessary to add this much assembler for the sparc.
963 The default udiv_qrnnd (in C) is more than 10 times slower! */
964#define udiv_qrnnd(q, r, n1, n0, d) \
965 __asm__ ("! Inlined udiv_qrnnd
966 mov 32,%%g1
967 subcc %1,%2,%%g0
9681: bcs 5f
969 addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
970 sub %1,%2,%1 ! this kills msb of n
971 addx %1,%1,%1 ! so this can't give carry
972 subcc %%g1,1,%%g1
9732: bne 1b
974 subcc %1,%2,%%g0
975 bcs 3f
976 addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
977 b 3f
978 sub %1,%2,%1 ! this kills msb of n
9794: sub %1,%2,%1
9805: addxcc %1,%1,%1
981 bcc 2b
982 subcc %%g1,1,%%g1
983! Got carry from n. Subtract next step to cancel this carry.
984 bne 4b
985 addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb
986 sub %1,%2,%1
9873: xnor %0,0,%0
988 ! End of inline udiv_qrnnd" \
e6c5404d
TG
989 : "=&r" ((USItype)(q)), \
990 "=&r" ((USItype)(r)) \
991 : "r" ((USItype)(d)), \
992 "1" ((USItype)(n1)), \
993 "0" ((USItype)(n0)) : "%g1" __AND_CLOBBER_CC)
e1a79915 994#define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */
d9d3acb6
JW
995#endif /* __sparclite__ */
996#endif /* __sparc_v8__ */
d83dd29a 997#endif /* __sparc__ */
e1a79915
RS
998
999#if defined (__vax__)
1000#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1001 __asm__ ("addl2 %5,%1
1002 adwc %3,%0" \
e6c5404d
TG
1003 : "=g" ((USItype)(sh)), \
1004 "=&g" ((USItype)(sl)) \
1005 : "%0" ((USItype)(ah)), \
1006 "g" ((USItype)(bh)), \
1007 "%1" ((USItype)(al)), \
1008 "g" ((USItype)(bl)))
e1a79915
RS
1009#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1010 __asm__ ("subl2 %5,%1
1011 sbwc %3,%0" \
e6c5404d
TG
1012 : "=g" ((USItype)(sh)), \
1013 "=&g" ((USItype)(sl)) \
1014 : "0" ((USItype)(ah)), \
1015 "g" ((USItype)(bh)), \
1016 "1" ((USItype)(al)), \
1017 "g" ((USItype)(bl)))
e1a79915
RS
1018#define umul_ppmm(xh, xl, m0, m1) \
1019 do { \
e6c5404d
TG
1020 union { \
1021 UDItype __ll; \
1022 struct {USItype __l, __h;} __i; \
1023 } __xx; \
1024 USItype __m0 = (m0), __m1 = (m1); \
e1a79915 1025 __asm__ ("emul %1,%2,$0,%0" \
e6c5404d 1026 : "=r" (__xx.__ll) \
d83dd29a
TG
1027 : "g" (__m0), \
1028 "g" (__m1)); \
e6c5404d
TG
1029 (xh) = __xx.__i.__h; \
1030 (xl) = __xx.__i.__l; \
1031 (xh) += ((((SItype) __m0 >> 31) & __m1) \
1032 + (((SItype) __m1 >> 31) & __m0)); \
e1a79915 1033 } while (0)
f71c71f1
TG
1034#define sdiv_qrnnd(q, r, n1, n0, d) \
1035 do { \
1036 union {DItype __ll; \
1037 struct {SItype __l, __h;} __i; \
1038 } __xx; \
1039 __xx.__i.__h = n1; __xx.__i.__l = n0; \
1040 __asm__ ("ediv %3,%2,%0,%1" \
1041 : "=g" (q), "=g" (r) \
1042 : "g" (__n1n0.ll), "g" (d)); \
1043 } while (0)
e1a79915
RS
1044#endif /* __vax__ */
1045
1046#endif /* __GNUC__ */
1047
1048/* If this machine has no inline assembler, use C macros. */
1049
1050#if !defined (add_ssaaaa)
1051#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1052 do { \
e6c5404d 1053 USItype __x; \
e1a79915
RS
1054 __x = (al) + (bl); \
1055 (sh) = (ah) + (bh) + (__x < (al)); \
1056 (sl) = __x; \
1057 } while (0)
1058#endif
1059
1060#if !defined (sub_ddmmss)
1061#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1062 do { \
e6c5404d 1063 USItype __x; \
e1a79915
RS
1064 __x = (al) - (bl); \
1065 (sh) = (ah) - (bh) - (__x > (al)); \
1066 (sl) = __x; \
1067 } while (0)
1068#endif
1069
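/* In the C fall-back macros above, carry out of the low-word addition is
   detected with the unsigned comparison (__x < (al)): the truncated sum is
   smaller than an addend exactly when the addition wrapped.  Likewise,
   (__x > (al)) detects a borrow out of the low-word subtraction.  */
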
1070#if !defined (umul_ppmm)
1071#define umul_ppmm(w1, w0, u, v) \
1072 do { \
e6c5404d
TG
1073 USItype __x0, __x1, __x2, __x3; \
1074 USItype __ul, __vl, __uh, __vh; \
e1a79915
RS
1075 \
1076 __ul = __ll_lowpart (u); \
1077 __uh = __ll_highpart (u); \
1078 __vl = __ll_lowpart (v); \
1079 __vh = __ll_highpart (v); \
1080 \
e6c5404d
TG
1081 __x0 = (USItype) __ul * __vl; \
1082 __x1 = (USItype) __ul * __vh; \
1083 __x2 = (USItype) __uh * __vl; \
1084 __x3 = (USItype) __uh * __vh; \
e1a79915
RS
1085 \
1086 __x1 += __ll_highpart (__x0);/* this can't give carry */ \
1087 __x1 += __x2; /* but this indeed can */ \
1088 if (__x1 < __x2) /* did we get it? */ \
1089 __x3 += __ll_B; /* yes, add it in the proper pos. */ \
1090 \
1091 (w1) = __x3 + __ll_highpart (__x1); \
1092 (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
1093 } while (0)
1094#endif
1095
1096#if !defined (__umulsidi3)
1097#define __umulsidi3(u, v) \
e6c5404d 1098 ({DIunion __w; \
e1a79915
RS
1099 umul_ppmm (__w.s.high, __w.s.low, u, v); \
1100 __w.ll; })
1101#endif
1102
d83dd29a
TG
1103/* Define this unconditionally, so it can be used for debugging. */
1104#define __udiv_qrnnd_c(q, r, n1, n0, d) \
e1a79915 1105 do { \
e6c5404d
TG
1106 USItype __d1, __d0, __q1, __q0; \
1107 USItype __r1, __r0, __m; \
e1a79915
RS
1108 __d1 = __ll_highpart (d); \
1109 __d0 = __ll_lowpart (d); \
1110 \
1111 __r1 = (n1) % __d1; \
1112 __q1 = (n1) / __d1; \
e6c5404d 1113 __m = (USItype) __q1 * __d0; \
e1a79915
RS
1114 __r1 = __r1 * __ll_B | __ll_highpart (n0); \
1115 if (__r1 < __m) \
1116 { \
1117 __q1--, __r1 += (d); \
1118 if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
1119 if (__r1 < __m) \
1120 __q1--, __r1 += (d); \
1121 } \
1122 __r1 -= __m; \
1123 \
1124 __r0 = __r1 % __d1; \
1125 __q0 = __r1 / __d1; \
e6c5404d 1126 __m = (USItype) __q0 * __d0; \
e1a79915
RS
1127 __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
1128 if (__r0 < __m) \
1129 { \
1130 __q0--, __r0 += (d); \
1131 if (__r0 >= (d)) \
1132 if (__r0 < __m) \
1133 __q0--, __r0 += (d); \
1134 } \
1135 __r0 -= __m; \
1136 \
e6c5404d 1137 (q) = (USItype) __q1 * __ll_B | __q0; \
e1a79915
RS
1138 (r) = __r0; \
1139 } while (0)
1814cfd9
TG
1140
1141/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
9077d7fa 1142 __udiv_w_sdiv (defined in libgcc or elsewhere). */
1814cfd9
TG
1143#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
1144#define udiv_qrnnd(q, r, nh, nl, d) \
1145 do { \
1146 USItype __r; \
9077d7fa 1147 (q) = __udiv_w_sdiv (&__r, nh, nl, d); \
1814cfd9
TG
1148 (r) = __r; \
1149 } while (0)
1150#endif
1151
d83dd29a
TG
1152/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */
1153#if !defined (udiv_qrnnd)
1154#define UDIV_NEEDS_NORMALIZATION 1
1155#define udiv_qrnnd __udiv_qrnnd_c
e1a79915
RS
1156#endif
1157
1158#if !defined (count_leading_zeros)
e6c5404d 1159extern const UQItype __clz_tab[];
e1a79915
RS
1160#define count_leading_zeros(count, x) \
1161 do { \
e6c5404d
TG
1162 USItype __xr = (x); \
1163 USItype __a; \
e1a79915 1164 \
cf7d8fd0 1165 if (SI_TYPE_SIZE <= 32) \
d83dd29a
TG
1166 { \
1167 __a = __xr < (1<<2*__BITS4) \
1168 ? (__xr < (1<<__BITS4) ? 0 : __BITS4) \
1169 : (__xr < (1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
1170 } \
1171 else \
1172 { \
cf7d8fd0 1173 for (__a = SI_TYPE_SIZE - 8; __a > 0; __a -= 8) \
d83dd29a
TG
1174 if (((__xr >> __a) & 0xff) != 0) \
1175 break; \
1176 } \
e1a79915 1177 \
cf7d8fd0 1178 (count) = SI_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
e1a79915
RS
1179 } while (0)
1180#endif
1181
1182#ifndef UDIV_NEEDS_NORMALIZATION
1183#define UDIV_NEEDS_NORMALIZATION 0
1184#endif
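
/* Illustrative sketch, not part of the original header: when
   UDIV_NEEDS_NORMALIZATION is 1, a caller is expected to shift the divisor
   (and the two-word numerator) left until the divisor's most significant
   bit is set before using udiv_qrnnd, and to shift the remainder back
   afterwards.  The function name is made up; N1 must be less than D so the
   quotient fits in one word.  Guarded with #if 0 so it has no effect.  */
#if 0
static USItype
__example_udiv (USItype n1, USItype n0, USItype d, USItype *rp)
{
  USItype q, r;
#if UDIV_NEEDS_NORMALIZATION
  USItype shift;
  count_leading_zeros (shift, d);
  if (shift != 0)
    {
      d <<= shift;
      n1 = (n1 << shift) | (n0 >> (SI_TYPE_SIZE - shift));
      n0 <<= shift;
    }
  udiv_qrnnd (q, r, n1, n0, d);
  r >>= shift;			/* undo the normalization of the remainder */
#else
  udiv_qrnnd (q, r, n1, n0, d);
#endif
  *rp = r;
  return q;
}
#endif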