/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
   Copyright (C) 1991 Free Software Foundation, Inc.

   This definition file is free software; you can redistribute it
   and/or modify it under the terms of the GNU General Public
   License as published by the Free Software Foundation; either
   version 2, or (at your option) any later version.

   This definition file is distributed in the hope that it will be
   useful, but WITHOUT ANY WARRANTY; without even the implied
   warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   See the GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  */

#ifndef LONG_TYPE_SIZE
#define LONG_TYPE_SIZE 32
#endif

#define __BITS4 (LONG_TYPE_SIZE / 4)
#define __ll_B (1L << (LONG_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((unsigned long int) (t) % __ll_B)
#define __ll_highpart(t) ((unsigned long int) (t) / __ll_B)

/* Define auxiliary asm macros.

   1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand)
   multiplies two unsigned long integers MULTIPLIER and MULTIPLICAND,
   and generates a two-word unsigned product in HIGH_PROD and
   LOW_PROD.

   2) __umulsidi3(a,b) multiplies two unsigned long integers A and B,
   and returns a long long product.  This is just a variant of umul_ppmm.

   3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
   denominator) divides a two-word unsigned integer, composed of the
   integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and
   places the quotient in QUOTIENT and the remainder in REMAINDER.
   HIGH_NUMERATOR must be less than DENOMINATOR for correct operation.
   If, in addition, the macro requires the most significant bit of
   DENOMINATOR to be 1, the pre-processor symbol UDIV_NEEDS_NORMALIZATION
   is defined to 1.

   4) count_leading_zeros(count, x) counts the number of zero-bits from
   the msb to the first non-zero bit.  This is the number of steps X
   needs to be shifted left to set the msb.  Undefined for X == 0.

   5) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
   high_addend_2, low_addend_2) adds two two-word unsigned integers,
   composed of HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and
   LOW_ADDEND_2 respectively.  The result is placed in HIGH_SUM and
   LOW_SUM.  Overflow (i.e. carry out) is not stored anywhere, and is
   lost.

   6) sub_ddmmss(high_difference, low_difference, high_minuend,
   low_minuend, high_subtrahend, low_subtrahend) subtracts two
   two-word unsigned integers, composed of HIGH_MINUEND and
   LOW_MINUEND, and HIGH_SUBTRAHEND and LOW_SUBTRAHEND
   respectively.  The result is placed in HIGH_DIFFERENCE and
   LOW_DIFFERENCE.  Overflow (i.e. borrow out) is not stored anywhere,
   and is lost.

   If any of these macros are left undefined for a particular CPU,
   C macros are used.  A compiled-out usage sketch for these macros
   appears just below this comment.  */

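/* Usage sketch -- an editorial illustration rather than part of the
   original interface.  It is compiled out, assumes LONG_TYPE_SIZE == 32,
   and the function name __ll_example is invented for this sketch.  */
#if 0
static void
__ll_example (unsigned long int u, unsigned long int v)
{
  unsigned long int ph, pl, sh, sl, q, r, c;

  umul_ppmm (ph, pl, u, v);		/* ph:pl = the two-word product u*v.  */
  add_ssaaaa (sh, sl, ph, pl, 0, 1);	/* sh:sl = ph:pl + 1; carry out is lost.  */
  sub_ddmmss (sh, sl, sh, sl, 0, 1);	/* sh:sl = ph:pl again.  */
  count_leading_zeros (c, v);		/* undefined if v == 0.  */
  /* For v != 0 the high product word ph is always less than v, so the
     quotient fits in one word.  If UDIV_NEEDS_NORMALIZATION is 1, the
     divisor and dividend must first be shifted left by C so that the
     divisor's most significant bit is set.  */
  udiv_qrnnd (q, r, ph, pl, v);		/* q = (ph:pl)/v, r = (ph:pl)%v.  */
}
#endif
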
/* The CPUs come in alphabetical order below.

   Please add support for more CPUs here, or improve the current support
   for the CPUs below!
   (E.g. WE32100, HP-PA (xmpyu?), i960, IBM360, TRON.)  */

#if defined (__GNUC__) && !defined (NO_ASM)

#if defined (__a29k__) || defined (___AM29K__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add %1,%4,%5
	addc %0,%2,%3" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "%r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)), \
	     "%r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub %1,%4,%5
	subc %0,%2,%3" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)), \
	     "r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    unsigned long int __m0 = (m0), __m1 = (m1); \
    __asm__ ("multiplu %0,%1,%2" : "=r" ((unsigned long int)(xl)) \
	     : "r" (__m0), "r" (__m1)); \
    __asm__ ("multmu %0,%1,%2" : "=r" ((unsigned long int)(xh)) \
	     : "r" (__m0), "r" (__m1)); \
  } while (0)
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("dividu %0,%3,%4" \
	   : "=r" ((unsigned long int)(q)), "=q" ((unsigned long int)(r)) \
	   : "1" ((unsigned long int)(n1)), "r" ((unsigned long int)(n0)), \
	     "r" ((unsigned long int)(d)))
#define count_leading_zeros(count, x) \
  __asm__ ("clz %0,%1" : "=r" ((unsigned long int)(count)) \
	   : "r" ((unsigned long int)(x)))
#endif /* __a29k__ */

#if defined (__arm__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("adds %1,%4,%5
	adc %0,%2,%3" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "%r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)), \
	     "%r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subs %1,%4,%5
	sbc %0,%2,%3" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)), \
	     "r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
#endif /* __arm__ */

#if defined (__gmicro__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add.w %5,%1
	addx %3,%0" \
	   : "=g" ((unsigned long int)(sh)), "=&g" ((unsigned long int)(sl)) \
	   : "%0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
	     "%1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub.w %5,%1
	subx %3,%0" \
	   : "=g" ((unsigned long int)(sh)), "=&g" ((unsigned long int)(sl)) \
	   : "0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
	     "1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
#define umul_ppmm(ph, pl, m0, m1) \
  __asm__ ("mulx %3,%0,%1" \
	   : "=g" ((unsigned long int)(ph)), "=r" ((unsigned long int)(pl)) \
	   : "%0" ((unsigned long int)(m0)), "g" ((unsigned long int)(m1)))
#define udiv_qrnnd(q, r, nh, nl, d) \
  __asm__ ("divx %4,%0,%1" \
	   : "=g" ((unsigned long int)(q)), "=r" ((unsigned long int)(r)) \
	   : "1" ((unsigned long int)(nh)), "0" ((unsigned long int)(nl)), \
	     "g" ((unsigned long int)(d)))
#define count_leading_zeros(count, x) \
  __asm__ ("bsch/1 %1,%0" \
	   : "=g" (count) \
	   : "g" ((unsigned long int)(x)), "0" (0UL))
#endif

#if defined (__hppa)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add %4,%5,%1
	addc %2,%3,%0" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "%r" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)), \
	     "%r" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub %5,%4,%1
	subb %3,%2,%0" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "r" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)), \
	     "r" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
#endif

#if defined (__i386__) || defined (__i486__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl %5,%1
	adcl %3,%0" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "%0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
	     "%1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl %5,%1
	sbbl %3,%0" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
	     "1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mull %3" \
	   : "=a" ((unsigned long int)(w0)), "=d" ((unsigned long int)(w1)) \
	   : "%0" ((unsigned long int)(u)), "rm" ((unsigned long int)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divl %4" \
	   : "=a" ((unsigned long int)(q)), "=d" ((unsigned long int)(r)) \
	   : "0" ((unsigned long int)(n0)), "1" ((unsigned long int)(n1)), \
	     "rm" ((unsigned long int)(d)))
#define count_leading_zeros(count, x) \
  do { \
    unsigned long int __cbtmp; \
    __asm__ ("bsrl %1,%0" \
	     : "=r" (__cbtmp) : "rm" ((unsigned long int)(x))); \
    (count) = __cbtmp ^ 31; \
  } while (0)
#endif /* 80x86 */

#if defined (__i860__)
#if 0
/* Make sure these patterns really improve the code before
   switching them on.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    union \
      { \
	long long int ll; \
	struct {unsigned long int l, h;} i; \
      } __a, __b, __s; \
    __a.i.l = (al); __a.i.h = (ah); \
    __b.i.l = (bl); __b.i.h = (bh); \
    __asm__ ("fiadd.dd %1,%2,%0" \
	     : "=f" (__s.ll) \
	     : "%f" (__a.ll), "f" (__b.ll)); \
    (sh) = __s.i.h; (sl) = __s.i.l; \
  } while (0)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    union \
      { \
	long long int ll; \
	struct {unsigned long int l, h;} i; \
      } __a, __b, __s; \
    __a.i.l = (al); __a.i.h = (ah); \
    __b.i.l = (bl); __b.i.h = (bh); \
    __asm__ ("fisub.dd %1,%2,%0" \
	     : "=f" (__s.ll) \
	     : "%f" (__a.ll), "f" (__b.ll)); \
    (sh) = __s.i.h; (sl) = __s.i.l; \
  } while (0)
#endif
#endif /* __i860__ */

#if defined (___IBMR2__) /* IBM RS6000 */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("a %1,%4,%5
	ae %0,%2,%3" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "%r" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)), \
	     "%r" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sf %1,%5,%4
	sfe %0,%3,%2" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "r" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)), \
	     "r" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    unsigned long int __m0 = (m0), __m1 = (m1); \
    __asm__ ("mul %0,%2,%3" \
	     : "=r" ((unsigned long int)(xh)), "=q" ((unsigned long int)(xl)) \
	     : "r" (__m0), "r" (__m1)); \
    (xh) += ((((signed long int) __m0 >> 31) & __m1) \
	     + (((signed long int) __m1 >> 31) & __m0)); \
  } while (0)
#define UMUL_TIME 8
#if 0 /* Overflow for nh>=FFFFFFFE, d==FFFFFFFF.  Find a good fix later.  */
#define udiv_qrnnd(q, r, nh, nl, d) \
  do { /* Use the signed "div" insn, and adjust the result. */ \
    unsigned long int __q, __r, __nh, __nl, __d, __xh, __xl; \
    __nh = 0; \
    __nl = (((unsigned long int)(nh)) << 30) | ((unsigned long int)(nl) >> 2); \
    __nh = (unsigned long int)(nh) >> 2; \
    __d = ((unsigned long int)(d) >> 1); \
    __asm__ ("div %0,%2,%4" \
	     : "=r" (__q), "=q" (__r) \
	     : "r" (__nh), "1" (__nl), "r" (__d)); \
    __q <<= 1; \
    __asm__ ("mul %0,%2,%3" \
	     : "=r" (__xh), "=q" (__xl) \
	     : "r" (__q), "r" ((unsigned long int)(d))); \
    __xh += (((signed long int) __q >> 31) & (d)) + __q; \
    if ((nh) < __xh || ((nh) == __xh && (nl) < __xl)) \
      { \
	do \
	  { \
	    sub_ddmmss (__xh, __xl, __xh, __xl, 0, (d)); \
	    __q--; \
	  } \
	while ((nh) < __xh || ((nh) == __xh && (nl) < __xl)); \
	__xl = (nl) - __xl; \
      } \
    else \
      { \
	sub_ddmmss (__xh, __xl, (nh), (nl), __xh, __xl); \
	if (__xh != 0) \
	  { \
	    do \
	      { \
		sub_ddmmss (__xh, __xl, __xh, __xl, 0, (d)); \
		__q++; \
	      } \
	    while (__xh != 0); \
	  } \
	if (__xl >= (d)) \
	  { \
	    __xl -= (d); \
	    __q++; \
	  } \
      } \
    (q) = __q; \
    (r) = __xl; \
  } while (0)
#define UDIV_TIME 40
#define UDIV_NEEDS_NORMALIZATION 1
#endif
#define count_leading_zeros(count, x) \
  __asm__ ("cntlz %0,%1" \
	   : "=r" ((unsigned long int)(count)) : "r" ((unsigned long int)(x)))
#endif /* ___IBMR2__ */

#if defined (__mc68000__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add%.l %5,%1
	addx%.l %3,%0" \
	   : "=d" ((unsigned long int)(sh)), "=&d" ((unsigned long int)(sl)) \
	   : "%0" ((unsigned long int)(ah)), "d" ((unsigned long int)(bh)), \
	     "%1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub%.l %5,%1
	subx%.l %3,%0" \
	   : "=d" ((unsigned long int)(sh)), "=&d" ((unsigned long int)(sl)) \
	   : "0" ((unsigned long int)(ah)), "d" ((unsigned long int)(bh)), \
	     "1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
#if defined (__mc68020__) || defined (__NeXT__) || defined (mc68020)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mulu%.l %3,%1:%0" \
	   : "=d" ((unsigned long int)(w0)), "=d" ((unsigned long int)(w1)) \
	   : "%0" ((unsigned long int)(u)), "dmi" ((unsigned long int)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divu%.l %4,%1:%0" \
	   : "=d" ((unsigned long int)(q)), "=d" ((unsigned long int)(r)) \
	   : "0" ((unsigned long int)(n0)), "1" ((unsigned long int)(n1)), \
	     "dmi" ((unsigned long int)(d)))
#define count_leading_zeros(count, x) \
  __asm__ ("bfffo %1{%b2:%b2},%0" \
	   : "=d" ((unsigned long int)(count)) \
	   : "od" ((unsigned long int)(x)), "n" (0))
#else /* not mc68020 */
#define umul_ppmm(xh, xl, a, b) \
  __asm__ ("| Inlined umul_ppmm
	movel %2,d0
	movel %3,d1
	movel d0,d2
	swap d0
	movel d1,d3
	swap d1
	movew d2,d4
	mulu d3,d4
	mulu d1,d2
	mulu d0,d3
	mulu d0,d1
	movel d4,d0
	eorw d0,d0
	swap d0
	addl d0,d2
	addl d3,d2
	jcc 1f
	addl #65536,d1
1:	swap d2
	moveq #0,d0
	movew d2,d0
	movew d4,d2
	movel d2,%1
	addl d1,d0
	movel d0,%0" \
	   : "=g" ((unsigned long int)(xh)), "=g" ((unsigned long int)(xl)) \
	   : "g" ((unsigned long int)(a)), "g" ((unsigned long int)(b)) \
	   : "d0", "d1", "d2", "d3", "d4")
#endif /* not mc68020 */
#endif /* mc68000 */

#if defined (__m88000__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addu.co %1,%r4,%r5
	addu.ci %0,%r2,%r3" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "%rJ" ((unsigned long int)(ah)), "rJ" ((unsigned long int)(bh)), \
	     "%rJ" ((unsigned long int)(al)), "rJ" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subu.co %1,%r4,%r5
	subu.ci %0,%r2,%r3" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "rJ" ((unsigned long int)(ah)), "rJ" ((unsigned long int)(bh)), \
	     "rJ" ((unsigned long int)(al)), "rJ" ((unsigned long int)(bl)))
#define UMUL_TIME 17
#define UDIV_TIME 150
#define count_leading_zeros(count, x) \
  do { \
    unsigned long int __cbtmp; \
    __asm__ ("ff1 %0,%1" \
	     : "=r" (__cbtmp) : "r" ((unsigned long int)(x))); \
    (count) = __cbtmp ^ 31; \
  } while (0)
#endif /* __m88000__ */

#if defined (__mips__)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("multu %2,%3
	mflo %0
	mfhi %1" \
	   : "=r" ((unsigned long int)(w0)), "=r" ((unsigned long int)(w1)) \
	   : "r" ((unsigned long int)(u)), "r" ((unsigned long int)(v)))
#define UMUL_TIME 5
#define UDIV_TIME 100
#endif /* __mips__ */

#if defined (__ns32000__)
#define __umulsidi3(u, v) \
  ({long long int __w; \
    __asm__ ("meid %2,%0" : "=g" (__w) \
	     : "%0" ((unsigned long int)(u)), "g" ((unsigned long int)(v))); \
    __w; })
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("movd %2,r0
	movd %3,r1
	deid %4,r0
	movd r1,%0
	movd r0,%1" \
	   : "=g" ((unsigned long int)(q)), "=g" ((unsigned long int)(r)) \
	   : "g" ((unsigned long int)(n0)), "g" ((unsigned long int)(n1)), \
	     "g" ((unsigned long int)(d)) : "r0", "r1")
#endif /* __ns32000__ */

#if defined (__pyr__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addw %5,%1
	addwc %3,%0" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "%0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
	     "%1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subw %5,%1
	subwb %3,%0" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
	     "1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
/* This insn doesn't work on ancient pyramids.  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("movw %2,tr11
	uemul %3,tr10
	movw tr10,%0
	movw tr11,%1" \
	   : "=r" ((unsigned long int)(w1)), "=r" ((unsigned long int)(w0)) \
	   : "r" ((unsigned long int)(u)), "r" ((unsigned long int)(v)) \
	   : "tr10", "tr11")
#endif /* __pyr__ */

#if defined (__ibm032__) /* RT/ROMP */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("a %1,%5
	ae %0,%3" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "%0" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)), \
	     "%1" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("s %1,%5
	se %0,%3" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "0" ((unsigned long int)(ah)), "r" ((unsigned long int)(bh)), \
	     "1" ((unsigned long int)(al)), "r" ((unsigned long int)(bl)))
#define umul_ppmm(ph, pl, m0, m1) \
  do { \
    unsigned long int __m0 = (m0), __m1 = (m1); \
    __asm__ ( \
	"s r2,r2
	mts r10,%2
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	m r2,%3
	cas %0,r2,r0
	mfs r10,%1" \
	     : "=r" ((unsigned long int)(ph)), "=r" ((unsigned long int)(pl)) \
	     : "%r" (__m0), "r" (__m1) \
	     : "r2"); \
    (ph) += ((((signed long int) __m0 >> 31) & __m1) \
	     + (((signed long int) __m1 >> 31) & __m0)); \
  } while (0)
#define count_leading_zeros(count, x) \
  do { \
    if ((x) >= 0x10000) \
      __asm__ ("clz %0,%1" \
	       : "=r" ((unsigned long int)(count)) \
	       : "r" ((unsigned long int)(x) >> 16)); \
    else \
      { \
	__asm__ ("clz %0,%1" \
		 : "=r" ((unsigned long int)(count)) \
		 : "r" ((unsigned long int)(x))); \
	(count) += 16; \
      } \
  } while (0)
#endif

#if defined (__sparc__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addcc %4,%5,%1
	addx %2,%3,%0" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "%r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)), \
	     "%r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subcc %4,%5,%1
	subx %2,%3,%0" \
	   : "=r" ((unsigned long int)(sh)), "=&r" ((unsigned long int)(sl)) \
	   : "r" ((unsigned long int)(ah)), "rI" ((unsigned long int)(bh)), \
	     "r" ((unsigned long int)(al)), "rI" ((unsigned long int)(bl)))
#if defined (__sparc8__) /* How do we recog. version 8 SPARC?  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("umul %2,%3,%1;rd %%y,%0" \
	   : "=r" ((unsigned long int)(w1)), "=r" ((unsigned long int)(w0)) \
	   : "r" ((unsigned long int)(u)), "r" ((unsigned long int)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1" \
	   : "=&r" ((unsigned long int)(q)), "=&r" ((unsigned long int)(r)) \
	   : "r" ((unsigned long int)(n1)), "r" ((unsigned long int)(n0)), \
	     "r" ((unsigned long int)(d)))
#else
/* SPARC without integer multiplication and divide instructions.
   (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("! Inlined umul_ppmm
	wr %%g0,%2,%%y	! SPARC has 0-3 delay insn after a wr
	sra %3,31,%%g2	! Don't move this insn
	and %2,%%g2,%%g2	! Don't move this insn
	andcc %%g0,0,%%g1	! Don't move this insn
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,%3,%%g1
	mulscc %%g1,0,%%g1
	add %%g1,%%g2,%0
	rd %%y,%1" \
	   : "=r" ((unsigned long int)(w1)), "=r" ((unsigned long int)(w0)) \
	   : "%rI" ((unsigned long int)(u)), "r" ((unsigned long int)(v)) \
	   : "%g1", "%g2")
#define UMUL_TIME 39		/* 39 instructions */
/* It's quite necessary to add this much assembler for the sparc.
   The default udiv_qrnnd (in C) is more than 10 times slower!  */
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("! Inlined udiv_qrnnd
	mov 32,%%g1
	subcc %1,%2,%%g0
1:	bcs 5f
	addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb
	sub %1,%2,%1	! this kills msb of n
	addx %1,%1,%1	! so this can't give carry
	subcc %%g1,1,%%g1
2:	bne 1b
	subcc %1,%2,%%g0
	bcs 3f
	addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb
	b 3f
	sub %1,%2,%1	! this kills msb of n
4:	sub %1,%2,%1
5:	addxcc %1,%1,%1
	bcc 2b
	subcc %%g1,1,%%g1
! Got carry from n.  Subtract next step to cancel this carry.
	bne 4b
	addcc %0,%0,%0	! shift n1n0 and a 0-bit in lsb
	sub %1,%2,%1
3:	xnor %0,0,%0
	! End of inline udiv_qrnnd" \
	   : "=r&" ((unsigned long int)(q)), "=r&" ((unsigned long int)(r)) \
	   : "r" ((unsigned long int)(d)), "1" ((unsigned long int)(n1)), \
	     "0" ((unsigned long int)(n0)) : "%g1")
#define UDIV_TIME (3+7*32)	/* 7 instructions/iteration. 32 iterations. */
#endif /* __sparc8__ */
#endif /* __sparc__ */

#if defined (__vax__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl2 %5,%1
	adwc %3,%0" \
	   : "=g" ((unsigned long int)(sh)), "=&g" ((unsigned long int)(sl)) \
	   : "%0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
	     "%1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl2 %5,%1
	sbwc %3,%0" \
	   : "=g" ((unsigned long int)(sh)), "=&g" ((unsigned long int)(sl)) \
	   : "0" ((unsigned long int)(ah)), "g" ((unsigned long int)(bh)), \
	     "1" ((unsigned long int)(al)), "g" ((unsigned long int)(bl)))
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    union {long long int ll; struct {unsigned long int l, h;} i;} __xx; \
    unsigned long int __m0 = (m0), __m1 = (m1); \
    __asm__ ("emul %1,%2,$0,%0" \
	     : "=r" (__xx.ll) : "g" (__m0), "g" (__m1)); \
    (xh) = __xx.i.h; (xl) = __xx.i.l; \
    (xh) += ((((signed long int) __m0 >> 31) & __m1) \
	     + (((signed long int) __m1 >> 31) & __m0)); \
  } while (0)
#endif /* __vax__ */

#endif /* __GNUC__ */

/* If this machine has no inline assembler, use C macros.  */

#if !defined (add_ssaaaa)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    unsigned long int __x; \
    __x = (al) + (bl); \
    (sh) = (ah) + (bh) + (__x < (al)); \
    (sl) = __x; \
  } while (0)
#endif

#if !defined (sub_ddmmss)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    unsigned long int __x; \
    __x = (al) - (bl); \
    (sh) = (ah) - (bh) - (__x > (al)); \
    (sl) = __x; \
  } while (0)
#endif

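/* A note on the two fallbacks above (an editorial addition): the one-word
   sum __x = (al) + (bl) wraps modulo 2^LONG_TYPE_SIZE, so a carry occurred
   exactly when the result is smaller than an addend; hence the (__x < (al))
   term.  Likewise (__x > (al)) detects the borrow out of (al) - (bl).
   E.g. with 32-bit longs, 0xFFFFFFFF + 2 wraps to 1, and 1 < 0xFFFFFFFF
   signals the carry into the high word.  */
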
#if !defined (umul_ppmm)
#define umul_ppmm(w1, w0, u, v) \
  do { \
    unsigned long int __x0, __x1, __x2, __x3; \
    unsigned int __ul, __vl, __uh, __vh; \
    \
    __ul = __ll_lowpart (u); \
    __uh = __ll_highpart (u); \
    __vl = __ll_lowpart (v); \
    __vh = __ll_highpart (v); \
    \
    __x0 = (unsigned long int) __ul * __vl; \
    __x1 = (unsigned long int) __ul * __vh; \
    __x2 = (unsigned long int) __uh * __vl; \
    __x3 = (unsigned long int) __uh * __vh; \
    \
    __x1 += __ll_highpart (__x0); /* this can't give carry */ \
    __x1 += __x2; /* but this indeed can */ \
    if (__x1 < __x2) /* did we get it? */ \
      __x3 += __ll_B; /* yes, add it in the proper pos. */ \
    \
    (w1) = __x3 + __ll_highpart (__x1); \
    (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
  } while (0)
#endif

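/* Editorial note on the fallback above: it is schoolbook multiplication
   on half-words with base B = __ll_B = 2^(LONG_TYPE_SIZE/2):

     u * v = (uh*B + ul) * (vh*B + vl)
	   = uh*vh*B^2 + (uh*vl + ul*vh)*B + ul*vl
	   = __x3*B^2 + (__x1 + __x2)*B + __x0.

   Each partial product fits in one word, and __x1 + __ll_highpart (__x0)
   cannot overflow because (B-1)*(B-1) + (B-1) < B^2; only the __x1 += __x2
   step can carry, which the __x1 < __x2 test catches and credits to __x3.  */
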
#if !defined (__umulsidi3)
#define __umulsidi3(u, v) \
  ({long_long __w; \
    umul_ppmm (__w.s.high, __w.s.low, u, v); \
    __w.ll; })
#endif

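/* Editorial note: the fallback above relies on a `long_long' union that
   this header expects its user to supply.  A plausible shape, given here
   only as an assumption and with the member order depending on the
   target's endianness, is:

     typedef union {long long int ll;
		    struct {unsigned long int high, low;} s;
		   } long_long;  */
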
#if !defined (udiv_qrnnd) || defined (__LLDEBUG__)
#define UDIV_NEEDS_NORMALIZATION 1
#ifndef __LLDEBUG__
#define udiv_qrnnd udiv_qrnnd_c
#endif
#define udiv_qrnnd_c(q, r, n1, n0, d) \
  do { \
    unsigned int __d1, __d0, __q1, __q0; \
    unsigned long int __r1, __r0, __m; \
    __d1 = __ll_highpart (d); \
    __d0 = __ll_lowpart (d); \
    \
    __r1 = (n1) % __d1; \
    __q1 = (n1) / __d1; \
    __m = (unsigned long int) __q1 * __d0; \
    __r1 = __r1 * __ll_B | __ll_highpart (n0); \
    if (__r1 < __m) \
      { \
	__q1--, __r1 += (d); \
	if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */ \
	  if (__r1 < __m) \
	    __q1--, __r1 += (d); \
      } \
    __r1 -= __m; \
    \
    __r0 = __r1 % __d1; \
    __q0 = __r1 / __d1; \
    __m = (unsigned long int) __q0 * __d0; \
    __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
    if (__r0 < __m) \
      { \
	__q0--, __r0 += (d); \
	if (__r0 >= (d)) \
	  if (__r0 < __m) \
	    __q0--, __r0 += (d); \
      } \
    __r0 -= __m; \
    \
    (q) = (unsigned long int) __q1 * __ll_B | __q0; \
    (r) = __r0; \
  } while (0)
#endif

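/* Editorial note on udiv_qrnnd_c: with B = __ll_B the macro performs two
   steps of schoolbook division by the half-words __d1:__d0 of d.  The
   first step estimates the high quotient digit as __q1 = (n1) / __d1 and
   subtracts __q1 * d from the top three half-words of the dividend; the
   second step repeats this for the low digit __q0, giving
   q = __q1*B + __q0 and the final remainder __r0.  Because callers
   normalize d so that its most significant bit is set
   (UDIV_NEEDS_NORMALIZATION is 1 here), each estimate exceeds the true
   digit by at most 2, which is why the "__q1--, __r1 += (d)" correction
   is applied at most twice per step.  */
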
#if !defined (count_leading_zeros)
extern const unsigned char __clz_tab[];

#define count_leading_zeros(count, x) \
  do { \
    unsigned long int xr = (x); \
    unsigned int a; \
    \
    a = xr < (1<<2*__BITS4) \
      ? (xr < (1<<__BITS4) ? 0 : __BITS4) \
      : (xr < (1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
    \
    (count) = 4*__BITS4 - (__clz_tab[xr >> a] + a); \
  } while (0)
#endif

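/* Editorial note: __clz_tab is not defined in this header; the fallback
   above expects the library to provide a 256-entry table in which
   __clz_tab[i] is the number of bits needed to represent i (0 for i == 0),
   so that __clz_tab[xr >> a] + a is the bit length of xr.  The compiled-out
   helper below, a sketch only, computes the values such a table would hold.  */
#if 0
static unsigned char
__clz_tab_entry (unsigned int i)	/* hypothetical helper; expects i < 256 */
{
  unsigned char len = 0;
  while (i != 0)
    {
      i >>= 1;			/* drop one bit per iteration */
      len++;			/* count the bits of i */
    }
  return len;
}
#endif
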
#ifndef UDIV_NEEDS_NORMALIZATION
#define UDIV_NEEDS_NORMALIZATION 0
#endif