@ libgcc routines for ARM cpu.
@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)

/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005, 2007, 2008,
   2009, 2010 Free Software Foundation, Inc.

This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* An executable stack is *not* required for these functions.  */
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
.previous
#endif  /* __ELF__ and __linux__ */

#ifdef __ARM_EABI__
/* Some attributes that are common to all routines in this file.  */
	/* Tag_ABI_align_needed: This code does not require 8-byte
	   alignment from the caller.  */
	/* .eabi_attribute 24, 0 -- default setting.  */
	/* Tag_ABI_align_preserved: This code preserves 8-byte
	   alignment in any callee.  */
	.eabi_attribute 25, 1
#endif /* __ARM_EABI__ */
/* ------------------------------------------------------------------------ */

/* We need to know what prefix to add to function names.  */

#ifndef __USER_LABEL_PREFIX__
#error  __USER_LABEL_PREFIX__ not defined
#endif

/* ANSI concatenation macros.  */

#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b

/* Use the right prefix for global labels.  */

#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)

#ifdef __ELF__
#ifdef __thumb__
#define __PLT__  /* Not supported in Thumb assembler (for now).  */
#elif defined __vxworks && !defined __PIC__
#define __PLT__ /* Not supported by the kernel loader.  */
#else
#define __PLT__ (PLT)
#endif
#define TYPE(x) .type SYM(x),function
#define SIZE(x) .size SYM(x), . - SYM(x)
#define LSYM(x) .x
#else
#define __PLT__
#define TYPE(x)
#define SIZE(x)
#define LSYM(x) x
#endif
75
/* Function end macros.  Variants for interworking.  */

/* Derive a single __ARM_ARCH__ value from the architecture-specific
   predefines provided by the compiler.  */
#if defined(__ARM_ARCH_2__)
# define __ARM_ARCH__ 2
#endif

#if defined(__ARM_ARCH_3__)
# define __ARM_ARCH__ 3
#endif

#if defined(__ARM_ARCH_3M__) || defined(__ARM_ARCH_4__) \
	|| defined(__ARM_ARCH_4T__)
/* We use __ARM_ARCH__ set to 4 here, but in reality it's any processor with
   long multiply instructions.  That includes v3M.  */
# define __ARM_ARCH__ 4
#endif

#if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
	|| defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
	|| defined(__ARM_ARCH_5TEJ__)
# define __ARM_ARCH__ 5
#endif

#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
	|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
	|| defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \
	|| defined(__ARM_ARCH_6M__)
# define __ARM_ARCH__ 6
#endif

#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
	|| defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
	|| defined(__ARM_ARCH_7EM__)
# define __ARM_ARCH__ 7
#endif

#if defined(__ARM_ARCH_8A__)
# define __ARM_ARCH__ 8
#endif

#ifndef __ARM_ARCH__
#error Unable to determine architecture.
#endif

/* There are times when we might prefer Thumb1 code even if ARM code is
   permitted, for example, the code might be smaller, or there might be
   interworking problems with switching to ARM state if interworking is
   disabled.  */
#if (defined(__thumb__)			\
     && !defined(__thumb2__)		\
     && (!defined(__THUMB_INTERWORK__)	\
	 || defined (__OPTIMIZE_SIZE__)	\
	 || defined(__ARM_ARCH_6M__)))
# define __prefer_thumb__
#endif

/* How to return from a function call depends on the architecture variant.  */

#if (__ARM_ARCH__ > 4) || defined(__ARM_ARCH_4T__)

# define RET		bx	lr
# define RETc(x)	bx##x	lr

/* Special precautions for interworking on armv4t.  */
# if (__ARM_ARCH__ == 4)

/* Always use bx, not ldr pc.  */
#  if (defined(__thumb__) || defined(__THUMB_INTERWORK__))
#   define __INTERWORKING__
#  endif /* __THUMB__ || __THUMB_INTERWORK__ */

/* Include thumb stub before arm mode code.  */
#  if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
#   define __INTERWORKING_STUBS__
#  endif /* __thumb__ && !__THUMB_INTERWORK__ */

#endif /* __ARM_ARCH == 4 */

#else

/* Pre-armv4t: a plain move to pc is the only way back.  */
# define RET		mov	pc, lr
# define RETc(x)	mov##x	pc, lr

#endif
160
/* Hand-rolled DWARF call-frame information, emitted into .debug_frame so
   debuggers can unwind through these routines.  Each macro appends raw
   DW_CFA opcodes for the instruction at \advance bytes into the frame.  */

/* Record that register \reg has been restored and the CFA offset reset.  */
.macro	cfi_pop		advance, reg, cfa_offset
#ifdef __ELF__
	.pushsection	.debug_frame
	.byte	0x4		/* DW_CFA_advance_loc4 */
	.4byte	\advance
	.byte	(0xc0 | \reg)	/* DW_CFA_restore */
	.byte	0xe		/* DW_CFA_def_cfa_offset */
	.uleb128 \cfa_offset
	.popsection
#endif
.endm

/* Record that register \reg was saved at \offset from the CFA.  */
.macro	cfi_push	advance, reg, offset, cfa_offset
#ifdef __ELF__
	.pushsection	.debug_frame
	.byte	0x4		/* DW_CFA_advance_loc4 */
	.4byte	\advance
	.byte	(0x80 | \reg)	/* DW_CFA_offset */
	.uleb128 (\offset / -4)
	.byte	0xe		/* DW_CFA_def_cfa_offset */
	.uleb128 \cfa_offset
	.popsection
#endif
.endm

/* Open a CIE/FDE pair covering \start_label..\end_label.  */
.macro cfi_start	start_label, end_label
#ifdef __ELF__
	.pushsection	.debug_frame
LSYM(Lstart_frame):
	.4byte	LSYM(Lend_cie) - LSYM(Lstart_cie) @ Length of CIE
LSYM(Lstart_cie):
	.4byte	0xffffffff	@ CIE Identifier Tag
	.byte	0x1	@ CIE Version
	.ascii	"\0"	@ CIE Augmentation
	.uleb128 0x1	@ CIE Code Alignment Factor
	.sleb128 -4	@ CIE Data Alignment Factor
	.byte	0xe	@ CIE RA Column
	.byte	0xc	@ DW_CFA_def_cfa
	.uleb128 0xd
	.uleb128 0x0

	.align 2
LSYM(Lend_cie):
	.4byte	LSYM(Lend_fde)-LSYM(Lstart_fde)	@ FDE Length
LSYM(Lstart_fde):
	.4byte	LSYM(Lstart_frame)	@ FDE CIE offset
	.4byte	\start_label	@ FDE initial location
	.4byte	\end_label-\start_label	@ FDE address range
	.popsection
#endif
.endm

/* Close the FDE opened by cfi_start and define \end_label.  */
.macro cfi_end	end_label
#ifdef __ELF__
	.pushsection	.debug_frame
	.align	2
LSYM(Lend_fde):
	.popsection
\end_label:
#endif
.endm
219
/* Return, optionally restoring \regs from the stack.  Uses bx when
   interworking is required, otherwise loads pc directly.
   Don't pass dirn, it's there just to get token pasting right.  */

.macro	RETLDM	regs=, cond=, unwind=, dirn=ia
#if defined (__INTERWORKING__)
	.ifc "\regs",""
	ldr\cond	lr, [sp], #8
	.else
# if defined(__thumb2__)
	pop\cond	{\regs, lr}
# else
	ldm\cond\dirn	sp!, {\regs, lr}
# endif
	.endif
	.ifnc "\unwind", ""
	/* Mark LR as restored.  */
97:	cfi_pop 97b - \unwind, 0xe, 0x0
	.endif
	bx\cond	lr
#else
	/* Caller is responsible for providing IT instruction.  */
	.ifc "\regs",""
	ldr\cond	pc, [sp], #8
	.else
# if defined(__thumb2__)
	pop\cond	{\regs, pc}
# else
	ldm\cond\dirn	sp!, {\regs, pc}
# endif
	.endif
#endif
.endm
251
/* The Unified assembly syntax allows the same code to be assembled for both
   ARM and Thumb-2.  However this is only supported by recent gas, so define
   a set of macros to allow ARM code on older assemblers.  */
#if defined(__thumb2__)
.macro do_it cond, suffix=""
	it\suffix	\cond
.endm
.macro shift1 op, arg0, arg1, arg2
	\op	\arg0, \arg1, \arg2
.endm
#define do_push	push
#define do_pop	pop
#define COND(op1, op2, cond) op1 ## op2 ## cond
/* Perform an arithmetic operation with a variable shift operand.  This
   requires two instructions and a scratch register on Thumb-2.  */
.macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
	\shiftop \tmp, \src2, \shiftreg
	\name \dest, \src1, \tmp
.endm
#else
.macro do_it cond, suffix=""
.endm
.macro shift1 op, arg0, arg1, arg2
	mov	\arg0, \arg1, \op \arg2
.endm
#define do_push	stmfd sp!,
#define do_pop	ldmfd sp!,
#define COND(op1, op2, cond) op1 ## cond ## op2
.macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
	\name \dest, \src1, \src2, \shiftop \shiftreg
.endm
#endif

/* Tail of the division routines on division by zero, ARM state.
   EABI: saturate the result and branch to __aeabi_idiv0.
   Otherwise: call __div0 and return 0.  */
#ifdef __ARM_EABI__
.macro ARM_LDIV0 name signed
	cmp	r0, #0
	.ifc	\signed, unsigned
	movne	r0, #0xffffffff
	.else
	movgt	r0, #0x7fffffff
	movlt	r0, #0x80000000
	.endif
	b	SYM (__aeabi_idiv0) __PLT__
.endm
#else
.macro ARM_LDIV0 name signed
	str	lr, [sp, #-8]!
98:	cfi_push 98b - __\name, 0xe, -0x8, 0x8
	bl	SYM (__div0) __PLT__
	mov	r0, #0			@ About as wrong as it could be.
	RETLDM	unwind=98b
.endm
#endif


/* Tail of the division routines on division by zero, Thumb state.
   Mirrors ARM_LDIV0, with special sequences for v6-M (no wide ops,
   no ARM state) and Thumb-2.  */
#ifdef __ARM_EABI__
.macro THUMB_LDIV0 name signed
#if defined(__ARM_ARCH_6M__)
	.ifc \signed, unsigned
	cmp	r0, #0
	beq	1f
	mov	r0, #0
	mvn	r0, r0		@ 0xffffffff
1:
	.else
	cmp	r0, #0
	beq	2f
	blt	3f
	mov	r0, #0
	mvn	r0, r0
	lsr	r0, r0, #1	@ 0x7fffffff
	b	2f
3:	mov	r0, #0x80
	lsl	r0, r0, #24	@ 0x80000000
2:
	.endif
	push	{r0, r1, r2}
	ldr	r0, 4f
	adr	r1, 4f
	add	r0, r1
	str	r0, [sp, #8]
	@ We know we are not on armv4t, so pop pc is safe.
	pop	{r0, r1, pc}
	.align	2
4:
	.word	__aeabi_idiv0 - 4b
#elif defined(__thumb2__)
	.syntax unified
	.ifc \signed, unsigned
	cbz	r0, 1f
	mov	r0, #0xffffffff
1:
	.else
	cmp	r0, #0
	do_it	gt
	movgt	r0, #0x7fffffff
	do_it	lt
	movlt	r0, #0x80000000
	.endif
	b.w	SYM(__aeabi_idiv0) __PLT__
#else
	.align	2
	bx	pc
	nop
	.arm
	cmp	r0, #0
	.ifc \signed, unsigned
	movne	r0, #0xffffffff
	.else
	movgt	r0, #0x7fffffff
	movlt	r0, #0x80000000
	.endif
	b	SYM(__aeabi_idiv0) __PLT__
	.thumb
#endif
.endm
#else
.macro THUMB_LDIV0 name signed
	push	{ r1, lr }
98:	cfi_push 98b - __\name, 0xe, -0x4, 0x8
	bl	SYM (__div0)
	mov	r0, #0			@ About as wrong as it could be.
#if defined (__INTERWORKING__)
	pop	{ r1, r2 }
	bx	r2
#else
	pop	{ r1, pc }
#endif
.endm
#endif

/* Emit the size directive that closes a function definition.  */
.macro FUNC_END name
	SIZE (__\name)
.endm

/* Close a division routine: emit the shared Ldiv0 tail (with CFI) and
   the function size.  */
.macro DIV_FUNC_END name signed
	cfi_start	__\name, LSYM(Lend_div0)
LSYM(Ldiv0):
#ifdef __thumb__
	THUMB_LDIV0 \name \signed
#else
	ARM_LDIV0 \name \signed
#endif
	cfi_end	LSYM(Lend_div0)
	FUNC_END \name
.endm
398
/* Declare a global Thumb entry point with no user-label prefix.  */
.macro	THUMB_FUNC_START name
	.globl	SYM (\name)
	TYPE	(\name)
	.thumb_func
SYM (\name):
.endm

/* Function start macros.  Variants for ARM and Thumb.  */

#ifdef __thumb__
#define THUMB_FUNC .thumb_func
#define THUMB_CODE .force_thumb
# if defined(__thumb2__)
#define THUMB_SYNTAX .syntax divided
# else
#define THUMB_SYNTAX
# endif
#else
#define THUMB_FUNC
#define THUMB_CODE
#define THUMB_SYNTAX
#endif

/* Declare a global entry point __\name in the mode this file is being
   assembled for.  */
.macro FUNC_START name
	.text
	.globl SYM (__\name)
	TYPE (__\name)
	.align 0
	THUMB_CODE
	THUMB_FUNC
	THUMB_SYNTAX
SYM (__\name):
.endm

/* Special function that will always be coded in ARM assembly, even if
   in Thumb-only compilation.  */

#if defined(__thumb2__)

/* For Thumb-2 we build everything in thumb mode.  */
.macro ARM_FUNC_START name
	FUNC_START \name
	.syntax unified
.endm
#define EQUIV .thumb_set
.macro  ARM_CALL name
	bl	__\name
.endm

#elif defined(__INTERWORKING_STUBS__)

.macro	ARM_FUNC_START name
	FUNC_START \name
	bx	pc
	nop
	.arm
/* A hook to tell gdb that we've switched to ARM mode.  Also used to call
   directly from other local arm routines.  */
_L__\name:
.endm
#define EQUIV .thumb_set
/* Branch directly to a function declared with ARM_FUNC_START.
   Must be called in arm mode.  */
.macro  ARM_CALL name
	bl	_L__\name
.endm

#else /* !(__INTERWORKING_STUBS__ || __thumb2__) */

#ifdef __ARM_ARCH_6M__
#define EQUIV .thumb_set
#else
.macro	ARM_FUNC_START name
	.text
	.globl SYM (__\name)
	TYPE (__\name)
	.align 0
	.arm
SYM (__\name):
.endm
#define EQUIV .set
.macro  ARM_CALL name
	bl	__\name
.endm
#endif

#endif

/* Make __\new an alias for __\old, using .thumb_set in Thumb mode so the
   alias keeps its Thumb bit.  */
.macro FUNC_ALIAS new old
	.globl	SYM (__\new)
#if defined (__thumb__)
	.thumb_set	SYM (__\new), SYM (__\old)
#else
	.set	SYM (__\new), SYM (__\old)
#endif
.endm

#ifndef __ARM_ARCH_6M__
/* Alias for functions declared with ARM_FUNC_START; also aliases the
   local ARM-mode entry when interworking stubs are in use.  */
.macro ARM_FUNC_ALIAS new old
	.globl	SYM (__\new)
	EQUIV	SYM (__\new), SYM (__\old)
#if defined(__INTERWORKING_STUBS__)
	.set	SYM (_L__\new), SYM (_L__\old)
#endif
.endm
#endif

/* Endian-dependent names for the halves of a 64-bit value passed in
   register pairs r0/r1 (xx) and r2/r3 (yy).  */
#ifdef __ARMEB__
#define xxh r0
#define xxl r1
#define yyh r2
#define yyl r3
#else
#define xxh r1
#define xxl r0
#define yyh r3
#define yyl r2
#endif

#ifdef __ARM_EABI__
/* Declare __\name as a weak symbol.  */
.macro	WEAK name
	.weak SYM (__\name)
.endm
#endif

#ifdef __thumb__
/* Register aliases.  */

work		.req	r4	@ XXXX is this safe ?
dividend	.req	r0
divisor		.req	r1
overdone	.req	r2
result		.req	r2
curbit		.req	r3
#endif
#if 0
ip		.req	r12
sp		.req	r13
lr		.req	r14
pc		.req	r15
#endif

/* ------------------------------------------------------------------------ */
/*		Bodies of the division and modulo routines.		    */
/* ------------------------------------------------------------------------ */
/* Core unsigned division: \result = \dividend / \divisor.
   \curbit is scratch.  The dividend is clobbered with the remainder.  */
.macro ARM_DIV_BODY dividend, divisor, result, curbit

#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)

#if defined (__thumb2__)
	clz	\curbit, \dividend
	clz	\result, \divisor
	sub	\curbit, \result, \curbit
	rsb	\curbit, \curbit, #31
	adr	\result, 1f
	add	\curbit, \result, \curbit, lsl #4
	mov	\result, #0
	mov	pc, \curbit
.p2align 3
1:
	.set	shift, 32
	.rept	32
	.set	shift, shift - 1
	cmp.w	\dividend, \divisor, lsl #shift
	nop.n
	adc.w	\result, \result, \result
	it	cs
	subcs.w	\dividend, \dividend, \divisor, lsl #shift
	.endr
#else
	clz	\curbit, \dividend
	clz	\result, \divisor
	sub	\curbit, \result, \curbit
	rsbs	\curbit, \curbit, #31
	addne	\curbit, \curbit, \curbit, lsl #1
	mov	\result, #0
	addne	pc, pc, \curbit, lsl #2
	nop
	.set	shift, 32
	.rept	32
	.set	shift, shift - 1
	cmp	\dividend, \divisor, lsl #shift
	adc	\result, \result, \result
	subcs	\dividend, \dividend, \divisor, lsl #shift
	.endr
#endif

#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
#if __ARM_ARCH__ >= 5

	clz	\curbit, \divisor
	clz	\result, \dividend
	sub	\result, \curbit, \result
	mov	\curbit, #1
	mov	\divisor, \divisor, lsl \result
	mov	\curbit, \curbit, lsl \result
	mov	\result, #0

#else /* __ARM_ARCH__ < 5 */

	@ Initially shift the divisor left 3 bits if possible,
	@ set curbit accordingly.  This allows for curbit to be located
	@ at the left end of each 4-bit nibbles in the division loop
	@ to save one loop in most cases.
	tst	\divisor, #0xe0000000
	moveq	\divisor, \divisor, lsl #3
	moveq	\curbit, #8
	movne	\curbit, #1

	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
1:	cmp	\divisor, #0x10000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #4
	movlo	\curbit, \curbit, lsl #4
	blo	1b

	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
1:	cmp	\divisor, #0x80000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #1
	movlo	\curbit, \curbit, lsl #1
	blo	1b

	mov	\result, #0

#endif /* __ARM_ARCH__ < 5 */

	@ Division loop
1:	cmp	\dividend, \divisor
	do_it	hs, t
	subhs	\dividend, \dividend, \divisor
	orrhs	\result,   \result,   \curbit
	cmp	\dividend, \divisor,  lsr #1
	do_it	hs, t
	subhs	\dividend, \dividend, \divisor, lsr #1
	orrhs	\result,   \result,   \curbit,  lsr #1
	cmp	\dividend, \divisor,  lsr #2
	do_it	hs, t
	subhs	\dividend, \dividend, \divisor, lsr #2
	orrhs	\result,   \result,   \curbit,  lsr #2
	cmp	\dividend, \divisor,  lsr #3
	do_it	hs, t
	subhs	\dividend, \dividend, \divisor, lsr #3
	orrhs	\result,   \result,   \curbit,  lsr #3
	cmp	\dividend, #0			@ Early termination?
	do_it	ne, t
	movnes	\curbit,   \curbit,  lsr #4	@ No, any more bits to do?
	movne	\divisor,  \divisor, lsr #4
	bne	1b

#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */

.endm
/* ------------------------------------------------------------------------ */
/* Compute \order = floor(log2(\divisor)) for a power-of-two divisor.  */
.macro ARM_DIV2_ORDER divisor, order

#if __ARM_ARCH__ >= 5

	clz	\order, \divisor
	rsb	\order, \order, #31

#else

	cmp	\divisor, #(1 << 16)
	movhs	\divisor, \divisor, lsr #16
	movhs	\order, #16
	movlo	\order, #0

	cmp	\divisor, #(1 << 8)
	movhs	\divisor, \divisor, lsr #8
	addhs	\order, \order, #8

	cmp	\divisor, #(1 << 4)
	movhs	\divisor, \divisor, lsr #4
	addhs	\order, \order, #4

	cmp	\divisor, #(1 << 2)
	addhi	\order, \order, #3
	addls	\order, \order, \divisor, lsr #1

#endif

.endm
/* ------------------------------------------------------------------------ */
/* Core unsigned modulo: leaves \dividend holding \dividend % \divisor.
   \order and \spare are scratch.  */
.macro ARM_MOD_BODY dividend, divisor, order, spare

#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)

	clz	\order, \divisor
	clz	\spare, \dividend
	sub	\order, \order, \spare
	rsbs	\order, \order, #31
	addne	pc, pc, \order, lsl #3
	nop
	.set	shift, 32
	.rept	32
	.set	shift, shift - 1
	cmp	\dividend, \divisor, lsl #shift
	subcs	\dividend, \dividend, \divisor, lsl #shift
	.endr

#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
#if __ARM_ARCH__ >= 5

	clz	\order, \divisor
	clz	\spare, \dividend
	sub	\order, \order, \spare
	mov	\divisor, \divisor, lsl \order

#else /* __ARM_ARCH__ < 5 */

	mov	\order, #0

	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
1:	cmp	\divisor, #0x10000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #4
	addlo	\order, \order, #4
	blo	1b

	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
1:	cmp	\divisor, #0x80000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #1
	addlo	\order, \order, #1
	blo	1b

#endif /* __ARM_ARCH__ < 5 */

	@ Perform all needed substractions to keep only the reminder.
	@ Do comparisons in batch of 4 first.
	subs	\order, \order, #3		@ yes, 3 is intended here
	blt	2f

1:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	cmp	\dividend, \divisor,  lsr #1
	subhs	\dividend, \dividend, \divisor, lsr #1
	cmp	\dividend, \divisor,  lsr #2
	subhs	\dividend, \dividend, \divisor, lsr #2
	cmp	\dividend, \divisor,  lsr #3
	subhs	\dividend, \dividend, \divisor, lsr #3
	cmp	\dividend, #1
	mov	\divisor, \divisor, lsr #4
	subges	\order, \order, #4
	bge	1b

	tst	\order, #3
	teqne	\dividend, #0
	beq	5f

	@ Either 1, 2 or 3 comparison/substractions are left.
2:	cmn	\order, #2
	blt	4f
	beq	3f
	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	mov	\divisor,  \divisor,  lsr #1
3:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	mov	\divisor,  \divisor,  lsr #1
4:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
5:

#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */

.endm
/* ------------------------------------------------------------------------ */
/* Thumb-1 division/modulo core.  With \modulo = 1 the remainder is left in
   "dividend"; with \modulo = 0 the quotient accumulates in "result".
   Uses the register aliases work/dividend/divisor/overdone/result/curbit.  */
.macro THUMB_DIV_MOD_BODY modulo
	@ Load the constant 0x10000000 into our work register.
	mov	work, #1
	lsl	work, #28
LSYM(Loop1):
	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
	cmp	divisor, work
	bhs	LSYM(Lbignum)
	cmp	divisor, dividend
	bhs	LSYM(Lbignum)
	lsl	divisor, #4
	lsl	curbit,  #4
	b	LSYM(Loop1)
LSYM(Lbignum):
	@ Set work to 0x80000000
	lsl	work, #3
LSYM(Loop2):
	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
	cmp	divisor, work
	bhs	LSYM(Loop3)
	cmp	divisor, dividend
	bhs	LSYM(Loop3)
	lsl	divisor, #1
	lsl	curbit,  #1
	b	LSYM(Loop2)
LSYM(Loop3):
	@ Test for possible subtractions ...
  .if \modulo
	@ ... On the final pass, this may subtract too much from the dividend,
	@ so keep track of which subtractions are done, we can fix them up
	@ afterwards.
	mov	overdone, #0
	cmp	dividend, divisor
	blo	LSYM(Lover1)
	sub	dividend, dividend, divisor
LSYM(Lover1):
	lsr	work, divisor, #1
	cmp	dividend, work
	blo	LSYM(Lover2)
	sub	dividend, dividend, work
	mov	ip, curbit
	mov	work, #1
	ror	curbit, work
	orr	overdone, curbit
	mov	curbit, ip
LSYM(Lover2):
	lsr	work, divisor, #2
	cmp	dividend, work
	blo	LSYM(Lover3)
	sub	dividend, dividend, work
	mov	ip, curbit
	mov	work, #2
	ror	curbit, work
	orr	overdone, curbit
	mov	curbit, ip
LSYM(Lover3):
	lsr	work, divisor, #3
	cmp	dividend, work
	blo	LSYM(Lover4)
	sub	dividend, dividend, work
	mov	ip, curbit
	mov	work, #3
	ror	curbit, work
	orr	overdone, curbit
	mov	curbit, ip
LSYM(Lover4):
	mov	ip, curbit
  .else
	@ ... and note which bits are done in the result.  On the final pass,
	@ this may subtract too much from the dividend, but the result will be ok,
	@ since the "bit" will have been shifted out at the bottom.
	cmp	dividend, divisor
	blo	LSYM(Lover1)
	sub	dividend, dividend, divisor
	orr	result, result, curbit
LSYM(Lover1):
	lsr	work, divisor, #1
	cmp	dividend, work
	blo	LSYM(Lover2)
	sub	dividend, dividend, work
	lsr	work, curbit, #1
	orr	result, work
LSYM(Lover2):
	lsr	work, divisor, #2
	cmp	dividend, work
	blo	LSYM(Lover3)
	sub	dividend, dividend, work
	lsr	work, curbit, #2
	orr	result, work
LSYM(Lover3):
	lsr	work, divisor, #3
	cmp	dividend, work
	blo	LSYM(Lover4)
	sub	dividend, dividend, work
	lsr	work, curbit, #3
	orr	result, work
LSYM(Lover4):
  .endif

	cmp	dividend, #0			@ Early termination?
	beq	LSYM(Lover5)
	lsr	curbit,  #4			@ No, any more bits to do?
	beq	LSYM(Lover5)
	lsr	divisor, #4
	b	LSYM(Loop3)
LSYM(Lover5):
  .if \modulo
	@ Any subtractions that we should not have done will be recorded in
	@ the top three bits of "overdone".  Exactly which were not needed
	@ are governed by the position of the bit, stored in ip.
	mov	work, #0xe
	lsl	work, #28
	and	overdone, work
	beq	LSYM(Lgot_result)

	@ If we terminated early, because dividend became zero, then the
	@ bit in ip will not be in the bottom nibble, and we should not
	@ perform the additions below.  We must test for this though
	@ (rather relying upon the TSTs to prevent the additions) since
	@ the bit in ip could be in the top two bits which might then match
	@ with one of the smaller RORs.
	mov	curbit, ip
	mov	work, #0x7
	tst	curbit, work
	beq	LSYM(Lgot_result)

	mov	curbit, ip
	mov	work, #3
	ror	curbit, work
	tst	overdone, curbit
	beq	LSYM(Lover6)
	lsr	work, divisor, #3
	add	dividend, work
LSYM(Lover6):
	mov	curbit, ip
	mov	work, #2
	ror	curbit, work
	tst	overdone, curbit
	beq	LSYM(Lover7)
	lsr	work, divisor, #2
	add	dividend, work
LSYM(Lover7):
	mov	curbit, ip
	mov	work, #1
	ror	curbit, work
	tst	overdone, curbit
	beq	LSYM(Lgot_result)
	lsr	work, divisor, #1
	add	dividend, work
  .endif
LSYM(Lgot_result):
.endm
/* ------------------------------------------------------------------------ */
/*		Start of the Real Functions				    */
/* ------------------------------------------------------------------------ */
#ifdef L_udivsi3

#if defined(__prefer_thumb__)

	FUNC_START udivsi3
	FUNC_ALIAS aeabi_uidiv udivsi3

	cmp	divisor, #0
	beq	LSYM(Ldiv0)
LSYM(udivsi3_skip_div0_test):
	mov	curbit, #1
	mov	result, #0

	push	{ work }
	cmp	dividend, divisor
	blo	LSYM(Lgot_result)

	THUMB_DIV_MOD_BODY 0

	mov	r0, result
	pop	{ work }
	RET

#elif defined(__ARM_ARCH_EXT_IDIV__)

	ARM_FUNC_START udivsi3
	ARM_FUNC_ALIAS aeabi_uidiv udivsi3

	cmp	r1, #0
	beq	LSYM(Ldiv0)

	udiv	r0, r0, r1
	RET

#else /* ARM version/Thumb-2.  */

	ARM_FUNC_START udivsi3
	ARM_FUNC_ALIAS aeabi_uidiv udivsi3

	/* Note: if called via udivsi3_skip_div0_test, this will unnecessarily
	   check for division-by-zero a second time.  */
LSYM(udivsi3_skip_div0_test):
	subs	r2, r1, #1
	do_it	eq
	RETc(eq)
	bcc	LSYM(Ldiv0)
	cmp	r0, r1
	bls	11f
	tst	r1, r2
	beq	12f

	ARM_DIV_BODY r0, r1, r2, r3

	mov	r0, r2
	RET

11:	do_it	eq, e
	moveq	r0, #1
	movne	r0, #0
	RET

12:	ARM_DIV2_ORDER r1, r2

	mov	r0, r0, lsr r2
	RET

#endif /* ARM version */

	DIV_FUNC_END udivsi3 unsigned

#if defined(__prefer_thumb__)
FUNC_START aeabi_uidivmod
	cmp	r1, #0
	beq	LSYM(Ldiv0)
	push	{r0, r1, lr}
	bl	LSYM(udivsi3_skip_div0_test)
	POP	{r1, r2, r3}
	mul	r2, r0
	sub	r1, r1, r2
	bx	r3
#elif defined(__ARM_ARCH_EXT_IDIV__)
ARM_FUNC_START aeabi_uidivmod
	cmp	r1, #0
	beq	LSYM(Ldiv0)
	mov	r2, r0
	udiv	r0, r0, r1
	mls	r1, r0, r1, r2
	RET
#else
ARM_FUNC_START aeabi_uidivmod
	cmp	r1, #0
	beq	LSYM(Ldiv0)
	stmfd	sp!, { r0, r1, lr }
	bl	LSYM(udivsi3_skip_div0_test)
	ldmfd	sp!, { r1, r2, lr }
	mul	r3, r2, r0
	sub	r1, r1, r3
	RET
#endif
	FUNC_END aeabi_uidivmod

#endif /* L_udivsi3 */
/* ------------------------------------------------------------------------ */
#ifdef L_umodsi3

#ifdef __ARM_ARCH_EXT_IDIV__

	ARM_FUNC_START umodsi3

	cmp	r1, #0
	beq	LSYM(Ldiv0)
	udiv	r2, r0, r1
	mls	r0, r1, r2, r0
	RET

#elif defined(__thumb__)

	FUNC_START umodsi3

	cmp	divisor, #0
	beq	LSYM(Ldiv0)
	mov	curbit, #1
	cmp	dividend, divisor
	bhs	LSYM(Lover10)
	RET

LSYM(Lover10):
	push	{ work }

	THUMB_DIV_MOD_BODY 1

	pop	{ work }
	RET

#else  /* ARM version.  */

	FUNC_START umodsi3

	subs	r2, r1, #1			@ compare divisor with 1
	bcc	LSYM(Ldiv0)
	cmpne	r0, r1				@ compare dividend with divisor
	moveq	r0, #0
	tsthi	r1, r2				@ see if divisor is power of 2
	andeq	r0, r0, r2
	RETc(ls)

	ARM_MOD_BODY r0, r1, r2, r3

	RET

#endif /* ARM version.  */

	DIV_FUNC_END umodsi3 unsigned

#endif /* L_umodsi3 */
6dcd26ea 1090/* ------------------------------------------------------------------------ */
bd28bf5a 1091#ifdef L_divsi3
454e0249 1092
@ __divsi3 / __aeabi_idiv: signed 32-bit division, r0 = r0 / r1.
@ Strategy in all variants: record the result sign (eor of operands),
@ divide magnitudes unsigned, then negate the quotient if needed.
8168ccc0 1093#if defined(__prefer_thumb__)
f36d140e 1094
6dcd26ea 1095	FUNC_START divsi3
9bd05108 1096	FUNC_ALIAS aeabi_idiv divsi3
d5b7b3ae 1097
d5b7b3ae 1098	cmp	divisor, #0
ce250a20 1099	beq	LSYM(Ldiv0)
0c23e1be 1100LSYM(divsi3_skip_div0_test):
d5b7b3ae
RE
 1101	push	{ work }
 1102	mov	work, dividend
 1103	eor	work, divisor		@ Save the sign of the result.
 1104	mov	ip, work
 1105	mov	curbit, #1
 1106	mov	result, #0
 1107	cmp	divisor, #0
ce250a20 1108	bpl	LSYM(Lover10)
d5b7b3ae 1109	neg	divisor, divisor	@ Loops below use unsigned.
ce250a20 1110LSYM(Lover10):
d5b7b3ae 1111	cmp	dividend, #0
ce250a20 1112	bpl	LSYM(Lover11)
d5b7b3ae 1113	neg	dividend, dividend
ce250a20 1114LSYM(Lover11):
d5b7b3ae 1115	cmp	dividend, divisor
ce250a20 1116	blo	LSYM(Lgot_result)
d5b7b3ae 1117
888e552f 1118	THUMB_DIV_MOD_BODY 0
d5b7b3ae 1119
d5b7b3ae
RE
 1120	mov	r0, result
 1121	mov	work, ip
 1122	cmp	work, #0
ce250a20 1123	bpl	LSYM(Lover12)
d5b7b3ae 1124	neg	r0, r0
ce250a20 1125LSYM(Lover12):
d5b7b3ae 1126	pop	{ work }
888e552f 1127	RET
454e0249 1128
425d737b
MGD
 1129#elif defined(__ARM_ARCH_EXT_IDIV__)
 1130
 1131	ARM_FUNC_START divsi3
 1132	ARM_FUNC_ALIAS aeabi_idiv divsi3
 1133
 1134	cmp	r1, #0
 1135	beq	LSYM(Ldiv0)
 1136	sdiv	r0, r0, r1
 1137	RET
 1138
f36d140e 1139#else /* ARM/Thumb-2 version. */
d5b7b3ae 1140
f36d140e
PB
 1141	ARM_FUNC_START divsi3
 1142	ARM_FUNC_ALIAS aeabi_idiv divsi3
 1143
6a436e5e 1144	cmp	r1, #0
ce250a20 1145	beq	LSYM(Ldiv0)
0c23e1be
JB
 1146LSYM(divsi3_skip_div0_test):
 1147	eor	ip, r0, r1			@ save the sign of the result.
f36d140e 1148	do_it	mi
6a436e5e
NP
 1149	rsbmi	r1, r1, #0			@ loops below use unsigned.
 1150	subs	r2, r1, #1			@ division by 1 or -1 ?
 1151	beq	10f
 1152	movs	r3, r0
f36d140e 1153	do_it	mi
6a436e5e
NP
 1154	rsbmi	r3, r0, #0			@ positive dividend value
 1155	cmp	r3, r1
 1156	bls	11f
 1157	tst	r1, r2				@ divisor is power of 2 ?
 1158	beq	12f
 1159
 1160	ARM_DIV_BODY r3, r1, r0, r2
888e552f 1161
@ Apply the saved result sign to the unsigned quotient.
bd28bf5a 1162	cmp	ip, #0
f36d140e 1163	do_it	mi
02689e18 1164	rsbmi	r0, r0, #0
d5b7b3ae 1165	RET
454e0249 1166
@ 10: divisor was +1 or -1: quotient is +/- dividend.
6a436e5e 116710:	teq	ip, r0				@ same sign ?
f36d140e 1168	do_it	mi
6a436e5e
NP
 1169	rsbmi	r0, r0, #0
 1170	RET
 1171
@ 11: |dividend| <= |divisor|: quotient is 0, +1 or -1.
f36d140e
PB
 117211:	do_it	lo
 1173	movlo	r0, #0
 1174	do_it	eq,t
6a436e5e
NP
 1175	moveq	r0, ip, asr #31
 1176	orreq	r0, r0, #1
 1177	RET
 1178
@ 12: power-of-2 divisor: divide by shifting, then fix the sign.
 117912:	ARM_DIV2_ORDER r1, r2
 1180
 1181	cmp	ip, #0
 1182	mov	r0, r3, lsr r2
f36d140e 1183	do_it	mi
6a436e5e
NP
 1184	rsbmi	r0, r0, #0
 1185	RET
 1186
6dcd26ea 1187#endif /* ARM version */
d5b7b3ae 1188
0c23e1be 1189	DIV_FUNC_END divsi3 signed
b355a481 1190
@ __aeabi_idivmod: signed divide+modulo; quotient in r0, remainder in r1.
8168ccc0 1191#if defined(__prefer_thumb__)
db151e9d 1192FUNC_START aeabi_idivmod
0c23e1be
JB
 1193	cmp	r1, #0
 1194	beq	LSYM(Ldiv0)
 1195	push	{r0, r1, lr}
 1196	bl	LSYM(divsi3_skip_div0_test)
db151e9d
PB
@ Pop original dividend -> r1, divisor -> r2, return address -> r3.
 1197	POP	{r1, r2, r3}
 1198	mul	r2, r0
 1199	sub	r1, r1, r2			@ remainder = dividend - q*divisor
 1200	bx	r3
425d737b
MGD
 1201#elif defined(__ARM_ARCH_EXT_IDIV__)
 1202ARM_FUNC_START aeabi_idivmod
 1203	cmp	r1, #0
 1204	beq	LSYM(Ldiv0)
 1205	mov	r2, r0
 1206	sdiv	r0, r0, r1
 1207	mls	r1, r0, r1, r2		@ r1 = r2 - r0*r1
 1208	RET
db151e9d 1209#else
f36d140e 1210ARM_FUNC_START aeabi_idivmod
0c23e1be
JB
 1211	cmp	r1, #0
 1212	beq	LSYM(Ldiv0)
 1213	stmfd	sp!, { r0, r1, lr }
 1214	bl	LSYM(divsi3_skip_div0_test)
b3f8d95d
MM
 1215	ldmfd	sp!, { r1, r2, lr }
 1216	mul	r3, r2, r0
 1217	sub	r1, r1, r3			@ remainder = dividend - q*divisor
 1218	RET
db151e9d 1219#endif
b3f8d95d
MM
 1220	FUNC_END aeabi_idivmod
 1221
bd28bf5a 1222#endif /* L_divsi3 */
6dcd26ea 1223/* ------------------------------------------------------------------------ */
454e0249
DE
 1224#ifdef L_modsi3
 1225
@ __modsi3: signed 32-bit modulo, r0 = r0 % r1.
@ The remainder takes the sign of the dividend (saved in ip / on the
@ stack); magnitudes are divided unsigned.
425d737b 1226#if defined(__ARM_ARCH_EXT_IDIV__)
d5b7b3ae 1227
425d737b
MGD
 1228	ARM_FUNC_START modsi3
 1229
 1230	cmp	r1, #0
 1231	beq	LSYM(Ldiv0)
 1232
 1233	sdiv	r2, r0, r1
 1234	mls	r0, r1, r2, r0		@ r0 = r0 - (r0/r1)*r1
 1235	RET
 1236
 1237#elif defined(__thumb__)
 1238
 1239	FUNC_START modsi3
454e0249 1240
d5b7b3ae
RE
 1241	mov	curbit, #1
 1242	cmp	divisor, #0
ce250a20
RE
 1243	beq	LSYM(Ldiv0)
 1244	bpl	LSYM(Lover10)
d5b7b3ae 1245	neg	divisor, divisor	@ Loops below use unsigned.
ce250a20 1246LSYM(Lover10):
d5b7b3ae
RE
 1247	push	{ work }
 1248	@ Need to save the sign of the dividend, unfortunately, we need
888e552f 1249	@ work later on.  Must do this after saving the original value of
d5b7b3ae
RE
 1250	@ the work register, because we will pop this value off first.
 1251	push	{ dividend }
 1252	cmp	dividend, #0
ce250a20 1253	bpl	LSYM(Lover11)
d5b7b3ae 1254	neg	dividend, dividend
ce250a20 1255LSYM(Lover11):
d5b7b3ae 1256	cmp	dividend, divisor
ce250a20 1257	blo	LSYM(Lgot_result)
d5b7b3ae 1258
888e552f
NC
 1259	THUMB_DIV_MOD_BODY 1
 1260
@ First pop recovers the original (signed) dividend pushed above.
d5b7b3ae
RE
 1261	pop	{ work }
 1262	cmp	work, #0
ce250a20 1263	bpl	LSYM(Lover12)
d5b7b3ae 1264	neg	dividend, dividend
ce250a20 1265LSYM(Lover12):
d5b7b3ae
RE
 1266	pop	{ work }
 1267	RET
7405dc37 1268
6dcd26ea 1269#else /* ARM version. */
d5b7b3ae 1270
425d737b
MGD
 1271	FUNC_START modsi3
 1272
@ rsbmi below uses the N flag from this cmp of the divisor.
6a436e5e 1273	cmp	r1, #0
ce250a20 1274	beq	LSYM(Ldiv0)
6a436e5e
NP
 1275	rsbmi	r1, r1, #0			@ loops below use unsigned.
 1276	movs	ip, r0				@ preserve sign of dividend
 1277	rsbmi	r0, r0, #0			@ if negative make positive
 1278	subs	r2, r1, #1			@ compare divisor with 1
 1279	cmpne	r0, r1				@ compare dividend with divisor
 1280	moveq	r0, #0
 1281	tsthi	r1, r2				@ see if divisor is power of 2
 1282	andeq	r0, r0, r2
 1283	bls	10f
 1284
 1285	ARM_MOD_BODY r0, r1, r2, r3
 1286
@ Restore the sign of the remainder from the original dividend.
 128710:	cmp	ip, #0
 1288	rsbmi	r0, r0, #0
d5b7b3ae 1289	RET
7405dc37 1290
6dcd26ea
RE
 1291#endif /* ARM version */
 1292
0c23e1be 1293	DIV_FUNC_END modsi3 signed
b355a481 1294
454e0249 1295#endif /* L_modsi3 */
6dcd26ea 1296/* ------------------------------------------------------------------------ */
2ecc7cad 1297#ifdef L_dvmd_tls
454e0249 1298
@ Trivial divide-by-zero handlers: simply return to the caller.
@ The EABI entry points are WEAK so an application can override them.
0c23e1be
JB
 1299#ifdef __ARM_EABI__
 1300	WEAK aeabi_idiv0
 1301	WEAK aeabi_ldiv0
 1302	FUNC_START aeabi_idiv0
 1303	FUNC_START aeabi_ldiv0
7405dc37 1304	RET
b3f8d95d
MM
 1305	FUNC_END aeabi_ldiv0
 1306	FUNC_END aeabi_idiv0
0c23e1be
JB
 1307#else
 1308	FUNC_START div0
 1309	RET
496b84c8 1310	FUNC_END div0
0c23e1be 1311#endif
b355a481 1312
454e0249 1313#endif /* L_dvmd_tls */
6dcd26ea 1314/* ------------------------------------------------------------------------ */
75d3a15b
NC
 1315#ifdef L_dvmd_lnx
 1316@ GNU/Linux division-by-zero handler.  Used in place of L_dvmd_tls
 1317
5d7c0add 1318/* Constant taken from <asm/signal.h>. */
d71ebc32 1319#define SIGFPE	8
d71ebc32 1320
@ Raise SIGFPE in the offending process via the C library's raise(),
@ then return to the caller as if the divide had completed.
0c23e1be
JB
 1321#ifdef __ARM_EABI__
 1322	WEAK aeabi_idiv0
 1323	WEAK aeabi_ldiv0
 1324	ARM_FUNC_START aeabi_idiv0
 1325	ARM_FUNC_START aeabi_ldiv0
 1326#else
3303be15 1327	ARM_FUNC_START div0
0c23e1be 1328#endif
d5b7b3ae 1329
3303be15 1330	do_push	{r1, lr}
5d7c0add
DJ
 1331	mov	r0, #SIGFPE
 1332	bl	SYM(raise) __PLT__
496b84c8 1333	RETLDM	r1
7405dc37 1334
0c23e1be
JB
 1335#ifdef __ARM_EABI__
 1336	FUNC_END aeabi_ldiv0
 1337	FUNC_END aeabi_idiv0
 1338#else
496b84c8 1339	FUNC_END div0
0c23e1be 1340#endif
b355a481 1341
75d3a15b 1342#endif /* L_dvmd_lnx */
8a66e987
JM
 1343#ifdef L_clear_cache
 1344#if defined __ARM_EABI__ && defined __linux__
 1345@ EABI GNU/Linux call to cacheflush syscall.
@ Builds syscall number 0x0f0002 in r7 (the ARM-private cacheflush
@ call; movw/movt on v6T2+, two instructions otherwise), zeroes the
@ flags argument in r2, and traps.  r0/r1 are presumably the
@ start/end addresses passed by the caller -- confirm against callers.
0244b03f
PB
 1346	ARM_FUNC_START clear_cache
 1347	do_push	{r7}
8a66e987
JM
 1348#if __ARM_ARCH__ >= 7 || defined(__ARM_ARCH_6T2__)
 1349	movw	r7, #2
 1350	movt	r7, #0xf
 1351#else
 1352	mov	r7, #0xf0000
 1353	add	r7, r7, #2
 1354#endif
 1355	mov	r2, #0
 1356	swi	0
0244b03f 1357	do_pop	{r7}
8a66e987
JM
 1358	RET
 1359	FUNC_END clear_cache
 1360#else
 1361#error "This is only for ARM EABI GNU/Linux"
 1362#endif
 1363#endif /* L_clear_cache */
6dcd26ea 1364/* ------------------------------------------------------------------------ */
dc491742
RE
 1365/* Dword shift operations.  */
 1366/* All the following Dword shift variants rely on the fact that
 1367	shft xxx, Reg
 1368   is in fact done as
 1369	shft xxx, (Reg & 255)
 1370   so for Reg value in (32...63) and (-1...-31) we will get zero (in the
 1371   case of logical shifts) or the sign (for asr). */
 1372
 1373#ifdef __ARMEB__
 1374#define al	r1
 1375#define ah	r0
 1376#else
 1377#define al	r0
 1378#define ah	r1
 1379#endif
 1380
c0354bf4
PB
 1381/* Prevent __aeabi double-word shifts from being produced on SymbianOS.  */
 1382#ifndef __symbian__
 1383
dc491742
RE
 1384#ifdef L_lshrdi3
 1385
@ __lshrdi3 / __aeabi_llsr: 64-bit logical shift right by r2 (0..63).
@ Low word = (al >> n) | (ah << (32-n)) | (ah >> (n-32)); exactly one
@ of the two ah terms is nonzero thanks to the (Reg & 255) behaviour
@ documented above.
 1386	FUNC_START lshrdi3
2155b886 1387	FUNC_ALIAS aeabi_llsr lshrdi3
b3f8d95d 1388
dc491742
RE
 1389#ifdef __thumb__
 1390	lsr	al, r2
 1391	mov	r3, ah
 1392	lsr	ah, r2
 1393	mov	ip, r3
 1394	sub	r2, #32
 1395	lsr	r3, r2
 1396	orr	al, r3
 1397	neg	r2, r2
 1398	mov	r3, ip
 1399	lsl	r3, r2
 1400	orr	al, r3
 1401	RET
#else
 1403	subs	r3, r2, #32		@ r3 = n - 32, mi when n < 32
 1404	rsb	ip, r2, #32		@ ip = 32 - n
 1405	movmi	al, al, lsr r2
 1406	movpl	al, ah, lsr r3
 1407	orrmi	al, al, ah, lsl ip
 1408	mov	ah, ah, lsr r2
 1409	RET
 1410#endif
b3f8d95d 1411	FUNC_END aeabi_llsr
dc491742
RE
 1412	FUNC_END lshrdi3
 1413
 1414#endif
 1415
 1416#ifdef L_ashrdi3
 1417
@ __ashrdi3 / __aeabi_lasr: 64-bit arithmetic shift right by r2.
@ Same scheme as lshrdi3 but the high word uses asr; the Thumb-1
@ version must skip the (n-32) contribution when n < 32, since asr by
@ a negative count would smear the sign bit into the low word.
 1418	FUNC_START ashrdi3
2155b886 1419	FUNC_ALIAS aeabi_lasr ashrdi3
b3f8d95d 1420
dc491742
RE
 1421#ifdef __thumb__
 1422	lsr	al, r2
 1423	mov	r3, ah
 1424	asr	ah, r2
 1425	sub	r2, #32
 1426	@ If r2 is negative at this point the following step would OR
 1427	@ the sign bit into all of AL.  That's not what we want...
 1428	bmi	1f
 1429	mov	ip, r3
 1430	asr	r3, r2
 1431	orr	al, r3
 1432	mov	r3, ip
 14331:
 1434	neg	r2, r2
 1435	lsl	r3, r2
 1436	orr	al, r3
 1437	RET
 1438#else
 1439	subs	r3, r2, #32
 1440	rsb	ip, r2, #32
 1441	movmi	al, al, lsr r2
 1442	movpl	al, ah, asr r3
 1443	orrmi	al, al, ah, lsl ip
 1444	mov	ah, ah, asr r2
 1445	RET
 1446#endif
 1447
b3f8d95d 1448	FUNC_END aeabi_lasr
dc491742
RE
 1449	FUNC_END ashrdi3
 1450
 1451#endif
 1452
 1453#ifdef L_ashldi3
 1454
@ __ashldi3 / __aeabi_llsl: 64-bit shift left by r2 (mirror image of
@ lshrdi3: high word collects the bits shifted out of the low word).
 1455	FUNC_START ashldi3
2155b886 1456	FUNC_ALIAS aeabi_llsl ashldi3
b3f8d95d 1457
dc491742
RE
 1458#ifdef __thumb__
 1459	lsl	ah, r2
 1460	mov	r3, al
 1461	lsl	al, r2
 1462	mov	ip, r3
 1463	sub	r2, #32
 1464	lsl	r3, r2
 1465	orr	ah, r3
 1466	neg	r2, r2
 1467	mov	r3, ip
 1468	lsr	r3, r2
 1469	orr	ah, r3
 1470	RET
 1471#else
 1472	subs	r3, r2, #32
 1473	rsb	ip, r2, #32
 1474	movmi	ah, ah, lsl r2
 1475	movpl	ah, al, lsl r3
 1476	orrmi	ah, ah, al, lsr ip
 1477	mov	al, al, lsl r2
 1478	RET
 1479#endif
b3f8d95d 1480	FUNC_END aeabi_llsl
dc491742
RE
 1481	FUNC_END ashldi3
 1482
 1483#endif
1484
c0354bf4
PB
1485#endif /* __symbian__ */
1486
ef0a4b67
PB
 1487#if ((__ARM_ARCH__ > 5) && !defined(__ARM_ARCH_6M__)) \
 1488	|| defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
 1489	|| defined(__ARM_ARCH_5TEJ__)
 1490#define HAVE_ARM_CLZ 1
 1491#endif
 1492
 1493#ifdef L_clzsi2
@ __clzsi2: count leading zeros of r0.
@ Software version: binary search -- normalise r0 in 16/8/4-bit steps
@ while decrementing the bias in r1 from 28, then look up the final
@ nibble in the 16-entry table at 1: and add the bias.
 1494#if defined(__ARM_ARCH_6M__)
 1495FUNC_START clzsi2
 1496	mov	r1, #28
 1497	mov	r3, #1
 1498	lsl	r3, r3, #16
 1499	cmp	r0, r3 /* 0x10000 */
 1500	bcc	2f
 1501	lsr	r0, r0, #16
 1502	sub	r1, r1, #16
 15032:	lsr	r3, r3, #8
 1504	cmp	r0, r3 /* #0x100 */
 1505	bcc	2f
 1506	lsr	r0, r0, #8
 1507	sub	r1, r1, #8
 15082:	lsr	r3, r3, #4
 1509	cmp	r0, r3 /* #0x10 */
 1510	bcc	2f
 1511	lsr	r0, r0, #4
 1512	sub	r1, r1, #4
 15132:	adr	r2, 1f
 1514	ldrb	r0, [r2, r0]
 1515	add	r0, r0, r1
 1516	bx lr
 1517.align 2
 15181:
 1519.byte	4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0
 1520	FUNC_END clzsi2
 1521#else
ARM_FUNC_START clzsi2
 1523# if defined(HAVE_ARM_CLZ)
 1524	clz	r0, r0
 1525	RET
 1526# else
 1527	mov	r1, #28
 1528	cmp	r0, #0x10000
 1529	do_it	cs, t
 1530	movcs	r0, r0, lsr #16
 1531	subcs	r1, r1, #16
 1532	cmp	r0, #0x100
 1533	do_it	cs, t
 1534	movcs	r0, r0, lsr #8
 1535	subcs	r1, r1, #8
 1536	cmp	r0, #0x10
 1537	do_it	cs, t
 1538	movcs	r0, r0, lsr #4
 1539	subcs	r1, r1, #4
 1540	adr	r2, 1f
 1541	ldrb	r0, [r2, r0]
 1542	add	r0, r0, r1
007403f3 1543	RET
ef0a4b67
PB
 1544.align 2
 15451:
 1546.byte	4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0
 1547# endif /* !HAVE_ARM_CLZ */
 1548	FUNC_END clzsi2
 1549#endif
 1550#endif /* L_clzsi2 */
 1551
 1552#ifdef L_clzdi2
@ __clzdi2: count leading zeros of the 64-bit value in xxh:xxl by
@ delegating to __clzsi2 on the high word, or on the low word plus 32
@ when the high word is zero.
 1553#if !defined(HAVE_ARM_CLZ)
 1554
 1555# if defined(__ARM_ARCH_6M__)
 1556FUNC_START clzdi2
 1557	push	{r4, lr}
 1558# else
 1559ARM_FUNC_START clzdi2
 1560	do_push	{r4, lr}
 1561# endif
 1562	cmp	xxh, #0
 1563	bne	1f
 1564# ifdef __ARMEB__
 1565	mov	r0, xxl
 1566	bl	__clzsi2
 1567	add	r0, r0, #32
 1568	b 2f
 15691:
 1570	bl	__clzsi2
 1571# else
 1572	bl	__clzsi2
 1573	add	r0, r0, #32
 1574	b	2f
 15751:
 1576	mov	r0, xxh
 1577	bl	__clzsi2
 1578# endif
 15792:
 1580# if defined(__ARM_ARCH_6M__)
 1581	pop	{r4, pc}
 1582# else
 1583	RETLDM	r4
 1584# endif
 1585	FUNC_END clzdi2
 1586
 1587#else /* HAVE_ARM_CLZ */
 1588
ARM_FUNC_START clzdi2
 1590	cmp	xxh, #0
 1591	do_it	eq, et
 1592	clzeq	r0, xxl
 1593	clzne	r0, xxh
 1594	addeq	r0, r0, #32
 1595	RET
 1596	FUNC_END clzdi2
 1597
 1598#endif
 1599#endif /* L_clzdi2 */
 1600
06ea7371
RE
 1601#ifdef L_ctzsi2
@ __ctzsi2: count trailing zeros of r0.  First isolate the lowest set
@ bit with r0 & -r0, then count its position -- via clz (31 - clz) or
@ the same binary search as clzsi2 with a ctz-valued table.
 1602#if defined(__ARM_ARCH_6M__)
 1603FUNC_START ctzsi2
 1604	neg	r1, r0
 1605	and	r0, r0, r1
 1606	mov	r1, #28
 1607	mov	r3, #1
 1608	lsl	r3, r3, #16
 1609	cmp	r0, r3 /* 0x10000 */
 1610	bcc	2f
 1611	lsr	r0, r0, #16
 1612	sub	r1, r1, #16
 16132:	lsr	r3, r3, #8
 1614	cmp	r0, r3 /* #0x100 */
 1615	bcc	2f
 1616	lsr	r0, r0, #8
 1617	sub	r1, r1, #8
 16182:	lsr	r3, r3, #4
 1619	cmp	r0, r3 /* #0x10 */
 1620	bcc	2f
 1621	lsr	r0, r0, #4
 1622	sub	r1, r1, #4
 16232:	adr	r2, 1f
 1624	ldrb	r0, [r2, r0]
 1625	sub	r0, r0, r1
 1626	bx	lr
 1627.align 2
 16281:
 1629.byte	27, 28, 29, 29, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31
 1630	FUNC_END ctzsi2
 1631#else
ARM_FUNC_START ctzsi2
 1633	rsb	r1, r0, #0
 1634	and	r0, r0, r1
 1635# if defined(HAVE_ARM_CLZ)
 1636	clz	r0, r0
 1637	rsb	r0, r0, #31
 1638	RET
 1639# else
 1640	mov	r1, #28
 1641	cmp	r0, #0x10000
 1642	do_it	cs, t
 1643	movcs	r0, r0, lsr #16
 1644	subcs	r1, r1, #16
 1645	cmp	r0, #0x100
 1646	do_it	cs, t
 1647	movcs	r0, r0, lsr #8
 1648	subcs	r1, r1, #8
 1649	cmp	r0, #0x10
 1650	do_it	cs, t
 1651	movcs	r0, r0, lsr #4
 1652	subcs	r1, r1, #4
 1653	adr	r2, 1f
 1654	ldrb	r0, [r2, r0]
 1655	sub	r0, r0, r1
 1656	RET
 1657.align 2
 16581:
 1659.byte	27, 28, 29, 29, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31
 1660# endif /* !HAVE_ARM_CLZ */
 1661	FUNC_END ctzsi2
 1662#endif
 1663#endif /* L_ctzsi2 */
1664
dc491742 1665/* ------------------------------------------------------------------------ */
75d3a15b
NC
1666/* These next two sections are here despite the fact that they contain Thumb
1667 assembler because their presence allows interworked code to be linked even
1668 when the GCC library is this one. */
1669
c84df4c5
NC
1670/* Do not build the interworking functions when the target architecture does
1671 not support Thumb instructions. (This can be a multilib option). */
e0d4a859
PB
1672#if defined __ARM_ARCH_4T__ || defined __ARM_ARCH_5T__\
1673 || defined __ARM_ARCH_5TE__ || defined __ARM_ARCH_5TEJ__ \
1674 || __ARM_ARCH__ >= 6
1675
 1676#if defined L_call_via_rX
75d3a15b
NC
 1677
 1678/* These labels & instructions are used by the Arm/Thumb interworking code.
 1679   The address of function to be called is loaded into a register and then
 1680   one of these labels is called via a BL instruction.  This puts the
 1681   return address into the link register with the bottom bit set, and the
 1682   code here switches to the correct mode before executing the function.  */
 1683
 1684	.text
 1685	.align 0
ec8aac6f 1686	.force_thumb
7405dc37 1687
@ Emits one Thumb trampoline _call_via_<reg>: a bare "bx <reg>", so
@ the mode switch is decided by the target address's bottom bit.
75d3a15b 1688.macro call_via register
6dcd26ea
RE
 1689	THUMB_FUNC_START _call_via_\register
 1690
75d3a15b
NC
 1691	bx	\register
 1692	nop
2a5307b1
NC
 1693
 1694	SIZE	(_call_via_\register)
75d3a15b
NC
 1695.endm
 1696
 1697	call_via r0
 1698	call_via r1
 1699	call_via r2
 1700	call_via r3
 1701	call_via r4
 1702	call_via r5
 1703	call_via r6
 1704	call_via r7
 1705	call_via r8
 1706	call_via r9
 1707	call_via sl
 1708	call_via fp
 1709	call_via ip
 1710	call_via sp
 1711	call_via lr
 1712
 1713#endif /* L_call_via_rX */
e0d4a859 1714
5b3e6663 1715/* Don't bother with the old interworking routines for Thumb-2. */
bf98ec6c
PB
1716/* ??? Maybe only omit these on "m" variants. */
1717#if !defined(__thumb2__) && !defined(__ARM_ARCH_6M__)
5b3e6663 1718
e0d4a859 1719#if defined L_interwork_call_via_rX
7405dc37 1720
75d3a15b
NC
 1721/* These labels & instructions are used by the Arm/Thumb interworking code,
 1722   when the target address is in an unknown instruction set.  The address
 1723   of function to be called is loaded into a register and then one of these
 1724   labels is called via a BL instruction.  This puts the return address
 1725   into the link register with the bottom bit set, and the code here
 1726   switches to the correct mode before executing the function.  Unfortunately
 1727   the target code cannot be relied upon to return via a BX instruction, so
 1728   instead we have to store the return address on the stack and allow the
 1729   called function to return here instead.  Upon return we recover the real
a2503645
RS
 1730   return address and use a BX to get back to Thumb mode.
 1731
 1732   There are three variations of this code.  The first,
 1733   _interwork_call_via_rN(), will push the return address onto the
 1734   stack and pop it in _arm_return().  It should only be used if all
 1735   arguments are passed in registers.
 1736
 1737   The second, _interwork_r7_call_via_rN(), instead stores the return
 1738   address at [r7, #-4].  It is the caller's responsibility to ensure
 1739   that this address is valid and contains no useful data.
 1740
 1741   The third, _interwork_r11_call_via_rN(), works in the same way but
 1742   uses r11 instead of r7.  It is useful if the caller does not really
 1743   need a frame pointer.  */
75d3a15b
NC
 1744
 1745	.text
 1746	.align 0
 1747
 1748	.code   32
2a5307b1 1749	.globl _arm_return
d0f11b16
DJ
LSYM(Lstart_arm_return):
 1751	cfi_start	LSYM(Lstart_arm_return) LSYM(Lend_arm_return)
 1752	cfi_push	0, 0xe, -0x8, 0x8
 1753	nop	@ This nop is for the benefit of debuggers, so that
 1754		@ backtraces will use the correct unwind information.
496b84c8 1755_arm_return:
d0f11b16
DJ
 1756	RETLDM	unwind=LSYM(Lstart_arm_return)
 1757	cfi_end	LSYM(Lend_arm_return)
a2503645
RS
 1758
 1759	.globl _arm_return_r7
_arm_return_r7:
 1761	ldr	lr, [r7, #-4]
 1762	bx	lr
 1763
 1764	.globl _arm_return_r11
_arm_return_r11:
 1766	ldr	lr, [r11, #-4]
 1767	bx	lr
 1768
@ NOTE(review): the fourth macro argument `return` is never referenced
@ in the body below.
.macro interwork_with_frame frame, register, name, return
 1770	.code	16
 1771
 1772	THUMB_FUNC_START \name
 1773
 1774	bx	pc
 1775	nop
 1776
 1777	.code	32
 1778	tst	\register, #1
 1779	streq	lr, [\frame, #-4]
 1780	adreq	lr, _arm_return_\frame
 1781	bx	\register
 1782
 1783	SIZE	(\name)
 1784.endm
75d3a15b 1785
496b84c8
RE
@ Emits _interwork_call_via_<reg> plus its r7/r11 framed variants.
@ If the target is ARM code (bit 0 clear) the return address is parked
@ (on the stack, or at [frame, #-4]) and lr is redirected to the
@ matching _arm_return stub, which BXes back to the Thumb caller.
.macro interwork register
 1787	.code	16
6dcd26ea
RE
 1788
 1789	THUMB_FUNC_START _interwork_call_via_\register
 1790
496b84c8 1791	bx	pc
75d3a15b 1792	nop
496b84c8
RE
 1793
 1794	.code	32
 1795	.globl LSYM(Lchange_\register)
LSYM(Lchange_\register):
75d3a15b 1797	tst	\register, #1
d0f11b16 1798	streq	lr, [sp, #-8]!
75d3a15b
NC
 1799	adreq	lr, _arm_return
 1800	bx	\register
2a5307b1
NC
 1801
 1802	SIZE	(_interwork_call_via_\register)
a2503645
RS
 1803
 1804	interwork_with_frame r7,\register,_interwork_r7_call_via_\register
 1805	interwork_with_frame r11,\register,_interwork_r11_call_via_\register
75d3a15b
NC
 1806.endm
 1807
 1808	interwork r0
 1809	interwork r1
 1810	interwork r2
 1811	interwork r3
 1812	interwork r4
 1813	interwork r5
 1814	interwork r6
 1815	interwork r7
 1816	interwork r8
 1817	interwork r9
 1818	interwork sl
 1819	interwork fp
 1820	interwork ip
 1821	interwork sp
2a5307b1 1822
6dcd26ea 1823	/* The LR case has to be handled a little differently... */
2a5307b1 1824	.code 16
6dcd26ea
RE
 1825
 1826	THUMB_FUNC_START _interwork_call_via_lr
 1827
2a5307b1
NC
 1828	bx 	pc
 1829	nop
 1830
 1831	.code 32
 1832	.globl .Lchange_lr
.Lchange_lr:
 1834	tst	lr, #1
d0f11b16 1835	stmeqdb	r13!, {lr, pc}
2a5307b1
NC
@ lr itself is the call target here, so it is moved to ip before lr
@ is (conditionally) redirected to _arm_return.
 1836	mov	ip, lr
 1837	adreq	lr, _arm_return
 1838	bx	ip
 1839
 1840	SIZE	(_interwork_call_via_lr)
 1841
75d3a15b 1842#endif /* L_interwork_call_via_rX */
5b3e6663 1843#endif /* !__thumb2__ */
907dd0c7
RE
1844
1845/* Functions to support compact pic switch tables in thumb1 state.
1846 All these routines take an index into the table in r0. The
1847 table is at LR & ~1 (but this must be rounded up in the case
1848 of 32-bit entires). They are only permitted to clobber r12
1849 and r14 and r0 must be preserved on exit. */
 1850#ifdef L_thumb1_case_sqi
 1851
@ __gnu_thumb1_case_sqi: signed-byte switch-table dispatch.
@ The table of byte offsets sits at lr & ~1 (lr still has the Thumb
@ bit set); entry r0 is loaded sign-extended, doubled (Thumb
@ instructions are 2 bytes) and added to lr.  r0 is preserved.
 1852	.text
 1853	.align 0
 1854        .force_thumb
 1855	.syntax unified
 1856	THUMB_FUNC_START __gnu_thumb1_case_sqi
 1857	push	{r1}
 1858	mov	r1, lr
 1859	lsrs	r1, r1, #1
 1860	lsls	r1, r1, #1
 1861	ldrsb	r1, [r1, r0]
 1862	lsls	r1, r1, #1
 1863	add	lr, lr, r1
 1864	pop	{r1}
 1865	bx	lr
 1866	SIZE (__gnu_thumb1_case_sqi)
 1867#endif
 1868
 1869#ifdef L_thumb1_case_uqi
 1870
@ __gnu_thumb1_case_uqi: as the sqi variant but the table holds
@ unsigned byte offsets (ldrb instead of ldrsb).
 1871	.text
 1872	.align 0
 1873        .force_thumb
 1874	.syntax unified
 1875	THUMB_FUNC_START __gnu_thumb1_case_uqi
 1876	push	{r1}
 1877	mov	r1, lr
 1878	lsrs	r1, r1, #1
 1879	lsls	r1, r1, #1
 1880	ldrb	r1, [r1, r0]
 1881	lsls	r1, r1, #1
 1882	add	lr, lr, r1
 1883	pop	{r1}
 1884	bx	lr
 1885	SIZE (__gnu_thumb1_case_uqi)
 1886#endif
 1887
 1888#ifdef L_thumb1_case_shi
 1889
@ __gnu_thumb1_case_shi: signed-halfword table; the index is doubled
@ before the load to scale it to 2-byte entries.
 1890	.text
 1891	.align 0
 1892        .force_thumb
 1893	.syntax unified
 1894	THUMB_FUNC_START __gnu_thumb1_case_shi
 1895	push	{r0, r1}
 1896	mov	r1, lr
 1897	lsrs	r1, r1, #1
 1898	lsls	r0, r0, #1
 1899	lsls	r1, r1, #1
 1900	ldrsh	r1, [r1, r0]
 1901	lsls	r1, r1, #1
 1902	add	lr, lr, r1
 1903	pop	{r0, r1}
 1904	bx	lr
 1905	SIZE (__gnu_thumb1_case_shi)
 1906#endif
 1907
 1908#ifdef L_thumb1_case_uhi
 1909
@ __gnu_thumb1_case_uhi: as the shi variant but unsigned (ldrh).
 1910	.text
 1911	.align 0
 1912        .force_thumb
 1913	.syntax unified
 1914	THUMB_FUNC_START __gnu_thumb1_case_uhi
 1915	push	{r0, r1}
 1916	mov	r1, lr
 1917	lsrs	r1, r1, #1
 1918	lsls	r0, r0, #1
 1919	lsls	r1, r1, #1
 1920	ldrh	r1, [r1, r0]
 1921	lsls	r1, r1, #1
 1922	add	lr, lr, r1
 1923	pop	{r0, r1}
 1924	bx	lr
 1925	SIZE (__gnu_thumb1_case_uhi)
 1926#endif
 1927
 1928#ifdef L_thumb1_case_si
 1929
@ __gnu_thumb1_case_si: 32-bit entries; the table address must be
@ rounded up to a word boundary (the +2 before the shifts), and the
@ loaded word is a table-relative offset, not a halfword count.
 1930	.text
 1931	.align 0
 1932        .force_thumb
 1933	.syntax unified
 1934	THUMB_FUNC_START __gnu_thumb1_case_si
 1935	push	{r0, r1}
 1936	mov	r1, lr
 1937	adds.n	r1, r1, #2	/* Align to word.  */
 1938	lsrs	r1, r1, #2
 1939	lsls	r0, r0, #2
 1940	lsls	r1, r1, #2
 1941	ldr	r0, [r1, r0]
 1942	adds	r0, r0, r1
 1943	mov	lr, r0
 1944	pop	{r0, r1}
 1945	mov	pc, lr		/* We know we were called from thumb code.  */
 1946	SIZE (__gnu_thumb1_case_si)
 1947#endif
1948
e0d4a859 1949#endif /* Arch supports thumb. */
4202ce82 1950
f9a02408 1951#ifndef __symbian__
bf98ec6c 1952#ifndef __ARM_ARCH_6M__
4202ce82 1953#include "ieee754-df.S"
4202ce82 1954#include "ieee754-sf.S"
b3f8d95d 1955#include "bpabi.S"
bf98ec6c
PB
1956#else /* __ARM_ARCH_6M__ */
1957#include "bpabi-v6m.S"
1958#endif /* __ARM_ARCH_6M__ */
1959#endif /* !__symbian__ */
This page took 3.540074 seconds and 5 git commands to generate.