This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.



[csl-arm] Backport assembly routines


I've backported the assembly DImode shift and float conversion routines from 
mainline to csl-arm-branch.

Tested with cross to arm-none-elf.

Paul

2004-09-07  Paul Brook  <paul@codesourcery.com>

 Backport from mainline.
 2004-09-01  Richard Earnshaw  <rearnsha@arm.com>
 * arm/ieee754-df.S (aeabi_ul2d, aeabi_l2d, floatundidf)
 (floatdidf): New functions.
 * arm/ieee754-sf.S (aeabi_ul2f, aeabi_l2f, floatundisf)
 (floatdisf): New functions.
 * t-arm-elf: Use them.

 2004-05-15  Richard Earnshaw  <rearnsha@arm.com>
 * arm/lib1funcs.asm (_lshrdi3, _ashrdi3, _ashldi3): Add ASM
 implementations for ARM and Thumb.
 * arm/t-arm-elf (LIB1ASMFUNCS): Use them.
Index: ieee754-df.S
===================================================================
RCS file: /var/cvsroot/gcc-cvs/gcc/gcc/config/arm/ieee754-df.S,v
retrieving revision 1.3.10.3
diff -u -p -r1.3.10.3 ieee754-df.S
--- ieee754-df.S	18 Aug 2004 03:57:45 -0000	1.3.10.3
+++ ieee754-df.S	7 Sep 2004 15:34:31 -0000
@@ -449,6 +449,83 @@ ARM_FUNC_ALIAS aeabi_f2d extendsfdf2
 	FUNC_END aeabi_f2d
 	FUNC_END extendsfdf2
 
+ARM_FUNC_START floatundidf
+ARM_FUNC_ALIAS aeabi_ul2d floatundidf
+	
+	orrs	r2, r0, r1
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+	mvfeqd	f0, #0.0
+#endif
+	RETc(eq)
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+	@ For hard FPA code we want to return via the tail below so that
+	@ we can return the result in f0 as well as in r0/r1 for backwards
+	@ compatibility.
+	adr	ip, 1f
+	stmfd	sp!, {r4, r5, ip, lr}
+#else
+	stmfd	sp!, {r4, r5, lr}
+#endif
+	mov	r5, #0
+	b	2f
+
+ARM_FUNC_START floatdidf
+ARM_FUNC_ALIAS aeabi_l2d floatdidf
+	orrs	r2, r0, r1
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+	mvfeqd	f0, #0.0
+#endif
+	RETc(eq)
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+	@ For hard FPA code we want to return via the tail below so that
+	@ we can return the result in f0 as well as in r0/r1 for backwards
+	@ compatibility.
+	adr	ip, 1f
+	stmfd	sp!, {r4, r5, ip, lr}
+#else
+	stmfd	sp!, {r4, r5, lr}
+#endif
+	ands	r5, ah, #0x80000000	@ sign bit in r5
+	bpl	2f
+	rsbs	al, al, #0
+	rsc	ah, ah, #0
+2:
+	mov	r4, #(0x400 << 20)	@ initial exponent
+	add	r4, r4, #((52 - 1) << 20)
+#if !defined (__VFP_FP__) && !defined(__ARMEB__)
+	@ FPA little-endian: must swap the word order.
+	mov	ip, al
+	mov	xh, ah
+	mov	xl, ip
+#endif
+	movs	ip, xh, lsr #23
+	beq	LSYM(Lad_p)
+	@ The value's too big.  Scale it down a bit...
+	mov	r2, #3
+	movs	ip, ip, lsr #3
+	addne	r2, r2, #3
+	movs	ip, ip, lsr #3
+	addne	r2, r2, #3
+	rsb	r3, r2, #32
+	mov	ip, xl, lsl r3
+	mov	xl, xl, lsr r2
+	orr	xl, xl, xh, lsl r3
+	mov	xh, xh, lsr r2
+	add	r4, r4, r2, lsl #20
+	b	LSYM(Lad_p)
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+1:
+	@ Legacy code expects the result to be returned in f0.  Copy it
+	@ there as well.
+	stmfd	sp!, {r0, r1}
+	ldfd	f0, [sp], #8
+	RETLDM
+#endif
+	FUNC_END floatdidf
+	FUNC_END aeabi_l2d
+	FUNC_END floatundidf
+	FUNC_END aeabi_ul2d
+
 #endif /* L_addsubdf3 */
 
 #ifdef L_muldivdf3
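
For reference, here is a rough C model of what the new floatundidf and
floatdidf entry points compute: take the magnitude, find its leading 1
bit, and build the IEEE bit pattern from a biased exponent, which is
the role of the (0x400 << 20) + ((52 - 1) << 20) constant seeded into
r4 above.  The assembly actually branches into the shared adddf3
normalisation and rounding code at LSYM(Lad_p); the sketch below
inlines that step, and the names in it are mine, not the branch's.

#include <stdint.h>
#include <string.h>

/* Illustrative only: int64 -> double, round to nearest even.  */
static double
model_floatdidf (int64_t x)
{
  if (x == 0)
    return 0.0;				/* the early RETc(eq) return */
  uint64_t sign = (uint64_t) x & 0x8000000000000000ull;
  uint64_t mag = sign ? -(uint64_t) x : (uint64_t) x;
  int top = 63 - __builtin_clzll (mag);	/* index of the leading 1 */
  uint64_t m;
  if (top <= 52)
    m = mag << (52 - top);		/* fits exactly, no rounding */
  else
    {
      /* "The value's too big.  Scale it down a bit..."  */
      int s = top - 52;
      uint64_t lost = mag & (((uint64_t) 1 << s) - 1);
      uint64_t half = (uint64_t) 1 << (s - 1);
      m = mag >> s;
      if (lost > half || (lost == half && (m & 1)))
	m++;				/* round to nearest even */
    }
  /* Adding m with its implicit bit 52 still set lets a rounding
     carry propagate into the biased exponent automatically.  */
  uint64_t bits = sign + ((uint64_t) (1023 + top) << 52)
		  + m - ((uint64_t) 1 << 52);
  double d;
  memcpy (&d, &bits, sizeof d);
  return d;
}

floatundidf is the same with the sign fixed at zero, which is what the
mov r5, #0 above does.
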
Index: ieee754-sf.S
===================================================================
RCS file: /var/cvsroot/gcc-cvs/gcc/gcc/config/arm/ieee754-sf.S,v
retrieving revision 1.3.10.2
diff -u -p -r1.3.10.2 ieee754-sf.S
--- ieee754-sf.S	18 Aug 2004 03:57:45 -0000	1.3.10.2
+++ ieee754-sf.S	7 Sep 2004 15:34:31 -0000
@@ -290,6 +290,7 @@ ARM_FUNC_ALIAS aeabi_i2f floatsisf
 1:	teq	r0, #0
 	RETc(eq)
 
+3:
 	mov	r1, #0
 	mov	r2, #((127 + 23) << 23)
 	tst	r0, #0xfc000000
@@ -297,7 +298,8 @@ ARM_FUNC_ALIAS aeabi_i2f floatsisf
 
 	@ We need to scale the value a little before branching to code above.
 	tst	r0, #0xf0000000
-	movne	r1, r0, lsl #28
+4:
+	orrne	r1, r1, r0, lsl #28
 	movne	r0, r0, lsr #4
 	addne	r2, r2, #(4 << 23)
 	tst	r0, #0x0c000000
@@ -313,6 +315,110 @@ ARM_FUNC_ALIAS aeabi_i2f floatsisf
 	FUNC_END aeabi_ui2f
 	FUNC_END floatunsisf
 
+ARM_FUNC_START floatundisf
+ARM_FUNC_ALIAS aeabi_ul2f floatundisf
+	orrs	r2, r0, r1
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+	mvfeqs	f0, #0.0
+#endif
+	RETc(eq)
+	
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+	@ For hard FPA code we want to return via the tail below so that
+	@ we can return the result in f0 as well as in r0 for backwards
+	@ compatibility.
+	str	lr, [sp, #-4]!
+	adr	lr, 4f
+#endif
+
+	mov	r3, #0
+	b	2f
+
+ARM_FUNC_START floatdisf
+ARM_FUNC_ALIAS aeabi_l2f floatdisf
+
+	orrs	r2, r0, r1
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+	mvfeqs	f0, #0.0
+#endif
+	RETc(eq)
+	
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+	@ For hard FPA code we want to return via the tail below so that
+	@ we can return the result in f0 as well as in r0 for backwards
+	@ compatibility.
+	str	lr, [sp, #-4]!
+	adr	lr, 4f
+#endif
+	ands	r3, ah, #0x80000000	@ sign bit in r3
+	bpl	2f
+	rsbs	al, al, #0
+	rsc	ah, ah, #0
+2:
+	movs	ip, ah
+#ifdef __ARMEB__
+	moveq	r0, al
+#endif
+	beq	3b
+	mov	r2, #((127 + 23 + 32) << 23)	@ initial exponent
+#ifndef __ARMEB__
+	mov	r1, al
+	mov	r0, ip
+#endif
+	tst	r0, #0xfc000000
+	bne	3f
+
+#if __ARM_ARCH__ < 5
+	cmp	r0, #(1 << 13)
+	movlo	ip, #13
+	movlo	r0, r0, lsl #13
+	movhs	ip, #0
+	tst	r0, #0x03fc0000
+	addeq	ip, ip, #8
+	moveq	r0, r0, lsl #8
+	tst	r0, #0x03c00000
+	addeq	ip, ip, #4
+	moveq	r0, r0, lsl #4
+	tst	r0, #0x03000000
+	addeq	ip, ip, #2
+	moveq	r0, r0, lsl #2
+#else
+	clz	ip, r0
+	sub	ip, ip, #6
+	mov	r0, r0, lsl ip
+#endif
+	sub	r2, r2, ip, lsl #23
+	rsb	ip, ip, #32
+	orr	r0, r0, r1, lsr ip
+	rsb	ip, ip, #32
+	mov	r1, r1, asl ip
+	@ At this point we no longer care about the precise value in r1, only
+	@ whether only the top bit is set, or whether the top bit and some
+	@ others are set.
+	and	ip, r1, #0xff
+	orr	r1, r1, ip, lsl #8
+	b	LSYM(Lad_p)
+3:
+	@ We need to scale the value a little before branching to code above.
+	@ At this point we no longer care about the precise value in r1, only
+	@ whether only the top bit is set, or whether the top bit and some
+	@ others are set.
+	and	ip, r1, #0xff
+	orr	r1, r1, ip, lsl #8
+	tst	r0, #0xf0000000
+	movne	r1, r1, lsr #4
+	b	4b
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+4:
+	str	r0, [sp, #-4]!
+	ldfs	f0, [sp], #4
+	RETLDM
+#endif
+	FUNC_END floatdisf
+	FUNC_END aeabi_l2f
+	FUNC_END floatundisf
+	FUNC_END aeabi_ul2f
+
 #endif /* L_addsubsf3 */
 
 #ifdef L_muldivsf3
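
A note on the single-precision version: the comment "we no longer care
about the precise value in r1" is the key trick.  Once the low word can
only influence rounding, the assembly smears its low byte upwards
(and ip, r1, #0xff / orr r1, r1, ip, lsl #8) so that later shifts
cannot silently drop the sticky information.  A rough C model with the
round/sticky handling written out explicitly (again, the names below
are mine, not the branch's):

#include <stdint.h>
#include <string.h>

/* Illustrative only: uint64 -> float, round to nearest even.  */
static float
model_floatundisf (uint64_t x)
{
  if (x == 0)
    return 0.0f;			/* the early RETc(eq) return */
  int top = 63 - __builtin_clzll (x);	/* index of the leading 1 */
  uint32_t m;
  if (top <= 23)
    m = (uint32_t) x << (23 - top);	/* fits exactly, no rounding */
  else
    {
      int s = top - 23;
      uint64_t lost = x & (((uint64_t) 1 << s) - 1);
      uint64_t half = (uint64_t) 1 << (s - 1);
      m = (uint32_t) (x >> s);
      /* Only "is the lost part >, ==, or < half an ulp" matters,
	 never its exact value -- hence the byte smearing above.  */
      if (lost > half || (lost == half && (m & 1)))
	m++;				/* round to nearest even */
    }
  uint32_t bits = ((uint32_t) (127 + top) << 23) + m
		  - ((uint32_t) 1 << 23);
  float f;
  memcpy (&f, &bits, sizeof f);
  return f;
}
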
Index: lib1funcs.asm
===================================================================
RCS file: /var/cvsroot/gcc-cvs/gcc/gcc/config/arm/lib1funcs.asm,v
retrieving revision 1.26.4.6
diff -u -p -r1.26.4.6 lib1funcs.asm
--- lib1funcs.asm	1 Sep 2004 06:05:01 -0000	1.26.4.6
+++ lib1funcs.asm	7 Sep 2004 15:57:29 -0000
@@ -918,6 +918,116 @@ LSYM(Lover12):
 	
 #endif /* L_dvmd_lnx */
 /* ------------------------------------------------------------------------ */
+/* Dword shift operations.  */
+/* All the following Dword shift variants rely on the fact that
+	shft xxx, Reg
+   is in fact done as
+	shft xxx, (Reg & 255)
+   so for Reg value in (32...63) and (-1...-31) we will get zero (in the
+   case of logical shifts) or the sign (for asr).  */
+
+#ifdef __ARMEB__
+#define al	r1
+#define ah	r0
+#else
+#define al	r0
+#define ah	r1
+#endif
+
+#ifdef L_lshrdi3
+
+	FUNC_START lshrdi3
+
+#ifdef __thumb__
+	lsr	al, r2
+	mov	r3, ah
+	lsr	ah, r2
+	mov	ip, r3
+	sub	r2, #32
+	lsr	r3, r2
+	orr	al, r3
+	neg	r2, r2
+	mov	r3, ip
+	lsl	r3, r2
+	orr	al, r3
+	RET
+#else
+	subs	r3, r2, #32
+	rsb	ip, r2, #32
+	movmi	al, al, lsr r2
+	movpl	al, ah, lsr r3
+	orrmi	al, al, ah, lsl ip
+	mov	ah, ah, lsr r2
+	RET
+#endif
+	FUNC_END lshrdi3
+
+#endif
+	
+#ifdef L_ashrdi3
+	
+	FUNC_START ashrdi3
+#ifdef __thumb__
+	lsr	al, r2
+	mov	r3, ah
+	asr	ah, r2
+	sub	r2, #32
+	@ If r2 is negative at this point the following step would OR
+	@ the sign bit into all of AL.  That's not what we want...
+	bmi	1f
+	mov	ip, r3
+	asr	r3, r2
+	orr	al, r3
+	mov	r3, ip
+1:
+	neg	r2, r2
+	lsl	r3, r2
+	orr	al, r3
+	RET
+#else
+	subs	r3, r2, #32
+	rsb	ip, r2, #32
+	movmi	al, al, lsr r2
+	movpl	al, ah, asr r3
+	orrmi	al, al, ah, lsl ip
+	mov	ah, ah, asr r2
+	RET
+#endif
+
+	FUNC_END ashrdi3
+
+#endif
+
+#ifdef L_ashldi3
+
+	FUNC_START ashldi3
+#ifdef __thumb__
+	lsl	ah, r2
+	mov	r3, al
+	lsl	al, r2
+	mov	ip, r3
+	sub	r2, #32
+	lsl	r3, r2
+	orr	ah, r3
+	neg	r2, r2
+	mov	r3, ip
+	lsr	r3, r2
+	orr	ah, r3
+	RET
+#else
+	subs	r3, r2, #32
+	rsb	ip, r2, #32
+	movmi	ah, ah, lsl r2
+	movpl	ah, al, lsl r3
+	orrmi	ah, ah, al, lsr ip
+	mov	al, al, lsl r2
+	RET
+#endif
+	FUNC_END ashldi3
+
+#endif
+
+/* ------------------------------------------------------------------------ */
 /* These next two sections are here despite the fact that they contain Thumb 
    assembler because their presence allows interworked code to be linked even
    when the GCC library is this one.  */
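
The ARM-mode shift sequences are branchless: the n < 32 and n >= 32
results are computed under complementary condition codes, and the
final unconditional shift of the high word is safe precisely because
of the (Reg & 255) behaviour described in the comment at the top.  A C
rendering has to branch, since shifting by the type width or more is
undefined in C; a rough model of _lshrdi3 (name mine):

#include <stdint.h>

/* Illustrative only: 64-bit logical right shift, 0 <= n <= 63.  */
static uint64_t
model_lshrdi3 (uint64_t x, int n)
{
  uint32_t al = (uint32_t) x;		/* low word (r0/r1 by endianness) */
  uint32_t ah = (uint32_t) (x >> 32);	/* high word */
  if (n < 32)
    {
      if (n != 0)			/* the barrel shifter handles n == 0
					   for free: lsl by 32 gives zero */
	al = (al >> n) | (ah << (32 - n));
      ah >>= n;
    }
  else
    {
      al = ah >> (n - 32);		/* the movpl path */
      ah = 0;				/* lsr by 32..63 yields zero */
    }
  return ((uint64_t) ah << 32) | al;
}

_ashrdi3 and _ashldi3 follow the same pattern with asr and lsl, plus
the extra care the Thumb ashrdi3 comment describes for the sign bits.
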
Index: t-arm-elf
===================================================================
RCS file: /var/cvsroot/gcc-cvs/gcc/gcc/config/arm/t-arm-elf,v
retrieving revision 1.20.4.1
diff -u -p -r1.20.4.1 t-arm-elf
--- t-arm-elf	15 May 2004 13:02:10 -0000	1.20.4.1
+++ t-arm-elf	7 Sep 2004 15:57:29 -0000
@@ -1,9 +1,10 @@
 LIB1ASMSRC = arm/lib1funcs.asm
 LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func \
 	_call_via_rX _interwork_call_via_rX \
+	_lshrdi3 _ashrdi3 _ashldi3 \
 	_negdf2 _addsubdf3 _muldivdf3 _cmpdf2 _unorddf2 _fixdfsi _fixunsdfsi \
 	_truncdfsf2 _negsf2 _addsubsf3 _muldivsf3 _cmpsf2 _unordsf2 \
-	_fixsfsi _fixunssfsi
+	_fixsfsi _fixunssfsi _floatdidf _floatdisf
 
 MULTILIB_OPTIONS     = marm/mthumb
 MULTILIB_DIRNAMES    = arm thumb
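
If I read the libgcc machinery correctly, each name in LIB1ASMFUNCS
both assembles lib1funcs.asm with the matching L_<name> macro defined
and excludes the corresponding C routine from libgcc2.  That is why
the shift routines above sit inside the new #ifdef L_lshrdi3 /
L_ashrdi3 / L_ashldi3 blocks, while listing _floatdidf and _floatdisf
mainly serves to suppress the C conversions: the assembly versions are
emitted along with the existing L_addsubdf3 and L_addsubsf3 sections,
whose normalisation code they reuse.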
