
gcc-3.3.2-arm-softfloat.patch

Part of an arm-linux cross-compilation toolchain; suitable for embedded development on Red Hat 9.0.

Type: PATCH (page 1 of 5; the listing below ends mid-file at the page boundary)
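The visible portion of the patch is the tail of the new gcc/config/arm/ieee754-sf.S soft-float library (the end of mulsf3, then divsf3, the cmpsf2 comparison family, unordsf2, fixsfsi and fixunssfsi), followed by the start of the lib1funcs.asm changes that introduce the RET/RETc/RETLDM interworking return macros.

Throughout the assembly, a single-precision float travels in a core register as its raw IEEE-754 bit pattern: 1 sign bit, an 8-bit exponent biased by 127, and a 23-bit fraction with an implicit leading 1 when normalized. The recurring idiom `and r2, r0, ip, lsr #1` with ip preloaded to 0xff000000 extracts the exponent field through the mask 0x7f800000. Below is a minimal C sketch of that layout; the helper name and struct are hypothetical, not part of the patch.

#include <stdint.h>

/* Hypothetical helper, not in the patch: unpack the fields the assembly
   keeps packed in registers.  The 0x7f800000 exponent mask is spelled
   "ip, lsr #1" with ip = 0xff000000 throughout the patch. */
struct sf_parts {
    uint32_t sign;      /* bit 31 */
    uint32_t exponent;  /* bits 30-23, biased by 127 */
    uint32_t mantissa;  /* bits 22-0, implicit leading 1 when normalized */
};

static struct sf_parts sf_unpack(uint32_t bits)
{
    struct sf_parts p;
    p.sign     = bits & 0x80000000u;
    p.exponent = (bits >> 23) & 0xffu;
    p.mantissa = bits & 0x007fffffu;
    if (p.exponent != 0u && p.exponent != 0xffu)
        p.mantissa |= 0x00800000u;   /* restore the implicit bit */
    return p;
}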
+	@ Result is 0, but determine sign anyway.
+LSYM(Lml_z):	eor	r0, r0, r1
+	bic	r0, r0, #0x7fffffff
+	RET
+
+	@ Check if denormalized result is possible, otherwise return signed 0.
+LSYM(Lml_u):
+	cmn	r2, #(24 << 22)
+	RETc(le)
+
+	@ Find out proper shift value.
+	mvn	r1, r2, asr #22
+	subs	r1, r1, #7
+	bgt	LSYM(Lml_ur)
+
+	@ Shift value left, round, etc.
+	add	r1, r1, #32
+	orrs	r0, r0, r3, lsr r1
+	rsb	r1, r1, #32
+	adc	r0, r0, ip, lsl r1
+	mov	ip, r3, lsl r1
+	teq	ip, #0x80000000
+	biceq	r0, r0, #1
+	RET
+
+	@ Shift value right, round, etc.
+	@ Note: r1 must not be 0 otherwise carry does not get set.
+LSYM(Lml_ur):
+	orrs	r0, r0, ip, lsr r1
+	adc	r0, r0, #0
+	rsb	r1, r1, #32
+	mov	ip, ip, lsl r1
+	teq	r3, #0
+	teqeq	ip, #0x80000000
+	biceq	r0, r0, #1
+	RET
+
+	@ One or both arguments are denormalized.
+	@ Scale them leftwards and preserve sign bit.
+LSYM(Lml_d):
+	teq	r2, #0
+	and	ip, r0, #0x80000000
+1:	moveq	r0, r0, lsl #1
+	tsteq	r0, #0x00800000
+	subeq	r2, r2, #(1 << 22)
+	beq	1b
+	orr	r0, r0, ip
+	teq	r3, #0
+	and	ip, r1, #0x80000000
+2:	moveq	r1, r1, lsl #1
+	tsteq	r1, #0x00800000
+	subeq	r3, r3, #(1 << 23)
+	beq	2b
+	orr	r1, r1, ip
+	b	LSYM(Lml_x)
+
+	@ One or both args are INF or NAN.
+LSYM(Lml_s):
+	teq	r0, #0x0
+	teqne	r1, #0x0
+	teqne	r0, #0x80000000
+	teqne	r1, #0x80000000
+	beq	LSYM(Lml_n)		@ 0 * INF or INF * 0 -> NAN
+	teq	r2, ip, lsr #1
+	bne	1f
+	movs	r2, r0, lsl #9
+	bne	LSYM(Lml_n)		@ NAN * <anything> -> NAN
+1:	teq	r3, ip, lsr #1
+	bne	LSYM(Lml_i)
+	movs	r3, r1, lsl #9
+	bne	LSYM(Lml_n)		@ <anything> * NAN -> NAN
+
+	@ Result is INF, but we need to determine its sign.
+LSYM(Lml_i):
+	eor	r0, r0, r1
+
+	@ Overflow: return INF (sign already in r0).
+LSYM(Lml_o):
+	and	r0, r0, #0x80000000
+	orr	r0, r0, #0x7f000000
+	orr	r0, r0, #0x00800000
+	RET
+
+	@ Return NAN.
+LSYM(Lml_n):
+	mov	r0, #0x7f000000
+	orr	r0, r0, #0x00c00000
+	RET
+
+	FUNC_END mulsf3
+
+ARM_FUNC_START divsf3
+
+	@ Mask out exponents.
+	mov	ip, #0xff000000
+	and	r2, r0, ip, lsr #1
+	and	r3, r1, ip, lsr #1
+
+	@ Trap any INF/NAN or zeroes.
+	teq	r2, ip, lsr #1
+	teqne	r3, ip, lsr #1
+	bicnes	ip, r0, #0x80000000
+	bicnes	ip, r1, #0x80000000
+	beq	LSYM(Ldv_s)
+
+	@ Shift exponents right one bit to make room for overflow bit.
+	@ If either of them is 0, scale denormalized arguments off line.
+	@ Then substract divisor exponent from dividend's.
+	movs	r2, r2, lsr #1
+	teqne	r3, #0
+	beq	LSYM(Ldv_d)
+LSYM(Ldv_x):
+	sub	r2, r2, r3, asr #1
+
+	@ Preserve final sign into ip.
+	eor	ip, r0, r1
+
+	@ Convert mantissa to unsigned integer.
+	@ Dividend -> r3, divisor -> r1.
+	mov	r3, #0x10000000
+	movs	r1, r1, lsl #9
+	mov	r0, r0, lsl #9
+	beq	LSYM(Ldv_1)
+	orr	r1, r3, r1, lsr #4
+	orr	r3, r3, r0, lsr #4
+
+	@ Initialize r0 (result) with final sign bit.
+	and	r0, ip, #0x80000000
+
+	@ Ensure result will land to known bit position.
+	cmp	r3, r1
+	subcc	r2, r2, #(1 << 22)
+	movcc	r3, r3, lsl #1
+
+	@ Apply exponent bias, check range for over/underflow.
+	add	r2, r2, #(127 << 22)
+	cmn	r2, #(24 << 22)
+	RETc(le)
+	cmp	r2, #(255 << 22)
+	bge	LSYM(Lml_o)
+
+	@ The actual division loop.
+	mov	ip, #0x00800000
+1:	cmp	r3, r1
+	subcs	r3, r3, r1
+	orrcs	r0, r0, ip
+	cmp	r3, r1, lsr #1
+	subcs	r3, r3, r1, lsr #1
+	orrcs	r0, r0, ip, lsr #1
+	cmp	r3, r1, lsr #2
+	subcs	r3, r3, r1, lsr #2
+	orrcs	r0, r0, ip, lsr #2
+	cmp	r3, r1, lsr #3
+	subcs	r3, r3, r1, lsr #3
+	orrcs	r0, r0, ip, lsr #3
+	movs	r3, r3, lsl #4
+	movnes	ip, ip, lsr #4
+	bne	1b
+
+	@ Check if denormalized result is needed.
+	cmp	r2, #0
+	ble	LSYM(Ldv_u)
+
+	@ Apply proper rounding.
+	cmp	r3, r1
+	addcs	r0, r0, #1
+	biceq	r0, r0, #1
+
+	@ Add exponent to result.
+	bic	r0, r0, #0x00800000
+	orr	r0, r0, r2, lsl #1
+	RET
+
+	@ Division by 0x1p*: let's shortcut a lot of code.
+LSYM(Ldv_1):
+	and	ip, ip, #0x80000000
+	orr	r0, ip, r0, lsr #9
+	add	r2, r2, #(127 << 22)
+	cmp	r2, #(255 << 22)
+	bge	LSYM(Lml_o)
+	cmp	r2, #0
+	orrgt	r0, r0, r2, lsl #1
+	RETc(gt)
+	cmn	r2, #(24 << 22)
+	movle	r0, ip
+	RETc(le)
+	orr	r0, r0, #0x00800000
+	mov	r3, #0
+
+	@ Result must be denormalized: prepare parameters to use code above.
+	@ r3 already contains remainder for rounding considerations.
+LSYM(Ldv_u):
+	bic	ip, r0, #0x80000000
+	and	r0, r0, #0x80000000
+	mvn	r1, r2, asr #22
+	add	r1, r1, #2
+	b	LSYM(Lml_ur)
+
+	@ One or both arguments are denormalized.
+	@ Scale them leftwards and preserve sign bit.
+LSYM(Ldv_d):
+	teq	r2, #0
+	and	ip, r0, #0x80000000
+1:	moveq	r0, r0, lsl #1
+	tsteq	r0, #0x00800000
+	subeq	r2, r2, #(1 << 22)
+	beq	1b
+	orr	r0, r0, ip
+	teq	r3, #0
+	and	ip, r1, #0x80000000
+2:	moveq	r1, r1, lsl #1
+	tsteq	r1, #0x00800000
+	subeq	r3, r3, #(1 << 23)
+	beq	2b
+	orr	r1, r1, ip
+	b	LSYM(Ldv_x)
+
+	@ One or both arguments is either INF, NAN or zero.
+LSYM(Ldv_s):
+	mov	ip, #0xff000000
+	teq	r2, ip, lsr #1
+	teqeq	r3, ip, lsr #1
+	beq	LSYM(Lml_n)		@ INF/NAN / INF/NAN -> NAN
+	teq	r2, ip, lsr #1
+	bne	1f
+	movs	r2, r0, lsl #9
+	bne	LSYM(Lml_n)		@ NAN / <anything> -> NAN
+	b	LSYM(Lml_i)		@ INF / <anything> -> INF
+1:	teq	r3, ip, lsr #1
+	bne	2f
+	movs	r3, r1, lsl #9
+	bne	LSYM(Lml_n)		@ <anything> / NAN -> NAN
+	b	LSYM(Lml_z)		@ <anything> / INF -> 0
+2:	@ One or both arguments are 0.
+	bics	r2, r0, #0x80000000
+	bne	LSYM(Lml_i)		@ <non_zero> / 0 -> INF
+	bics	r3, r1, #0x80000000
+	bne	LSYM(Lml_z)		@ 0 / <non_zero> -> 0
+	b	LSYM(Lml_n)		@ 0 / 0 -> NAN
+
+	FUNC_END divsf3
+
+#endif /* L_muldivsf3 */
+
+#ifdef L_cmpsf2
+
+FUNC_START gesf2
+ARM_FUNC_START gtsf2
+	mov	r3, #-1
+	b	1f
+
+FUNC_START lesf2
+ARM_FUNC_START ltsf2
+	mov	r3, #1
+	b	1f
+
+FUNC_START nesf2
+FUNC_START eqsf2
+ARM_FUNC_START cmpsf2
+	mov	r3, #1			@ how should we specify unordered here?
+
+1:	@ Trap any INF/NAN first.
+	mov	ip, #0xff000000
+	and	r2, r1, ip, lsr #1
+	teq	r2, ip, lsr #1
+	and	r2, r0, ip, lsr #1
+	teqne	r2, ip, lsr #1
+	beq	3f
+
+	@ Test for equality.
+	@ Note that 0.0 is equal to -0.0.
+2:	orr	r3, r0, r1
+	bics	r3, r3, #0x80000000	@ either 0.0 or -0.0
+	teqne	r0, r1			@ or both the same
+	moveq	r0, #0
+	RETc(eq)
+
+	@ Check for sign difference.  The N flag is set if it is the case.
+	@ If so, return sign of r0.
+	movmi	r0, r0, asr #31
+	orrmi	r0, r0, #1
+	RETc(mi)
+
+	@ Compare exponents.
+	and	r3, r1, ip, lsr #1
+	cmp	r2, r3
+
+	@ Compare mantissa if exponents are equal
+	moveq	r0, r0, lsl #9
+	cmpeq	r0, r1, lsl #9
+	movcs	r0, r1, asr #31
+	mvncc	r0, r1, asr #31
+	orr	r0, r0, #1
+	RET
+
+	@ Look for a NAN.
+3:	and	r2, r1, ip, lsr #1
+	teq	r2, ip, lsr #1
+	bne	4f
+	movs	r2, r1, lsl #9
+	bne	5f			@ r1 is NAN
+4:	and	r2, r0, ip, lsr #1
+	teq	r2, ip, lsr #1
+	bne	2b
+	movs	ip, r0, lsl #9
+	beq	2b			@ r0 is not NAN
+5:	mov	r0, r3			@ return unordered code from r3.
+	RET
+
+	FUNC_END gesf2
+	FUNC_END gtsf2
+	FUNC_END lesf2
+	FUNC_END ltsf2
+	FUNC_END nesf2
+	FUNC_END eqsf2
+	FUNC_END cmpsf2
+
+#endif /* L_cmpsf2 */
+
+#ifdef L_unordsf2
+
+ARM_FUNC_START unordsf2
+	mov	ip, #0xff000000
+	and	r2, r1, ip, lsr #1
+	teq	r2, ip, lsr #1
+	bne	1f
+	movs	r2, r1, lsl #9
+	bne	3f			@ r1 is NAN
+1:	and	r2, r0, ip, lsr #1
+	teq	r2, ip, lsr #1
+	bne	2f
+	movs	r2, r0, lsl #9
+	bne	3f			@ r0 is NAN
+2:	mov	r0, #0			@ arguments are ordered.
+	RET
+3:	mov	r0, #1			@ arguments are unordered.
+	RET
+
+	FUNC_END unordsf2
+
+#endif /* L_unordsf2 */
+
+#ifdef L_fixsfsi
+
+ARM_FUNC_START fixsfsi
+	movs	r0, r0, lsl #1
+	RETc(eq)			@ value is 0.
+
+	mov	r1, r1, rrx		@ preserve C flag (the actual sign)
+
+	@ check exponent range.
+	and	r2, r0, #0xff000000
+	cmp	r2, #(127 << 24)
+	movcc	r0, #0			@ value is too small
+	RETc(cc)
+	cmp	r2, #((127 + 31) << 24)
+	bcs	1f			@ value is too large
+
+	mov	r0, r0, lsl #7
+	orr	r0, r0, #0x80000000
+	mov	r2, r2, lsr #24
+	rsb	r2, r2, #(127 + 31)
+	tst	r1, #0x80000000		@ the sign bit
+	mov	r0, r0, lsr r2
+	rsbne	r0, r0, #0
+	RET
+
+1:	teq	r2, #0xff000000
+	bne	2f
+	movs	r0, r0, lsl #8
+	bne	3f			@ r0 is NAN.
+2:	ands	r0, r1, #0x80000000	@ the sign bit
+	moveq	r0, #0x7fffffff		@ the maximum signed positive si
+	RET
+
+3:	mov	r0, #0			@ What should we convert NAN to?
+	RET
+
+	FUNC_END fixsfsi
+
+#endif /* L_fixsfsi */
+
+#ifdef L_fixunssfsi
+
+ARM_FUNC_START fixunssfsi
+	movs	r0, r0, lsl #1
+	movcss	r0, #0			@ value is negative...
+	RETc(eq)			@ ... or 0.
+
+
+	@ check exponent range.
+	and	r2, r0, #0xff000000
+	cmp	r2, #(127 << 24)
+	movcc	r0, #0			@ value is too small
+	RETc(cc)
+	cmp	r2, #((127 + 32) << 24)
+	bcs	1f			@ value is too large
+
+	mov	r0, r0, lsl #7
+	orr	r0, r0, #0x80000000
+	mov	r2, r2, lsr #24
+	rsb	r2, r2, #(127 + 31)
+	mov	r0, r0, lsr r2
+	RET
+
+1:	teq	r2, #0xff000000
+	bne	2f
+	movs	r0, r0, lsl #8
+	bne	3f			@ r0 is NAN.
+2:	mov	r0, #0xffffffff		@ maximum unsigned si
+	RET
+
+3:	mov	r0, #0			@ What should we convert NAN to?
+	RET
+
+	FUNC_END fixunssfsi
+
+#endif /* L_fixunssfsi */
diff -urN gcc-3.3/gcc/config/arm/lib1funcs.asm gcc-3.3-vfp/gcc/config/arm/lib1funcs.asm
--- gcc-3.3/gcc/config/arm/lib1funcs.asm	Tue Sep 18 06:02:37 2001
+++ gcc-3.3-vfp/gcc/config/arm/lib1funcs.asm	Mon Sep  8 12:59:27 2003
@@ -51,74 +51,117 @@
 #endif
 #define TYPE(x) .type SYM(x),function
 #define SIZE(x) .size SYM(x), . - SYM(x)
+#define LSYM(x) .x
 #else
 #define __PLT__
 #define TYPE(x)
 #define SIZE(x)
+#define LSYM(x) x
 #endif
 
 /* Function end macros.  Variants for 26 bit APCS and interworking.  */
 
+@ This selects the minimum architecture level required.
+#define __ARM_ARCH__ 3
+
+#if defined(__ARM_ARCH_3M__) || defined(__ARM_ARCH_4__) \
+	|| defined(__ARM_ARCH_4T__)
+/* We use __ARM_ARCH__ set to 4 here, but in reality it's any processor with
+   long multiply instructions.  That includes v3M.  */
+# undef __ARM_ARCH__
+# define __ARM_ARCH__ 4
+#endif
+
+#if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
+	|| defined(__ARM_ARCH_5TE__)
+# undef __ARM_ARCH__
+# define __ARM_ARCH__ 5
+#endif
+
+/* How to return from a function call depends on the architecture variant.  */
+
 #ifdef __APCS_26__
+
 # define RET		movs	pc, lr
 # define RETc(x)	mov##x##s	pc, lr
-# define RETCOND 	^
+
+#elif (__ARM_ARCH__ > 4) || defined(__ARM_ARCH_4T__)
+
+# define RET		bx	lr
+# define RETc(x)	bx##x	lr
+
+# if (__ARM_ARCH__ == 4) \
+	&& (defined(__thumb__) || defined(__THUMB_INTERWORK__))
+#  define __INTERWORKING__
+# endif
+
+#else
+
+# define RET		mov	pc, lr
+# define RETc(x)	mov##x	pc, lr
+
+#endif
+
+/* Don't pass dirn, it's there just to get token pasting right.  */
+
+.macro	RETLDM	regs=, cond=, dirn=ia
+#ifdef __APCS_26__
+	.ifc "\regs",""
+	ldm\cond\dirn	sp!, {pc}^
+	.else
+	ldm\cond\dirn	sp!, {\regs, pc}^
+	.endif
+#elif defined (__INTERWORKING__)
+	.ifc "\regs",""
+	ldr\cond	lr, [sp], #4
+	.else
+	ldm\cond\dirn	sp!, {\regs, lr}
+	.endif
+	bx\cond	lr
+#else
+	.ifc "\regs",""
+	ldr\cond	pc, [sp], #4
+	.else
+	ldm\cond\dirn	sp!, {\regs, pc}
+	.endif
+#endif
+.endm
+
+
 .macro ARM_LDIV0
-Ldiv0:
+LSYM(Ldiv0):
 	str	lr, [sp, #-4]!
 	bl	SYM (__div0) __PLT__
 	mov	r0, #0			@ About as wrong as it could be.
-	ldmia	sp!, {pc}^
+	RETLDM
 .endm
-#else
-# ifdef __THUMB_INTERWORK__
-#  define RET		bx	lr
-#  define RETc(x)	bx##x	lr
+
+
 .macro THUMB_LDIV0
-Ldiv0:
+LSYM(Ldiv0):
 	push	{ lr }
 	bl	SYM (__div0)
 	mov	r0, #0			@ About as wrong as it could be.
+#if defined (__INTERWORKING__)
 	pop	{ r1 }
 	bx	r1
-.endm
-.macro ARM_LDIV0
-Ldiv0:
-	str	lr, [sp, #-4]!
-	bl	SYM (__div0) __PLT__
-	mov	r0, #0			@ About as wrong as it could be.
-	ldr	lr, [sp], #4
-	bx	lr
-.endm
-# else
-#  define RET		mov	pc, lr
-#  define RETc(x)	mov##x	pc, lr
-.macro THUMB_LDIV0
-Ldiv0:
-	push	{ lr }
-	bl	SYM (__div0)
-	mov	r0, #0			@ About as wrong as it could be.
+#else
 	pop	{ pc }
-.endm
-.macro ARM_LDIV0
-Ldiv0:
-	str	lr, [sp, #-4]!
-	bl	SYM (__div0) __PLT__
-	mov	r0, #0			@ About as wrong as it could be.
-	ldmia	sp!, {pc}
-.endm
-# endif
-# define RETCOND
 #endif
+.endm
 
 .macro FUNC_END name
-Ldiv0:
+	SIZE (__\name)
+.endm
+
+.macro DIV_FUNC_END name
+LSYM(Ldiv0):
#ifdef __thumb__
 	THUMB_LDIV0
 #else
 	ARM_LDIV0
 #endif
-	SIZE (__\name)
+	FUNC_END \name
 .endm
 
 .macro THUMB_FUNC_START name
@@ -147,7 +190,24 @@
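The divsf3 loop in the patch above is radix-16 restoring division: each pass attempts to subtract the divisor shifted right by 0 to 3 bits, setting one quotient bit per successful subtraction, then shifts the remainder left by 4 and the quotient mask right by 4 until either the remainder is exhausted or all 24 mantissa bits have been produced. A C sketch of just that loop, under assumed preconditions mirroring the assembly; the function name is hypothetical and not in the patch.

#include <stdint.h>

/* Hypothetical sketch, not part of the patch: the quotient loop of divsf3.
   Preconditions mirror the assembly: num and den are mantissas aligned so
   that den <= num < 2*den, both carrying the 0x10000000 marker bit the
   code ORs in before the loop. */
static uint32_t sf_div_quotient(uint32_t num, uint32_t den, uint32_t *rem)
{
    uint32_t q   = 0;
    uint32_t bit = 0x00800000u;          /* mov   ip, #0x00800000 */

    for (;;) {
        for (int s = 0; s < 4; s++) {    /* cmp/subcs/orrcs at lsr #0..#3 */
            if (num >= (den >> s)) {
                num -= den >> s;
                q   |= bit >> s;
            }
        }
        num <<= 4;                       /* movs   r3, r3, lsl #4 */
        if (num == 0)                    /* remainder exhausted */
            break;
        bit >>= 4;                       /* movnes ip, ip, lsr #4 */
        if (bit == 0)                    /* all 24 quotient bits produced */
            break;
    }
    *rem = num;                          /* kept for the rounding step */
    return q;
}

The rounding step that follows the loop (`cmp r3, r1` / `addcs r0, r0, #1` / `biceq r0, r0, #1`) uses this final remainder to round to nearest, clearing the low result bit on an exact tie so that halfway cases round to even.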

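The fixsfsi conversion above truncates toward zero and saturates out-of-range values: magnitudes below 1 return 0, positive overflow returns 0x7fffffff, negative overflow returns 0x80000000, and NaN returns 0 (the source itself asks "What should we convert NAN to?"). A C sketch of that case analysis, with a hypothetical name not in the patch:

#include <stdint.h>

/* Hypothetical sketch, not part of the patch: the case analysis of
   fixsfsi.  Truncate toward zero, saturate on overflow, NaN -> 0. */
static int32_t sf_fix(uint32_t bits)
{
    uint32_t sign = bits & 0x80000000u;
    uint32_t exp  = (bits >> 23) & 0xffu;
    uint32_t frac = bits & 0x007fffffu;

    if (exp < 127u)                      /* |value| < 1, including 0 */
        return 0;
    if (exp >= 127u + 31u) {             /* too large, INF, or NaN */
        if (exp == 0xffu && frac != 0u)
            return 0;                    /* NaN, matching the patch */
        return sign ? INT32_MIN : INT32_MAX;
    }
    uint32_t mant  = frac | 0x00800000u; /* implicit leading one */
    int      shift = (int)exp - 150;     /* 150 = 127 bias + 23 frac bits */
    uint32_t mag   = shift >= 0 ? mant << shift : mant >> -shift;
    return sign ? -(int32_t)mag : (int32_t)mag;
}

fixunssfsi differs only in its bounds: negative inputs return 0, the overflow threshold moves up one exponent step to 127 + 32, and overflow saturates to 0xffffffff.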