
📄 gcc-3.3.2-arm-softfloat.patch

📁 ARM Linux cross-compilation toolchain, suitable for embedded development on Red Hat 9.0
💻 PATCH
📖 Page 1 of 5
#
# Submitted:
#
# Robert Schwebel <r.schwebel@pengutronix.de>, 2004-01-28
#
# Error:
#
# gcc-3.3.2 doesn't seem to have soft float support, so linking against
# libgcc.a doesn't work when compiled with -msoft-float
#
# Description:
#
# Nicolas Pitre released this patch here:
# http://lists.arm.linux.org.uk/pipermail/linux-arm/2003-October/006436.html
#
# The patch had to be extended by the first two hunks below, otherwise
# the compiler claimed a mixup of old and new style FPU options while
# compiling U-Boot.
#
# State:
#
# unknown
# [dank: See also patches/glibc-2.3.2/glibc-vfp.patch]
diff -urN gcc-3.3.2/gcc/config/arm/elf.h gcc-3.3.2-ptx/gcc/config/arm/elf.h
--- gcc-3.3.2/gcc/config/arm/elf.h	2002-11-21 22:29:24.000000000 +0100
+++ gcc-3.3.2-ptx/gcc/config/arm/elf.h	2004-01-31 12:27:28.000000000 +0100
@@ -46,7 +46,7 @@
 
 #ifndef SUBTARGET_ASM_FLOAT_SPEC
 #define SUBTARGET_ASM_FLOAT_SPEC "\
-%{mapcs-float:-mfloat} %{msoft-float:-mno-fpu}"
+%{mapcs-float:-mfloat} %{msoft-float:-mfpu=softfpa -mfpu=softvfp}"
 #endif
 
 #ifndef ASM_SPEC
diff -urN gcc-3.3.2/gcc/config/arm/xscale-elf.h gcc-3.3.2-ptx/gcc/config/arm/xscale-elf.h
--- gcc-3.3.2/gcc/config/arm/xscale-elf.h	2002-05-20 19:07:04.000000000 +0200
+++ gcc-3.3.2-ptx/gcc/config/arm/xscale-elf.h	2004-01-31 12:28:50.000000000 +0100
@@ -28,7 +28,7 @@
 #define SUBTARGET_CPU_DEFAULT 		TARGET_CPU_xscale
 #endif
 
-#define SUBTARGET_EXTRA_ASM_SPEC "%{!mcpu=*:-mcpu=xscale} %{!mhard-float:-mno-fpu}"
+#define SUBTARGET_EXTRA_ASM_SPEC "%{!mcpu=*:-mcpu=xscale} %{!mhard-float:-mfpu=softfpa -mfpu=softvfp}"
 
 #ifndef MULTILIB_DEFAULTS
 #define MULTILIB_DEFAULTS \
diff -urN gcc-3.3/gcc/config/arm/linux-elf.h gcc-3.3-vfp/gcc/config/arm/linux-elf.h
--- gcc-3.3/gcc/config/arm/linux-elf.h	Tue Dec 10 05:55:31 2002
+++ gcc-3.3-vfp/gcc/config/arm/linux-elf.h	Mon Sep  8 12:57:46 2003
@@ -30,15 +30,31 @@
 /* Do not assume anything about header files.  */
 #define NO_IMPLICIT_EXTERN_C
 
-/* Default is to use APCS-32 mode.  */
+/*
+ * Default is to use APCS-32 mode with soft-vfp.
+ * The old Linux default for floats can be achieved with -mhard-float
+ * or with the configure --with-float=hard option.
+ * If -msoft-float or --with-float=soft is used then software float
+ * support will be used just like the default but with the legacy
+ * big endian word ordering for double float representation instead.
+ */
+
 #undef  TARGET_DEFAULT
-#define TARGET_DEFAULT (ARM_FLAG_APCS_32 | ARM_FLAG_MMU_TRAPS)
+#define TARGET_DEFAULT \
+	( ARM_FLAG_APCS_32 | \
+	  ARM_FLAG_SOFT_FLOAT | ARM_FLAG_VFP | \
+	  ARM_FLAG_MMU_TRAPS )
+
+#undef  SUBTARGET_EXTRA_ASM_SPEC
+#define SUBTARGET_EXTRA_ASM_SPEC "%{!mcpu=*:-mcpu=xscale} \
+  %{mhard-float:-mfpu=fpa} \
+  %{!mhard-float: %{msoft-float:-mfpu=softfpa -mfpu=softvfp}}"
 
 #define SUBTARGET_EXTRA_LINK_SPEC " -m armelf_linux -p"
 
 #undef  MULTILIB_DEFAULTS
 #define MULTILIB_DEFAULTS \
-	{ "marm", "mlittle-endian", "mhard-float", "mapcs-32", "mno-thumb-interwork" }
+	{ "marm", "mlittle-endian", "mapcs-32", "mno-thumb-interwork" }
 
 #define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_32__"
 
diff -urN gcc-3.3/gcc/config/arm/t-linux gcc-3.3-vfp/gcc/config/arm/t-linux
--- gcc-3.3/gcc/config/arm/t-linux	Wed May 16 23:15:49 2001
+++ gcc-3.3-vfp/gcc/config/arm/t-linux	Mon Sep  8 12:57:46 2003
@@ -7,7 +7,10 @@
 ENQUIRE=
 
 LIB1ASMSRC = arm/lib1funcs.asm
-LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_lnx
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_lnx \
+	_negdf2 _addsubdf3 _muldivdf3 _cmpdf2 _unorddf2 _fixdfsi _fixunsdfsi \
+	_truncdfsf2 _negsf2 _addsubsf3 _muldivsf3 _cmpsf2 _unordsf2 \
+	_fixsfsi _fixunssfsi
 
 # MULTILIB_OPTIONS = mhard-float/msoft-float
 # MULTILIB_DIRNAMES = hard-float soft-float
diff -urN gcc-3.3/gcc/config/arm/ieee754-df.S gcc-3.3-vfp/gcc/config/arm/ieee754-df.S
--- gcc-3.3/gcc/config/arm/ieee754-df.S	Wed Dec 31 19:00:00 1969
+++ gcc-3.3-vfp/gcc/config/arm/ieee754-df.S	Mon Sep  8 12:57:46 2003
@@ -0,0 +1,1224 @@
+/* ieee754-df.S double-precision floating point support for ARM
+
+   Copyright (C) 2003  Free Software Foundation, Inc.
+   Contributed by Nicolas Pitre (nico@cam.org)
+
+   This file is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by the
+   Free Software Foundation; either version 2, or (at your option) any
+   later version.
+
+   In addition to the permissions in the GNU General Public License, the
+   Free Software Foundation gives you unlimited permission to link the
+   compiled version of this file into combinations with other programs,
+   and to distribute those combinations without any restriction coming
+   from the use of this file.  (The General Public License restrictions
+   do apply in other respects; for example, they cover modification of
+   the file, and distribution when not linked into a combine
+   executable.)
+
+   This file is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; see the file COPYING.  If not, write to
+   the Free Software Foundation, 59 Temple Place - Suite 330,
+   Boston, MA 02111-1307, USA.  */
+
+/*
+ * Notes:
+ *
+ * The goal of this code is to be as fast as possible.  This is
+ * not meant to be easy to understand for the casual reader.
+ * For slightly simpler code please see the single precision version
+ * of this file.
+ *
+ * Only the default rounding mode is intended for best performances.
+ * Exceptions aren't supported yet, but that can be added quite easily
+ * if necessary without impacting performances.
+ */
+
+
+@ For FPA, float words are always big-endian.
+@ For VFP, floats words follow the memory system mode.
+#if defined(__VFP_FP__) && !defined(__ARMEB__)
+#define xl r0
+#define xh r1
+#define yl r2
+#define yh r3
+#else
+#define xh r0
+#define xl r1
+#define yh r2
+#define yl r3
+#endif
+
+
+#ifdef L_negdf2
+
+ARM_FUNC_START negdf2
+	@ flip sign bit
+	eor	xh, xh, #0x80000000
+	RET
+
+	FUNC_END negdf2
+
+#endif
+
+#ifdef L_addsubdf3
+
+ARM_FUNC_START subdf3
+	@ flip sign bit of second arg
+	eor	yh, yh, #0x80000000
+#if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
+	b	1f			@ Skip Thumb-code prologue
+#endif
+
+ARM_FUNC_START adddf3
+
+1:	@ Compare both args, return zero if equal but the sign.
+	teq	xl, yl
+	eoreq	ip, xh, yh
+	teqeq	ip, #0x80000000
+	beq	LSYM(Lad_z)
+
+	@ If first arg is 0 or -0, return second arg.
+	@ If second arg is 0 or -0, return first arg.
+	orrs	ip, xl, xh, lsl #1
+	moveq	xl, yl
+	moveq	xh, yh
+	orrnes	ip, yl, yh, lsl #1
+	RETc(eq)
+
+	stmfd	sp!, {r4, r5, lr}
+
+	@ Mask out exponents.
+	mov	ip, #0x7f000000
+	orr	ip, ip, #0x00f00000
+	and	r4, xh, ip
+	and	r5, yh, ip
+
+	@ If either of them is 0x7ff, result will be INF or NAN
+	teq	r4, ip
+	teqne	r5, ip
+	beq	LSYM(Lad_i)
+
+	@ Compute exponent difference.  Make largest exponent in r4,
+	@ corresponding arg in xh-xl, and positive exponent difference in r5.
+	subs	r5, r5, r4
+	rsblt	r5, r5, #0
+	ble	1f
+	add	r4, r4, r5
+	eor	yl, xl, yl
+	eor	yh, xh, yh
+	eor	xl, yl, xl
+	eor	xh, yh, xh
+	eor	yl, xl, yl
+	eor	yh, xh, yh
+1:
+
+	@ If exponent difference is too large, return largest argument
+	@ already in xh-xl.  We need up to 54 bit to handle proper rounding
+	@ of 0x1p54 - 1.1.
+	cmp	r5, #(54 << 20)
+	RETLDM	"r4, r5" hi
+
+	@ Convert mantissa to signed integer.
+	tst	xh, #0x80000000
+	bic	xh, xh, ip, lsl #1
+	orr	xh, xh, #0x00100000
+	beq	1f
+	rsbs	xl, xl, #0
+	rsc	xh, xh, #0
+1:
+	tst	yh, #0x80000000
+	bic	yh, yh, ip, lsl #1
+	orr	yh, yh, #0x00100000
+	beq	1f
+	rsbs	yl, yl, #0
+	rsc	yh, yh, #0
+1:
+	@ If exponent == difference, one or both args were denormalized.
+	@ Since this is not common case, rescale them off line.
+	teq	r4, r5
+	beq	LSYM(Lad_d)
+LSYM(Lad_x):
+	@ Scale down second arg with exponent difference.
+	@ Apply shift one bit left to first arg and the rest to second arg
+	@ to simplify things later, but only if exponent does not become 0.
+	mov	ip, #0
+	movs	r5, r5, lsr #20
+	beq	3f
+	teq	r4, #(1 << 20)
+	beq	1f
+	movs	xl, xl, lsl #1
+	adc	xh, ip, xh, lsl #1
+	sub	r4, r4, #(1 << 20)
+	subs	r5, r5, #1
+	beq	3f
+
+	@ Shift yh-yl right per r5, keep leftover bits into ip.
+1:	rsbs	lr, r5, #32
+	blt	2f
+	mov	ip, yl, lsl lr
+	mov	yl, yl, lsr r5
+	orr	yl, yl, yh, lsl lr
+	mov	yh, yh, asr r5
+	b	3f
+2:	sub	r5, r5, #32
+	add	lr, lr, #32
+	cmp	yl, #1
+	adc	ip, ip, yh, lsl lr
+	mov	yl, yh, asr r5
+	mov	yh, yh, asr #32
+3:
+	@ the actual addition
+	adds	xl, xl, yl
+	adc	xh, xh, yh
+
+	@ We now have a result in xh-xl-ip.
+	@ Keep absolute value in xh-xl-ip, sign in r5.
+	ands	r5, xh, #0x80000000
+	bpl	LSYM(Lad_p)
+	rsbs	ip, ip, #0
+	rscs	xl, xl, #0
+	rsc	xh, xh, #0
+
+	@ Determine how to normalize the result.
+LSYM(Lad_p):
+	cmp	xh, #0x00100000
+	bcc	LSYM(Lad_l)
+	cmp	xh, #0x00200000
+	bcc	LSYM(Lad_r0)
+	cmp	xh, #0x00400000
+	bcc	LSYM(Lad_r1)
+
+	@ Result needs to be shifted right.
+	movs	xh, xh, lsr #1
+	movs	xl, xl, rrx
+	movs	ip, ip, rrx
+	orrcs	ip, ip, #1
+	add	r4, r4, #(1 << 20)
+LSYM(Lad_r1):
+	movs	xh, xh, lsr #1
+	movs	xl, xl, rrx
+	movs	ip, ip, rrx
+	orrcs	ip, ip, #1
+	add	r4, r4, #(1 << 20)
+
+	@ Our result is now properly aligned into xh-xl, remaining bits in ip.
+	@ Round with MSB of ip. If halfway between two numbers, round towards
+	@ LSB of xl = 0.
+LSYM(Lad_r0):
+	adds	xl, xl, ip, lsr #31
+	adc	xh, xh, #0
+	teq	ip, #0x80000000
+	biceq	xl, xl, #1
+
+	@ One extreme rounding case may add a new MSB.  Adjust exponent.
+	@ That MSB will be cleared when exponent is merged below.
+	tst	xh, #0x00200000
+	addne	r4, r4, #(1 << 20)
+
+	@ Make sure we did not bust our exponent.
+	adds	ip, r4, #(1 << 20)
+	bmi	LSYM(Lad_o)
+
+	@ Pack final result together.
+LSYM(Lad_e):
+	bic	xh, xh, #0x00300000
+	orr	xh, xh, r4
+	orr	xh, xh, r5
+	RETLDM	"r4, r5"
+
+LSYM(Lad_l):
+	@ Result must be shifted left and exponent adjusted.
+	@ No rounding necessary since ip will always be 0.
+#if __ARM_ARCH__ < 5
+
+	teq	xh, #0
+	movne	r3, #-11
+	moveq	r3, #21
+	moveq	xh, xl
+	moveq	xl, #0
+	mov	r2, xh
+	movs	ip, xh, lsr #16
+	moveq	r2, r2, lsl #16
+	addeq	r3, r3, #16
+	tst	r2, #0xff000000
+	moveq	r2, r2, lsl #8
+	addeq	r3, r3, #8
+	tst	r2, #0xf0000000
+	moveq	r2, r2, lsl #4
+	addeq	r3, r3, #4
+	tst	r2, #0xc0000000
+	moveq	r2, r2, lsl #2
+	addeq	r3, r3, #2
+	tst	r2, #0x80000000
+	addeq	r3, r3, #1
+
+#else
+
+	teq	xh, #0
+	moveq	xh, xl
+	moveq	xl, #0
+	clz	r3, xh
+	addeq	r3, r3, #32
+	sub	r3, r3, #11
+
+#endif
+
+	@ determine how to shift the value.
+	subs	r2, r3, #32
+	bge	2f
+	adds	r2, r2, #12
+	ble	1f
+
+	@ shift value left 21 to 31 bits, or actually right 11 to 1 bits
+	@ since a register switch happened above.
+	add	ip, r2, #20
+	rsb	r2, r2, #12
+	mov	xl, xh, lsl ip
+	mov	xh, xh, lsr r2
+	b	3f
+
+	@ actually shift value left 1 to 20 bits, which might also represent
+	@ 32 to 52 bits if counting the register switch that happened earlier.
+1:	add	r2, r2, #20
+2:	rsble	ip, r2, #32
+	mov	xh, xh, lsl r2
+	orrle	xh, xh, xl, lsr ip
+	movle	xl, xl, lsl r2
+
+	@ adjust exponent accordingly.
+3:	subs	r4, r4, r3, lsl #20
+	bgt	LSYM(Lad_e)
+
+	@ Exponent too small, denormalize result.
+	@ Find out proper shift value.
+	mvn	r4, r4, asr #20
+	subs	r4, r4, #30
+	bge	2f
+	adds	r4, r4, #12
+	bgt	1f
+
+	@ shift result right of 1 to 20 bits, sign is in r5.
+	add	r4, r4, #20
+	rsb	r2, r4, #32
+	mov	xl, xl, lsr r4
+	orr	xl, xl, xh, lsl r2
+	orr	xh, r5, xh, lsr r4
+	RETLDM	"r4, r5"
+
+	@ shift result right of 21 to 31 bits, or left 11 to 1 bits after
+	@ a register switch from xh to xl.
+1:	rsb	r4, r4, #12
+	rsb	r2, r4, #32
+	mov	xl, xl, lsr r2
+	orr	xl, xl, xh, lsl r4
+	mov	xh, r5
+	RETLDM	"r4, r5"
+
+	@ Shift value right of 32 to 64 bits, or 0 to 32 bits after a switch
+	@ from xh to xl.
+2:	mov	xl, xh, lsr r4
+	mov	xh, r5
+	RETLDM	"r4, r5"
+
+	@ Adjust exponents for denormalized arguments.
+LSYM(Lad_d):
+	teq	r4, #0
+	eoreq	xh, xh, #0x00100000
+	addeq	r4, r4, #(1 << 20)
+	eor	yh, yh, #0x00100000
+	subne	r5, r5, #(1 << 20)
+	b	LSYM(Lad_x)
+
+	@ Result is x - x = 0, unless x = INF or NAN.
+LSYM(Lad_z):
+	sub	ip, ip, #0x00100000	@ ip becomes 0x7ff00000
+	and	r2, xh, ip
+	teq	r2, ip
+	orreq	xh, ip, #0x00080000
+	movne	xh, #0
+	mov	xl, #0
+	RET
+
+	@ Overflow: return INF.
+LSYM(Lad_o):
+	orr	xh, r5, #0x7f000000
+	orr	xh, xh, #0x00f00000
+	mov	xl, #0
+	RETLDM	"r4, r5"
+
+	@ At least one of x or y is INF/NAN.
+	@   if xh-xl != INF/NAN: return yh-yl (which is INF/NAN)
+	@   if yh-yl != INF/NAN: return xh-xl (which is INF/NAN)
+	@   if either is NAN: return NAN
+	@   if opposite sign: return NAN
+	@   return xh-xl (which is INF or -INF)
+LSYM(Lad_i):
+	teq	r4, ip
+	movne	xh, yh
+	movne	xl, yl
+	teqeq	r5, ip
+	RETLDM	"r4, r5" ne
+
+	orrs	r4, xl, xh, lsl #12
+	orreqs	r4, yl, yh, lsl #12
+	teqeq	xh, yh
+	orrne	xh, r5, #0x00080000
+	movne	xl, #0
+	RETLDM	"r4, r5"
+
+	FUNC_END subdf3
+	FUNC_END adddf3
+
+ARM_FUNC_START floatunsidf
+	teq	r0, #0
+	moveq	r1, #0
+	RETc(eq)
+	stmfd	sp!, {r4, r5, lr}
+	mov	r4, #(0x400 << 20)	@ initial exponent
+	add	r4, r4, #((52-1) << 20)
+	mov	r5, #0			@ sign bit is 0
+	mov	xl, r0
+	mov	xh, #0
+	b	LSYM(Lad_l)
+
+	FUNC_END floatunsidf
+
+ARM_FUNC_START floatsidf
+	teq	r0, #0
+	moveq	r1, #0
+	RETc(eq)
+	stmfd	sp!, {r4, r5, lr}
+	mov	r4, #(0x400 << 20)	@ initial exponent
+	add	r4, r4, #((52-1) << 20)
+	ands	r5, r0, #0x80000000	@ sign bit in r5
+	rsbmi	r0, r0, #0		@ absolute value
+	mov	xl, r0
+	mov	xh, #0
+	b	LSYM(Lad_l)
+
+	FUNC_END floatsidf
+
+ARM_FUNC_START extendsfdf2
+	movs	r2, r0, lsl #1
+	beq	1f			@ value is 0.0 or -0.0
+	mov	xh, r2, asr #3		@ stretch exponent
+	mov	xh, xh, rrx		@ retrieve sign bit
+	mov	xl, r2, lsl #28		@ retrieve remaining bits
+	ands	r2, r2, #0xff000000	@ isolate exponent
+	beq	2f			@ exponent was 0 but not mantissa
+	teq	r2, #0xff000000		@ check if INF or NAN
+	eorne	xh, xh, #0x38000000	@ fixup exponent otherwise.
+	RET
+
+1:	mov	xh, r0
+	mov	xl, #0
+	RET
+
+2:	@ value was denormalized.  We can normalize it now.
+	stmfd	sp!, {r4, r5, lr}
+	mov	r4, #(0x380 << 20)	@ setup corresponding exponent
+	add	r4, r4, #(1 << 20)
+	and	r5, xh, #0x80000000	@ move sign bit in r5
+	bic	xh, xh, #0x80000000
+	b	LSYM(Lad_l)
+
+	FUNC_END extendsfdf2
+
+#endif /* L_addsubdf3 */
+
+#ifdef L_muldivdf3
+
+ARM_FUNC_START muldf3
+
+	stmfd	sp!, {r4, r5, r6, lr}
+
+	@ Mask out exponents.
+	mov	ip, #0x7f000000
+	orr	ip, ip, #0x00f00000
+	and	r4, xh, ip
+	and	r5, yh, ip
+
+	@ Trap any INF/NAN.
+	teq	r4, ip
+	teqne	r5, ip
+	beq	LSYM(Lml_s)
+
+	@ Trap any multiplication by 0.
+	orrs	r6, xl, xh, lsl #1
+	orrnes	r6, yl, yh, lsl #1
+	beq	LSYM(Lml_z)
+
+	@ Shift exponents right one bit to make room for overflow bit.
+	@ If either of them is 0, scale denormalized arguments off line.
+	@ Then add both exponents together.
+	movs	r4, r4, lsr #1
+	teqne	r5, #0
+	beq	LSYM(Lml_d)
+LSYM(Lml_x):
+	add	r4, r4, r5, asr #1
+
+	@ Preserve final sign in r4 along with exponent for now.
+	teq	xh, yh
+	orrmi	r4, r4, #0x8000
+
+	@ Convert mantissa to unsigned integer.
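
The xl/xh macro block near the top of ieee754-df.S encodes the layout difference between the two soft-float variants selected in the specs above: with -mfpu=softvfp the two 32-bit halves of a double follow the memory system's endianness (low word first on little-endian ARM), while the legacy -mfpu=softfpa layout keeps the high word first regardless. A minimal sketch of how to observe the difference from C (hypothetical test code, not part of the patch):

#include <stdio.h>

/* Inspect how the two 32-bit words of an IEEE 754 double are ordered. */
union dwords {
    double d;
    unsigned int w[2];          /* 32-bit words on ARM */
};

int main(void)
{
    union dwords u;
    u.d = 1.0;                  /* bit pattern 0x3ff00000 00000000:
                                   high word holds sign and exponent */
    printf("w[0]=0x%08x w[1]=0x%08x\n", u.w[0], u.w[1]);
    /* softvfp, little-endian ARM:  w[0]=0x00000000 w[1]=0x3ff00000 */
    /* softfpa (legacy FPA order):  w[0]=0x3ff00000 w[1]=0x00000000 */
    return 0;
}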

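Only the first of five pages of the patch is shown above; it continues with the rest of muldf3 and the remaining double-precision routines. As a quick check of the problem described in the header, any double-precision arithmetic built with -msoft-float will do. A minimal sketch, assuming an arm-linux cross compiler on the PATH (compiler prefix and file names here are illustrative, not from the patch):

/* Hypothetical smoke test, not part of the patch.  One plausible workflow:
 * apply with `patch -p1` from the top of the unpacked gcc source tree,
 * rebuild the cross compiler, then
 *     arm-linux-gcc -msoft-float -o fptest fptest.c
 * Without soft-float support in libgcc.a, the link step fails with
 * unresolved references to the double-precision helpers (__adddf3,
 * __muldf3, ...) that the compiler emits calls to under -msoft-float. */
#include <stdio.h>

int main(void)
{
    volatile double a = 1.5;   /* volatile blocks constant folding,  */
    volatile double b = 2.25;  /* forcing real runtime FP calls      */
    double c = a * b + a / b;  /* needs __muldf3, __divdf3, __adddf3 */
    printf("c = %f\n", c);
    return 0;
}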