
lib1funcs.asm
Source: Mac OS X 10.4.9 for x86 Source Code (gcc implementation sources)
Language: ASM
Page 1 of 2

@ libgcc routines for ARM cpu.
@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)

/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
   Free Software Foundation, Inc.

This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
/* ------------------------------------------------------------------------ */

/* We need to know what prefix to add to function names.  */

#ifndef __USER_LABEL_PREFIX__
#error  __USER_LABEL_PREFIX__ not defined
#endif

/* ANSI concatenation macros.  */

#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b

/* Use the right prefix for global labels.  */

#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)

#ifdef __ELF__
#ifdef __thumb__
#define __PLT__  /* Not supported in Thumb assembler (for now).  */
#else
#define __PLT__ (PLT)
#endif
#define TYPE(x) .type SYM(x),function
#define SIZE(x) .size SYM(x), . - SYM(x)
#define LSYM(x) .x
#else
#define __PLT__
#define TYPE(x)
#define SIZE(x)
#define LSYM(x) x
#endif

/* Function end macros.  Variants for interworking.  */

@ This selects the minimum architecture level required.
#define __ARM_ARCH__ 3

#if defined(__ARM_ARCH_3M__) || defined(__ARM_ARCH_4__) \
	|| defined(__ARM_ARCH_4T__)
/* We use __ARM_ARCH__ set to 4 here, but in reality it's any processor with
   long multiply instructions.  That includes v3M.  */
# undef __ARM_ARCH__
# define __ARM_ARCH__ 4
#endif

#if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
	|| defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
	|| defined(__ARM_ARCH_5TEJ__)
# undef __ARM_ARCH__
# define __ARM_ARCH__ 5
#endif

#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
	|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
	|| defined(__ARM_ARCH_6ZK__)
# undef __ARM_ARCH__
# define __ARM_ARCH__ 6
#endif

/* How to return from a function call depends on the architecture variant.  */

#if (__ARM_ARCH__ > 4) || defined(__ARM_ARCH_4T__)

# define RET		bx	lr
# define RETc(x)	bx##x	lr

/* Special precautions for interworking on armv4t.  */
# if (__ARM_ARCH__ == 4)

/* Always use bx, not ldr pc.  */
#  if (defined(__thumb__) || defined(__THUMB_INTERWORK__))
#    define __INTERWORKING__
#   endif /* __THUMB__ || __THUMB_INTERWORK__ */

/* Include thumb stub before arm mode code.  */
#  if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
#   define __INTERWORKING_STUBS__
#  endif /* __thumb__ && !__THUMB_INTERWORK__ */

#endif /* __ARM_ARCH == 4 */

#else

# define RET		mov	pc, lr
# define RETc(x)	mov##x	pc, lr

#endif
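/* Example (editorial illustration, not in the original source): RETc
   pastes its condition argument onto the return instruction, so on an
   architecture with BX available,

	RETc(eq)	@ assembles as:  bxeq	lr

   while the pre-v4t fallback expands the same macro to "moveq pc, lr".  */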
/* Don't pass dirn, it's there just to get token pasting right.  */

.macro	RETLDM	regs=, cond=, dirn=ia
#if defined (__INTERWORKING__)
	.ifc "\regs",""
	ldr\cond	lr, [sp], #4
	.else
	ldm\cond\dirn	sp!, {\regs, lr}
	.endif
	bx\cond	lr
#else
	.ifc "\regs",""
	ldr\cond	pc, [sp], #4
	.else
	ldm\cond\dirn	sp!, {\regs, pc}
	.endif
#endif
.endm

.macro ARM_LDIV0
LSYM(Ldiv0):
	str	lr, [sp, #-4]!
	bl	SYM (__div0) __PLT__
	mov	r0, #0			@ About as wrong as it could be.
	RETLDM
.endm

.macro THUMB_LDIV0
LSYM(Ldiv0):
	push	{ lr }
	bl	SYM (__div0)
	mov	r0, #0			@ About as wrong as it could be.
#if defined (__INTERWORKING__)
	pop	{ r1 }
	bx	r1
#else
	pop	{ pc }
#endif
.endm

.macro FUNC_END name
	SIZE (__\name)
.endm

.macro DIV_FUNC_END name
LSYM(Ldiv0):
#ifdef __thumb__
	THUMB_LDIV0
#else
	ARM_LDIV0
#endif
	FUNC_END \name
.endm

.macro THUMB_FUNC_START name
	.globl	SYM (\name)
	TYPE	(\name)
	.thumb_func
SYM (\name):
.endm

/* Function start macros.  Variants for ARM and Thumb.  */

#ifdef __thumb__
#define THUMB_FUNC .thumb_func
#define THUMB_CODE .force_thumb
#else
#define THUMB_FUNC
#define THUMB_CODE
#endif

.macro FUNC_START name
	.text
	.globl SYM (__\name)
	TYPE (__\name)
	.align 0
	THUMB_CODE
	THUMB_FUNC
SYM (__\name):
.endm

/* Special function that will always be coded in ARM assembly, even if
   in Thumb-only compilation.  */

#if defined(__INTERWORKING_STUBS__)
.macro	ARM_FUNC_START name
	FUNC_START \name
	bx	pc
	nop
	.arm
/* A hook to tell gdb that we've switched to ARM mode.  Also used to call
   directly from other local arm routines.  */
_L__\name:
.endm

#define EQUIV .thumb_set

/* Branch directly to a function declared with ARM_FUNC_START.
   Must be called in arm mode.  */
.macro  ARM_CALL name
	bl	_L__\name
.endm

#else
.macro	ARM_FUNC_START name
	.text
	.globl SYM (__\name)
	TYPE (__\name)
	.align 0
	.arm
SYM (__\name):
.endm

#define EQUIV .set

.macro  ARM_CALL name
	bl	__\name
.endm
#endif

.macro	FUNC_ALIAS new old
	.globl	SYM (__\new)
#if defined (__thumb__)
	.thumb_set	SYM (__\new), SYM (__\old)
#else
	.set	SYM (__\new), SYM (__\old)
#endif
.endm

.macro	ARM_FUNC_ALIAS new old
	.globl	SYM (__\new)
	EQUIV	SYM (__\new), SYM (__\old)
#if defined(__INTERWORKING_STUBS__)
	.set	SYM (_L__\new), SYM (_L__\old)
#endif
.endm

#ifdef __thumb__
/* Register aliases.  */

work		.req	r4	@ XXXX is this safe ?
dividend	.req	r0
divisor		.req	r1
overdone	.req	r2
result		.req	r2
curbit		.req	r3
#endif
#if 0
ip		.req	r12
sp		.req	r13
lr		.req	r14
pc		.req	r15
#endif

/* ------------------------------------------------------------------------ */
/*		Bodies of the division and modulo routines.		    */
/* ------------------------------------------------------------------------ */
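/* Worked example (editorial illustration, not in the original source):
   the division bodies below implement binary restoring division.
   Dividing 100 by 7, the divisor is first shifted up until it would pass
   the dividend, then shifted back down one bit at a time, subtracting
   and setting a quotient bit whenever it still fits:

	100 - (7 << 3) = 100 - 56 = 44		@ quotient bit 3 set
	 44 - (7 << 2) =  44 - 28 = 16		@ quotient bit 2 set
	 16 - (7 << 1) =  16 - 14 =  2		@ quotient bit 1 set
	  2 <   7				@ quotient bit 0 clear

   i.e. quotient 0b1110 = 14, remainder 2.  On armv5 the two CLZ
   instructions compute the bit-length difference of dividend and divisor
   directly, and "addne pc, pc, \curbit, lsl #2" jumps into the middle of
   the 32-way unrolled loop, skipping steps that cannot produce quotient
   bits; each unrolled step is three instructions (12 bytes), which is
   why curbit is first tripled ("addne \curbit, \curbit, \curbit, lsl #1")
   and then scaled by 4 in the branch.  */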
.macro ARM_DIV_BODY dividend, divisor, result, curbit

#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)

	clz	\curbit, \dividend
	clz	\result, \divisor
	sub	\curbit, \result, \curbit
	rsbs	\curbit, \curbit, #31
	addne	\curbit, \curbit, \curbit, lsl #1
	mov	\result, #0
	addne	pc, pc, \curbit, lsl #2
	nop
	.set	shift, 32
	.rept	32
	.set	shift, shift - 1
	cmp	\dividend, \divisor, lsl #shift
	adc	\result, \result, \result
	subcs	\dividend, \dividend, \divisor, lsl #shift
	.endr

#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
#if __ARM_ARCH__ >= 5

	clz	\curbit, \divisor
	clz	\result, \dividend
	sub	\result, \curbit, \result
	mov	\curbit, #1
	mov	\divisor, \divisor, lsl \result
	mov	\curbit, \curbit, lsl \result
	mov	\result, #0

#else /* __ARM_ARCH__ < 5 */

	@ Initially shift the divisor left 3 bits if possible,
	@ set curbit accordingly.  This allows for curbit to be located
	@ at the left end of each 4-bit nibble in the division loop
	@ to save one loop in most cases.
	tst	\divisor, #0xe0000000
	moveq	\divisor, \divisor, lsl #3
	moveq	\curbit, #8
	movne	\curbit, #1

	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
1:	cmp	\divisor, #0x10000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #4
	movlo	\curbit, \curbit, lsl #4
	blo	1b

	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
1:	cmp	\divisor, #0x80000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #1
	movlo	\curbit, \curbit, lsl #1
	blo	1b

	mov	\result, #0

#endif /* __ARM_ARCH__ < 5 */

	@ Division loop
1:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	orrhs	\result,   \result,   \curbit
	cmp	\dividend, \divisor,  lsr #1
	subhs	\dividend, \dividend, \divisor, lsr #1
	orrhs	\result,   \result,   \curbit,  lsr #1
	cmp	\dividend, \divisor,  lsr #2
	subhs	\dividend, \dividend, \divisor, lsr #2
	orrhs	\result,   \result,   \curbit,  lsr #2
	cmp	\dividend, \divisor,  lsr #3
	subhs	\dividend, \dividend, \divisor, lsr #3
	orrhs	\result,   \result,   \curbit,  lsr #3
	cmp	\dividend, #0			@ Early termination?
	movnes	\curbit,   \curbit,  lsr #4	@ No, any more bits to do?
	movne	\divisor,  \divisor, lsr #4
	bne	1b

#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */

.endm
/* ------------------------------------------------------------------------ */

.macro ARM_DIV2_ORDER divisor, order

#if __ARM_ARCH__ >= 5

	clz	\order, \divisor
	rsb	\order, \order, #31

#else

	cmp	\divisor, #(1 << 16)
	movhs	\divisor, \divisor, lsr #16
	movhs	\order, #16
	movlo	\order, #0

	cmp	\divisor, #(1 << 8)
	movhs	\divisor, \divisor, lsr #8
	addhs	\order, \order, #8

	cmp	\divisor, #(1 << 4)
	movhs	\divisor, \divisor, lsr #4
	addhs	\order, \order, #4

	cmp	\divisor, #(1 << 2)
	addhi	\order, \order, #3
	addls	\order, \order, \divisor, lsr #1

#endif

.endm
/* ------------------------------------------------------------------------ */
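/* Worked example (editorial illustration, not in the original source):
   ARM_DIV2_ORDER computes floor(log2(divisor)).  For a divisor of 8 the
   armv5 path gives 31 - clz(8) = 31 - 28 = 3; the fallback path's range
   checks accumulate the same value (0 + 0 + 0 + 3).  In ARM_MOD_BODY
   below, the computed branch into the unrolled loop is scaled by 8
   ("lsl #3") rather than 12, because each unrolled step there is only
   two instructions, a cmp and a subcs, with no quotient bit to record.  */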
.macro ARM_MOD_BODY dividend, divisor, order, spare

#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)

	clz	\order, \divisor
	clz	\spare, \dividend
	sub	\order, \order, \spare
	rsbs	\order, \order, #31
	addne	pc, pc, \order, lsl #3
	nop
	.set	shift, 32
	.rept	32
	.set	shift, shift - 1
	cmp	\dividend, \divisor, lsl #shift
	subcs	\dividend, \dividend, \divisor, lsl #shift
	.endr

#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
#if __ARM_ARCH__ >= 5

	clz	\order, \divisor
	clz	\spare, \dividend
	sub	\order, \order, \spare
	mov	\divisor, \divisor, lsl \order

#else /* __ARM_ARCH__ < 5 */

	mov	\order, #0

	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
1:	cmp	\divisor, #0x10000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #4
	addlo	\order, \order, #4
	blo	1b

	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
1:	cmp	\divisor, #0x80000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #1
	addlo	\order, \order, #1
	blo	1b

#endif /* __ARM_ARCH__ < 5 */

	@ Perform all needed subtractions to keep only the remainder.
	@ Do comparisons in batches of 4 first.
	subs	\order, \order, #3		@ yes, 3 is intended here
	blt	2f

1:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	cmp	\dividend, \divisor,  lsr #1
	subhs	\dividend, \dividend, \divisor, lsr #1
	cmp	\dividend, \divisor,  lsr #2
	subhs	\dividend, \dividend, \divisor, lsr #2
	cmp	\dividend, \divisor,  lsr #3
	subhs	\dividend, \dividend, \divisor, lsr #3
	cmp	\dividend, #1
	mov	\divisor, \divisor, lsr #4
	subges	\order, \order, #4
	bge	1b

	tst	\order, #3
	teqne	\dividend, #0
	beq	5f

	@ Either 1, 2 or 3 comparisons/subtractions are left.
2:	cmn	\order, #2
	blt	4f
	beq	3f
	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	mov	\divisor,  \divisor,  lsr #1
3:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	mov	\divisor,  \divisor,  lsr #1
4:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
5:

#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */

.endm
/* ------------------------------------------------------------------------ */
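/* Note (editorial illustration, not in the original source): the Thumb-1
   encoding used below has no conditional execution and only 8-bit
   immediates, so a constant such as 0x10000000 must be built in two
   steps, e.g.:

	mov	work, #1
	lsl	work, #28	@ work = 0x10000000

   Likewise, the \modulo path may subtract too much on the final pass, so
   it records each speculative subtraction by rotating curbit and ORing
   it into overdone, then adds the over-subtracted amounts back at the
   end.  */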
.macro THUMB_DIV_MOD_BODY modulo
	@ Load the constant 0x10000000 into our work register.
	mov	work, #1
	lsl	work, #28
LSYM(Loop1):
	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
	cmp	divisor, work
	bhs	LSYM(Lbignum)
	cmp	divisor, dividend
	bhs	LSYM(Lbignum)
	lsl	divisor, #4
	lsl	curbit,  #4
	b	LSYM(Loop1)
LSYM(Lbignum):
	@ Set work to 0x80000000
	lsl	work, #3
LSYM(Loop2):
	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
	cmp	divisor, work
	bhs	LSYM(Loop3)
	cmp	divisor, dividend
	bhs	LSYM(Loop3)
	lsl	divisor, #1
	lsl	curbit,  #1
	b	LSYM(Loop2)
LSYM(Loop3):
	@ Test for possible subtractions ...
  .if \modulo
	@ ... On the final pass, this may subtract too much from the dividend,
	@ so keep track of which subtractions are done; we can fix them up
	@ afterwards.
	mov	overdone, #0
	cmp	dividend, divisor
	blo	LSYM(Lover1)
	sub	dividend, dividend, divisor
LSYM(Lover1):
	lsr	work, divisor, #1
	cmp	dividend, work
	blo	LSYM(Lover2)
	sub	dividend, dividend, work
	mov	ip, curbit
	mov	work, #1
	ror	curbit, work
	orr	overdone, curbit
	mov	curbit, ip
LSYM(Lover2):
	lsr	work, divisor, #2
	cmp	dividend, work
	blo	LSYM(Lover3)
	sub	dividend, dividend, work
	mov	ip, curbit
	mov	work, #2
	ror	curbit, work
	orr	overdone, curbit
	mov	curbit, ip
LSYM(Lover3):
	lsr	work, divisor, #3
	cmp	dividend, work
	blo	LSYM(Lover4)
	sub	dividend, dividend, work
	mov	ip, curbit
	mov	work, #3
	ror	curbit, work
	orr	overdone, curbit
	mov	curbit, ip
LSYM(Lover4):
	mov	ip, curbit
  .else
	@ ... and note which bits are done in the result.  On the final pass,
	@ this may subtract too much from the dividend, but the result will be ok,
	@ since the "bit" will have been shifted out at the bottom.
	cmp	dividend, divisor
	blo	LSYM(Lover1)
	sub	dividend, dividend, divisor
	orr	result, result, curbit
LSYM(Lover1):
	lsr	work, divisor, #1
	cmp	dividend, work
	blo	LSYM(Lover2)
	sub	dividend, dividend, work
	lsr	work, curbit, #1
	orr	result, work
LSYM(Lover2):
	lsr	work, divisor, #2
	cmp	dividend, work
	blo	LSYM(Lover3)
	sub	dividend, dividend, work
	lsr	work, curbit, #2
	orr	result, work
LSYM(Lover3):
	lsr	work, divisor, #3
	cmp	dividend, work
	blo	LSYM(Lover4)
	sub	dividend, dividend, work
	lsr	work, curbit, #3
	orr	result, work
LSYM(Lover4):
  .endif

	cmp	dividend, #0			@ Early termination?
	beq	LSYM(Lover5)
	lsr	curbit,  #4			@ No, any more bits to do?
	beq	LSYM(Lover5)
	lsr	divisor, #4
	b	LSYM(Loop3)
LSYM(Lover5):
  .if \modulo
	@ Any subtractions that we should not have done will be recorded in
	@ the top three bits of "overdone".  Exactly which were not needed
	@ are governed by the position of the bit, stored in ip.
	mov	work, #0xe
	lsl	work, #28
	and	overdone, work
	beq	LSYM(Lgot_result)

	@ If we terminated early, because dividend became zero, then the
	@ bit in ip will not be in the bottom nibble, and we should not
	@ perform the additions below.  We must test for this though
	@ (rather than relying upon the TSTs to prevent the additions) since
	@ the bit in ip could be in the top two bits which might then match
	@ with one of the smaller RORs.
	mov	curbit, ip
	mov	work, #0x7
	tst	curbit, work
	beq	LSYM(Lgot_result)

	mov	curbit, ip
	mov	work, #3
	ror	curbit, work
	tst	overdone, curbit
	beq	LSYM(Lover6)
	lsr	work, divisor, #3
	add	dividend, work
LSYM(Lover6):
	mov	curbit, ip
	mov	work, #2
