entry.S — arch/sparc64/kernel, Linux 2.6.16 (listing page 1 of 3)
/* $Id: entry.S,v 1.144 2002/02/09 19:49:30 davem Exp $
 * arch/sparc64/kernel/entry.S:  Sparc64 trap low-level entry points.
 *
 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost        (ecd@skynet.be)
 * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/ptrace.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/visasm.h>
#include <asm/estate.h>
#include <asm/auxio.h>
#include <asm/sfafsr.h>

#define curptr      g6

#define NR_SYSCALLS 300      /* Each OS is different... */

	.text
	.align		32

	/* This is trivial with the new code... */
	.globl		do_fpdis
do_fpdis:
	sethi		%hi(TSTATE_PEF), %g4
	rdpr		%tstate, %g5
	andcc		%g5, %g4, %g0
	be,pt		%xcc, 1f
	 nop
	rd		%fprs, %g5
	andcc		%g5, FPRS_FEF, %g0
	be,pt		%xcc, 1f
	 nop

	/* Legal state when DCR_IFPOE is set in Cheetah %dcr. */
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7
	add		%g0, %g0, %g0
	ba,a,pt		%xcc, rtrap_clr_l6

1:	ldub		[%g6 + TI_FPSAVED], %g5
	wr		%g0, FPRS_FEF, %fprs
	andcc		%g5, FPRS_FEF, %g0
	be,a,pt		%icc, 1f
	 clr		%g7
	ldx		[%g6 + TI_GSR], %g7
1:	andcc		%g5, FPRS_DL, %g0
	bne,pn		%icc, 2f
	 fzero		%f0
	andcc		%g5, FPRS_DU, %g0
	bne,pn		%icc, 1f
	 fzero		%f2
	faddd		%f0, %f2, %f4
	fmuld		%f0, %f2, %f6
	faddd		%f0, %f2, %f8
	fmuld		%f0, %f2, %f10
	faddd		%f0, %f2, %f12
	fmuld		%f0, %f2, %f14
	faddd		%f0, %f2, %f16
	fmuld		%f0, %f2, %f18
	faddd		%f0, %f2, %f20
	fmuld		%f0, %f2, %f22
	faddd		%f0, %f2, %f24
	fmuld		%f0, %f2, %f26
	faddd		%f0, %f2, %f28
	fmuld		%f0, %f2, %f30
	faddd		%f0, %f2, %f32
	fmuld		%f0, %f2, %f34
	faddd		%f0, %f2, %f36
	fmuld		%f0, %f2, %f38
	faddd		%f0, %f2, %f40
	fmuld		%f0, %f2, %f42
	faddd		%f0, %f2, %f44
	fmuld		%f0, %f2, %f46
	faddd		%f0, %f2, %f48
	fmuld		%f0, %f2, %f50
	faddd		%f0, %f2, %f52
	fmuld		%f0, %f2, %f54
	faddd		%f0, %f2, %f56
	fmuld		%f0, %f2, %f58
	b,pt		%xcc, fpdis_exit2
	 faddd		%f0, %f2, %f60
1:	mov		SECONDARY_CONTEXT, %g3
	add		%g6, TI_FPREGS + 0x80, %g1
	faddd		%f0, %f2, %f4
	fmuld		%f0, %f2, %f6
	ldxa		[%g3] ASI_DMMU, %g5
	sethi		%hi(sparc64_kern_sec_context), %g2
	ldx		[%g2 + %lo(sparc64_kern_sec_context)], %g2
	stxa		%g2, [%g3] ASI_DMMU
	membar		#Sync
	add		%g6, TI_FPREGS + 0xc0, %g2
	faddd		%f0, %f2, %f8
	fmuld		%f0, %f2, %f10
	membar		#Sync
	ldda		[%g1] ASI_BLK_S, %f32
	ldda		[%g2] ASI_BLK_S, %f48
	membar		#Sync
	faddd		%f0, %f2, %f12
	fmuld		%f0, %f2, %f14
	faddd		%f0, %f2, %f16
	fmuld		%f0, %f2, %f18
	faddd		%f0, %f2, %f20
	fmuld		%f0, %f2, %f22
	faddd		%f0, %f2, %f24
	fmuld		%f0, %f2, %f26
	faddd		%f0, %f2, %f28
	fmuld		%f0, %f2, %f30
	b,pt		%xcc, fpdis_exit
	 nop
2:	andcc		%g5, FPRS_DU, %g0
	bne,pt		%icc, 3f
	 fzero		%f32
	mov		SECONDARY_CONTEXT, %g3
	fzero		%f34
	ldxa		[%g3] ASI_DMMU, %g5
	add		%g6, TI_FPREGS, %g1
	sethi		%hi(sparc64_kern_sec_context), %g2
	ldx		[%g2 + %lo(sparc64_kern_sec_context)], %g2
	stxa		%g2, [%g3] ASI_DMMU
	membar		#Sync
	add		%g6, TI_FPREGS + 0x40, %g2
	faddd		%f32, %f34, %f36
	fmuld		%f32, %f34, %f38
	membar		#Sync
	ldda		[%g1] ASI_BLK_S, %f0
	ldda		[%g2] ASI_BLK_S, %f16
	membar		#Sync
	faddd		%f32, %f34, %f40
	fmuld		%f32, %f34, %f42
	faddd		%f32, %f34, %f44
	fmuld		%f32, %f34, %f46
	faddd		%f32, %f34, %f48
	fmuld		%f32, %f34, %f50
	faddd		%f32, %f34, %f52
	fmuld		%f32, %f34, %f54
	faddd		%f32, %f34, %f56
	fmuld		%f32, %f34, %f58
	faddd		%f32, %f34, %f60
	fmuld		%f32, %f34, %f62
	ba,pt		%xcc, fpdis_exit
	 nop
3:	mov		SECONDARY_CONTEXT, %g3
	add		%g6, TI_FPREGS, %g1
	ldxa		[%g3] ASI_DMMU, %g5
	sethi		%hi(sparc64_kern_sec_context), %g2
	ldx		[%g2 + %lo(sparc64_kern_sec_context)], %g2
	stxa		%g2, [%g3] ASI_DMMU
	membar		#Sync
	mov		0x40, %g2
	membar		#Sync
	ldda		[%g1] ASI_BLK_S, %f0
	ldda		[%g1 + %g2] ASI_BLK_S, %f16
	add		%g1, 0x80, %g1
	ldda		[%g1] ASI_BLK_S, %f32
	ldda		[%g1 + %g2] ASI_BLK_S, %f48
	membar		#Sync
fpdis_exit:
	stxa		%g5, [%g3] ASI_DMMU
	membar		#Sync
fpdis_exit2:
	wr		%g7, 0, %gsr
	ldx		[%g6 + TI_XFSR], %fsr
	rdpr		%tstate, %g3
	or		%g3, %g4, %g3		! anal...
	wrpr		%g3, %tstate
	wr		%g0, FPRS_FEF, %fprs	! clean DU/DL bits
	retry

	.align		32
fp_other_bounce:
	call		do_fpother
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	 clr		%l6

	.globl		do_fpother_check_fitos
	.align		32
do_fpother_check_fitos:
	sethi		%hi(fp_other_bounce - 4), %g7
	or		%g7, %lo(fp_other_bounce - 4), %g7

	/* NOTE: Need to preserve %g7 until we fully commit
	 *       to the fitos fixup.
	 */
	stx		%fsr, [%g6 + TI_XFSR]
	rdpr		%tstate, %g3
	andcc		%g3, TSTATE_PRIV, %g0
	bne,pn		%xcc, do_fptrap_after_fsr
	 nop
	ldx		[%g6 + TI_XFSR], %g3
	srlx		%g3, 14, %g1
	and		%g1, 7, %g1
	cmp		%g1, 2			! Unfinished FP-OP
	bne,pn		%xcc, do_fptrap_after_fsr
	 sethi		%hi(1 << 23), %g1	! Inexact
	andcc		%g3, %g1, %g0
	bne,pn		%xcc, do_fptrap_after_fsr
	 rdpr		%tpc, %g1
	lduwa		[%g1] ASI_AIUP, %g3	! This cannot ever fail

#define FITOS_MASK	0xc1f83fe0
#define FITOS_COMPARE	0x81a01880

	sethi		%hi(FITOS_MASK), %g1
	or		%g1, %lo(FITOS_MASK), %g1
	and		%g3, %g1, %g1
	sethi		%hi(FITOS_COMPARE), %g2
	or		%g2, %lo(FITOS_COMPARE), %g2
	cmp		%g1, %g2
	bne,pn		%xcc, do_fptrap_after_fsr
	 nop
	std		%f62, [%g6 + TI_FPREGS + (62 * 4)]
	sethi		%hi(fitos_table_1), %g1
	and		%g3, 0x1f, %g2
	or		%g1, %lo(fitos_table_1),  %g1
	sllx		%g2, 2, %g2
	jmpl		%g1 + %g2, %g0
	 ba,pt		%xcc, fitos_emul_continue

fitos_table_1:
	fitod		%f0, %f62
	fitod		%f1, %f62
	fitod		%f2, %f62
	fitod		%f3, %f62
	fitod		%f4, %f62
	fitod		%f5, %f62
	fitod		%f6, %f62
	fitod		%f7, %f62
	fitod		%f8, %f62
	fitod		%f9, %f62
	fitod		%f10, %f62
	fitod		%f11, %f62
	fitod		%f12, %f62
	fitod		%f13, %f62
	fitod		%f14, %f62
	fitod		%f15, %f62
	fitod		%f16, %f62
	fitod		%f17, %f62
	fitod		%f18, %f62
	fitod		%f19, %f62
	fitod		%f20, %f62
	fitod		%f21, %f62
	fitod		%f22, %f62
	fitod		%f23, %f62
	fitod		%f24, %f62
	fitod		%f25, %f62
	fitod		%f26, %f62
	fitod		%f27, %f62
	fitod		%f28, %f62
	fitod		%f29, %f62
	fitod		%f30, %f62
	fitod		%f31, %f62

fitos_emul_continue:
	sethi		%hi(fitos_table_2), %g1
	srl		%g3, 25, %g2
	or		%g1, %lo(fitos_table_2), %g1
	and		%g2, 0x1f, %g2
	sllx		%g2, 2, %g2
	jmpl		%g1 + %g2, %g0
	 ba,pt		%xcc, fitos_emul_fini

fitos_table_2:
	fdtos		%f62, %f0
	fdtos		%f62, %f1
	fdtos		%f62, %f2
	fdtos		%f62, %f3
	fdtos		%f62, %f4
	fdtos		%f62, %f5
	fdtos		%f62, %f6
	fdtos		%f62, %f7
	fdtos		%f62, %f8
	fdtos		%f62, %f9
	fdtos		%f62, %f10
	fdtos		%f62, %f11
	fdtos		%f62, %f12
	fdtos		%f62, %f13
	fdtos		%f62, %f14
	fdtos		%f62, %f15
	fdtos		%f62, %f16
	fdtos		%f62, %f17
	fdtos		%f62, %f18
	fdtos		%f62, %f19
	fdtos		%f62, %f20
	fdtos		%f62, %f21
	fdtos		%f62, %f22
	fdtos		%f62, %f23
	fdtos		%f62, %f24
	fdtos		%f62, %f25
	fdtos		%f62, %f26
	fdtos		%f62, %f27
	fdtos		%f62, %f28
	fdtos		%f62, %f29
	fdtos		%f62, %f30
	fdtos		%f62, %f31

fitos_emul_fini:
	ldd		[%g6 + TI_FPREGS + (62 * 4)], %f62
	done

	.globl		do_fptrap
	.align		32
do_fptrap:
	stx		%fsr, [%g6 + TI_XFSR]
do_fptrap_after_fsr:
	ldub		[%g6 + TI_FPSAVED], %g3
	rd		%fprs, %g1
	or		%g3, %g1, %g3
	stb		%g3, [%g6 + TI_FPSAVED]
	rd		%gsr, %g3
	stx		%g3, [%g6 + TI_GSR]
	mov		SECONDARY_CONTEXT, %g3
	ldxa		[%g3] ASI_DMMU, %g5
	sethi		%hi(sparc64_kern_sec_context), %g2
	ldx		[%g2 + %lo(sparc64_kern_sec_context)], %g2
	stxa		%g2, [%g3] ASI_DMMU
	membar		#Sync
	add		%g6, TI_FPREGS, %g2
	andcc		%g1, FPRS_DL, %g0
	be,pn		%icc, 4f
	 mov		0x40, %g3
	stda		%f0, [%g2] ASI_BLK_S
	stda		%f16, [%g2 + %g3] ASI_BLK_S
	andcc		%g1, FPRS_DU, %g0
	be,pn		%icc, 5f
4:       add		%g2, 128, %g2
	stda		%f32, [%g2] ASI_BLK_S
	stda		%f48, [%g2 + %g3] ASI_BLK_S
5:	mov		SECONDARY_CONTEXT, %g1
	membar		#Sync
	stxa		%g5, [%g1] ASI_DMMU
	membar		#Sync
	ba,pt		%xcc, etrap
	 wr		%g0, 0, %fprs

	/* The registers for cross calls will be:
	 *
	 * DATA 0: [low 32-bits]  Address of function to call, jmp to this
	 *         [high 32-bits] MMU Context Argument 0, place in %g5
	 * DATA 1: Address Argument 1, place in %g1
	 * DATA 2: Address Argument 2, place in %g7
	 *
	 * With this method we can do most of the cross-call tlb/cache
	 * flushing very quickly.
	 *
	 * Current CPU's IRQ worklist table is locked into %g6, don't touch.
	 */
	.text
	.align		32
	.globl		do_ivec
do_ivec:
	mov		0x40, %g3
	ldxa		[%g3 + %g0] ASI_INTR_R, %g3
	sethi		%hi(KERNBASE), %g4
	cmp		%g3, %g4
	bgeu,pn		%xcc, do_ivec_xcall
	 srlx		%g3, 32, %g5
	stxa		%g0, [%g0] ASI_INTR_RECEIVE
	membar		#Sync

	sethi		%hi(ivector_table), %g2
	sllx		%g3, 5, %g3
	or		%g2, %lo(ivector_table), %g2
	add		%g2, %g3, %g3
	ldub		[%g3 + 0x04], %g4	/* pil */
	mov		1, %g2
	sllx		%g2, %g4, %g2
	sllx		%g4, 2, %g4

	lduw		[%g6 + %g4], %g5	/* g5 = irq_work(cpu, pil) */
	stw		%g5, [%g3 + 0x00]	/* bucket->irq_chain = g5 */
	stw		%g3, [%g6 + %g4]	/* irq_work(cpu, pil) = bucket */
	wr		%g2, 0x0, %set_softint
	retry
do_ivec_xcall:
	mov		0x50, %g1
	ldxa		[%g1 + %g0] ASI_INTR_R, %g1
	srl		%g3, 0, %g3

	mov		0x60, %g7
	ldxa		[%g7 + %g0] ASI_INTR_R, %g7
	stxa		%g0, [%g0] ASI_INTR_RECEIVE
	membar		#Sync
	ba,pt		%xcc, 1f
	 nop

	.align		32
1:	jmpl		%g3, %g0
	 nop

	.globl		save_alternate_globals
save_alternate_globals: /* %o0 = save_area */
	rdpr		%pstate, %o5
	andn		%o5, PSTATE_IE, %o1
	wrpr		%o1, PSTATE_AG, %pstate
	stx		%g0, [%o0 + 0x00]
	stx		%g1, [%o0 + 0x08]
	stx		%g2, [%o0 + 0x10]
	stx		%g3, [%o0 + 0x18]
	stx		%g4, [%o0 + 0x20]
	stx		%g5, [%o0 + 0x28]
	stx		%g6, [%o0 + 0x30]
	stx		%g7, [%o0 + 0x38]
	wrpr		%o1, PSTATE_IG, %pstate
	stx		%g0, [%o0 + 0x40]
	stx		%g1, [%o0 + 0x48]
	stx		%g2, [%o0 + 0x50]
	stx		%g3, [%o0 + 0x58]
	stx		%g4, [%o0 + 0x60]
	stx		%g5, [%o0 + 0x68]
	stx		%g6, [%o0 + 0x70]
	stx		%g7, [%o0 + 0x78]
	wrpr		%o1, PSTATE_MG, %pstate
	stx		%g0, [%o0 + 0x80]
	stx		%g1, [%o0 + 0x88]
	stx		%g2, [%o0 + 0x90]
	stx		%g3, [%o0 + 0x98]
	stx		%g4, [%o0 + 0xa0]
	stx		%g5, [%o0 + 0xa8]
	stx		%g6, [%o0 + 0xb0]
	stx		%g7, [%o0 + 0xb8]
	wrpr		%o5, 0x0, %pstate
	retl
	 nop

	.globl		restore_alternate_globals
restore_alternate_globals: /* %o0 = save_area */
	rdpr		%pstate, %o5
	andn		%o5, PSTATE_IE, %o1
	wrpr		%o1, PSTATE_AG, %pstate
	ldx		[%o0 + 0x00], %g0
	ldx		[%o0 + 0x08], %g1
	ldx		[%o0 + 0x10], %g2
	ldx		[%o0 + 0x18], %g3
	ldx		[%o0 + 0x20], %g4
	ldx		[%o0 + 0x28], %g5
	ldx		[%o0 + 0x30], %g6
	ldx		[%o0 + 0x38], %g7
	wrpr		%o1, PSTATE_IG, %pstate
	ldx		[%o0 + 0x40], %g0
	ldx		[%o0 + 0x48], %g1
	ldx		[%o0 + 0x50], %g2
	ldx		[%o0 + 0x58], %g3
	ldx		[%o0 + 0x60], %g4
	ldx		[%o0 + 0x68], %g5
	ldx		[%o0 + 0x70], %g6
	ldx		[%o0 + 0x78], %g7
	wrpr		%o1, PSTATE_MG, %pstate
	ldx		[%o0 + 0x80], %g0
	ldx		[%o0 + 0x88], %g1
	ldx		[%o0 + 0x90], %g2
	ldx		[%o0 + 0x98], %g3
	ldx		[%o0 + 0xa0], %g4
	ldx		[%o0 + 0xa8], %g5
	ldx		[%o0 + 0xb0], %g6
	ldx		[%o0 + 0xb8], %g7
	wrpr		%o5, 0x0, %pstate
	retl
	 nop

	.globl		getcc, setcc
getcc:
	ldx		[%o0 + PT_V9_TSTATE], %o1
	srlx		%o1, 32, %o1
	and		%o1, 0xf, %o1
	retl
	 stx		%o1, [%o0 + PT_V9_G1]
setcc:
	ldx		[%o0 + PT_V9_TSTATE], %o1
	ldx		[%o0 + PT_V9_G1], %o2
	or		%g0, %ulo(TSTATE_ICC), %o3
	sllx		%o3, 32, %o3
	andn		%o1, %o3, %o1
	sllx		%o2, 32, %o2
	and		%o2, %o3, %o2
	or		%o1, %o2, %o1
	retl
	 stx		%o1, [%o0 + PT_V9_TSTATE]

	.globl		utrap, utrap_ill
utrap:	brz,pn		%g1, etrap
	 nop
	save		%sp, -128, %sp
	rdpr		%tstate, %l6
	rdpr		%cwp, %l7
	andn		%l6, TSTATE_CWP, %l6
	wrpr		%l6, %l7, %tstate
	rdpr		%tpc, %l6
	rdpr		%tnpc, %l7
	wrpr		%g1, 0, %tnpc
	done
utrap_ill:
        call		bad_trap
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	 clr		%l6

	/* XXX Here is stuff we still need to write... -DaveM XXX */
	.globl		netbsd_syscall
netbsd_syscall:
	retl
	 nop

	/* We need to carefully read the error status, ACK
	 * the errors, prevent recursive traps, and pass the
	 * information on to C code for logging.
	 *
	 * We pass the AFAR in as-is, and we encode the status
	 * information as described in asm-sparc64/sfafsr.h
	 */
	.globl		__spitfire_access_error
__spitfire_access_error:
	/* Disable ESTATE error reporting so that we do not
	 * take recursive traps and RED state the processor.
	 */
	stxa		%g0, [%g0] ASI_ESTATE_ERROR_EN
	membar		#Sync

	mov		UDBE_UE, %g1
	ldxa		[%g0] ASI_AFSR, %g4	! Get AFSR

	/* __spitfire_cee_trap branches here with AFSR in %g4 and
	 * UDBE_CE in %g1.  It only clears ESTATE_ERR_CE in the
	 * ESTATE Error Enable register.
	 */
__spitfire_cee_trap_continue:
	ldxa		[%g0] ASI_AFAR, %g5	! Get AFAR

	rdpr		%tt, %g3
	and		%g3, 0x1ff, %g3		! Paranoia
	sllx		%g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
	or		%g4, %g3, %g4
	rdpr		%tl, %g3
	cmp		%g3, 1
	mov		1, %g3
	bleu		%xcc, 1f
	 sllx		%g3, SFSTAT_TL_GT_ONE_SHIFT, %g3

	or		%g4, %g3, %g4

	/* Read in the UDB error register state, clearing the
	 * sticky error bits as-needed.  We only clear them if
	 * the UE bit is set.  Likewise, __spitfire_cee_trap
	 * below will only do so if the CE bit is set.
	 *
	 * NOTE: UltraSparc-I/II have high and low UDB error
	 *       registers, corresponding to the two UDB units
	 *       present on those chips.  UltraSparc-IIi only
	 *       has a single UDB, called "SDB" in the manual.
	 *       For IIi the upper UDB register always reads
	 *       as zero so for our purposes things will just
	 *       work with the checks below.
	 */
1:	ldxa		[%g0] ASI_UDBH_ERROR_R, %g3
