
📄 entry.S

📁 优龙2410 Linux 2.6.8 kernel source code
📖 Page 1 of 4
/* $Id: entry.S,v 1.144 2002/02/09 19:49:30 davem Exp $
 * arch/sparc64/kernel/entry.S:  Sparc64 trap low-level entry points.
 *
 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost        (ecd@skynet.be)
 * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/errno.h>

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/ptrace.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/visasm.h>
#include <asm/estate.h>
#include <asm/auxio.h>

/* #define SYSCALL_TRACING	1 */

#define curptr      g6

#define NR_SYSCALLS 283      /* Each OS is different... */

	.text
	.align		32

	.globl		sparc64_vpte_patchme1
	.globl		sparc64_vpte_patchme2
/*
 * On a second level vpte miss, check whether the original fault is to the OBP
 * range (note that this is only possible for instruction miss, data misses to
 * obp range do not use vpte). If so, go back directly to the faulting address.
 * This is because we want to read the tpc, otherwise we have no way of knowing
 * the 8k aligned faulting address if we are using >8k kernel pagesize. This
 * also ensures no vpte range addresses are dropped into tlb while obp is
 * executing (see inherit_locked_prom_mappings() rant).
 */
sparc64_vpte_nucleus:
	mov		0xf, %g5
	sllx		%g5, 28, %g5			! Load 0xf0000000
	cmp		%g4, %g5			! Is addr >= LOW_OBP_ADDRESS?
	blu,pn		%xcc, sparc64_vpte_patchme1
	 mov		0x1, %g5
	sllx		%g5, 32, %g5			! Load 0x100000000
	cmp		%g4, %g5			! Is addr < HI_OBP_ADDRESS?
	blu,pn		%xcc, obp_iaddr_patch
	 nop
sparc64_vpte_patchme1:
	sethi		%hi(0), %g5			! This has to be patched
sparc64_vpte_patchme2:
	or		%g5, %lo(0), %g5		! This is patched too
	ba,pt		%xcc, sparc64_kpte_continue	! Part of dtlb_backend
	 add		%g1, %g1, %g1			! Finish PMD offset adjustment

vpte_noent:
	mov		TLB_SFSR, %g1			! Restore %g1 value
	stxa		%g4, [%g1 + %g1] ASI_DMMU	! Restore previous TAG_ACCESS
	done						! Slick trick

	.globl		obp_iaddr_patch
	.globl		obp_daddr_patch

obp_iaddr_patch:
	sethi		%hi(0), %g5			! This and following is patched
	or		%g5, %lo(0), %g5		! g5 now holds obp pmd base physaddr
	wrpr		%g0, 1, %tl			! Behave as if we are at TL0
	rdpr		%tpc, %g4			! Find original faulting iaddr
	srlx		%g4, 13, %g4			! Throw out context bits
	sllx		%g4, 13, %g4			! g4 has vpn + ctx0 now
	mov		TLB_SFSR, %g1			! Restore %g1 value
	stxa		%g4, [%g1 + %g1] ASI_IMMU	! Restore previous TAG_ACCESS
	srlx		%g4, 23, %g6			! Find pmd number
	and		%g6, 0x7ff, %g6			! Find pmd number
	sllx		%g6, 2, %g6			! Find pmd offset
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5! Load pmd, ie pagetable physaddr
	brz,pn		%g5, longpath			! Kill the PROM ? :-)
	 sllx		%g5, 11, %g5			! Shift into place
	srlx		%g4, 13, %g6			! find pte number in pagetable
	and		%g6, 0x3ff, %g6			! find pte number in pagetable
	sllx		%g6, 3, %g6			! find pte offset in pagetable
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5! Load pte
	brgez,pn	%g5, longpath			! Kill the PROM ? :-)
	 nop
	stxa		%g5, [%g0] ASI_ITLB_DATA_IN	! put into tlb
	retry						! go back to original fault

obp_daddr_patch:
	sethi		%hi(0), %g5			! This and following is patched
	or		%g5, %lo(0), %g5		! g5 now holds obp pmd base physaddr
	srlx		%g4, 23, %g6			! Find pmd number
	and		%g6, 0x7ff, %g6			! Find pmd number
	sllx		%g6, 2, %g6			! Find pmd offset
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5! Load pmd, ie pagetable physaddr
	brz,pn		%g5, longpath
	 sllx		%g5, 11, %g5			! Shift into place
	srlx		%g4, 13, %g6			! find pte number in pagetable
	and		%g6, 0x3ff, %g6			! find pte number in pagetable
	sllx		%g6, 3, %g6			! find pte offset in pagetable
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5! Load pte
	brgez,pn	%g5, longpath
	 nop
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! put into tlb
	retry
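/*
 * Annotation (editor's note, not part of the original file): both
 * obp_*_patch handlers above walk the same two-level OBP page table by
 * hand.  In rough C terms, assuming pmd_base is the patched physical
 * base address loaded into %g5 (hypothetical sketch; the real loads go
 * through ASI_PHYS_USE_EC, i.e. physical addresses):
 *
 *	u32 pmd = *(u32 *)(pmd_base + ((vaddr >> 23) & 0x7ff) * 4);
 *	u64 pte = *(u64 *)(((u64)pmd << 11) + ((vaddr >> 13) & 0x3ff) * 8);
 *
 * A zero pmd, or a pte with the valid bit (bit 63) clear, bails out to
 * longpath; otherwise the pte is written straight into the ITLB/DTLB
 * and the faulting access is retried.
 */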
/*
 * On a first level data miss, check whether this is to the OBP range (note
 * that such accesses can be made by prom, as well as by kernel using
 * prom_getproperty on "address"), and if so, do not use vpte access ...
 * rather, use information saved during inherit_prom_mappings() using 8k
 * pagesize.
 */
kvmap:
	mov		0xf, %g5
	sllx		%g5, 28, %g5			! Load 0xf0000000
	cmp		%g4, %g5			! Is addr >= LOW_OBP_ADDRESS?
	blu,pn		%xcc, vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5			! Load 0x100000000
	cmp		%g4, %g5			! Is addr < HI_OBP_ADDRESS?
	blu,pn		%xcc, obp_daddr_patch
	 nop
vmalloc_addr:						! vmalloc addr accessed
	ldxa		[%g3 + %g6] ASI_N, %g5		! Yep, load k-vpte
	brgez,pn	%g5, longpath			! Valid, load into TLB
	 nop
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry

	/* This is trivial with the new code... */
	.globl		do_fpdis
do_fpdis:
	sethi		%hi(TSTATE_PEF), %g4					! IEU0
	rdpr		%tstate, %g5
	andcc		%g5, %g4, %g0
	be,pt		%xcc, 1f
	 nop
	rd		%fprs, %g5
	andcc		%g5, FPRS_FEF, %g0
	be,pt		%xcc, 1f
	 nop

	/* Legal state when DCR_IFPOE is set in Cheetah %dcr. */
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7
	add		%g0, %g0, %g0
	ba,a,pt		%xcc, rtrap_clr_l6

1:	ldub		[%g6 + TI_FPSAVED], %g5					! Load	Group
	wr		%g0, FPRS_FEF, %fprs					! LSU	Group+4bubbles
	andcc		%g5, FPRS_FEF, %g0					! IEU1	Group
	be,a,pt		%icc, 1f						! CTI
	 clr		%g7							! IEU0
	ldx		[%g6 + TI_GSR], %g7					! Load	Group
1:	andcc		%g5, FPRS_DL, %g0					! IEU1
	bne,pn		%icc, 2f						! CTI
	 fzero		%f0							! FPA
	andcc		%g5, FPRS_DU, %g0					! IEU1  Group
	bne,pn		%icc, 1f						! CTI
	 fzero		%f2							! FPA
	faddd		%f0, %f2, %f4
	fmuld		%f0, %f2, %f6
	faddd		%f0, %f2, %f8
	fmuld		%f0, %f2, %f10
	faddd		%f0, %f2, %f12
	fmuld		%f0, %f2, %f14
	faddd		%f0, %f2, %f16
	fmuld		%f0, %f2, %f18
	faddd		%f0, %f2, %f20
	fmuld		%f0, %f2, %f22
	faddd		%f0, %f2, %f24
	fmuld		%f0, %f2, %f26
	faddd		%f0, %f2, %f28
	fmuld		%f0, %f2, %f30
	faddd		%f0, %f2, %f32
	fmuld		%f0, %f2, %f34
	faddd		%f0, %f2, %f36
	fmuld		%f0, %f2, %f38
	faddd		%f0, %f2, %f40
	fmuld		%f0, %f2, %f42
	faddd		%f0, %f2, %f44
	fmuld		%f0, %f2, %f46
	faddd		%f0, %f2, %f48
	fmuld		%f0, %f2, %f50
	faddd		%f0, %f2, %f52
	fmuld		%f0, %f2, %f54
	faddd		%f0, %f2, %f56
	fmuld		%f0, %f2, %f58
	b,pt		%xcc, fpdis_exit2
	 faddd		%f0, %f2, %f60
1:	mov		SECONDARY_CONTEXT, %g3
	add		%g6, TI_FPREGS + 0x80, %g1
	faddd		%f0, %f2, %f4
	fmuld		%f0, %f2, %f6
	ldxa		[%g3] ASI_DMMU, %g5
	add		%g6, TI_FPREGS + 0xc0, %g2
	stxa		%g0, [%g3] ASI_DMMU
	membar		#Sync
	faddd		%f0, %f2, %f8
	fmuld		%f0, %f2, %f10
	ldda		[%g1] ASI_BLK_S, %f32	! grrr, where is ASI_BLK_NUCLEUS 8-(
	ldda		[%g2] ASI_BLK_S, %f48
	faddd		%f0, %f2, %f12
	fmuld		%f0, %f2, %f14
	faddd		%f0, %f2, %f16
	fmuld		%f0, %f2, %f18
	faddd		%f0, %f2, %f20
	fmuld		%f0, %f2, %f22
	faddd		%f0, %f2, %f24
	fmuld		%f0, %f2, %f26
	faddd		%f0, %f2, %f28
	fmuld		%f0, %f2, %f30
	b,pt		%xcc, fpdis_exit
	 membar		#Sync
2:	andcc		%g5, FPRS_DU, %g0
	bne,pt		%icc, 3f
	 fzero		%f32
	mov		SECONDARY_CONTEXT, %g3
	fzero		%f34
	ldxa		[%g3] ASI_DMMU, %g5
	add		%g6, TI_FPREGS, %g1
	stxa		%g0, [%g3] ASI_DMMU
	membar		#Sync
	add		%g6, TI_FPREGS + 0x40, %g2
	faddd		%f32, %f34, %f36
	fmuld		%f32, %f34, %f38
	ldda		[%g1] ASI_BLK_S, %f0	! grrr, where is ASI_BLK_NUCLEUS 8-(
	ldda		[%g2] ASI_BLK_S, %f16
	faddd		%f32, %f34, %f40
	fmuld		%f32, %f34, %f42
	faddd		%f32, %f34, %f44
	fmuld		%f32, %f34, %f46
	faddd		%f32, %f34, %f48
	fmuld		%f32, %f34, %f50
	faddd		%f32, %f34, %f52
	fmuld		%f32, %f34, %f54
	faddd		%f32, %f34, %f56
	fmuld		%f32, %f34, %f58
	faddd		%f32, %f34, %f60
	fmuld		%f32, %f34, %f62
	ba,pt		%xcc, fpdis_exit
	 membar		#Sync
3:	mov		SECONDARY_CONTEXT, %g3
	add		%g6, TI_FPREGS, %g1
	ldxa		[%g3] ASI_DMMU, %g5
	mov		0x40, %g2
	stxa		%g0, [%g3] ASI_DMMU
	membar		#Sync
	ldda		[%g1] ASI_BLK_S, %f0		! grrr, where is ASI_BLK_NUCLEUS 8-(
	ldda		[%g1 + %g2] ASI_BLK_S, %f16
	add		%g1, 0x80, %g1
	ldda		[%g1] ASI_BLK_S, %f32
	ldda		[%g1 + %g2] ASI_BLK_S, %f48
	membar		#Sync
fpdis_exit:
	stxa		%g5, [%g3] ASI_DMMU
	membar		#Sync
fpdis_exit2:
	wr		%g7, 0, %gsr
	ldx		[%g6 + TI_XFSR], %fsr
	rdpr		%tstate, %g3
	or		%g3, %g4, %g3		! anal...
	wrpr		%g3, %tstate
	wr		%g0, FPRS_FEF, %fprs	! clean DU/DL bits
	retry
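/*
 * Annotation (editor's note, not part of the original file): do_fpdis
 * services the "FPU disabled" trap with a lazy restore.  TI_FPSAVED
 * records which register halves were saved: FPRS_DL covers %f0-%f31 and
 * FPRS_DU covers %f32-%f63.  Saved halves are block-loaded from
 * TI_FPREGS via ASI_BLK_S with SECONDARY_CONTEXT temporarily forced to
 * zero (and restored at fpdis_exit); never-saved halves are cheaper to
 * synthesize, so they are zeroed with fzero and the zeros propagated to
 * the remaining registers by faddd/fmuld chains instead of memory loads.
 */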
	.align		32
fp_other_bounce:
	call		do_fpother
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	 clr		%l6

	.globl		do_fpother_check_fitos
	.align		32
do_fpother_check_fitos:
	sethi		%hi(fp_other_bounce - 4), %g7
	or		%g7, %lo(fp_other_bounce - 4), %g7

	/* NOTE: Need to preserve %g7 until we fully commit
	 *       to the fitos fixup.
	 */
	stx		%fsr, [%g6 + TI_XFSR]
	rdpr		%tstate, %g3
	andcc		%g3, TSTATE_PRIV, %g0
	bne,pn		%xcc, do_fptrap_after_fsr
	 nop
	ldx		[%g6 + TI_XFSR], %g3
	srlx		%g3, 14, %g1
	and		%g1, 7, %g1
	cmp		%g1, 2			! Unfinished FP-OP
	bne,pn		%xcc, do_fptrap_after_fsr
	 sethi		%hi(1 << 23), %g1	! Inexact
	andcc		%g3, %g1, %g0
	bne,pn		%xcc, do_fptrap_after_fsr
	 rdpr		%tpc, %g1
	lduwa		[%g1] ASI_AIUP, %g3	! This cannot ever fail

#define FITOS_MASK	0xc1f83fe0
#define FITOS_COMPARE	0x81a01880

	sethi		%hi(FITOS_MASK), %g1
	or		%g1, %lo(FITOS_MASK), %g1
	and		%g3, %g1, %g1
	sethi		%hi(FITOS_COMPARE), %g2
	or		%g2, %lo(FITOS_COMPARE), %g2
	cmp		%g1, %g2
	bne,pn		%xcc, do_fptrap_after_fsr
	 nop

	std		%f62, [%g6 + TI_FPREGS + (62 * 4)]
	sethi		%hi(fitos_table_1), %g1
	and		%g3, 0x1f, %g2
	or		%g1, %lo(fitos_table_1),  %g1
	sllx		%g2, 2, %g2
	jmpl		%g1 + %g2, %g0
	 ba,pt		%xcc, fitos_emul_continue

fitos_table_1:
	fitod		%f0, %f62
	fitod		%f1, %f62
	fitod		%f2, %f62
	fitod		%f3, %f62
	fitod		%f4, %f62
	fitod		%f5, %f62
	fitod		%f6, %f62
	fitod		%f7, %f62
	fitod		%f8, %f62
	fitod		%f9, %f62
	fitod		%f10, %f62
	fitod		%f11, %f62
	fitod		%f12, %f62
	fitod		%f13, %f62
	fitod		%f14, %f62
	fitod		%f15, %f62
	fitod		%f16, %f62
	fitod		%f17, %f62
	fitod		%f18, %f62
	fitod		%f19, %f62
	fitod		%f20, %f62
	fitod		%f21, %f62
	fitod		%f22, %f62
	fitod		%f23, %f62
	fitod		%f24, %f62
	fitod		%f25, %f62
	fitod		%f26, %f62
	fitod		%f27, %f62
	fitod		%f28, %f62
	fitod		%f29, %f62
	fitod		%f30, %f62
	fitod		%f31, %f62

fitos_emul_continue:
	sethi		%hi(fitos_table_2), %g1
	srl		%g3, 25, %g2
	or		%g1, %lo(fitos_table_2), %g1
	and		%g2, 0x1f, %g2
	sllx		%g2, 2, %g2
	jmpl		%g1 + %g2, %g0
	 ba,pt		%xcc, fitos_emul_fini

fitos_table_2:
	fdtos		%f62, %f0
	fdtos		%f62, %f1
	fdtos		%f62, %f2
	fdtos		%f62, %f3
	fdtos		%f62, %f4
	fdtos		%f62, %f5
	fdtos		%f62, %f6
	fdtos		%f62, %f7
	fdtos		%f62, %f8
	fdtos		%f62, %f9
	fdtos		%f62, %f10
	fdtos		%f62, %f11
	fdtos		%f62, %f12
	fdtos		%f62, %f13
	fdtos		%f62, %f14
	fdtos		%f62, %f15
	fdtos		%f62, %f16
	fdtos		%f62, %f17
	fdtos		%f62, %f18
	fdtos		%f62, %f19
	fdtos		%f62, %f20
	fdtos		%f62, %f21
	fdtos		%f62, %f22
	fdtos		%f62, %f23
	fdtos		%f62, %f24
	fdtos		%f62, %f25
	fdtos		%f62, %f26
	fdtos		%f62, %f27
	fdtos		%f62, %f28
	fdtos		%f62, %f29
	fdtos		%f62, %f30
	fdtos		%f62, %f31

fitos_emul_fini:
	ldd		[%g6 + TI_FPREGS + (62 * 4)], %f62
	done
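/*
 * Annotation (editor's note, not part of the original file): the fixup
 * above emulates an "unfinished" user-mode fitos (int-to-single convert)
 * via double precision, using %f62 as scratch: fitos_table_1 runs fitod
 * from the source register rs2 (instruction bits 4:0), fitos_table_2
 * runs fdtos into the destination rd (instruction bits 29:25).  Each
 * jmpl indexes its table by register number * 4 (one 4-byte instruction
 * per entry); the ba sitting in the jmpl delay slot then redirects flow
 * after exactly one table entry has executed.
 */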
	.globl		do_fptrap
	.align		32
do_fptrap:
	stx		%fsr, [%g6 + TI_XFSR]
do_fptrap_after_fsr:
	ldub		[%g6 + TI_FPSAVED], %g3
	rd		%fprs, %g1
	or		%g3, %g1, %g3
	stb		%g3, [%g6 + TI_FPSAVED]
	rd		%gsr, %g3
	stx		%g3, [%g6 + TI_GSR]
	mov		SECONDARY_CONTEXT, %g3
	add		%g6, TI_FPREGS, %g2
	ldxa		[%g3] ASI_DMMU, %g5
	stxa		%g0, [%g3] ASI_DMMU
	membar		#Sync
	andcc		%g1, FPRS_DL, %g0
	be,pn		%icc, 4f
	 mov		0x40, %g3
	stda		%f0, [%g2] ASI_BLK_S
	stda		%f16, [%g2 + %g3] ASI_BLK_S
	andcc		%g1, FPRS_DU, %g0
	be,pn		%icc, 5f
4:       add		%g2, 128, %g2
	stda		%f32, [%g2] ASI_BLK_S
	stda		%f48, [%g2 + %g3] ASI_BLK_S
5:	mov		SECONDARY_CONTEXT, %g1
	membar		#Sync
	stxa		%g5, [%g1] ASI_DMMU
	membar		#Sync
	ba,pt		%xcc, etrap
	 wr		%g0, 0, %fprs

	/* The registers for cross calls will be:
	 *
	 * DATA 0: [low 32-bits]  Address of function to call, jmp to this
	 *         [high 32-bits] MMU Context Argument 0, place in %g5
	 * DATA 1: Address Argument 1, place in %g6
	 * DATA 2: Address Argument 2, place in %g7
	 *
	 * With this method we can do most of the cross-call tlb/cache
	 * flushing very quickly.
	 *
	 * Current CPU's IRQ worklist table is locked into %g1,
	 * don't touch.
	 */
	.text
	.align		32
	.globl		do_ivec
do_ivec:
	mov		0x40, %g3
	ldxa		[%g3 + %g0] ASI_INTR_R, %g3
	sethi		%hi(KERNBASE), %g4
	cmp		%g3, %g4
	bgeu,pn		%xcc, do_ivec_xcall
	 srlx		%g3, 32, %g5
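/*
 * Annotation (editor's note, not part of the original file): per the
 * cross-call comment above, do_ivec fetches incoming interrupt DATA 0
 * (offset 0x40 in ASI_INTR_R space) and compares it against KERNBASE.
 * A value at or above KERNBASE is a kernel function address, so the
 * vector is dispatched as a cross call via do_ivec_xcall, with the high
 * 32 bits (the MMU context argument) shifted into %g5 in the branch
 * delay slot.
 */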
