
📄 mca_asm.s

📁 xen 3.2.2 source code
📖 Page 1 of 2
//
// assembly portion of the IA64 MCA handling
//
// Mods by cfleck to integrate into kernel build
// 00/03/15 davidm Added various stop bits to get a clean compile
//
// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
//		   kstack, switch modes, jump to C INIT handler
//
// 02/01/04 J.Hall <jenna.s.hall@intel.com>
//		   Before entering virtual mode code:
//		   1. Check for TLB CPU error
//		   2. Restore current thread pointer to kr6
//		   3. Move stack ptr 16 bytes to conform to C calling convention
//
// 04/11/12 Russ Anderson <rja@sgi.com>
//		   Added per cpu MCA/INIT stack save areas.
//
#include <linux/config.h>
#include <linux/threads.h>

#include <asm/asmmacro.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mca_asm.h>
#include <asm/mca.h>
#ifdef XEN
#include <asm/vhpt.h>
#endif

/*
 * When we get a machine check, the kernel stack pointer is no longer
 * valid, so we need to set a new stack pointer.
 */
#define	MINSTATE_PHYS	/* Make sure stack access is physical for MINSTATE */

/*
 * Needed for return context to SAL
 */
#define IA64_MCA_SAME_CONTEXT	0
#define IA64_MCA_COLD_BOOT	-2

#include "minstate.h"

/*
 * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
 *		1. GR1 = OS GP
 *		2. GR8 = PAL_PROC physical address
 *		3. GR9 = SAL_PROC physical address
 *		4. GR10 = SAL GP (physical)
 *		5. GR11 = Rendez state
 *		6. GR12 = Return address to location within SAL_CHECK
 */
#ifdef XEN
#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)		\
	GET_THIS_PADDR(_tmp, ia64_sal_to_os_handoff_state_addr);;	\
	ld8	_tmp=[_tmp];;				\
	st8	[_tmp]=r1,0x08;;			\
	st8	[_tmp]=r8,0x08;;			\
	st8	[_tmp]=r9,0x08;;			\
	st8	[_tmp]=r10,0x08;;			\
	st8	[_tmp]=r11,0x08;;			\
	st8	[_tmp]=r12,0x08;;			\
	st8	[_tmp]=r17,0x08;;			\
	st8	[_tmp]=r18,0x08
#else
#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)		\
	LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
	st8	[_tmp]=r1,0x08;;			\
	st8	[_tmp]=r8,0x08;;			\
	st8	[_tmp]=r9,0x08;;			\
	st8	[_tmp]=r10,0x08;;			\
	st8	[_tmp]=r11,0x08;;			\
	st8	[_tmp]=r12,0x08;;			\
	st8	[_tmp]=r17,0x08;;			\
	st8	[_tmp]=r18,0x08
#endif /* XEN */

/*
 * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
 * (p6) is executed if we never entered virtual mode (TLB error)
 * (p7) is executed if we entered virtual mode as expected (normal case)
 *	1. GR8 = OS_MCA return status
 *	2. GR9 = SAL GP (physical)
 *	3. GR10 = 0/1 returning same/new context
 *	4. GR22 = New min state save area pointer
 *	returns ptr to SAL rtn save loc in _tmp
 */
#define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp)	\
	movl	_tmp=ia64_os_to_sal_handoff_state;;	\
	DATA_VA_TO_PA(_tmp);;				\
	ld8	r8=[_tmp],0x08;;			\
	ld8	r9=[_tmp],0x08;;			\
	ld8	r10=[_tmp],0x08;;			\
	ld8	r22=[_tmp],0x08;;	// now _tmp is pointing to SAL rtn save location

/*
 * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
 *	imots_os_status=IA64_MCA_COLD_BOOT
 *	imots_sal_gp=SAL GP
 *	imots_context=IA64_MCA_SAME_CONTEXT
 *	imots_new_min_state=Min state save area pointer
 *	imots_sal_check_ra=Return address to location within SAL_CHECK
 *
 */
#ifdef XEN
#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
	movl	tmp=IA64_MCA_COLD_BOOT;					\
	GET_THIS_PADDR(r2,ia64_sal_to_os_handoff_state_addr);;		\
	ld8	sal_to_os_handoff=[sal_to_os_handoff];;			\
	movl	os_to_sal_handoff=ia64_os_to_sal_handoff_state;;	\
	dep	os_to_sal_handoff = 0, os_to_sal_handoff, 60, 4;;	\
	/*DATA_VA_TO_PA(os_to_sal_handoff);;*/				\
	st8	[os_to_sal_handoff]=tmp,8;;				\
	ld8	tmp=[sal_to_os_handoff],48;;				\
	st8	[os_to_sal_handoff]=tmp,8;;				\
	movl	tmp=IA64_MCA_SAME_CONTEXT;;				\
	st8	[os_to_sal_handoff]=tmp,8;;				\
	ld8	tmp=[sal_to_os_handoff],-8;;				\
	st8     [os_to_sal_handoff]=tmp,8;;				\
	ld8	tmp=[sal_to_os_handoff];;				\
	st8     [os_to_sal_handoff]=tmp;;
#else	/* XEN */
#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
	movl	tmp=IA64_MCA_COLD_BOOT;					\
	movl	sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state);	\
	movl	os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);;	\
	st8	[os_to_sal_handoff]=tmp,8;;				\
	ld8	tmp=[sal_to_os_handoff],48;;				\
	st8	[os_to_sal_handoff]=tmp,8;;				\
	movl	tmp=IA64_MCA_SAME_CONTEXT;;				\
	st8	[os_to_sal_handoff]=tmp,8;;				\
	ld8	tmp=[sal_to_os_handoff],-8;;				\
	st8     [os_to_sal_handoff]=tmp,8;;				\
	ld8	tmp=[sal_to_os_handoff];;				\
	st8     [os_to_sal_handoff]=tmp;;
#endif	/* XEN */

#define GET_IA64_MCA_DATA(reg)						\
	GET_THIS_PADDR(reg, ia64_mca_data)				\
	;;								\
	ld8 reg=[reg]

	.global ia64_os_mca_dispatch
	.global ia64_os_mca_dispatch_end
#ifndef XEN
	.global ia64_sal_to_os_handoff_state
	.global	ia64_os_to_sal_handoff_state
#endif
	.global ia64_do_tlb_purge

	.text
	.align 16

#ifdef	XEN
/*
 * void set_per_cpu_data(void)
 * {
 *   int i;
 *   for (i = 0; i < 64; i++) {
 *     if (ia64_mca_tlb_list[i].cr_lid == ia64_getreg(_IA64_REG_CR_LID)) {
 *       ia64_set_kr(IA64_KR_PER_CPU_DATA, ia64_mca_tlb_list[i].percpu_paddr);
 *       return;
 *     }
 *   }
 *   while(1);	// Endless loop on error
 * }
 */
#define SET_PER_CPU_DATA()					\
	LOAD_PHYSICAL(p0,r2,ia64_mca_tlb_list);;		\
	mov r7 = r0;						\
	mov r6 = r0;;						\
	adds r3 = IA64_MCA_PERCPU_OFFSET, r2;			\
1:	add r4 = r6, r2;					\
	mov r5=cr.lid;;						\
	adds r7 = 1, r7;					\
	ld8 r4 = [r4];;						\
	cmp.ne p6, p7 = r5, r4;					\
	cmp4.lt p8, p9 = NR_CPUS-1, r7;				\
(p7)	br.cond.dpnt 3f;					\
	adds r6 = 16, r6;					\
(p9) 	br.cond.sptk 1b;					\
2:	br 2b;;			/* Endless loop on error */	\
3:	add r4 = r6, r3;;					\
	ld8 r4 = [r4];;						\
	mov ar.k3=r4

/*
 * GET_VA_VCPU_VHPT_MADDR() emulates 'reg = __va_ul(vcpu_vhpt_maddr(v))'.
 */
#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
#define HAS_PERVCPU_VHPT_MASK	0x2
#define GET_VA_VCPU_VHPT_MADDR(reg,tmp)				\
	GET_THIS_PADDR(reg,cpu_kr);;				\
	add reg=IA64_KR_CURRENT_OFFSET,reg;;			\
	ld8 reg=[reg];;						\
	dep tmp=0,reg,60,4;;			/* V to P */	\
	add tmp=IA64_VCPU_DOMAIN_OFFSET,tmp;;			\
	ld8 tmp=[tmp];;						\
	dep tmp=0,tmp,60,4;;			/* V to P */	\
	add tmp=IA64_DOMAIN_FLAGS_OFFSET,tmp;;			\
	ld8 tmp=[tmp];;						\
	and tmp=HAS_PERVCPU_VHPT_MASK,tmp;;			\
	cmp.eq p6,p0=tmp,r0;					\
(p6)	br.cond.sptk 1f;					\
	add reg=IA64_VCPU_VHPT_MADDR_OFFSET,reg;;		\
	dep reg=0,reg,60,4;;			/* V to P */	\
	ld8 reg=[reg];;						\
	dep reg=-1,reg,60,4;			/* P to V */	\
	br.sptk	2f;						\
1:								\
	GET_THIS_PADDR(reg, vhpt_paddr);;			\
	ld8 reg=[reg];;						\
	dep reg=-1,reg,60,4;			/* P to V */	\
2:
#else /* CONFIG_XEN_IA64_PERVCPU_VHPT */
#define GET_VA_VCPU_VHPT_MADDR(reg,tmp)				\
	GET_THIS_PADDR(reg, vhpt_paddr);;			\
	ld8 reg=[reg];;						\
	dep reg=-1,reg,60,4			/* P to V */
#endif /* CONFIG_XEN_IA64_PERVCPU_VHPT */
#endif	/* XEN */

/*
 * Just the TLB purge part is moved to a separate function
 * so we can re-use the code for cpu hotplug code as well
 * Caller should now setup b1, so we can branch once the
 * tlb flush is complete.
 */

ia64_do_tlb_purge:
#ifdef XEN
	// This needs to be called in order for GET_THIS_PADDR to work
	SET_PER_CPU_DATA();;
#endif
#define O(member)	IA64_CPUINFO_##member##_OFFSET

	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
	;;
	addl r17=O(PTCE_STRIDE),r2
	addl r2=O(PTCE_BASE),r2
	;;
	ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;	// r18=ptce_base
	ld4 r19=[r2],4					// r19=ptce_count[0]
	ld4 r21=[r17],4					// r21=ptce_stride[0]
	;;
	ld4 r20=[r2]					// r20=ptce_count[1]
	ld4 r22=[r17]					// r22=ptce_stride[1]
	mov r24=0
	;;
	adds r20=-1,r20
	;;
#undef O

2:
	cmp.ltu p6,p7=r24,r19
(p7)	br.cond.dpnt.few 4f
	mov ar.lc=r20
3:
	ptc.e r18
	;;
	add r18=r22,r18
	br.cloop.sptk.few 3b
	;;
	add r18=r21,r18
	add r24=1,r24
	;;
	br.sptk.few 2b
4:
	srlz.i 			// srlz.i implies srlz.d
	;;

        // Now purge addresses formerly mapped by TR registers
	// 1. Purge ITR&DTR for kernel.
	movl r16=KERNEL_START
	mov r18=KERNEL_TR_PAGE_SHIFT<<2
	;;
	ptr.i r16, r18
	ptr.d r16, r18
	;;
	srlz.i
	;;
	srlz.d
	;;
#ifdef XEN
	/* xen heap is identity mapped */
	mov r19=ip
	;;
	dep r17=0,r19,0,KERNEL_TR_PAGE_SHIFT
	;;
	dep r17=-1,r17,60,4
	;;
	ptr.d r17,r18
	;;
	srlz.d
	;;
#endif
	// 2. Purge DTR for PERCPU data.
	movl r16=PERCPU_ADDR
	mov r18=PERCPU_PAGE_SHIFT<<2
	;;
	ptr.d r16,r18
	;;
	srlz.d
	;;
	// 3. Purge ITR for PAL code.
	GET_THIS_PADDR(r2, ia64_mca_pal_base)
	;;
	ld8 r16=[r2]
	mov r18=IA64_GRANULE_SHIFT<<2
	;;
	ptr.i r16,r18
	;;
	srlz.i
	;;
	// 4. Purge DTR for stack.
#ifdef XEN
	// Kernel registers are saved in a per_cpu cpu_kr_ia64_t
	// to allow the kernel registers themselves to be used by domains.
	GET_THIS_PADDR(r2, cpu_kr);;
	add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
	;;
	ld8 r16=[r2]
#else
	mov r16=IA64_KR(CURRENT_STACK)
#endif
	;;
	shl r16=r16,IA64_GRANULE_SHIFT
	movl r19=PAGE_OFFSET
	;;
	add r16=r19,r16
	mov r18=IA64_GRANULE_SHIFT<<2
	;;
	ptr.d r16,r18
	;;
	srlz.i
	;;
#ifdef XEN
	// 5. VHPT
#if VHPT_ENABLED
	GET_VA_VCPU_VHPT_MADDR(r2,r3);;
	dep r16=0,r2,0,IA64_GRANULE_SHIFT
	mov r18=IA64_GRANULE_SHIFT<<2
	;;
	ptr.d r16,r18
	;;
	srlz.d
	;;
#endif
#endif
	// Now branch away to caller.
	br.sptk.many b1
	;;

ia64_os_mca_dispatch:

	// Serialize all MCA processing
	mov	r3=1;;
	LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
ia64_os_mca_spin:
	xchg8	r4=[r2],r3;;
	cmp.ne	p6,p0=r4,r0
(p6)	br ia64_os_mca_spin

#ifdef XEN
	SET_PER_CPU_DATA();;
#endif
	// Save the SAL to OS MCA handoff state as defined
	// by SAL SPEC 3.0
	// NOTE : The order in which the state gets saved
	//	  is dependent on the way the C-structure
	//	  for ia64_mca_sal_to_os_state_t has been
	//	  defined in include/asm/mca.h
	SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
	;;

	// LOG PROCESSOR STATE INFO FROM HERE ON..
begin_os_mca_dump:
	br	ia64_os_mca_proc_state_dump;;

ia64_os_mca_done_dump:

#ifdef XEN
	// Set current to ar.k6
	GET_THIS_PADDR(r2,cpu_kr);;
	add r2=IA64_KR_CURRENT_OFFSET,r2;;
	ld8 r2=[r2];;
	mov ar.k6=r2;;

	GET_THIS_PADDR(r2,ia64_sal_to_os_handoff_state_addr);;
	ld8 r2=[r2];;
	adds r16=56,r2
#else
	LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
#endif
	;;
	ld8 r18=[r16]		// Get processor state parameter on existing PALE_CHECK.
	;;
	tbit.nz p6,p7=r18,60
(p7)	br.spnt done_tlb_purge_and_reload

	// The following code purges TC and TR entries. Then reload all TC entries.
	// Purge percpu data TC entries.
begin_tlb_purge_and_reload:
	movl r18=ia64_reload_tr;;
	LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
	mov b1=r18;;
	br.sptk.many ia64_do_tlb_purge;;

ia64_reload_tr:
	// Finally reload the TR registers.
	// 1. Reload DTR/ITR registers for kernel.
	mov r18=KERNEL_TR_PAGE_SHIFT<<2
	movl r17=KERNEL_START
	;;
	mov cr.itir=r18
	mov cr.ifa=r17
        mov r16=IA64_TR_KERNEL
	mov r19=ip
	movl r18=PAGE_KERNEL
	;;
        dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
	;;
	or r18=r17,r18
	;;
        itr.i itr[r16]=r18
	;;
        itr.d dtr[r16]=r18
        ;;
	srlz.i
	srlz.d
	;;
#ifdef XEN
	/* xen heap is identity mapped */
	mov r16=IA64_TR_XEN_HEAP_REGS
	dep r17=-1,r17,60,4
	;;
	mov cr.ifa=r17
	;;
	itr.d dtr[r16]=r18
	;;
	srlz.d
	;;
#endif
	// 2. Reload DTR register for PERCPU data.
	GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
	;;
	movl r16=PERCPU_ADDR		// vaddr
	movl r18=PERCPU_PAGE_SHIFT<<2
	;;
	mov cr.itir=r18
	mov cr.ifa=r16
	;;
	ld8 r18=[r2]			// load per-CPU PTE
	mov r16=IA64_TR_PERCPU_DATA;
	;;
	itr.d dtr[r16]=r18
	;;
	srlz.d
	;;

	// 3. Reload ITR for PAL code.
	GET_THIS_PADDR(r2, ia64_mca_pal_pte)
	;;
	ld8 r18=[r2]			// load PAL PTE
	;;
	GET_THIS_PADDR(r2, ia64_mca_pal_base)
	;;
	ld8 r16=[r2]			// load PAL vaddr
	mov r19=IA64_GRANULE_SHIFT<<2
	;;
	mov cr.itir=r19
	mov cr.ifa=r16
	mov r20=IA64_TR_PALCODE
	;;
	itr.i itr[r20]=r18
	;;
	srlz.i
	;;

	// 4. Reload DTR for stack.
#ifdef XEN
	// avoid overlapping with kernel TR
	movl r17=KERNEL_START
	GET_THIS_PADDR(r2,cpu_kr);;
	add r2=IA64_KR_CURRENT_OFFSET,r2;;
	ld8 r16=[r2];;
	;;
	dep  r16=0,r16,0,KERNEL_TR_PAGE_SHIFT
	;;
	cmp.eq p7,p0=r17,r16
(p7)	br.cond.sptk	.reload_vhpt

	// Kernel registers are saved in a per_cpu cpu_kr_ia64_t
	// to allow the kernel registers themselves to be used by domains.
	GET_THIS_PADDR(r2, cpu_kr);;
	add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
	;;
	ld8 r16=[r2]
#else
	mov r16=IA64_KR(CURRENT_STACK)
#endif
	;;
	shl r16=r16,IA64_GRANULE_SHIFT
	movl r19=PAGE_OFFSET
	;;
	add r18=r19,r16
	movl r20=PAGE_KERNEL
	;;
	add r16=r20,r16
	mov r19=IA64_GRANULE_SHIFT<<2
	;;
	mov cr.itir=r19
	mov cr.ifa=r18
	mov r20=IA64_TR_CURRENT_STACK
	;;
	itr.d dtr[r20]=r16
	;;
	srlz.d
	;;
#ifdef XEN
.reload_vhpt:
	// 5. VHPT
#if VHPT_ENABLED
	GET_VA_VCPU_VHPT_MADDR(r2,r3);;
	dep r16=0,r2,0,IA64_GRANULE_SHIFT
	movl r20=PAGE_KERNEL
	;;
	mov r18=IA64_TR_VHPT
	dep r17=0,r16,60,4		// physical address of
	                                // va_vhpt & ~(IA64_GRANULE_SIZE - 1)
	mov r19=IA64_GRANULE_SHIFT<<2
	;;
	or r17=r17,r20			// construct PA | page properties
	mov cr.itir=r19
	mov cr.ifa=r16
	;;
	itr.d dtr[r18]=r17		// wire in new mapping...
	;;
	srlz.d
	;;
#endif
#endif
	br.sptk.many done_tlb_purge_and_reload

err:
	COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
	br.sptk.many ia64_os_mca_done_restore

done_tlb_purge_and_reload:

	// Setup new stack frame for OS_MCA handling
	GET_IA64_MCA_DATA(r2)
	;;
	add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
	add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
	;;
	rse_switch_context(r6,r3,r2);;	// RSC management in this new context

	GET_IA64_MCA_DATA(r2)
	;;
	add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
	;;
	mov r12=r2		// establish new stack-pointer

        // Enter virtual mode from physical mode
	VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
ia64_os_mca_virtual_begin:

	// Call virtual mode handler
	movl		r2=ia64_mca_ucmc_handler;;
	mov		b6=r2;;
	br.call.sptk.many    b0=b6;;
.ret0:
	// Revert back to physical mode before going back to SAL
	PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
ia64_os_mca_virtual_end:

	// restore the original stack frame here
	GET_IA64_MCA_DATA(r2)
	;;
	add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
	;;
	movl    r4=IA64_PSR_MC
	;;
	rse_return_context(r4,r3,r2)	// switch from interrupt context for RSE

	// let us restore all the registers from our PSI structure
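
Note on the handoff save area: the eight st8 stores in SAL_TO_OS_MCA_HANDOFF_STATE_SAVE() write r1, r8, r9, r10, r11, r12, r17 and r18 at consecutive 8-byte offsets, and the dispatch code later reads the processor state parameter back from offset 56 ("adds r16=56,r2"). A minimal C sketch of that layout is shown below; the field names are illustrative assumptions taken from the SAL 3.0 handoff description in the file's comments, not the actual definition, which lives in include/asm/mca.h (ia64_mca_sal_to_os_state_t).

/* Hypothetical sketch of the SAL-to-OS handoff save area filled by
 * SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(); field names are illustrative,
 * the authoritative layout is the C structure in include/asm/mca.h.
 * Each field occupies one 8-byte slot, in store order. */
struct sal_to_os_handoff_sketch {
	unsigned long os_gp;		/* offset  0: r1,  OS GP */
	unsigned long pal_proc;		/* offset  8: r8,  PAL_PROC physical address */
	unsigned long sal_proc;		/* offset 16: r9,  SAL_PROC physical address */
	unsigned long sal_gp;		/* offset 24: r10, SAL GP (physical) */
	unsigned long rendez_state;	/* offset 32: r11, rendezvous state */
	unsigned long sal_check_ra;	/* offset 40: r12, return address into SAL_CHECK */
	unsigned long min_state_area;	/* offset 48: r17 */
	unsigned long proc_state_param;	/* offset 56: r18, read back via "adds r16=56,r2" */
};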
