⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ultra.s

📁 linux 内核源代码
💻 S
📖 第 1 页 / 共 2 页
字号:
	 *   %g4	scratch 4
	 */
	.align		32
	.globl		xcall_flush_tlb_mm
xcall_flush_tlb_mm:	/* 21 insns */
	/* Cross-call: flush all TLB entries for one MMU context.
	 * %g5 = context.  The primary-context register is temporarily
	 * rewritten to %g5 (keeping the nucleus page-size fields) so the
	 * demap hits that context, then restored from the copy in %g3.
	 * Insn count must match the sun4v replacement patched in by
	 * hypervisor_patch_cachetlbops below.
	 */
	mov		PRIMARY_CONTEXT, %g2
	ldxa		[%g2] ASI_DMMU, %g3
	srlx		%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or		%g5, %g4, %g5	/* Preserve nucleus page size fields */
	stxa		%g5, [%g2] ASI_DMMU
	mov		0x40, %g4	/* 0x40 == "demap context, primary" op (V9 demap encoding) */
	stxa		%g0, [%g4] ASI_DMMU_DEMAP
	stxa		%g0, [%g4] ASI_IMMU_DEMAP
	stxa		%g3, [%g2] ASI_DMMU	/* restore original primary context */
	retry
	/* nop padding up to 21 insns for run-time patching */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.globl		xcall_flush_tlb_pending
xcall_flush_tlb_pending:	/* 21 insns */
	/* %g5=context, %g1=nr, %g7=vaddrs[] */
	sllx		%g1, 3, %g1	/* entry count -> byte offset (8-byte vaddrs) */
	mov		PRIMARY_CONTEXT, %g4
	ldxa		[%g4] ASI_DMMU, %g2	/* %g2 = saved primary context */
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or		%g5, %g4, %g5	/* preserve nucleus page size fields */
	mov		PRIMARY_CONTEXT, %g4
	stxa		%g5, [%g4] ASI_DMMU
1:	sub		%g1, (1 << 3), %g1	/* walk vaddrs[] from the end */
	ldx		[%g7 + %g1], %g5
	andcc		%g5, 0x1, %g0	/* bit 0 of vaddr set -> also demap I-TLB */
	be,pn		%icc, 2f
	 andn		%g5, 0x1, %g5	/* strip flag bit to get the real vaddr */
	stxa		%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%g1, 1b
	 nop
	stxa		%g2, [%g4] ASI_DMMU	/* restore primary context */
	retry
	nop			/* pad to 21 insns */

	.globl		xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:	/* 25 insns */
	/* %g1=start, %g7=end: demap every page in [start,end) from both
	 * TLBs in the nucleus context.
	 */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1	/* page-align start */
	andn		%g7, %g2, %g7	/* page-align end */
	sub		%g7, %g1, %g3	/* %g3 = range length */
	add		%g2, 1, %g2	/* %g2 = PAGE_SIZE */
	sub		%g3, %g2, %g3	/* %g3 = offset of last page */
	or		%g1, 0x20, %g1		! Nucleus
1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%g3, 1b
	 sub		%g3, %g2, %g3	/* delay slot: step back one page */
	retry
	/* nop padding up to 25 insns for run-time patching */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl		xcall_sync_tick
xcall_sync_tick:

661:	rdpr		%pstate, %g2
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
	/* On sun4v the two instructions at 661 above are replaced by the
	 * two nops recorded in this patch section.
	 */
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	rdpr		%pil, %g2
	wrpr		%g0, 15, %pil	/* raise PIL to max while in the trap */
	sethi		%hi(109f), %g7
	b,pt		%xcc, etrap_irq
109:	 or		%g7, %lo(109b), %g7	/* delay slot: %g7 = return pc for etrap */
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	call		smp_synchronize_tick_client
	 nop
	clr		%l6
	b		rtrap_xcall
	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

	/* NOTE: This is SPECIAL!!  We do etrap/rtrap however
	 *       we choose to deal with the "BH's run with
	 *       %pil==15" problem (described in asm/pil.h)
	 *       by just invoking rtrap directly past where
	 *       BH's are checked for.
	 *
	 *       We do it like this because we do not want %pil==15
	 *       lockups to prevent regs being reported.
	 */
	.globl		xcall_report_regs
xcall_report_regs:

661:	rdpr		%pstate, %g2
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
	/* sun4v: the two instructions at 661 become nops (see patch entry). */
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	rdpr		%pil, %g2
	wrpr		%g0, 15, %pil
	sethi		%hi(109f), %g7
	b,pt		%xcc, etrap_irq
109:	 or		%g7, %lo(109b), %g7	/* delay slot: %g7 = return pc for etrap */
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	call		__show_regs
	 add		%sp, PTREGS_OFF, %o0	/* delay slot: arg = pt_regs */
	clr		%l6
	/* Has to be a non-v9 branch due to the large distance. */
	b		rtrap_xcall
	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

#ifdef DCACHE_ALIASING_POSSIBLE
	.align		32
	.globl		xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	/* Invalidate the D-cache lines of one page, one 32-byte line
	 * per iteration, walking offsets PAGE_SIZE-32 down to 0.
	 */
	sethi		%hi(PAGE_SIZE), %g3
1:	subcc		%g3, (1 << 5), %g3
	stxa		%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	.globl		xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL)  */
#ifdef DCACHE_ALIASING_POSSIBLE
	/* Walk every D-cache tag; writeback-invalidate the lines whose
	 * tag matches this page, then (if the page is mapped) flush the
	 * I-cache via the kernel virtual address.
	 */
	srlx		%g1, (13 - 2), %g1	! Form tag comparitor
	sethi		%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub		%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa		[%g3] ASI_DCACHE_TAG, %g2
	andcc		%g2, 0x3, %g0	/* line valid? (low tag bits) */
	be,pn		%xcc, 2f
	 andn		%g2, 0x3, %g2	/* delay slot: strip state bits */
	cmp		%g2, %g1	/* tag matches this page? */
	bne,pt		%xcc, 2f
	 nop
	stxa		%g0, [%g3] ASI_DCACHE_TAG
	membar		#Sync
2:	cmp		%g3, 0
	bne,pt		%xcc, 1b
	 sub		%g3, (1 << 5), %g3	/* delay slot: next line down */
	brz,pn		%g5, 2f		/* no mapping -> skip I-cache flush */
#endif /* DCACHE_ALIASING_POSSIBLE */
	 sethi		%hi(PAGE_SIZE), %g3
1:	flush		%g7
	subcc		%g3, (1 << 5), %g3
	bne,pt		%icc, 1b
	 add		%g7, (1 << 5), %g7	/* delay slot: next 32-byte block */
2:	retry
	nop
	nop

	/* %g5:	error
	 * %g6:	tlb op
	 */
__hypervisor_tlb_xcall_error:
	/* Common error path for the hypervisor xcall flushers: save the
	 * error code and the failing hv op, etrap, then report via
	 * hypervisor_tlbop_error_xcall.
	 */
	mov	%g5, %g4
	mov	%g6, %g5
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	mov	%l4, %o0
	call	hypervisor_tlbop_error_xcall
	 mov	%l5, %o1
	ba,a,pt	%xcc, rtrap_clr_l6

	.globl		__hypervisor_xcall_flush_tlb_mm
__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
	/* Save the %o regs in globals, issue HV_FAST_MMU_DEMAP_CTX via
	 * the fast-trap, then restore the %o regs.  Insn count must stay
	 * 21 to match the tlb_patch_one() call site.
	 */
	mov		%o0, %g2
	mov		%o1, %g3
	mov		%o2, %g4
	mov		%o3, %g1
	mov		%o5, %g7
	clr		%o0		/* ARG0: CPU lists unimplemented */
	clr		%o1		/* ARG1: CPU lists unimplemented */
	mov		%g5, %o2	/* ARG2: mmu context */
	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	mov		HV_FAST_MMU_DEMAP_CTX, %g6	/* op code for the error path */
	brnz,pn		%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5	/* delay slot: error code for the error path */
	mov		%g2, %o0
	mov		%g3, %o1
	mov		%g4, %o2
	mov		%g1, %o3
	mov		%g7, %o5
	membar		#Sync
	retry

	.globl		__hypervisor_xcall_flush_tlb_pending
__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
	/* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
	sllx		%g1, 3, %g1	/* entry count -> byte offset */
	mov		%o0, %g2
	mov		%o1, %g3
	mov		%o2, %g4
1:	sub		%g1, (1 << 3), %g1
	ldx		[%g7 + %g1], %o0	/* ARG0: virtual address */
	mov		%g5, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2		/* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0	/* clear low (flag) bits: */
	sllx		%o0, PAGE_SHIFT, %o0	/*   page-align the vaddr  */
	ta		HV_MMU_UNMAP_ADDR_TRAP
	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6	/* op code for the error path */
	brnz,a,pn	%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5	/* annulled delay slot: error code */
	brnz,pt		%g1, 1b
	 nop
	mov		%g2, %o0
	mov		%g3, %o1
	mov		%g4, %o2
	membar		#Sync
	retry

	.globl		__hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1	/* page-align start */
	andn		%g7, %g2, %g7	/* page-align end */
	sub		%g7, %g1, %g3	/* %g3 = range length */
	add		%g2, 1, %g2	/* %g2 = PAGE_SIZE */
	sub		%g3, %g2, %g3	/* %g3 = offset of last page */
	mov		%o0, %g2	/* save %o regs across the hv traps */
	mov		%o1, %g4
	mov		%o2, %g7
1:	add		%g1, %g3, %o0	/* ARG0: virtual address */
	mov		0, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	ta		HV_MMU_UNMAP_ADDR_TRAP
	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6	/* op code for the error path */
	brnz,pn		%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5	/* delay slot: error code */
	sethi		%hi(PAGE_SIZE), %o2
	brnz,pt		%g3, 1b
	 sub		%g3, %o2, %g3	/* delay slot: step back one page */
	mov		%g2, %o0	/* restore %o regs */
	mov		%g4, %o1
	mov		%g7, %o2
	membar		#Sync
	retry

	/* These just get rescheduled to PIL vectors. */
	.globl		xcall_call_function
xcall_call_function:
	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl		xcall_receive_signal
xcall_receive_signal:
	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl		xcall_capture
xcall_capture:
	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

	.globl		xcall_new_mmu_context_version
xcall_new_mmu_context_version:
	wr		%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
	retry

#endif /* CONFIG_SMP */

	.globl		hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
	/* Boot-time patching for sun4v: overwrite each generic flush
	 * routine with its __hypervisor_* variant via tlb_patch_one().
	 * The %o2 argument to each call is the instruction count and
	 * must match the "/* N insns *\/" annotations above.
	 */
	save		%sp, -128, %sp

	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__hypervisor_flush_tlb_mm), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		10, %o2

	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__hypervisor_flush_tlb_pending), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_pending), %o1
	call		tlb_patch_one
	 mov		16, %o2

	sethi		%hi(__flush_tlb_kernel_range), %o0
	or		%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi		%hi(__hypervisor_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		16, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(__hypervisor_flush_dcache_page), %o1
	or		%o1, %lo(__hypervisor_flush_dcache_page), %o1
	call		tlb_patch_one
	 mov		2, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
	sethi		%hi(xcall_flush_tlb_mm), %o0
	or		%o0, %lo(xcall_flush_tlb_mm), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_mm), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		21, %o2

	sethi		%hi(xcall_flush_tlb_pending), %o0
	or		%o0, %lo(xcall_flush_tlb_pending), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_pending), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
	call		tlb_patch_one
	 mov		21, %o2

	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		25, %o2
#endif /* CONFIG_SMP */

	ret
	 restore

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -