⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 entry.s

📁 底层驱动开发
💻 S
📖 第 1 页 / 共 4 页
字号:
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * kernel entry points (interruptions, system call wrappers) *  Copyright (C) 1999,2000 Philipp Rumpf  *  Copyright (C) 1999 SuSE GmbH Nuernberg  *  Copyright (C) 2000 Hewlett-Packard (John Marvin) *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand) * *    This program is free software; you can redistribute it and/or modify *    it under the terms of the GNU General Public License as published by *    the Free Software Foundation; either version 2, or (at your option) *    any later version. * *    This program is distributed in the hope that it will be useful, *    but WITHOUT ANY WARRANTY; without even the implied warranty of *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the *    GNU General Public License for more details. * *    You should have received a copy of the GNU General Public License *    along with this program; if not, write to the Free Software *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/

#include <linux/config.h>
#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */

#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/psw.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>

/* Width abstractions: PA2.0 wide (64-bit) mode compares need the ","
 * completer followed by "*" to select 64-bit conditions; narrow mode
 * uses the plain 32-bit forms.  .level selects the assembler's
 * architecture level accordingly. */
#ifdef __LP64__
#define CMPIB           cmpib,*
#define CMPB            cmpb,*
#define COND(x)		*x
	.level 2.0w
#else
#define CMPIB           cmpib,
#define CMPB            cmpb,
#define COND(x)		x
	.level 2.0
#endif

	.import         pa_dbit_lock,data

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro  space_to_prot spc prot
	depd,z  \spc,62,31,\prot
	.endm
#else
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif

	/* Switch to virtual mapping, trashing only %r1.
	 *
	 * Turns off PSW Q (interruption state collection), points the
	 * interruption address queues (IIASQ/IIAOQ, via %cr17/%cr18) at
	 * the virtual-mode continuation label 4f, and rfir's into it
	 * with KERNEL_PSW loaded into IPSW (%cr22).  The old %sr7 is
	 * preserved in %sr3 (only when non-zero, i.e. we came from user
	 * space); %sr4-%sr7 are set to 0 for kernel space. */
	.macro  virt_map
	rsm     PSW_SM_Q,%r0
	tovirt_r1 %r29
	mfsp	%sr7, %r1
	or,=    %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
	mtsp	%r1, %sr3
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mtsp	%r0, %sr6
	mtsp	%r0, %sr7
	load32	KERNEL_PSW, %r1
	mtctl	%r1, %cr22
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 * For Faults:
	 *      If sr7 == 0
	 *          Already using a kernel stack, so call the
	 *          get_stack_use_r30 macro to push a pt_regs structure
	 *          on the stack, and store registers there.
	 *      else
	 *          Need to set up a kernel stack, so call the
	 *          get_stack_use_cr30 macro to set up a pointer
	 *          to the pt_regs structure contained within the
	 *          task pointer pointed to by cr30. Set the stack
	 *          pointer to point to the end of the task structure.
	 *
	 * For Interrupts:
	 *      If sr7 == 0
	 *          Already using a kernel stack, check to see if r30
	 *          is already pointing to the per processor interrupt
	 *          stack. If it is, call the get_stack_use_r30 macro
	 *          to push a pt_regs structure on the stack, and store
	 *          registers there. Otherwise, call get_stack_use_cr31
	 *          to get a pointer to the base of the interrupt stack
	 *          and push a pt_regs structure on that stack.
	 *      else
	 *          Need to set up a kernel stack, so call the
	 *          get_stack_use_cr30 macro to set up a pointer
	 *          to the pt_regs structure contained within the
	 *          task pointer pointed to by cr30. Set the stack
	 *          pointer to point to the end of the task structure.
	 *          N.B: We don't use the interrupt stack for the
	 *          first interrupt from userland, because signals/
	 *          resched's are processed when returning to userland,
	 *          and we can sleep in those cases.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
*/	.macro  get_stack_use_cr30	/* we save the registers in the task struct */	mfctl   %cr30, %r1	tophys  %r1,%r9	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */	tophys  %r1,%r9	ldo     TASK_REGS(%r9),%r9	STREG   %r30, PT_GR30(%r9)	STREG   %r29,PT_GR29(%r9)	STREG   %r26,PT_GR26(%r9)	copy    %r9,%r29	mfctl   %cr30, %r1	ldo	THREAD_SZ_ALGN(%r1), %r30	.endm	.macro  get_stack_use_r30	/* we put a struct pt_regs on the stack and save the registers there */	tophys  %r30,%r9	STREG   %r30,PT_GR30(%r9)	ldo	PT_SZ_ALGN(%r30),%r30	STREG   %r29,PT_GR29(%r9)	STREG   %r26,PT_GR26(%r9)	copy    %r9,%r29	.endm	.macro  rest_stack	LDREG   PT_GR1(%r29), %r1	LDREG   PT_GR30(%r29),%r30	LDREG   PT_GR29(%r29),%r29	.endm	/* default interruption handler	 * (calls traps.c:handle_interruption) */	.macro	def code	b	intr_save	ldi     \code, %r8	.align	32	.endm	/* Interrupt interruption handler	 * (calls irq.c:do_cpu_irq_mask) */	.macro	extint code	b	intr_extint	mfsp    %sr7,%r16	.align	32	.endm		.import	os_hpmc, code	/* HPMC handler */	.macro	hpmc code	nop			/* must be a NOP, will be patched later */	load32	PA(os_hpmc), %r3	bv,n	0(%r3)	nop	.word	0		/* checksum (will be patched) */	.word	PA(os_hpmc)	/* address of handler */	.word	0		/* length of handler */	.endm	/*	 * Performance Note: Instructions will be moved up into	 * this part of the code later on, once we are sure	 * that the tlb miss handlers are close to final form.	 
*/

	/* Register definitions for tlb miss handler macros.
	 * These are shadowed registers, so they survive until the
	 * handler can save state properly. */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef __LP64__

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif
	
	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef __LP64__
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align		32
	.endm
	
#ifndef __LP64__
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR.
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	itlb_miss_11
	mfctl 	%ior,va
	/* FIXME: If user causes a naitlb miss, the priv level may not be in
	 * lower bits of va, where the itlb miss handler is expecting them
	 */

	.align		32
	.endm
#endif
	
	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR.
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef __LP64__
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl 	%ior,va
	/* FIXME: If user causes a naitlb miss, the priv level may not be in
	 * lower bits of va, where the itlb miss handler is expecting them
	 */

	.align		32
	.endm
	
#ifndef __LP64__
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef __LP64__
	b       dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align		32
	.endm
	
#ifndef __LP64__
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b       nadtlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif
	
	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef __LP64__
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm
	
#ifndef __LP64__
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef __LP64__
	b       dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

	/* The following are simple 32 vs 64 bit instruction
	 * abstractions for the macros.
	 * In LP64 the bit positions are offset by 32 because the
	 * doubleword extract/deposit forms number bits over 64 bits. */
	.macro		EXTR	reg1,start,length,reg2
#ifdef __LP64__
	extrd,u		\reg1,32+\start,\length,\reg2
#else
	extrw,u		\reg1,\start,\length,\reg2
#endif
	.endm

	.macro		DEP	reg1,start,length,reg2
#ifdef __LP64__
	depd		\reg1,32+\start,\length,\reg2
#else
	depw		\reg1,\start,\length,\reg2
#endif
	.endm

	.macro		DEPI	val,start,length,reg
#ifdef __LP64__
	depdi		\val,32+\start,\length,\reg
#else
	depwi		\val,\start,\length,\reg
#endif
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault.
We have to extract this and place it in the va,	 * zeroing the corresponding bits in the space register */	.macro		space_adjust	spc,va,tmp#ifdef __LP64__	extrd,u		\spc,63,SPACEID_SHIFT,\tmp	depd		%r0,63,SPACEID_SHIFT,\spc	depd		\tmp,31,SPACEID_SHIFT,\va#endif	.endm	.import		swapper_pg_dir,code	/* Get the pgd.  For faults on space zero (kernel space), this	 * is simply swapper_pg_dir.  For user space faults, the	 * pgd is stored in %cr25 */	.macro		get_pgd		spc,reg	ldil		L%PA(swapper_pg_dir),\reg	ldo		R%PA(swapper_pg_dir)(\reg),\reg	or,COND(=)	%r0,\spc,%r0	mfctl		%cr25,\reg	.endm	/* 		space_check(spc,tmp,fault)		spc - The space we saw the fault with.		tmp - The place to store the current space.		fault - Function to call on failure.		Only allow faults on different spaces from the		currently active one if we're the kernel 	*/	.macro		space_check	spc,tmp,fault	mfsp		%sr7,\tmp	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page					 * as kernel, so defeat the space					 * check if it is */	copy		\spc,\tmp	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */	cmpb,COND(<>),n	\tmp,\spc,\fault	.endm	/* Look up a PTE in a 2-Level scheme (faulting at each	 * level if the entry isn't present 	 *	 * NOTE: we use ldw even for LP64, since the short pointers	 * can address up to 1TB	 */	.macro		L2_ptep	pmd,pte,index,va,fault#if PT_NLEVELS == 3	EXTR		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index#else	EXTR		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index#endif	DEP             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */	copy		%r0,\pte	ldw,s		\index(\pmd),\pmd	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault	DEP		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */	copy		\pmd,%r9#ifdef __LP64__	shld		%r9,PxD_VALUE_SHIFT,\pmd#else	shlw		%r9,PxD_VALUE_SHIFT,\pmd#endif	EXTR		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index	DEP		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd	LDREG		%r0(\pmd),\pte		/* pmd is now pte */	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault	.endm	
/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory)
	 *
	 * The "extrd,u,*=  \va,31,32,%r0" tests nullify the real L3
	 * steps unless the upper 32 bits of va are non-zero, i.e. the
	 * address lies beyond the first 4GB. */
	.macro		L3_ptep pgd,pte,index,va,fault
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy		%r0,\pte
	extrd,u,*=	\va,31,32,%r0
	ldw,s		\index(\pgd),\pgd
	extrd,u,*=	\va,31,32,%r0
	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,31,32,%r0
	shld		\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,31,32,%r0
	copy		\index,\pgd
	extrd,u,*<>	\va,31,32,%r0
	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set
	 * (the "and,COND(<>)" nullifies the store when the bit was
	 * already present) */
	.macro		update_ptep	ptep,pte,tmp,tmp1
	ldi		_PAGE_ACCESSED,\tmp1
	or		\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG		\tmp,0(\ptep)
	.endm

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro		update_dirty	ptep,pte,tmp
	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or		\tmp,\pte,\pte
	STREG		\pte,0(\ptep)
	.endm

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro		make_insert_tlb	spc,pte,prot
	space_to_prot   \spc \prot        /* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page */
	depd            \pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ */
	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlbt */
	depd		%r0,63,PAGE_SHIFT,\pte
	extrd,u		\pte,56,32,\pte
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro		make_insert_tlb_11	spc,pte,prot
	zdep		\spc,30,15,\prot
	dep		\pte,8,7,\prot
	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi		1,12,1,\prot
	extru,=         \pte,_PAGE_USER_BIT,1,%r0
	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */
	depi		0,31,12,\pte
	extru		\pte,24,25,\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro		f_extend	pte,tmp
	extrd,s		\pte,42,4,\tmp
	addi,<>		1,\tmp,%r0
	extrd,s		\pte,63,25,\pte
	.endm

	/* The alias region is an 8MB aligned 16MB to do clear and
	 * copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the to TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the from tlb entry (or nothing if only a to entry---for
	 * clear_user_page_asm) */
	/* NOTE(review): the body of do_alias continues beyond this
	 * excerpt (page 1 of 4) and is not reproduced here. */
	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -