
entry.S

This Linux source code is quite comprehensive and essentially complete; it is built with the C toolchain. Due to time constraints I have not tested it myself, but even as reference material it is very good.
Page 1 of 5
#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */

	.export __kernel_thread, code
	.import do_fork
__kernel_thread:
	STREG	%r2, -RP_OFFSET(%r30)

	copy	%r30, %r1
	ldo	PT_SZ_ALGN(%r30),%r30
#ifdef __LP64__
	/* Yo, function pointers in wide mode are little structs... -PB */
	ldd	24(%r26), %r2
	STREG	%r2, PT_GR27(%r1)	/* Store child's %dp */
	ldd	16(%r26), %r26
#endif
	STREG	%r26, PT_GR26(%r1)  /* Store function & argument for child */
	STREG	%r25, PT_GR25(%r1)
	ldo	CLONE_VM(%r0), %r26   /* Force CLONE_VM since only init_mm */
	or	%r26, %r24, %r26      /* will have kernel mappings.	 */
	copy	%r0, %r25
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	bl	do_fork, %r2
	copy	%r1, %r24

	/* Parent Returns here */

	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	ldo	-PT_SZ_ALGN(%r30), %r30

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args from temp save area set up above
	 * into task save area.
	 */

	.export	ret_from_kernel_thread
ret_from_kernel_thread:

	/* Call schedule_tail first though */
	bl	schedule_tail, %r2
	nop

	LDREG	TASK_PT_GR26-TASK_SZ_ALGN(%r30), %r1
	LDREG	TASK_PT_GR25-TASK_SZ_ALGN(%r30), %r26
#ifdef __LP64__
	LDREG	TASK_PT_GR27-TASK_SZ_ALGN(%r30), %r27
#endif
	ble	0(%sr7, %r1)
	copy	%r31, %r2

#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
	loadgp				/* Thread could have been in a module */
#endif
	b	sys_exit
	ldi	0, %r26

	.import	sys_execve, code
	.export	__execve, code
__execve:
	copy	%r2, %r15
	copy	%r30, %r16
	ldo	PT_SZ_ALGN(%r30), %r30
	STREG	%r26, PT_GR26(%r16)
	STREG	%r25, PT_GR25(%r16)
	STREG	%r24, PT_GR24(%r16)
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	bl	sys_execve, %r2
	copy	%r16, %r26

	cmpib,=,n 0,%r28,intr_return    /* forward */

	/* yes, this will trap and die. */
	copy	%r15, %r2
	copy	%r16, %r30
	bv	%r0(%r2)
	nop

	.align 4

	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */

	.export	_switch_to, code
_switch_to:
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save

	ldil	L%_switch_to_ret, %r2
	ldo	R%_switch_to_ret(%r2), %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	bv	%r0(%r2)
	mtctl   %r25,%cr30

_switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 * Note that the following code uses a "relied upon translation".
	 * See the parisc ACD for details. The ssm is necessary due to a
	 * PCXT bug.
	 */

	.align 4096

	.export	syscall_exit_rfi
syscall_exit_rfi:
	mfctl   %cr30,%r16
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)

	LDREG   PT_PSW(%r16),%r19
	ldil    L%USER_PSW_MASK,%r1
	ldo     R%USER_PSW_MASK(%r1),%r1
#ifdef __LP64__
	ldil    L%USER_PSW_HI_MASK,%r20
	ldo     R%USER_PSW_HI_MASK(%r20),%r20
	depd    %r20,31,32,%r1
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	ldil    L%USER_PSW,%r1
	ldo     R%USER_PSW(%r1),%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)
	mfsp    %sr3,%r19
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)

intr_return:
	ssm     PSW_SM_I, %r0

	/* Check for software interrupts */

	.import irq_stat,data

	ldil	L%irq_stat,%r19
	ldo	R%irq_stat(%r19),%r19
#ifdef CONFIG_SMP
	mfctl   %cr30,%r1
	ldw	TASK_PROCESSOR(%r1),%r1 /* get cpu # - int */
	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
	** irq_stat[] is defined using ____cacheline_aligned.
	*/
#ifdef __LP64__
	shld	%r1, 6, %r20
#else
	shlw	%r1, 5, %r20
#endif
	add     %r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
#endif /* CONFIG_SMP */

	LDREG   IRQSTAT_SIRQ_PEND(%r19),%r20    /* hardirq.h: unsigned long */
	cmpib,<>,n 0,%r20,intr_do_softirq /* forward */

intr_check_resched:

	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG     TASK_NEED_RESCHED(%r1),%r19	/* sched.h: long need_resched */
	CMPIB<>,n 0,%r19,intr_do_resched /* forward */

intr_check_sig:
	/* As above */
	mfctl   %cr30,%r1
	ldw	TASK_SIGPENDING(%r1),%r19	/* sched.h: int sigpending */
	cmpib,<>,n 0,%r19,intr_do_signal /* forward */

intr_restore:
	copy            %r16,%r29
	ldo             PT_FR31(%r29),%r1
	rest_fp         %r1
	rest_general    %r29
	ssm		0,%r0
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	tophys_r1       %r29
	rsm             (PSW_SM_Q|PSW_SM_P|PSW_SM_D|PSW_SM_I),%r0
	rest_specials	%r29
	rest_stack
	rfi
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.import do_softirq,code
intr_do_softirq:
	bl      do_softirq,%r2
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
	b       intr_check_resched
	nop

	.import schedule,code
intr_do_resched:
	/* Only do reschedule if we are returning to user space */
	LDREG   PT_IASQ0(%r16), %r20
	CMPIB= 0,%r20,intr_restore /* backward */
	nop
	LDREG   PT_IASQ1(%r16), %r20
	CMPIB= 0,%r20,intr_restore /* backward */
	nop

#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_return, %r2
	b	schedule
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */

	.import do_signal,code
intr_do_signal:
	/* Only do signals if we are returning to user space */
	LDREG   PT_IASQ0(%r16), %r20
	CMPIB= 0,%r20,intr_restore /* backward */
	nop
	LDREG   PT_IASQ1(%r16), %r20
	CMPIB= 0,%r20,intr_restore /* backward */
	nop

	copy	%r0, %r24			/* unsigned long in_syscall */
	copy	%r16, %r25			/* struct pt_regs *regs */
	ssm     PSW_SM_I, %r0
#ifdef __LP64__
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif
	bl	do_signal,%r2
	copy	%r0, %r26			/* sigset_t *oldset = NULL */
	b	intr_restore
	nop

	/*
	 * External interrupts.
	 */

intr_extint:
	CMPIB=,n 0,%r16,1f	/* on User or kernel stack? */
	get_stack_use_cr30
	b,n 3f
1:
#if 0  /* Interrupt Stack support not working yet! */
	mfctl	%cr31,%r1
	copy	%r30,%r17
	/* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
#ifdef __LP64__
	depdi	0,63,15,%r17
#else
	depi	0,31,15,%r17
#endif
	CMPB=,n	%r1,%r17,2f
	get_stack_use_cr31
	b,n 3f
#endif
2:
	get_stack_use_r30

3:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2
#ifdef __LP64__
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif
	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */

	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

	.export         intr_save, code /* for os_hpmc */

intr_save:
	mfsp    %sr7,%r16
	CMPIB=,n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */

	/*
	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
	 *           traps.c).
	 *        2) Once we start executing code above 4 Gb, we need
	 *           to adjust iasq/iaoq here in the same way we
	 *           adjust isr/ior below.
	 */

	CMPIB=,n        6,%r26,skip_save_ior

	/* save_specials left ipsw value in r8 for us to test */

	mfctl           %cr20, %r16 /* isr */
	mfctl           %cr21, %r17 /* ior */

#ifdef __LP64__
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17

	/*
	 * FIXME: This code has hardwired assumptions about the split
	 *        between space bits and offset bits. This will change
	 *        when we allow alternate page sizes.
	 */

	/* adjust isr/ior. */
	extrd,u         %r16,63,7,%r1    /* get high bits from isr for ior */
	depd            %r1,31,7,%r17    /* deposit them into ior */
	depdi           0,63,7,%r16      /* clear them from isr */
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)

skip_save_ior:
	virt_map
	save_general	%r29

	ldo		PT_FR0(%r29), %r25
	save_fp		%r25

	loadgp

	copy		%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_KWDB
	copy		%r29, %r3	/* KWDB - update frame pointer (gr3) */
#endif

#ifdef __LP64__
	ldo		-16(%r30),%r29	/* Reference param save area */
#endif

	ldil		L%intr_return, %r2
	copy		%r25, %r16	/* save pt_regs */

	b		handle_interruption
	ldo		R%intr_return(%r2), %r2	/* return to intr_return */

	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */
	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef __LP64__
dtlb_miss_20w:
	extrd,u         spc,63,7,t1     /* adjust va */
	depd            t1,31,7,va      /* adjust va */
	depdi           0,63,7,spc      /* adjust space */
	mfctl           %cr25,ptp	/* Assume user space miss */
	or,*<>          %r0,spc,%r0     /* If it is user space, nullify */
	mfctl           %cr24,ptp	/* Load kernel pgd instead */
	extrd,u         va,33,9,t1      /* Get pgd index */

	mfsp            %sr7,t0		/* Get current space */
	or,*=           %r0,t0,%r0      /* If kernel, nullify following test */
	cmpb,*<>,n       t0,spc,dtlb_fault /* forward */

	/* First level page table lookup */

	ldd,s           t1(ptp),ptp
	extrd,u         va,42,9,t0     /* get second-level index */
	bb,>=,n         ptp,_PAGE_PRESENT_BIT,dtlb_check_alias_20w
	depdi           0,63,12,ptp     /* clear prot bits */

	/* Second level page table lookup */

	ldd,s           t0(ptp),ptp
	extrd,u         va,51,9,t0     /* get third-level index */
	bb,>=,n         ptp,_PAGE_PRESENT_BIT,dtlb_check_alias_20w
	depdi           0,63,12,ptp     /* clear prot bits */

	/* Third level page table lookup */

	shladd           t0,3,ptp,ptp
	ldi		_PAGE_ACCESSED,t1
	ldd              0(ptp),pte
	bb,>=,n          pte,_PAGE_PRESENT_BIT,dtlb_check_alias_20w

	/* Check whether the "accessed" bit was set, otherwise do so */

	or		t1,pte,t0	/* t0 has R bit set */
	and,*<>         t1,pte,%r0      /* test and nullify if already set */
	std             t0,0(ptp)       /* write back pte */

	space_to_prot   spc prot        /* create prot id from space */
	depd            pte,8,7,prot    /* add in prot bits from pte */

	extrd,u,*=      pte,_PAGE_USER_BIT+32,1,r0
	depdi		7,11,3,prot   /* Set for user space (1 rsvd for read) */
	extrd,u,*= 	pte,_PAGE_GATEWAY_BIT+32,1,r0
	depdi		0,11,2,prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
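For readers following the __kernel_thread / ret_from_kernel_thread pair above: the stub stores the function pointer and its argument in the child's register save area, forces CLONE_VM (since, per the comment, only init_mm has kernel mappings), and calls do_fork; in the child, the function is called and then sys_exit(0) runs. The following is a rough C sketch of that flow only, where saved_fn, saved_arg and sketch_do_fork are hypothetical stand-ins rather than real kernel interfaces:

#include <stdlib.h>

#define CLONE_VM 0x100                 /* must agree with <linux/sched.h> */

/* Hypothetical stand-ins: saved_fn/saved_arg model the PT_GR26/PT_GR25
 * slots in the child's register save area; sketch_do_fork models do_fork(). */
typedef int (*thread_fn)(void *);
static thread_fn  saved_fn;            /* STREG %r26, PT_GR26(%r1) */
static void      *saved_arg;           /* STREG %r25, PT_GR25(%r1) */

static long sketch_do_fork(unsigned long clone_flags)
{
    /* The real do_fork() creates the child and arranges for it to start
     * at ret_from_kernel_thread; this stub only stands in for it. */
    (void)clone_flags;
    return -1;
}

/* What __kernel_thread does: stash fn/arg for the child, force CLONE_VM,
 * and fork.  The parent gets do_fork's return value. */
long kernel_thread_sketch(thread_fn fn, void *arg, unsigned long flags)
{
    saved_fn  = fn;
    saved_arg = arg;
    return sketch_do_fork(flags | CLONE_VM);
}

/* What ret_from_kernel_thread does in the child: run fn(arg), then exit. */
void ret_from_kernel_thread_sketch(void)
{
    saved_fn(saved_arg);   /* ble 0(%sr7,%r1), argument already loaded */
    exit(0);               /* b sys_exit / ldi 0,%r26 */
}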
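_switch_to swaps kernel stacks by saving the outgoing task's stack pointer and a resume address (TASK_PT_KSP / TASK_PT_KPC), loading the incoming task's saved values, and returning the previous task to the caller. A conceptual C sketch follows, assuming a hypothetical thread_sketch structure; the actual stack and PC switch can only be done in assembly and is only indicated by comments:

/* Hypothetical per-task fields mirroring TASK_PT_KSP and TASK_PT_KPC. */
struct thread_sketch {
    unsigned long ksp;   /* saved kernel stack pointer              */
    unsigned long kpc;   /* kernel PC to resume at (_switch_to_ret) */
};

/*
 * Conceptual view of _switch_to: record where "prev" should continue
 * (its current kernel stack and the _switch_to_ret label), then resume
 * "next" from its previously saved ksp/kpc.  The real code also runs
 * callee_save / callee_rest and points %cr30 at the incoming task.
 */
struct thread_sketch *switch_to_sketch(struct thread_sketch *prev,
                                       struct thread_sketch *next,
                                       unsigned long cur_sp,
                                       unsigned long ret_pc)
{
    prev->ksp = cur_sp;      /* STREG %r30, TASK_PT_KSP(%r26) */
    prev->kpc = ret_pc;      /* STREG %r2,  TASK_PT_KPC(%r26) */

    /* Execution would now continue at next->kpc on next->ksp
     * (bv %r0(%r2) with %r30 reloaded); when prev is later switched
     * back in, it resumes at _switch_to_ret and returns below. */
    (void)next;

    return prev;             /* copy %r26, %r28: hand prev back to caller */
}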
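The intr_return .. intr_restore path above checks for pending softirqs, a reschedule request and pending signals before restoring registers and executing rfi; the reschedule and signal work is only done when returning to user space (the PT_IASQ0/PT_IASQ1 checks), otherwise control drops straight to intr_restore. A self-contained C sketch of that control flow, using hypothetical stand-in structures and stub functions rather than real kernel interfaces:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical state mirroring IRQSTAT_SIRQ_PEND, TASK_NEED_RESCHED,
 * TASK_SIGPENDING and the "returning to user space" test. */
struct cpu_state_sketch  { unsigned long softirq_pending; };
struct task_state_sketch { long need_resched; int sigpending; int user_iasq; };

static void do_softirq_sketch(struct cpu_state_sketch *c)
{
    puts("do_softirq");
    c->softirq_pending = 0;      /* the real do_softirq drains the pending mask */
}
static void schedule_sketch(struct task_state_sketch *t)
{
    puts("schedule");
    t->need_resched = 0;         /* the scheduler clears the flag */
}
static void do_signal_sketch(struct task_state_sketch *t)
{
    puts("do_signal");
    t->sigpending = 0;
}
static void intr_restore_sketch(void) { puts("restore registers, rfi"); }

static bool returning_to_user(struct task_state_sketch *t) { return t->user_iasq != 0; }

/* Conceptual flow of intr_return .. intr_restore. */
void intr_return_sketch(struct cpu_state_sketch *cpu, struct task_state_sketch *task)
{
    for (;;) {
        if (cpu->softirq_pending)                  /* intr_do_softirq           */
            do_softirq_sketch(cpu);                /* then b intr_check_resched */

        if (task->need_resched) {                  /* intr_check_resched        */
            if (!returning_to_user(task))
                break;                             /* CMPIB= 0,...,intr_restore */
            schedule_sketch(task);                 /* returns to intr_return    */
            continue;
        }

        if (task->sigpending) {                    /* intr_check_sig            */
            if (!returning_to_user(task))
                break;
            do_signal_sketch(task);                /* then b intr_restore       */
        }
        break;
    }
    intr_restore_sketch();                         /* intr_restore: rest_* + rfi */
}

int main(void)
{
    struct cpu_state_sketch  cpu  = { .softirq_pending = 1 };
    struct task_state_sketch task = { .need_resched = 1, .sigpending = 1, .user_iasq = 1 };
    intr_return_sketch(&cpu, &task);
    return 0;
}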
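dtlb_miss_20w resolves a data TLB miss by walking a three-level page table: each level contributes a 9-bit index taken from the virtual address (the extrd,u va,{33,42,51},9 instructions), a non-present entry aborts the walk, and the accessed bit is set in the final pte before its low flag bits are stripped to form the physical page address. Below is a C sketch of the same walk, assuming hypothetical constants (PTRS_PER_LEVEL, PAGE_SHIFT, the flag bit positions); the real values live in the kernel's pgtable.h and the real handler runs with data translation off and inserts the resulting pte/prot pair with idtlbt, which the sketch cannot model.

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical constants; the real ones depend on kernel version and page size. */
#define PTRS_PER_LEVEL   512            /* 9 index bits per level            */
#define PAGE_SHIFT       12             /* low 12 bits of an entry are flags */
#define _PAGE_PRESENT    (1UL << 0)     /* placeholder bit positions         */
#define _PAGE_ACCESSED   (1UL << 5)

/* One 9-bit index per level: level 0 = pgd (highest bits), level 2 = pte level. */
static inline size_t level_index(uint64_t va, int level)
{
    int shift = PAGE_SHIFT + 9 * (2 - level);
    return (size_t)(va >> shift) & (PTRS_PER_LEVEL - 1);
}

/*
 * Three-level software page-table walk mirroring dtlb_miss_20w: follow
 * pgd -> pmd -> pte, bail out if any level is not present (the assembly
 * branches to dtlb_check_alias_20w / dtlb_fault), mark the pte accessed,
 * and return the page-frame address with the flag bits stripped.
 */
bool walk_page_table(uint64_t *pgd, uint64_t va, uint64_t *paddr_out)
{
    uint64_t *table = pgd;
    uint64_t entry;

    for (int level = 0; level < 2; level++) {
        entry = table[level_index(va, level)];      /* ldd,s idx(ptp),ptp     */
        if (!(entry & _PAGE_PRESENT))
            return false;                           /* bb,>=,n ..._PRESENT... */
        /* depdi 0,63,12,ptp: strip flags to get the next table's address */
        table = (uint64_t *)(entry & ~((1UL << PAGE_SHIFT) - 1));
    }

    entry = table[level_index(va, 2)];              /* ldd 0(ptp),pte */
    if (!(entry & _PAGE_PRESENT))
        return false;

    if (!(entry & _PAGE_ACCESSED))                  /* and,*<> t1,pte,%r0 */
        table[level_index(va, 2)] = entry | _PAGE_ACCESSED;  /* std t0,0(ptp) */

    /* depdi 0,63,12,pte: drop flag bits to get the physical page address */
    *paddr_out = entry & ~((1UL << PAGE_SHIFT) - 1);
    return true;
}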
