fast_interrupt.s
EROS RTOS (written in C++)
	 * is what we want v/s what the stack now holds:
	 *
	 *          WANT        HAVE
	 *          eflags      eflags
	 *          cs          cs
	 *          eip         eip
	 *          err code    eflags
	 *          int number  cs
	 *          eax         eip
	 *          ecx         error code
	 * %esp->   edx         int number    <- %esp
	 *          ebx
	 *          cr2  if page fault, else unused
	 *          ebp
	 *          esi
	 *          edi
	 *          cr3
	 *          0
	 *
	 * In addition, only %cs and %ss hold proper kernel segment
	 * selectors.  Save %eax to its proper place using a segment
	 * override, and then reload the data segments:
	 */
1:	ss				/* segment prefix */
	movl	%eax,8(%esp)		/* save %eax by hand to proper place */
	mov	$0x10,%ax
	mov	%ax,%ds
	mov	%ax,%es

	/*
	 * Reshuffle the stack, moving the error code and interrupt number
	 * up two words, and constructing a stack that looks like
	 * what pushal would create:
	 */
	movl	4(%esp),%eax		/* err code */
	movl	%eax,16(%esp)		/* reshuffled err code */
	movl	0(%esp),%eax		/* int no */
	movl	%eax,12(%esp)		/* reshuffled int no */
	movl	%ecx,4(%esp)		/* save %ecx by hand */
	movl	%edx,0(%esp)		/* save %edx by hand */
	subl	$28,%esp		/* adjust sp to point to bottom of save area */
	movl	%esp,%eax
	jmp	L_load_kernel_map

	/*
	 * Page fault interrupt generates an error code diagnosing the
	 * fault.  It can occur on the iret/movseg path, but if so it's an
	 * instruction fetch fault and should be considered a fatal error,
	 * so we don't do anything special about that case (it shouldn't
	 * happen anyway).
	 */
	.align 16
LEXT(intr_pagefault)
	cmpl	$L_iret_pc, 8(%esp)
	je	1f
#ifndef NO_FAST_GATE_JUMP
	cmpl	$EXT(Fast_Send_Probe), 8(%esp)
	je	3f
#ifdef NEWFANGLED_PATH
	cmpl	$EXT(Fast_Rcv_Probe), 8(%esp)
	je	3f
#endif
#endif
	pusha
	movl	%cr2,%eax
	movl	%eax,12(%esp)
	jmp	EXT(intr_common)

1:	cli
	ss
	movl	$0x07460750,0x000b8000	/* "PF" */
2:	hlt
	jmp	2b

#ifndef NO_FAST_GATE_JUMP
	/* Recovery from fast-path fault: */
3:	movl	%EBX,%ESP
	jmp	Standard_Gate_Path
#endif

	.align 16
	/* Special fast-path clock entry point, which is interrupt 0x20. */
ENTRY(intr_clock)
	pushl	$0			/* error code, in case not fast path */
	pushl	$0x20			/* interrupt #, in case not fast path */
	pusha				/* cheapest way to get some scratch
					   space that is compatible with the
					   long path. */

	/* ACK the PIC: */
	movb	$0x20,%al
	outb	%al,$0x20

	/*
	 * Spurious interrupts don't show up on this line, so don't bother
	 * to check.
	 */

	/*
	 * First, bump the counter.  Do this on the in-memory copy, so
	 * that it is properly updated.
	 */
	ss
	addl	$1,_8SysTimer.now
	ss
	adcl	$0,_8SysTimer.now+4
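	/*
	 * The ss-prefixed addl/adcl pair above is a 64-bit increment done
	 * 32 bits at a time.  As a rough C sketch (the SysTimer declaration
	 * below is an assumption inferred from the _8SysTimer symbol, not
	 * the kernel's actual definition):
	 *
	 *     #include <stdint.h>
	 *     static struct { volatile uint64_t now, wakeup; } SysTimer;
	 *
	 *     void clock_tick(void) {
	 *         SysTimer.now += 1;   // addl $1 bumps the low word;
	 *     }                        // adcl $0 carries into the high word
	 */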
#ifdef OPTION_KERN_PROFILE
	ss
	cmpl	$0,EXT(KernelProfileTable)
	je	no_profiling
	ss
	testl	$3,44(%esp)		/* test interrupted kernel? */
	jnz	no_profiling
	ss				/* might not be necessary for ESP-relative load */
	movl	40(%esp),%ecx		/* EIP: 32 from pushal, + intno + errcode */
	ss
	movl	40(%esp),%edx		/* EIP: 32 from pushal, + intno + errcode */
	andl	$0x3ffffffc,%edx	/* convert to linear address */
	shrl	$2,%edx			/* reduce to table slot offset */
	ss
	addl	EXT(KernelProfileTable),%edx	/* add table base */
	cmpl	$EXT(etext),%ecx
#if 0
	jbe	increment
	int3				/* jmp no_profiling */
increment:
#endif
#if 1
	ss
	incl	(%edx)
#endif
no_profiling:
#endif

#if 0
	/* enable for testing: */
	jmp	EXT(intr_common)
#endif

	/*
	 * Check to see if there is anything worth waking up:
	 */
	ss
	movl	_8SysTimer.wakeup+4,%ecx
	ss
	movl	_8SysTimer.now+4,%edx
	cmpl	%edx,%ecx
	ja	.L_fast_int_exit
	jne	1f			/* wake someone up */
	ss
	movl	_8SysTimer.now,%edx
	ss
	cmpl	%edx,_8SysTimer.wakeup
	ja	EXT(.L_fast_int_exit)	/* nothing to do */

	/*
	 * We definitely need to wake someone up, but if this is a nested
	 * clock interrupt we are not supposed to do it now.  Mark the
	 * interrupt pending in either case, but bail if we are nested:
	 */
1:	ss
	orl	$0x1,EXT(_3IRQ.PendingInterrupts);	/* clock is IRQ 0 */
	ss
	cmpl	$0,EXT(TrapDepth)
	jz	EXT(intr_common)
	/* FALL THROUGH */

	/*
	 * This is placed here for cache locality, because the timer
	 * interrupt is by far the highest stress on the interrupt
	 * system.
	 */
LEXT(.L_fast_int_exit)
	/* restore the registers: */
	popa
	/* scrub the int # and error code and return from interrupt: */
	addl	$8,%esp
	iret
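	/*
	 * The wakeup test above compares two 64-bit values one 32-bit word
	 * at a time: high words first, low words only when the high words
	 * are equal.  A hedged C sketch of the whole fast clock decision
	 * (names follow the symbols used above; the declarations are
	 * assumptions, not the kernel's real interfaces):
	 *
	 *     if (SysTimer.now >= SysTimer.wakeup) {
	 *         IRQ.PendingInterrupts |= 0x1;   // clock is IRQ 0
	 *         if (TrapDepth == 0)
	 *             goto long_path;             // fall into intr_common
	 *     }
	 *     fast_exit();   // popa, drop int# and err code, iret
	 */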
.text
	.align 16
LEXT(intr_entry)
	/* If we interrupted the call gate path, test for it and
	   recover here! */
	pusha

/*
 * intr_common MUST NOT BE AN ENTRY because alignment
 * might mess up the code generation.
 */
LEXT(intr_common)
	subl	$8,%esp			/* CR3 doesn't change, reload units don't
					   change */
	mov	%esp,%eax		/* pointer to save area */

#ifdef V86_SUPPORT
	/*
	 * If we interrupted a V86 task, segment registers have
	 * already been saved, so no need to save them redundantly.
	 */
	ss
	testl	$0x20000,56(%eax)	/* is VM bit set? */
	jnz	L_load_kernel_segments
#endif

	/*
	 * If we interrupted the kernel, there is no need to
	 * redundantly save/restore the segment registers.  Once we
	 * know that we did not take a V86 interrupt, we can test the
	 * low 3 bits of the saved CS to determine the privilege level
	 * that we interrupted:
	 */
	ss
	testl	$3,52(%eax)		/* test low bits of interrupted CS */
	jz	L_dispatch_interrupt

#if 0
	cli
	ss
	movw	$0x8f32,0x000b8010
5:	hlt
	jmp	5b
#endif

	/*
	 * It might *seem* faster to dork ESP rather than eat the
	 * marginal instruction byte fetches, but the marginal instruction
	 * bytes are a wash, and doing things this way eliminates AGEN
	 * interlocks on the Pentium and later.
	 */
	movl	%gs,80(%esp)
	movl	%fs,76(%esp)
	movl	%ds,72(%esp)
	movl	%es,68(%esp)

L_load_kernel_segments:
	/*
	 * Prior to loading the address space pointer, we need to make sure
	 * we are running from a copy of the interrupt stub that exists
	 * in the domain's address space:
	 */
	/*
	 * Now load the kernel segments.  We will continue to run code
	 * out of the Window.
	 */
	mov	$0x10,%bx
	movl	%bx,%ds
	movl	%bx,%es			/* for string copies */
	/* the kernel doesn't use any other segments. */

L_load_kernel_map:
	/*
	 * In the new design, the kernel runs mapped into the user
	 * address space, and relies on being able to make direct use
	 * of sender-side virtual addresses.  Disable this for
	 * development, as it prevents detection of pg 0 reference
	 * faults.
	 */

L_dispatch_interrupt:
	/*
	 * %eax now holds the save area pointer, which is valid in both
	 * the kernel and user address spaces.
	 *
	 * If we aren't already running on the kernel stack, switch to
	 * it now.  Nothing special will be required on return to the
	 * thread, since we will reload explicitly from the save area.
	 */
	cmpl	$0,EXT(TrapDepth)
	jne	L_skip_stack_reload

	movl	$EXT(InterruptStackTop),%esp

L_skip_stack_reload:
	/*
	 * Now running out of kernel data and stack.
	 */

	/*
	 * Bump the interrupt depth counter here.  All of our entry
	 * points use interrupt gates, so interrupts are definitely
	 * disabled.  Doing this from the assembly code eliminates
	 * the need to play sleazy games in the C part of the return
	 * path.
	 */
	incl	EXT(TrapDepth);
	incl	EXT(_3IRQ.DisableDepth);

	/*
	 * Call the interrupt dispatch routine, passing it a pointer
	 * to the register save area.  It is entirely up to the interrupt
	 * routines to decide whether or not to re-enable interrupts.
	 */
	pushl	%eax

	call	EXT(OnTrapOrInterrupt__3IDTP8SaveArea)

	/* This should NEVER return */
	jmp	EXT(halt)
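	/*
	 * The dispatch step above, as a hedged C sketch.  The helper names
	 * are illustrative; OnTrapOrInterrupt stands for the mangled symbol
	 * OnTrapOrInterrupt__3IDTP8SaveArea, i.e. a member of IDT taking a
	 * SaveArea pointer:
	 *
	 *     if (TrapDepth == 0)
	 *         esp = InterruptStackTop;       // switch to the kernel interrupt stack
	 *     ++TrapDepth;                       // entered through an interrupt gate,
	 *     ++IRQ.DisableDepth;                // so interrupts are already off
	 *     IDT::OnTrapOrInterrupt(saveArea);  // must not return
	 *     halt();
	 */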
	/*
	 * Entry point for IPC invocations - a special-cased version of
	 * the path above.
	 */

	/*
	 * NOTE FOR THE BENEFIT OF THE FAST PATH:
	 *
	 * The following registers are receiver-only, and can therefore
	 * safely be smashed in this path:
	 *
	 *    EDI -- rcv data ptr
	 *    ECX -- rcv keys, rcv len
	 *
	 * In addition, %ESP is not used by the calling convention.
	 *
	 * This path should endeavour not to smash anything else if
	 * it can be avoided.
	 */
	.align 16
ENTRY(istub0x31)
#if 1
	subl	$8,%ESP			/* err code, exception no not used in
					   fast path, patched later in slow path */
#else
	pushl	$0
	pushl	$0x31
#endif

	pusha
	subl	$8,%esp			/* CR3 doesn't change, reload units don't
					   change */

	/*
	 * It might *seem* faster to dork ESP rather than eat the
	 * marginal instruction byte fetches, but the marginal instruction
	 * bytes are a wash, and doing things this way eliminates AGEN
	 * interlocks on the Pentium and later.
	 */
	movl	%gs,80(%esp)
	movl	%fs,76(%esp)
	movl	%ds,72(%esp)
	movl	%es,68(%esp)

	/*
	 * Now load the kernel segments.  We will continue to run code
	 * out of the Window.
	 */
	mov	$0x10,%cx
	movl	%cx,%ds
	movl	%cx,%es			/* for string copies */

	/* CONTEXT_SIZE */
	subl	$52,%ESP		/* make ESP point to context */

#ifndef NO_FAST_GATE_JUMP
	/*
	 * Following path is an experimental hand-coding of a fast gate
	 * jump path.  This path bets like crazy that it is going to
	 * succeed.  The basic strategy is to avoid altering anything
	 * in the saved sender state until we know we're going to get
	 * through, so that we can always fall back to the standard path.
	 *
	 * On entry, %ESP points to sender context, ECX is a scratch reg,
	 * EDI is a scratch reg, and ESP points to the kernel interrupt stack.
	 */

	/*
	 * Standard invocation rolls back the PC here.  We do not, on
	 * the theory that we are going to make it all the way through.
	 *
	 * It then establishes a recovery block, which we don't do either,
	 * since we won't call anything that might longjmp.
	 */

	/* 0. Verify that this is indeed a valid invocation: */

	/*    Make sure invoked key register number is within range */
	cmpl	$15,%EBP
	ja	Standard_Gate_Path

	/* 1. Convert %EBP to a pointer to the invoked key.  Do this early
	      to prevent AGEN interlock on the Pentium: */
	shll	$4,%EBP			/* convert EBP ndx to EBP offset */
	addl	36(%ESP),%EBP		/* key regs base addr */

	/*    Test the invocation type -- note that FORK invocations
	      take the long path. */
	cmpl	$0x02000000,%EBX
	jae	Standard_Gate_Path

#if 0
	testl	$0x01000000,%EBX	/* for now do only calls */
	jz	Standard_Gate_Path
#endif
#if 0
	testl	$0x01000000,%EBX	/* for now do only returns */
	jnz	Standard_Gate_Path
#endif

	testl	$0x000e0000,%EBX	/* require valid string send type */
	jnz	Standard_Gate_Path

	/* 2. We now know that this is a validly formatted invocation.
	      Prepare the sender to proceed with the invocation.
	      No critical registers have been smashed yet. */

	/*    Load the context pointer on the assumption that this will
	      prove to be a proper gate key.  Do this early to prevent
	      AGEN interlock on Pentium:
	 */
	movl	12(%EBP),%EDI		/* load context ptr */

	/* Now have recipient context ptr in EDI, sender context in ESP */

	/*    Verify that invoked key is a prepared, unhazarded gate key.
	      The xorl is a godawful hack, but saves several instructions.
	      FIX -- Should I invert the logic of the prepared bit?  */
	cmpw	$0x01,(%EBP)		/* Test key type::subtype, /U */
	ja	Standard_Gate_Path

Normal_Gate_Key:
	/* 3. We have a normal gate key, and %EBP points to the key.
	      See if the recipient domain is ready to receive. */

	/*    Make sure recipient context is valid */
	cmpl	$0,12(%EDI)
	je	Standard_Gate_Path

	/* Make sure recipient doesn't need any special functional units */
	cmpl	$0,60(%EDI)
	jnz	Standard_Gate_Path

#if 0
	/* If recipient has no mapping table ptr, fix it */
	cmpl	$0,64(%EDI)
	jnz	have_mapping_table
	movl	$KERNPAGEDIR,56(%EDI)
have_mapping_table:
#endif

	/* Verify that recipient state matches invoked key type */
	movb	(%EBP),%CL
	cmpb	%CL,56(%EDI)		/* state should match keytype */
	jne	Standard_Gate_Path

#ifdef COUNT_PHASES
	incl	EXT(state_ok);
#endif

	/*    See if we are sending keys 0..2 */
#ifdef SEND_KEY_3
	testl	$0x0fff0000,%EDX
#else
	testl	$0xffff0000,%EDX
#endif
	ja	Standard_Gate_Path

	/*    Or receiving anything other than a resume key */
	testl	$0x0fff0000,92(%EDI)
	jnz	Standard_Gate_Path
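	/*
	 * The fast-path screening so far, as a hedged C sketch.  The field
	 * offsets (0, 12, 56, 60) are read from the code above; the
	 * structure and field names are illustrative assumptions only:
	 *
	 *     if (keyRegNdx > 15)                 goto standard_path;  // invoked key register in range
	 *     Key *key = keyRegsBase + keyRegNdx;                      // shll $4 / addl 36(%esp)
	 *     if (invType >= 0x02000000)          goto standard_path;  // FORK invocations take the long path
	 *     if (invType & 0x000e0000)           goto standard_path;  // require valid string send type
	 *     Context *rcv = key->context;                             // 12(%ebp)
	 *     if (key->typeSubtype > 0x01)        goto standard_path;  // prepared, unhazarded gate key
	 *     if (!rcv->valid || rcv->needsUnits) goto standard_path;  // 12(%edi), 60(%edi)
	 *     if (rcv->state != key->type)        goto standard_path;  // 56(%edi) must match key type
	 */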
