📄 fast_interrupt.s
字号:
/*
 * NOTE(review): this chunk is the tail of the hand-scheduled i386 "fast
 * path" for a key-invocation gate (AT&T/GAS syntax, ring-0 code).
 * Throughout the fast path %ESP points at the SENDER's context and %EDI
 * at the RECIPIENT's context (see the "Sender goes to ..." /
 * "Receiver goes to running state" stores below).  The numeric
 * displacements (20/24/32/36/56/64/84/88/92/96, and the CONTEXT_SIZE
 * values 120/128/136) presumably mirror the C context-structure layout --
 * TODO confirm against that struct definition before changing any of them.
 * This text was recovered from a line-collapsed scrape; instruction tokens
 * are unchanged, only whitespace and comments have been restored.
 */
#if 0
	jmp Standard_Gate_Path
#endif
	jmp Done_String_Xmit

Done_Empty_String_Xmit:
	/* Update receiver string length */
	movw $0,92(%EDI)

Done_String_Xmit:
	/* We are definitely going forward at this point. */

	/* Transfer the order code to free up EAX */
	movl %EAX,96(%EDI)

	/* EAX IS NOW AVAILABLE */

	/* Transfer the key data field to the recipient */
	movw 2(%EBP),%AX
	movw %AX,84(%EDI)

#define ZAP_RESUME_KEYS
#ifdef ZAP_RESUME_KEYS
	/* If the invoked key was a resume key, we need to demolish all
	   outstanding resume keys before performing the key copy.  Note
	   that we know there was at least one resume key. */
	cmpb $0x21,(%EBP)	/* 0x21 is evidently the prepared-resume-key
				   type tag -- confirm against key defs */
	jne not_resume_key

	/* EBP is dead below this point, so revise it to point to the
	   recipient domain root */
	movl 32(%EDI),%EAX	/* EAX points to key we are now zapping */
	leal 24(%EDI),%ECX	/* ECX points to context keyring */

	/* We know that there is at least one resume key in the chain,
	   so we need not test the first one. */
walk_resume_chain:
	/* Overwrite curKey with zero number key */
	movl $0x80020129,(%EAX)	/* quickie zero number key */
	movl 8(%EAX),%EAX	/* fetch curKey->prev to %EAX */
	cmpb $0x21,(%EAX)	/* is prev key a prepared resume key? */
	je walk_resume_chain
	movl %EAX,32(%EDI)	/* set context prev ptr to right thing. */
	movl %ECX,4(%EAX)	/* set prev->next to domain root */
not_resume_key:
#endif

	/* EBP now points to key, but will no longer be used. */

	/* Transfer the string, if any -- this frees ESI */

	/* Tell recipient they got no string */
	movw $0,88(%EDI)

#if 0
	/* Transfer keys if appropriate */
	testl $0xffff0000,88(%EDI)
	jnz do_key_xfer
done_key_xfer:
#endif

	/* Decide if it's a call or a return */

	/* Test the invocation type -- note that FORK invocations take
	   the long path. */
	testl $0x01000000,%EBX	/* 0 = reply, 1 = call */
	jnz Do_Fast_Call

	/* IT'S A RETURN */

	/* Sender goes to available state: */
	movb $0,56(%ESP)

	/* put null key in recipient slot 4 (if any) */
	testl $0xf0000000,88(%EDI)
	jz Cleanup

	movl 88(%EDI),%EAX
	shrl $24,%EAX		/* compute offset relative to key regs */
	addl 36(%EDI),%EAX	/* add base address of gen keys node */

	/* see if recipient key is prepared object key */
	cmpb $7,(%EAX)
	ja copy_key

	/* undo link pointers -- unchain the recipient key before
	   overwriting it */
	movl 4(%EAX),%ESI	/* next pointer */
	movl 8(%EAX),%ECX	/* prev pointer */
	movl %ESI,4(%ECX)	/* prev->next = next */
	movl %ECX,8(%ESI)	/* next->prev = prev */

copy_key:
#ifdef SEND_KEY_3
	shrl $24,%EDX
	andl $0xf0,%EDX
	addl 36(%ESP),%EDX	/* add base address of gen keys node */
	movl 12(%EDX),%ECX
	movl (%EDX),%ESI
	movl %ECX,12(%EAX)	/* object pointer or bits */
	movl 4(%EDX),%ECX	/* next ptr or bits */
	movl %ESI,(%EAX)
	movl 8(%EDX),%ESI	/* prev ptr or bits -- may not use! */
	movl %ECX,4(%EAX)	/* next ptr or bits */
	cmpl $7,%ESI		/* see if sent key is prepared */
	ja src_key_not_prepared
	/* src key is prepared - update ptr chain */
	movl %EAX,4(%EDX)	/* update src next ptr */
	movl %EDX,%ESI		/* update dest prev ptr */
src_key_not_prepared:
	movl %ESI,8(%EAX)	/* write prev ptr or bits */
#else
	/* overwrite with zero number key */
	movl $0x80020129,(%EAX)	/* quickie zero number key */
	jmp Cleanup
#endif

Do_Fast_Call:
	/* Sender goes to waiting state: */
	movb $1,56(%ESP)

	/* put resume key in recipient slot 4 (if any) */
	testl $0xf0000000,88(%EDI)
	jz Cleanup

	movl 88(%EDI),%EAX
	shrl $24,%EAX		/* compute offset relative to key regs */
	addl 36(%EDI),%EAX	/* add base address of gen keys node */

	/* see if recipient key is prepared object key */
	cmpb $7,(%EAX)
	ja write_resume_key

	/* undo link pointers -- unchain the recipient key before
	   overwriting it */
	movl 4(%EAX),%ESI	/* next pointer */
	movl 8(%EAX),%ECX	/* prev pointer */
	movl %ESI,4(%ECX)	/* prev->next = next */
	movl %ECX,8(%ESI)	/* next->prev = prev */

write_resume_key:
	/* Thread the new resume key onto the sender's keyring: */
	leal 24(%ESP),%EDX	/* EDX points to context keyring */
	movl 32(%ESP),%ECX	/* context keyring.prev to %ECX */
	movl %EAX,4(%ECX)	/* prev->next = resume key */
	movl %EAX,32(%ESP)	/* context keyring.prev = resume key */

	/* overwrite rcv key with prepared resume key */
	movl $0x00000001,(%EAX)	/* prepared, normal resume key */
	movl %EDX,4(%EAX)	/* next = context keyring */
	movl %ECX,8(%EAX)	/* prev = prev key */
	movl %ESP,12(%EAX)	/* context = receiver */

Cleanup:
	/* If we invoked a resume key, blast all of its siblings */

	/* Load thread pointer */
	movl 20(%ESP),%ECX

	/* Unthread sender */
	movl $0,20(%ESP)

	/* Receiver goes to running state */
	movb $2,56(%EDI)

	/* Thread receiver */
	movl %ECX,20(%EDI)

	/* Update thread context pointer */
	movl %EDI,32(%ECX)

	/* Copy recipient address space pointer to %EAX and update TSS
	   slot.  If the recipient has no mapping table yet, run it on
	   the kernel page directory. */
	movl 64(%EDI),%EAX
	testl %EAX,%EAX
	jnz Have_Space
	movl $KERNPAGEDIR,%EAX
	movl $KERNPAGEDIR,64(%EDI)
Have_Space:
	/* THIS IS ESSENTIALLY A COPY OF THE BOTTOM HALF OF
	   resume_process.  ANY CHANGES MADE THERE SHOULD BE DUPLICATED
	   HERE. */
	leal 64(%EDI),%ESP	/* switch %ESP into the recipient's save area */
	leal 128(%EDI),%EDX
	movl %EDX,4+EXT(_3TSS.TaskTable)	/* TSS ring-0 stack slot --
						   presumably; confirm TSS layout */

	/* Reload target address space: */
	popl %eax
	movl %cr3,%ebx		/* retrieve old pointer */
	cmpl %eax,%ebx
	je 1f			/* skip CR3 write (and TLB flush) if unchanged */
	movl %eax,%cr3		/* restore user address space */
1:	movl %EAX,28+EXT(_3TSS.TaskTable)
	popa

	/*
	 * Skip over the pushed error number and exception number:
	 */
	addl $8,%esp

	/*
	 * Reload the segment registers by reaching up past the critical
	 * stuff with movl so that if we fault we always defecate at the
	 * same point on the stack:
	 */
L_fast_reload_es:	movl 20(%esp),%es
L_fast_reload_ds:	movl 24(%esp),%ds
L_fast_reload_fs:	movl 28(%esp),%fs
L_fast_reload_gs:	movl 32(%esp),%gs

	/*
	 * ONCE BELOW THIS POINT, ALL REFERENCES MUST BE TO THE CODE
	 * OR STACK SEGMENTS.  BECAUSE DS NOW HOLDS THE USER DATA
	 * SEGMENT, WE CANNOT TRUST THAT THE DATA SEGMENT IS VALID.
	 */

	/*
	 * Sayonara, interrupt context.  The iret will restore interrupts
	 * automagically.
	 */
L_fast_iret_pc:
1:	iret			/* NOTE(review): this "1:" label appears
				   unreferenced -- the earlier "je 1f" binds to
				   the nearer "1:" above; confirm */
	jmp EXT(halt)		/* unreachable unless the iret faults */

	.globl EXT(Standard_Gate_Path)
Standard_Gate_Path:
#endif	/* NOTE(review): closes a conditional opened before this chunk */

	/* Clean up some things in the stack frame that the fast path
	   left uninitialized: */
#if 1 /* CONTEXT_SIZE */
	movl $0x0,96(%ESP)	/* zero error number */
	movl $0x31,92(%ESP)	/* exception number */
#endif

	/*
	 * Bump the interrupt depth counter here.  All of our entry
	 * points use interrupt gates, so interrupts are definitely
	 * disabled.  Doing this from the assembly code eliminates
	 * the need to play sleazy games in the C part of the return
	 * path.
	 *
	 * Doing this AFTER the %ESP adjustment allows the SUBL to settle,
	 * preventing subsequent AGEN interlocks on Pentium and later
	 * processors.
	 */
	movl $1,EXT(TrapDepth)
	movl $1,EXT(_3IRQ.DisableDepth)
	movl $EXT(InterruptStackTop),%esp

	/*
	 * Call the interrupt dispatch routine, passing it a pointer
	 * to the register save area.  It is entirely up to the interrupt
	 * routines to decide whether or not to re-enable interrupts.
	 */
	call EXT(OnKeyInvocationTrap__3IDT)

	/* This should NEVER return */
	jmp EXT(halt)

	/*
	 * Following code implements the string move probe portion of
	 * the invocation.  It is out of line because slightly less than
	 * 50% of all invocations move a string
	 */
L_do_string_move:
	/* NOTE(review): no instructions follow this label within this
	   chunk -- the string-move implementation is evidently elsewhere
	   (or missing from this copy).  Confirm before relying on this
	   entry point. */

	/*
	 * There is a send string.  Need to see if there is a resume string.
	 * If so, need to probe the source and target buffers for validity
	 * and move the string.  EDI is unfortunately in use; we shall
	 * need to fix that once the length check is done.
	 *
	 * Following entry point is common to all interrupts, but it might
	 * well make sense to special case the V86 path, particularly
	 * in light of the new interrupt handling features in the
	 * 486Dx4 and later processors.
	 */

	/* INTERRUPTS MUST BE DISABLED ON ENTRY TO THIS FUNCTION */
	.align 16
ENTRY(resume_process)
	pushl %ebp
	movl %esp,%ebp
	movl 0x8(%ebp),%esp	/* arg0: pointer to the context to restore */

	/*
	 * We are now on our way back to whatever we interrupted.  If the
	 * interrupt routine enabled interrupts, it was responsible for
	 * disabling them before returning, so interrupts are now
	 * disabled.
	 *
	 * %esp holds a pointer to the bottom of the context we are
	 * restoring from.
	 */

	/*
	 * Decrement the interrupt depth counter here.  Doing this from
	 * the assembly code eliminates the need to play sleazy games in
	 * the C part of the return path.
	 */
	decl EXT(TrapDepth);
	decl EXT(_3IRQ.DisableDepth);

	/* ANY CHANGE TO THIS PATH FROM HERE DOWN SHOULD BE IMPLEMENTED
	   IN THE FAST_GATE_PATH AS WELL */

	/* CONTEXT_SIZE */
	leal 120(%ESP),%EDX
	movl %EDX,4+EXT(_3TSS.TaskTable)

	/* CONTEXT_SIZE */
	addl $56,%ESP		/* find base of save area */

	/* Reload target address space: */
	popl %eax
	movl %cr3,%ebx		/* retrieve old pointer */
	cmpl %eax,%ebx
	je 1f			/* skip CR3 write (and TLB flush) if unchanged */
	movl %eax,%cr3		/* restore user address space */
1:	movl %EAX,28+EXT(_3TSS.TaskTable)
	popa

	/*
	 * Skip over the pushed error number and exception number:
	 */
	addl $8,%esp

	/*
	 * Reload the segment registers by reaching up past the critical
	 * stuff with movl so that if we fault we always defecate at the
	 * same point on the stack:
	 */
L_reload_es:	movl 20(%esp),%es
L_reload_ds:	movl 24(%esp),%ds
L_reload_fs:	movl 28(%esp),%fs
L_reload_gs:	movl 32(%esp),%gs

	/*
	 * ONCE BELOW THIS POINT, ALL REFERENCES MUST BE TO THE CODE
	 * OR STACK SEGMENTS.  BECAUSE DS NOW HOLDS THE USER DATA
	 * SEGMENT, WE CANNOT TRUST THAT THE DATA SEGMENT IS VALID.
	 */

	/*
	 * Sayonara, interrupt context.  The iret will restore interrupts
	 * automagically.
	 */
L_iret_pc:
1:	iret			/* NOTE(review): this "1:" label appears
				   unreferenced; confirm */
	jmp EXT(halt)		/* unreachable unless the iret faults */

	/*
	 * This entry point exists so that the first thread can be
	 * dispatched and so that the call gate entry point can exit
	 * through common code.
	 */

	/* INTERRUPTS MUST BE DISABLED ON ENTRY TO THIS FUNCTION */
	.align 16
ENTRY(resume_kernel_task)
	pushl %ebp
	movl %esp,%ebp

	/*
	 * Interrupts are now disabled.  We were passed a pointer to
	 * the bottom of the save area we are supposed to restore.
	 */
	movl 0x8(%ebp),%esp
	addl $0x8,%esp		/* no special functional units or address
				   space to reload */

	/*
	 * Decrement the interrupt depth counter here.  Doing this from
	 * the assembly code eliminates the need to play sleazy games in
	 * the C part of the return path.
	 */
	decl EXT(TrapDepth);
	decl EXT(_3IRQ.DisableDepth);

	popa

	/*
	 * Skip over the pushed error number and exception number:
	 */
	addl $8,%esp
	iret

	/* INTERRUPTS MUST BE DISABLED ON ENTRY TO THIS FUNCTION */
	.align 16
ENTRY(resume_v86_task)
	pushl %ebp
	movl %esp,%ebp
	movl 0x8(%ebp),%esp	/* arg0: pointer to the context to restore */

	/*
	 * We are now on our way back to whatever we interrupted.  If the
	 * interrupt routine enabled interrupts, it was responsible for
	 * disabling them before returning, so interrupts are now
	 * disabled.
	 *
	 * %esp holds a pointer to the bottom of the context we are
	 * restoring from.
	 */

	/* CONTEXT_SIZE */
	leal 136(%ESP),%EDX	/* NOTE(review): 136 here vs 120 in
				   resume_process -- presumably the larger
				   V86 frame; confirm CONTEXT_SIZE */
	movl %EDX,4+EXT(_3TSS.TaskTable)

	/* CONTEXT_SIZE */
	addl $56,%ESP		/* find save area */

	/*
	 * Decrement the interrupt depth counter here.  Doing this from
	 * the assembly code eliminates the need to play sleazy games in
	 * the C part of the return path.
	 */
	decl EXT(TrapDepth);
	decl EXT(_3IRQ.DisableDepth);

	/* Reload target address space: */
	popl %eax
	movl %cr3,%ebx		/* retrieve old pointer */
	cmp %eax,%ebx		/* NOTE(review): spelled "cmp" here but "cmpl"
				   elsewhere; same encoding with register
				   operands */
	je 1f
	movl %eax,%cr3		/* restore user address space */
1:	movl %EAX,28+EXT(_3TSS.TaskTable)
	popa

	/*
	 * Skip over the pushed error number and exception number:
	 */
	addl $8,%esp

	/*
	 * Sayonara, interrupt context.  The iret will restore interrupts
	 * automagically.  (An iret to VM86 mode reloads the user segment
	 * registers from the frame, so no explicit segment reloads are
	 * needed here.)
	 */
L_v86_iret_pc:
	iret
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -