📄 interrupt.s
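/*
 * Two reading notes for this excerpt. First, a bare "ss" line is an
 * SS segment-override prefix applied to the instruction on the next
 * line: it forces stack-segment addressing at points where %ds may
 * still hold an untrusted user selector. Second, the TR_OFF_* and
 * FX_OFF_* symbols are save-area byte offsets defined in a header
 * that is not part of this excerpt. The disabled block below is a
 * minimal sketch of plausible values, inferred from the stack
 * diagram in the IRET-recovery comment further down -- illustrative
 * only, not the real definitions:
 */
#if 0
TR_OFF_Error      = 4		/* %esp at vector number: vec, err, eip, cs, eflags */
TR_OFF_EIP        = 8
TR_OFF_CS         = 12
TR_OFF_EFLAGS     = 16
FX_OFF_EDI        = 8		/* %esp at bottom of the full save area */
FX_OFF_ExceptAddr = 20		/* the "cr2 if page fault" slot */
FX_OFF_ExceptNo   = 40
FX_OFF_Error      = 44
FX_OFF_EIP        = 48
FX_OFF_CS         = 52
FX_OFF_EFLAGS     = 56
#endif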
        testl $0x20000,TR_OFF_EFLAGS(%esp)	/* is VM bit set? */
        jnz intr_entry			/* normal fault */
#endif
        testl $3,TR_OFF_CS(%esp)	/* supervisor mode? */
        jnz intr_entry			/* normal fault */

        cmpl $L_ipc_block_hi, TR_OFF_EIP(%esp)
        je intr_gp_ipc
        cmpl $L_ipc_block_lo, TR_OFF_EIP(%esp)
        je intr_gp_ipc
#ifdef ASM_VALIDATE_STRINGS
        cmpl $L_ipc_send_str, TR_OFF_EIP(%esp)
        je intr_gp_ipc
        cmpl $L_ipc_send_str_end, TR_OFF_EIP(%esp)
        je intr_gp_ipc
#endif

        cmpl $L_iret_pc, TR_OFF_EIP(%esp)
        je 1f
#ifdef DEBUG_NESTED_IRET
        cmpl $L_fast_int_iret_pc, TR_OFF_EIP(%esp)
        je L_fault_nested_iret
#endif
        cmpl $L_reload_ds, TR_OFF_EIP(%esp)
        je 1f
        cmpl $L_reload_es, TR_OFF_EIP(%esp)
        je 1f
        cmpl $L_reload_fs, TR_OFF_EIP(%esp)
        je 1f
        cmpl $L_reload_gs, TR_OFF_EIP(%esp)
        je 1f
#ifdef FAST_IPC_RETURN
        cmpl $L_fast_iret_pc, TR_OFF_EIP(%esp)
        je 1f
        cmpl $L_fast_reload_ds, TR_OFF_EIP(%esp)
        je 1f
        cmpl $L_fast_reload_es, TR_OFF_EIP(%esp)
        je 1f
        cmpl $L_fast_reload_fs, TR_OFF_EIP(%esp)
        je 1f
        cmpl $L_fast_reload_gs, TR_OFF_EIP(%esp)
        je 1f
#endif
        cmpl $L_v86_iret_pc, TR_OFF_EIP(%esp)
        je 1f

        /* It's a normal (fatal) kernel-mode fault */
        jmp intr_entry

/*
 * Recovering from an exception on the IRET instruction. Here
 * is what we want v/s what the stack now holds:
 *
 *	WANT			HAVE
 *	eflags			eflags
 *	cs			cs
 *	eip			eip
 *	err code		eflags
 *	int number		cs
 *	eax			eip
 *	ecx			error code
 * %esp->	edx		int number  <- %esp
 *	ebx
 *	cr2 if page fault, else unused
 *	ebp
 *	esi
 *	edi
 *	cr3
 *	0
 *
 * In addition, only %cs and %ss hold proper kernel segment
 * selectors. Save %eax to its proper place using a segment
 * override, and then reload the data segments:
 */
1:      ss				/* segment prefix */
        movl %eax,8(%esp)		/* save %eax by hand to proper offset */
        mov $0x10,%ax
        mov %ax,%ds
        mov %ax,%es

        /*
         * Reshuffle the stack, moving the error code and interrupt
         * number up two words, and constructing a stack that looks
         * like what pushal would create:
         */
        movl 4(%esp),%eax		/* err code */
        movl %eax,16(%esp)		/* reshuffled err code */
        movl 0(%esp),%eax		/* int no */
        movl %eax,12(%esp)		/* reshuffled int no */
        movl %ecx,4(%esp)		/* save %ecx by hand */
        movl %edx,0(%esp)		/* save %edx by hand */

        subl $28,%esp			/* adjust sp to point to bottom of save area */
        movl %esp,%eax
        jmp L_load_kernel_map

#ifdef DEBUG_NESTED_IRET
L_fault_nested_iret:
        /* below printed at line 8, in case we die. */
        ss
        movl $0x8f418f46,0x000b8500	/* "FA" */
        ss
        movl $0x8f548f53,0x000b8504	/* "ST" */
        ss
        movl $0x8f498f20,0x000b8508	/* " I" */
        ss
        movl $0x8f458f52,0x000b850c	/* "RE" */
        ss
        movl $0x8f208f54,0x000b8510	/* "T " */
        jmp dump_state
#endif

/* General protection fault can occur in the inbound IPC path if the
   user handed us an IPC descriptor block pointer that resulted in an
   out-of-bounds reference. If that occurred:

     %ds, %es, %fs, %gs still hold whatever they held at the time of
     the fault.

     %eax points to current context.

   Backpatch the original save area to look like this was a page
   fault taken on the INT instruction, unwind the kernel stack, and
   take the standard user-level fault entry path.

   Otherwise just branch to the intr_ec path. */

        .align 16
LEXT(intr_gp_ipc)
#if 1
        /* below printed at line 8, in case we die. */
        ss
        movl $0x0f500f49,0x000b8500	/* "IP" */
        ss
        movl $0x0f200f43,0x000b8504	/* "C " */
        ss
        movl $0x0f500f47,0x000b8508	/* "GP" */
#endif
#ifdef DEBUG_GP_IPC
        jmp dump_state
#endif
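/*
 * The stores above and in L_fault_nested_iret write directly into
 * the VGA text buffer. Each 80-column row is 160 bytes, so row 8
 * starts at 0xb8000 + 8*160 = 0xb8500. Each 32-bit immediate packs
 * two character cells, low word first: the low byte of a cell is the
 * ASCII code and the high byte is the attribute (0x0f = bright white
 * on black; 0x8f additionally sets the blink bit). Thus 0x0f500f49
 * shows as "IP". A disabled sketch of the same encoding, at an
 * arbitrarily chosen cell:
 */
#if 0
        ss
        movl $0x0f490f48,0x000b85f0	/* would display "HI" */
#endif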
        movl (%esp),%edx		/* pick up vector number */
        ss
        movl %edx,FX_OFF_ExceptNo(%eax)	/* copy it in user save area */

        /* This fault was actually a trap instruction, which we are
           fixing up to look like a page fault. Roll the PC back so
           that the correct instruction will get restarted... */
        ss
        subl $2,FX_OFF_EIP(%eax)

        /* re-establish original stack pointer */
        movl %eax,%esp

        jmp L_ipc_fault_recovery

/*
 * A page fault generates an error code diagnosing the fault. It can
 * occur on the iret/movseg path, but if so it's an instruction fetch
 * fault and should be considered a fatal error, so we don't do
 * anything special about that case (it shouldn't happen anyway). We
 * must take care, however, only to check the page fault EIP if the
 * fault occurred from supervisor mode!
 */
        .align 16
LEXT(intr_pagefault)
        ss
        testl $0x4,TR_OFF_Error(%esp)	/* were we in supervisor mode? */
        jnz pf_user_mode

        cmpl $L_iret_pc, TR_OFF_EIP(%esp)
        je pf_iret
#ifdef FAST_IPC_RETURN
        cmpl $L_fast_iret_pc, TR_OFF_EIP(%esp)
        je pf_iret
#endif
#ifdef FAST_IPC_STRINGS
#ifndef ASM_VALIDATE_STRINGS
        cmpl $string_move,TR_OFF_EIP(%esp)
        jb L_not_string_move
        cmpl $string_done,TR_OFF_EIP(%esp)
        jb pf_ipc_stringmove
L_not_string_move:
#else
        cmpl $L_rcv_string_probe, TR_OFF_EIP(%esp)
        je pf_ipc_rcv
        cmpl $L_rcv_string_probe_resid, TR_OFF_EIP(%esp)
        je pf_ipc_rcv
#endif
#endif
        cmpl $L_ipc_block_hi, TR_OFF_EIP(%esp)
        je pf_ipc_block
        cmpl $L_ipc_block_lo, TR_OFF_EIP(%esp)
        je pf_ipc_block
#ifdef ASM_VALIDATE_STRINGS
        cmpl $L_ipc_send_str, TR_OFF_EIP(%esp)
        je pf_ipc_block
        cmpl $L_ipc_send_str_end, TR_OFF_EIP(%esp)
        je pf_ipc_block
#endif

pf_user_mode:
        pusha
        subl $8,%esp		/* CR3 doesn't change, reload units don't change */
        movl %cr2,%eax
        movl %eax,FX_OFF_ExceptAddr(%esp)
        jmp EXT(intr_common_fx)

/* We have taken a page fault on the IRET instruction. This is
 * not a recoverable fault. Display the fault address and halt.
 */
pf_iret:
        ss
        movl $0x0f520f49,0x000b8000	/* "IR" */
        ss
        movl $0x0f540f45,0x000b8004	/* "ET" */
        ss
        movl $0x0f500f20,0x000b8008	/* " P" */
        ss
        movl $0x0f200f46,0x000b800C	/* "F " */
#if 1
        ss
        movl $0x0f780f30,0x000b8010	/* "0x" */
        ss
        movl $0x000b8022,%esi		/* rightmost digit position */
pf_show_addr:
        /* Print out the fault address, low nibble first, right to left */
        movb $0x0f,%ah
        movl %cr2,%ebx
        movl $0x8,%ecx
px:     movb %bl,%al
        andb $0xf,%al
        cmpb $0x9,%al
        ja hex_digit
        addb $0x30,%al		/* add ascii of '0' */
        jmp got_digit
hex_digit:
        addb $0x41,%al		/* add ascii of 'A' */
        subb $10,%al		/* subtract 10 */
got_digit:
        shr $4,%ebx
        ss
        movw %ax,(%esi)
        subl $2,%esi
        loop px
#endif
2:      hlt
        jmp 2b

/* We page faulted somewhere in the IPC block probe, or while
   probing the send string. If that occurred:

     %cr2 holds the linear fault address.

     %ds, %es, %fs, %gs still hold whatever they held at the time of
     the fault.

     %eax points to current context.

   Backpatch the save area to look like this was a page fault taken
   on the INT instruction, unwind the kernel stack, and take the
   standard user-level fault entry path.

   Note that the fault address is LINEAR, which is what the page
   fault handler wants. */

pf_ipc_block:
        movl $EXT(InterruptStackTop),%esp

        movl %cr2,%ebx
        ss
        movl %ebx,FX_OFF_ExceptAddr(%eax)	/* exception address */
        ss
        movl $0x0e,FX_OFF_ExceptNo(%eax)	/* page fault exception */
        ss
        movl $0x4,FX_OFF_Error(%eax)		/* user-mode read access fault */

        /* This fault was actually a trap instruction, which we are
           fixing up to look like a page fault. Roll the PC back so
           that the correct instruction will get restarted... */
        ss
        subl $2,FX_OFF_EIP(%eax)

        /* re-establish original stack pointer */
        movl %eax,%esp

        jmp L_ipc_fault_recovery
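/*
 * For reference, the x86 page-fault error code bits relied on above:
 * bit 0 (P) is set when the fault was a protection violation rather
 * than a not-present page, bit 1 (W/R) is set for write accesses,
 * and bit 2 (U/S) is set when the access originated in user mode.
 * That is why intr_pagefault tests $0x4 to distinguish user-mode
 * faults, and why pf_ipc_block stores $0x4 into FX_OFF_Error to
 * fabricate a "user-mode read of a not-present page".
 */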
#ifndef ASM_VALIDATE_STRINGS
pf_ipc_stringmove:
#if defined(FAST_IPC_BPT) && 0
        int3
#endif
        /* We page faulted while moving the string. Patch things up
           so that we simply resume at Standard_Gate_Path */
        jmp Standard_Gate_Path
#endif

#ifdef FAST_IPC_STRINGS
#ifdef ASM_VALIDATE_STRINGS
pf_ipc_rcv:
#ifdef FAST_IPC_STATS
        incl EXT(nFastIpcRcvPf)
#endif
        /* We page faulted while probing the rcv string. Patch things
           up so that we simply resume at Standard_Gate_Path */
        jmp Standard_Gate_Path
#endif /* ASM_VALIDATE_STRINGS */
#endif /* FAST_IPC_STRINGS */

        .align 16
/* Special fast-path clock entry point, which is interrupt 0x20. */
ENTRY(intr_clock)
        pushl $0		/* error code, in case not fast path */
        pushl $0x20		/* interrupt #, in case not fast path */

        pusha			/* cheapest way to get some scratch space
				   that is compatible with the long path. */

        /* ACK the PIC: a non-specific EOI (0x20) to the master 8259A
           command port (also 0x20): */
        movb $0x20,%al
        outb %al,$0x20

        /*
         * Spurious interrupts don't show up on this line, so don't
         * bother to check.
         */

        /*
         * First, bump the counter. Do this on the in-memory copy, so
         * that it is properly updated.
         */
        ss
        addl $1,_8SysTimer.now
        ss
        adcl $0,_8SysTimer.now+4

#ifdef OPTION_KERN_PROFILE
        ss
        cmpl $0,EXT(KernelProfileTable)
        je no_profiling

        /* Bottom of stack at this juncture is EDI from the PUSHA,
           thus the funny offset computations */
        ss
        testl $3,FX_OFF_CS-FX_OFF_EDI(%esp)	/* did we interrupt the kernel? */
        jnz no_profiling

        ss		/* might not be necessary for ESP-relative load */
        movl FX_OFF_EIP-FX_OFF_EDI(%esp),%ecx
        ss
        movl FX_OFF_EIP-FX_OFF_EDI(%esp),%edx
        andl $0x3ffffffc,%edx		/* convert to linear address */
        shrl $2,%edx			/* reduce to table slot offset */
        ss
        addl EXT(KernelProfileTable),%edx	/* add table base */
        cmpl $EXT(etext),%ecx
        ss
        incl (%edx)
no_profiling:
#endif

#if 0
        /* enable for testing: */
        jmp EXT(intr_common)
#endif

        /*
         * See if the quantum has expired:
         */
        ss
        movl EXT(_10CpuReserve.Current),%edx
        ss
        addl $-1,0(%edx)
        ss
        adcl $-1,4(%edx)
        ss
        cmp $0,0(%edx)
        jne 1f
        ss
        cmp $0,4(%edx)
        jne 1f
        ss
        movl $1,8(%edx)		/* expired */
        jmp 2f

1:
        /*
         * Check to see if there is anything worth waking up:
         */
        ss
        movl _8SysTimer.wakeup+4,%ecx
        ss
        movl _8SysTimer.now+4,%edx
        cmpl %edx,%ecx
        ja .L_fast_int_exit
        jne 2f			/* wake someone up */
        ss
        movl _8SysTimer.now,%edx
        ss
        cmpl %edx,_8SysTimer.wakeup
        ja EXT(.L_fast_int_exit)	/* nothing to do */

        /* We definitely need to wake someone up */
2:      jmp EXT(intr_common)
        /* FALL THROUGH */

/* This is placed here for cache locality, because the timer
 * interrupt is by far the highest stress on the interrupt
 * system.
 */
LEXT(.L_fast_int_exit)
        /* restore the registers: */
        popa

        /* scrub the int # and error code and return from interrupt: */
        addl $8,%esp
L_fast_int_iret_pc:
        iret
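/*
 * The 64-bit counters above live as two 32-bit words and are updated
 * with the usual add/carry idiom (addl then adcl), and the wakeup
 * test is a 64-bit unsigned compare done a word at a time: compare
 * the high words first, and only if they are equal let the low words
 * decide. A disabled sketch of that compare, with hypothetical
 * operands a in %ecx:%ebx and b in %edx:%eax and hypothetical
 * target labels:
 */
#if 0
        cmpl %edx,%ecx		/* compare high words */
        ja L_a_gt_b		/* hi(a) > hi(b)  =>  a > b */
        jb L_a_lt_b		/* hi(a) < hi(b)  =>  a < b */
        cmpl %eax,%ebx		/* high words equal: low words decide */
        ja L_a_gt_b
        /* ... otherwise a <= b ... */
#endif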
.text
        .align 16
ENTRY(intr_entry)
        /* If we interrupted the call gate path, test for it and
           recover here! */
        pusha

/*
 * intr_common MUST NOT BE AN ENTRY, because alignment
 * might mess up the code generation.
 */
LEXT(intr_common)
        subl $8,%esp		/* CR3 doesn't change, reload units don't change */
LEXT(intr_common_fx)
        mov %esp,%eax		/* pointer to save area */

#ifdef V86_SUPPORT
        /*
         * If we interrupted a V86 task, segment registers have
         * already been saved, so no need to save them redundantly.
         */
        ss
        testl $0x20000,FX_OFF_EFLAGS(%eax)	/* is VM bit set? */
        jnz L_load_kernel_segments
#endif

        /*
         * If we interrupted the kernel, there is no need to
         * redundantly save/restore the segment registers. Once we
         * know that we did not take a V86 interrupt, we can test the
         * low 2 bits of the saved CS to determine the privilege level
         * that we interrupted:
         */
        ss
        testl $3,FX_OFF_CS(%eax)	/* CPL of interrupted code */
        jz L_dispatch_interrupt

#if defined(DOMAIN_TRACING)
        /* ESP POINTS TO BOTTOM OF SAVE AREA */
        ss
        cmpl $5,EXT(CpuType)
        jb 1f			/* don't call it if not supported */
        .byte 0x0f
        .byte 0x31		/* RDTSC instruction, hand-assembled as
				   opcode 0F 31 - results to %edx:%eax */
        ss
        subl EXT(DomainTracingScratchpad),%eax
        ss
        sbbl EXT(DomainTracingScratchpad)+4,%edx
        ss
        addl %eax,90(%esp)
        ss
        adcl %edx,94(%esp)
        mov %esp,%eax		/* pointer to save area */
1:
#endif

L_ipc_fault_recovery:
        /* Populate the pseudo-registers with zero values... */
#if 0
        movl $0,96(%esp)
        movl $0,92(%esp)
        movl $0,88(%esp)