/*
 * interrupt.s — EROS i386 kernel interrupt / IPC entry stubs.
 * AT&T (GAS) syntax, cpp-preprocessed assembly (.S).
 * NOTE(review): recovered from a code-hosting page; viewer chrome removed.
 */
movl $0,84(%esp)#else movl $0,FX_OFF_invType(%esp) movl $0,FX_OFF_invKey(%esp) movl $0,FX_OFF_sndLen(%esp) movl $0,FX_OFF_sndPtr(%esp) movl $0,FX_OFF_sndKeys(%esp) movl $0,FX_OFF_rcvKeys(%esp)#endif /* * It might *seem* faster to dork ESP rather than eat the * marginal instruction byte fetches, but the marginal instruction * bytes are a wash, and doing things this way eliminates AGEN * interlocks on the Pentium and later */ movl %gs,FX_OFF_GS(%esp) movl %fs,FX_OFF_FS(%esp) movl %ds,FX_OFF_DS(%esp) movl %es,FX_OFF_ES(%esp)L_load_kernel_segments: /* * Prior to loading the address space pointer, need to make sure * we are running from a copy of the interrupt stub that exists * in the domain's address space: */ /* * Now load the kernel segments. We will continue to run code * out of the Window. */ mov $0x10,%bx mov %bx,%ds mov %bx,%es /* for string copies */ /* the kernel doesn't use any other segments. */L_load_kernel_map: /* * In the new design, the kernel runs mapped into the user * address space, and relies on being able to make direct use * of sender-side virtual addresses. Disable this for * development, as it prevents detection of pg 0 reference * faults. */L_dispatch_interrupt: /* * WATCH OUT for come-from coding out of bad_ipc_block!!! * * %eax now holds the save area pointer, which is valid in both * the kernel and user address spaces. * * If we aren't already running on the kernel stack, switch to * it now. Nothing special will be required on return to the * thread, since we will reload explicitly from the save area. * We can determine whether we came from the kernel by testing the * least significant bits of the %CS register, but this only works * if we are NOT in V8086 mode. */ testl $0x20000,FX_OFF_EFLAGS(%esp) /* is VM bit set? */ jnz L_forced_reload testl $3,FX_OFF_CS(%esp) jz L_skip_stack_reload L_forced_reload: movl $EXT(InterruptStackTop),%esp L_skip_stack_reload: /* * Now running out of kernel data and stack. 
*/ /* * Bump the interrupt depth counter here. All of our entry * points use interrupt gates, so interrupts are definitely * disabled. Doing this from the assembly code eliminates * the need to play sleazy games in the C part of the return * path. */ incl EXT(_3IRQ.DisableDepth); /* * Call the interrupt dispatch routine, passing it a pointer * to the register save area. It is entirely up to the interrupt * routines to decide whether or not to re-enable interrupts. */ pushl %eax call EXT(OnTrapOrInterrupt__3IDTP9fixregs_t) /* This should NEVER return */ jmp EXT(halt)#if 0 /* bad_ipc_block gets branched to from the IPC path if the user has set up a %ESP value that would point into kernel memory. It forges a fault address and hacks the trap code to look like a page fault, and then redirects into the code above, effectively converting the IPC into a page fault. */bad_ipc_block: movl %EBX,20(%eax) /* exception address */ movl $0x0e,40(%eax) /* page fault exception */ movl $0x4,44(%eax) /* user-mode read access fault */ /* Populate the pseudo-registers with zero values, since this instruction logically trapped before we read them... */#if 0 movl $0,96(%esp) movl $0,92(%esp) movl $0,88(%esp) movl $0,84(%esp)#else movl $0,FX_OFF_invType(%esp) movl $0,FX_OFF_invKey(%esp) movl $0,FX_OFF_sndLen(%esp) movl $0,FX_OFF_sndPtr(%esp) movl $0,FX_OFF_sndKeys(%esp) movl $0,FX_OFF_rcvKeys(%esp)#endif /* This fault was actually a trap instruction, which we are fixing up to look like a page fault. Roll the PC back so that the correct instruction will get restarted... */ subl $2,48(%eax) jmp L_dispatch_interrupt#endif /* * Entry point for IPC invocations - a special-cased version of * the path above. */ /* * NOTE FOR THE BENEFIT OF THE FAST PATH: * * The following registers are receiver-only, and can therefore * safely be smashed in this path: * * EDI -- rcv data ptr * ECX -- rcv keys, rcv len * * In addition, %ESP is not used by the calling convention. 
* * This path should endeavour not to smash anything else if * it can be avoided. */ .align 16ENTRY(intr_InvokeKey) pushl $0x0 /* zero error code */ pushl $0x31 /* IPC trap number */ pusha subl $8,%esp /* CR3 doesn't change, reload units doesn't change */ movl %ESP,%EAX movl %ESP,%EBP#if 0 /* Populate the pseudo-registers with zero values... */#if 0 movl $0,96(%esp) movl $0,92(%esp) movl $0,88(%esp) movl $0,84(%esp)#else movl $0,FX_OFF_invType(%esp) movl $0,FX_OFF_invKey(%esp) movl $0,FX_OFF_sndLen(%esp) movl $0,FX_OFF_sndPtr(%esp) movl $0,FX_OFF_sndKeys(%esp) movl $0,FX_OFF_rcvKeys(%esp)#endif#endif /* * It might *seem* faster to dork ESP rather than eat the * marginal instruction byte fetches, but the marginal instruction * bytes are a wash, and doing things this way eliminates AGEN * interlocks on the Pentium and later */ movl %gs,FX_OFF_GS(%esp) movl %fs,FX_OFF_FS(%esp) movl %ds,FX_OFF_DS(%esp) movl %es,FX_OFF_ES(%esp) movl $EXT(InterruptStackTop),%esp /* VALIDATE THE IPC BLOCK * * There are two possible problems with the IPC block: * * 1. page not present, leading to invalid kernel * reference. * 2. alleged block not in user space (security attack) * * We need to check the first by probing, and the second * by arithmetic or by some form of segment logic. * * Not all EROS address spaces are the same size. If the check * can be pulled off by using the segmentation logic to advantage, * we can avoid a bit of arithmetic and also avoid needing to load * the bound from somewhere. * * EROS defines a flat, 32-bit address space, so we can safely * reference the IPC block via %DS, provided %DS holds a kosher * value. At this point in the code, either %DS is valid or the * user is dicking with us. For the present, we are marking * DOMAINCODESEG readable, so having DOMAINCODESEG loaded in %DS * at this point doesn't change anything (base and bounds on * DOMAINCODESEG and DOMAINDATASEG are identical). 
If we change * DOMAINCODESEG to Exec only, a reference through DOMAINCODESEG * will generate a GP fault, which we can recover from in the GP * fault handler. * * We know that %DS does not hold NULLSEG because the null segment * is not marked "present" in the GDT. Attempts to load NULLSEG * therefore cause a GP fault, in which case the program never got * as far as running this instruction. If a debugger contrived to * put '0' in a segment register field in the domain root, we would * have invoked the keeper while preparing the domain. * * In all other cases %DS holds the DOMAINDATASEG, * * If the alleged IPC block is out of legal user space, then * loading the first and last byte of it should generate a GP fault * due to a segment bounds violation. Further, loading these two * bytes will cause a page fault if the page is not present. * * We therefore probe the IPC block before loading the kernel data * segment registers. * */ movl FX_OFF_EBP(%ebp),%EBX /* user EBP to %EBX */ /* Do the high address first to give the store unit maximal opportunities to merge the writes. This is the reference that will GP fault if %DS is invalid. This reference will generate a bounds error if the top of the alleged IPC block is in kernel space. This reference may generate a page fault if the uppermost byte of the IPC descriptor block falls in an invalid page. */L_ipc_block_hi: /* RCV KEYS */ movl IPC_rcvKeys(%ebx),%edx /* NOTE via CALLER %DS */ movl %edx,FX_OFF_rcvKeys(%ebp) /* NOTE via KERNEL %SS */ /* Now do the low address. This reference will generate a bounds error if the bottom of the alleged IPC block is in kernel space. The fault could occur here rather than at L_ipc_block_hi if the passed IPC block pointer was, say, 0xFFFFFFF8, because the offset addition causes rollover in the AGEN unit before the limit is checked. This reference may generate a page fault if the lowermost byte of the IPC descriptor block falls in an invalid page. 
*/L_ipc_block_lo: /* INVTYPE */ movl IPC_invType(%ebx),%esi /* NOTE via CALLER %DS */ /* INVKEY */ movl IPC_invKey(%ebx),%ecx /* NOTE via CALLER %DS */ /* INVTYPE */ movl %esi,FX_OFF_invType(%ebp) /* NOTE via KERNEL %SS */ /* INVKEY */ movl %ecx,FX_OFF_invKey(%ebp) /* NOTE via KERNEL %SS */ /* SND LEN */ movl IPC_sndLen(%ebx),%ecx /* NOTE via CALLER %DS */ /* SND PTR */ movl IPC_sndPtr(%ebx),%esi /* NOTE via CALLER %DS */ /* SND LEN */ movl %ecx,FX_OFF_sndLen(%ebp) /* NOTE via KERNEL %SS */ /* SND PTR */ movl %esi,FX_OFF_sndPtr(%ebp) /* NOTE via KERNEL %SS */ /* SND KEYS */ movl IPC_sndKeys(%ebx),%edx /* NOTE via CALLER %DS */ movl %edx,FX_OFF_sndKeys(%ebp) /* NOTE via KERNEL %SS */ #ifdef FAST_IPC_ARG_VALIDATE /* First need to make sure that we have a well-formed IPC * invocation. Following are essentially the tests in * ValidateEntryBlock() * * EAX presently points to active process structure * ECX holds xmit length * ESI holds xmit string * EDX holds sndKeys */ cmpl $EROS_NODE_SIZE,FX_OFF_invKey(%ebp) jae bogus_ipc_arg_block cmpl $EROS_MESSAGE_LIMIT,%ecx ja bogus_ipc_arg_block testl $0xe0e0e0e0,%edx jnz bogus_ipc_arg_block cmpl $2,FX_OFF_invType(%ebp) ja bogus_ipc_arg_block#ifndef ASM_VALIDATE_STRINGS /* SEND invocation is not a candidate for the fast path: */ je kern_seg_load /* wrong invocation type */#endif#endif#ifdef ASM_VALIDATE_STRINGS /* At this juncture, * * %ECX contains the xmit string length and * %ESI contains the base ptr of the xmit string * %DS still holds sender data segment * * Use this information to validate the xmit string. In the * latest design, we could actually skip the validation entirely, * on the theory that the receive buffer is undefined until the * IPC completes, but validating the string here means that the * slow path can safely assume that the send buffer has already * been validated. 
* * Note that the CMPB instructions in the code below are * performed solely for the sake of their side effect: if the * pages in the sent string are not valid, they will cause * either a GP fault (%DS seg bounds) or a page fault. */ testl %ecx,%ecx#ifdef FAST_IPC jz fast_ipc#else jz kern_seg_load /* no sent string */#endif addl %esi,%ecx /* this is upper exclusive bound. */send_loop:L_ipc_send_str: cmpb $0,(%esi) /* VIA USER DS */ addl $EROS_PAGE_SIZE,%esi cmpl %esi,%ecx ja send_loopL_ipc_send_str_end: cmpb $0,-1(%ecx) /* VIA USER DS */ #endif /* ASM_VALIDATE_STRINGS */#ifdef FAST_IPCfast_ipc: mov $0x10,%cx mov %cx,%ds mov %cx,%es#ifdef FAST_IPC_STATS incl EXT(nFastIpcPath)#endif /* We are checking to see if this is a fast gate jump. To qualify for fast-path handling, the invocation must be a CALL or SEND on a /start/, /resume/, or /fault/ key. If the key is a /fault/ key, the invocation must be a restart invocation (i.e. we are resuming the recipient). Finally, the invoked key must be prepared. */#ifdef ASM_VALIDATE_STRINGS /* If we did a fast string validation, then we need to check * this here, because we were unable to merge the test above. */ cmpl $2,FX_OFF_invType(%eax) jae Standard_Gate_Path /* wrong invocation type */#endif /* If processes are waiting for this one to become available, take the long way */ cmpl $0,PR_OFF_stallQ-PR_OFF_FIXREGS(%eax) jnz Standard_Gate_Path movzbl FX_OFF_invKey(%eax),%ecx shll $4,%ecx /* ndx * key size */ leal PR_OFF_keyregs-PR_OFF_FIXREGS(%eax,%ecx),%ecx /* %ecx now holds address of invoked capability. Check prepared bit and key type */ movl (%ecx),%ebx testl $0x80,%ebx jz Standard_Gate_Path /* not prepared */#ifdef FAST_IPC_STATS incl EXT(nFastIpcPrepared)#endif#if defined(FAST_IPC_REDSEG) /* Regrettably, it is important to handle red segments containing gate keys in the fast path, as this case encompases the space bank, which is highly performance sensitive. Also, many programs use red segments as indirection objects. 
We therefore check the key to see if it is a prepared red segment key here, and if so, branch out of line to handle that case. STALE (applied while there were still Capability Pages): A prepared red SEGMENT key will have a keyType field of 0x94, a subtype field of 0x00, and a keyInfo field of 0x1: 0x00010094 A prepared red NODE key will have a keyType field of 0x90, a subtype field of 0x00, and a keyInfo field of 0x1: 0x00010090 CORRECT (now that Capability Pages are gone): A prepared red SEGMENT key will have a keyType field of 0x90, a subtype field of 0x00, and a keyInfo field of 0x1: 0x00010090 A prepared red NODE key will have a keyType field of 0x8C, a subtype field of 0x00, and a keyInfo field of 0x1: 0x0001008C Neither a start nor a resume key will have bit 0x10 set, so we simply do a bit check here and sort things out further in the maybe_red_seg path. */ movl $0,redflags /* until proven otherwise */ /* This test does NOT have to correctly identify red segment keys. It must branch for all red keys, it must NOT branch for gate keys. */ cmpl $0x1008c,%ebx jae maybe_red_seg examine_invoked_key:#endif movl 12(%ecx),%ebp /* pointer to recipient process, IFF this is a gate key */ /* %bl (keyType) now holds 0 ttttt XX where ttttt is the type of the key. We want a ttttt value <= 1, but we also need to do the fault key check. It proves to be cheaper to do the shift, since we need to match against the recipient state anyway. */ cmpb $0x84,%bl /* prepared rsm key */ ja Standard_Gate_Path /* wrong key type */#if 0 je Standard_Gate_Path /* do not handle rsm keys */#endif#if 0 jb Standard_Gate_Path /* do not handle start keys */#endif#ifdef FAST_IPC_STATS incl EXT(nFastIpcFast) cmpb $0x84,%bl /* prepared rsm key */#endif je recip_runstate_valid /* If the key is a start key, we need to verify that the recipient is available. If it is a prepared resume key, it is not possible for the recipient to be in any state other than waiting. 
*/ cmpb $RS_Available,PR_OFF_runState(%ebp) jne Standard_Gate_Path /* wrong recipient state */ recip_runstate_valid: /* Recipient mapping table might have been blasted, in which case we need to take the long path */ cmpl $0,PR_OFF_MappingTable(%ebp) jz Standard_Gate_Path /* Recipient is in proper state, but may not be runnable. */ cmpl $0,PR_OFF_saveArea(%ebp) jz Standard_Gate_Path /* recipient is faulted or hazarded */ /* recipient runstate is okay, but recipient string could still be bogus and/or lead to page faults. If this is a non-faulting restart key, we can bypass all of that. %bx is now of the form subtype : main-type so we can just check that. */ cmpw $0x0084,%bx jbe not_fault_key#if 0 jmp dump_state#endif /* It's a fault key. See if order code is RC_OK, indicating a restart */ cmpl $0,FX_OFF_EAX(%eax) jnz Standard_Gate_Path /* not restart */ /* fault key dest requires %EAX pointing to src process */ subl $PR_OFF_FIXREGS,%eax
/*
 * (extraction residue removed: the source page appended its code-viewer
 * keyboard-shortcut help here — copy Ctrl+C, search Ctrl+F, fullscreen
 * F11, theme Ctrl+Shift+D, show shortcuts ?, font size Ctrl+= / Ctrl+-.
 * This text was never part of interrupt.s.)
 */