/*
 * process.c — x86-64 process/thread management.
 * (The original header line "📄 process.c" and the "字号:" [font size]
 * label below it were web code-viewer chrome, not part of the source.)
 */
{
        struct task_struct *tsk = current;

        /* Clear all eight per-task debug-register save slots. */
        memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
        /*
         * Forget coprocessor state..
         */
        clear_fpu(tsk);
        tsk->used_math = 0;
}

/*
 * Sanity check on thread teardown: by this point the dead task must no
 * longer own an LDT; if it does, warn loudly and stop.
 */
void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
                void * ldt = dead_task->mm->context.segments;

                // temporary debugging check
                if (ldt) {
                        printk("WARNING: dead process %8s still has LDT? <%p>\n",
                               dead_task->comm, ldt);
                        BUG();
                }
        }
}

/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
{
        struct mm_struct * old_mm;
        void *old_ldt, *ldt;

        ldt = NULL;
        old_mm = current->mm;
        if (old_mm && (old_ldt = old_mm->context.segments) != NULL) {
                /*
                 * Completely new LDT, we initialize it from the parent:
                 */
                ldt = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
                if (!ldt)
                        /* NOTE(review): on allocation failure the child
                           silently gets no LDT (segments stays NULL) —
                           only this warning is emitted. */
                        printk(KERN_WARNING "ldt allocation failed\n");
                else
                        memcpy(ldt, old_ldt, LDT_ENTRIES*LDT_ENTRY_SIZE);
        }
        new_mm->context.segments = ldt;
        new_mm->context.cpuvalid = 0UL;
        return;
}

/*
 * Set up the thread state of a newly forked child: a pt_regs frame is
 * placed at the top of the child's kernel stack and rax is zeroed so
 * the child observes a 0 return from fork().
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
                unsigned long unused,
                struct task_struct * p, struct pt_regs * regs)
{
        struct pt_regs * childregs;
        struct task_struct *me = current;

        /* The child's register frame sits just below the top of its stack. */
        childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p)) - 1;

        *childregs = *regs;

        childregs->rax = 0;     /* child sees fork() return 0 */
        childregs->rsp = rsp;
        if (rsp == ~0) {
                /* ~0 sentinel: no user stack supplied — presumably a
                   kernel thread; run on the child's own kernel frame. */
                childregs->rsp = (unsigned long)childregs;
        }

        p->thread.rsp = (unsigned long) childregs;
        p->thread.rsp0 = (unsigned long) (childregs+1);
        p->thread.userrsp = current->thread.userrsp;

        /* First kernel rip of the child. */
        p->thread.rip = (unsigned long) ret_from_fork;

        /* Inherit 64-bit fs/gs bases and snapshot the live selectors. */
        p->thread.fs = me->thread.fs;
        p->thread.gs = me->thread.gs;
        asm("movl %%gs,%0" : "=m" (p->thread.gsindex));
        asm("movl %%fs,%0" : "=m" (p->thread.fsindex));
        asm("movl %%es,%0" : "=m" (p->thread.es));
        asm("movl %%ds,%0" : "=m" (p->thread.ds));

        /* Flush lazy FPU state into current before copying it. */
        unlazy_fpu(current);
        p->thread.i387 = current->thread.i387;

        if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
                /* Parent uses ioperm(): the child gets its own copy of
                   the I/O permission bitmap. */
                p->thread.io_bitmap_ptr =
kmalloc((IO_BITMAP_SIZE+1)*4, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr)
                        return -ENOMEM;
                memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
                       (IO_BITMAP_SIZE+1)*4);
        }

        return 0;
}

/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread,register) \
        set_debug(thread->debugreg[register], register)

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 */
struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread, *next = &next_p->thread;
        struct tss_struct *tss = init_tss + smp_processor_id();

        /* Flush lazy FPU state of the outgoing task before switching. */
        unlazy_fpu(prev_p);

        /*
         * Reload rsp0, LDT and the page table pointer:
         */
        tss->rsp0 = next->rsp0;

        /*
         * Switch DS and ES.
         * (Reload only if either the outgoing or incoming selector is
         * non-zero — zero-to-zero needs no segment load.)
         */
        asm volatile("movl %%es,%0" : "=m" (prev->es));
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        asm volatile ("movl %%ds,%0" : "=m" (prev->ds));
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        /*
         * Switch FS and GS.
         */
        {
                unsigned fsindex;
                asm volatile("movl %%fs,%0" : "=g" (fsindex));
                /* segment register != 0 always requires a reload.
                   also reload when it has changed.
                   when prev process used 64bit base always reload
                   to avoid an information leak. */
                if (unlikely((fsindex | next->fsindex) || prev->fs))
                        loadsegment(fs, next->fsindex);
                /* check if the user changed the selector
                   if yes clear 64bit base. */
                if (unlikely(fsindex != prev->fsindex))
                        prev->fs = 0;
                /* when next process has a 64bit base use it */
                if (next->fs)
                        wrmsrl(MSR_FS_BASE, next->fs);
                prev->fsindex = fsindex;
        }
        {
                /* Same dance for gs / KERNEL_GS_BASE. */
                unsigned gsindex;
                asm volatile("movl %%gs,%0" : "=g" (gsindex));
                if (unlikely((gsindex | next->gsindex) || prev->gs))
                        load_gs_index(next->gsindex);
                if (unlikely(gsindex != prev->gsindex))
                        prev->gs = 0;
                if (next->gs)
                        wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
                prev->gsindex = gsindex;
        }

        /*
         * Switch the PDA context.
 */
        prev->userrsp = read_pda(oldrsp);
        write_pda(oldrsp, next->userrsp);
        write_pda(pcurrent, next_p);
        write_pda(kernelstack,
                  (unsigned long)next_p + THREAD_SIZE - PDA_STACKOFFSET);

        /*
         * Now maybe reload the debug registers
         * (only when the incoming task has dr7 set, i.e. actually uses
         * hardware breakpoints/watchpoints).
         */
        if (unlikely(next->debugreg[7])) {
                loaddebug(next, 0);
                loaddebug(next, 1);
                loaddebug(next, 2);
                loaddebug(next, 3);
                /* no 4 and 5 */
                loaddebug(next, 6);
                loaddebug(next, 7);
        }

        /*
         * Handle the IO bitmap
         */
        if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
                if (next->io_bitmap_ptr) {
                        /*
                         * 4 cachelines copy ... not good, but not that
                         * bad either. Anyone got something better?
                         * This only affects processes which use ioperm().
                         * [Putting the TSSs into 4k-tlb mapped regions
                         * and playing VM tricks to switch the IO bitmap
                         * is not really acceptable.]
                         */
                        memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                               IO_BITMAP_SIZE*sizeof(u32));
                        tss->io_map_base = IO_BITMAP_OFFSET;
                } else {
                        /*
                         * a bitmap offset pointing outside of the TSS limit
                         * causes a nicely controllable SIGSEGV if a process
                         * tries to use a port IO instruction. The first
                         * sys_ioperm() call sets up the bitmap properly.
                         */
                        tss->io_map_base = INVALID_IO_BITMAP_OFFSET;
                }
        }

        return prev_p;
}

/*
 * sys_execve() executes a new program.
*/asmlinkage long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs){ long error; char * filename; filename = getname(name); error = PTR_ERR(filename); if (IS_ERR(filename)) return error; error = do_execve(filename, argv, envp, ®s); if (error == 0) current->ptrace &= ~PT_DTRACE; putname(filename); return error;}void set_personality_64bit(void){ /* inherit personality from parent */ /* Make sure to be in 64bit mode */ current->thread.flags = 0;}asmlinkage long sys_fork(struct pt_regs regs){ return do_fork(SIGCHLD, regs.rsp, ®s, 0, NULL);}asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, struct pt_regs regs){ if (!newsp) newsp = regs.rsp; return do_fork(clone_flags, newsp, ®s, 0, NULL);}/* * This is trivial, and on the face of it looks like it * could equally well be done in user mode. * * Not so, for quite unobvious reasons - register pressure. * In user mode vfork() cannot have a stack frame, and if * done by calling the "clone()" system call directly, you * do not have enough call-clobbered registers to hold all * the information you need. */asmlinkage long sys_vfork(struct pt_regs regs){ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.rsp, ®s, 0, NULL);}/* * These bracket the sleeping functions.. 
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)

/*
 * Report where a sleeping task is blocked ("wait channel"): walk the
 * task's saved frame-pointer chain and return the first return address
 * that lies outside the scheduler's code range.  Gives up after 16
 * frames, or as soon as a frame pointer leaves the task's stack area.
 */
unsigned long get_wchan(struct task_struct *p)
{
        u64 fp,rip;
        int count = 0;

        if (!p || p == current || p->state==TASK_RUNNING)
                return 0;
        /* The saved rsp must lie within the task's own stack region. */
        if (p->thread.rsp < (u64)p || p->thread.rsp > (u64)p + THREAD_SIZE)
                return 0;
        fp = *(u64 *)(p->thread.rsp);
        do {
                if (fp < (unsigned long)p ||
                    fp > (unsigned long)p+THREAD_SIZE)
                        return 0;
                rip = *(u64 *)(fp+8);   /* return address above saved rbp */
                if (rip < first_sched || rip >= last_sched)
                        return rip;
                fp = *(u64 *)fp;        /* follow the saved frame pointer */
        } while (count++ < 16);
        return 0;
}
#undef last_sched
#undef first_sched

/*
 * arch_prctl(): get/set the 64-bit fs/gs segment bases of the current
 * task via the FS_BASE / KERNEL_GS_BASE MSRs.  For the GET cases, addr
 * is a user pointer the result is stored through.
 */
asmlinkage long sys_arch_prctl(int code, unsigned long addr)
{
        int ret = 0;
        unsigned long tmp;

        switch (code) {
        case ARCH_SET_GS:
#ifndef GS_SWITCH_WORKING
                /* Disabled for now because we still have one unexplained
                   bug in the context switch for this. */
                return -EINVAL;
#else
                if (addr >= TASK_SIZE)
                        return -EPERM;
                /* Save the old selector, then zero %gs so the MSR base
                   takes effect. */
                asm volatile("movw %%gs,%0" : "=g" (current->thread.gsindex));
                asm volatile("movl %0,%%gs" :: "r" (0));
                current->thread.gs = addr;
                ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
                break;
#endif
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs. */
                if (addr >= TASK_SIZE)
                        return -EPERM;
                asm volatile("movw %%fs,%0" : "=g" (current->thread.fsindex));
                asm volatile("movl %0,%%fs" :: "r" (0));
                current->thread.fs = addr;
                ret = checking_wrmsrl(MSR_FS_BASE, addr);
                break;

                /* Returned value may not be correct when the user changed
                   fs/gs */
        case ARCH_GET_FS:
                rdmsrl(MSR_FS_BASE, tmp);
                ret = put_user(tmp, (unsigned long *)addr);
                break;
        case ARCH_GET_GS:
                rdmsrl(MSR_KERNEL_GS_BASE, tmp);
                ret = put_user(tmp, (unsigned long *)addr);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}
/*
 * (The following text was keyboard-shortcut help from the web code
 * viewer this file was copied out of — not part of the source:
 * Copy code: Ctrl+C; Search code: Ctrl+F; Fullscreen: F11;
 * Toggle theme: Ctrl+Shift+D; Show shortcuts: ?;
 * Increase font size: Ctrl+=; Decrease font size: Ctrl+-.)
 */