📄 process.c
	set_ti_thread_flag(p->thread_info, TIF_FORK);
	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	asm("movl %%gs,%0" : "=m" (p->thread.gsindex));
	asm("movl %%fs,%0" : "=m" (p->thread.fsindex));
	asm("movl %%es,%0" : "=m" (p->thread.es));
	asm("movl %%ds,%0" : "=m" (p->thread.ds));

	if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr)
			return -ENOMEM;
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
		       IO_BITMAP_BYTES);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = ia32_child_tls(p, childregs);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr)
		kfree(p->thread.io_bitmap_ptr);
	return err;
}

/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread,r) set_debug(thread->debugreg ## r, r)

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 */
struct task_struct *__switch_to(struct task_struct *prev_p,
				struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = init_tss + cpu;

	unlazy_fpu(prev_p);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	tss->rsp0 = next->rsp0;

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	asm volatile("movl %%es,%0" : "=m" (prev->es));
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	asm volatile("movl %%ds,%0" : "=m" (prev->ds));
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	load_TLS(next, cpu);

	/*
	 * Switch FS and GS.
	 */
	{
		unsigned fsindex;
		asm volatile("movl %%fs,%0" : "=g" (fsindex));
		/* segment register != 0 always requires a reload.
		   also reload when it has changed.
		   when prev process used 64bit base always reload
		   to avoid an information leak. */
		if (unlikely(fsindex | next->fsindex | prev->fs)) {
			loadsegment(fs, next->fsindex);
			/* check if the user used a selector != 0
			 * if yes clear 64bit base, since overloaded base
			 * is always mapped to the Null selector */
			if (fsindex)
				prev->fs = 0;
		}
		/* when next process has a 64bit base use it */
		if (next->fs)
			wrmsrl(MSR_FS_BASE, next->fs);
		prev->fsindex = fsindex;
	}
	{
		unsigned gsindex;
		asm volatile("movl %%gs,%0" : "=g" (gsindex));
		if (unlikely(gsindex | next->gsindex | prev->gs)) {
			load_gs_index(next->gsindex);
			if (gsindex)
				prev->gs = 0;
		}
		if (next->gs)
			wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
		prev->gsindex = gsindex;
	}

	/*
	 * Switch the PDA context.
	 */
	prev->userrsp = read_pda(oldrsp);
	write_pda(oldrsp, next->userrsp);
	write_pda(pcurrent, next_p);
	write_pda(kernelstack, (unsigned long)next_p->thread_info +
		  THREAD_SIZE - PDA_STACKOFFSET);

	/*
	 * Now maybe reload the debug registers
	 */
	if (unlikely(next->debugreg7)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	/*
	 * Handle the IO bitmap
	 */
	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
		if (next->io_bitmap_ptr) {
			/*
			 * 2 cachelines copy ... not good, but not that
			 * bad either. Anyone got something better?
			 * This only affects processes which use ioperm().
			 */
			memcpy(tss->io_bitmap, next->io_bitmap_ptr,
			       IO_BITMAP_BYTES);
			tss->io_bitmap_base = IO_BITMAP_OFFSET;
		} else {
			/*
			 * a bitmap offset pointing outside of the TSS limit
			 * causes a nicely controllable SIGSEGV if a process
			 * tries to use a port IO instruction. The first
			 * sys_ioperm() call sets up the bitmap properly.
			 */
			tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		}
	}

	return prev_p;
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage long sys_execve(char __user *name, char __user * __user *argv,
			   char __user * __user *envp, struct pt_regs regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, &regs);
	if (error == 0)
		current->ptrace &= ~PT_DTRACE;
	putname(filename);
	return error;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
}

asmlinkage long sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.rsp, &regs, 0, NULL, NULL);
}

asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
			  void __user *parent_tid, void __user *child_tid,
			  struct pt_regs regs)
{
	if (!newsp)
		newsp = regs.rsp;
	return do_fork(clone_flags & ~CLONE_IDLETASK, newsp, &regs, 0,
		       parent_tid, child_tid);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.rsp, &regs, 0,
		       NULL, NULL);
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, rip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)p->thread_info;
	if (p->thread.rsp < stack || p->thread.rsp > stack + THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.rsp);
	do {
		if (fp < (unsigned long)stack ||
		    fp > (unsigned long)stack + THREAD_SIZE)
			return 0;
		rip = *(u64 *)(fp + 8);
		if (!in_sched_functions(rip))
			return rip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE)
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE)
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				asm volatile("movl %0,%%fs" :: "r" (FS_TLS_SEL));
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				asm volatile("movl %0,%%fs" :: "r" (0));
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit) {
			rdmsrl(MSR_FS_BASE, base);
		} else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}

/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *pp, ptregs;

	pp = (struct pt_regs *)(tsk->thread.rsp0);
	--pp;

	ptregs = *pp;
	ptregs.cs &= 0xffff;
	ptregs.ss &= 0xffff;

	elf_core_copy_regs(regs, &ptregs);

	return 1;
}
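
The IO-bitmap handling in __switch_to() above only runs for processes that have called ioperm(). A minimal userspace sketch to trigger that path, assuming an x86-64 Linux host; the file name ioperm_demo.c and port 0x378 are illustrative only, and the program needs root (CAP_SYS_RAWIO):

/* ioperm_demo.c -- sketch (not part of process.c): a process that
 * calls ioperm() gets a per-thread IO bitmap, which __switch_to()
 * above copies into the TSS on every switch into this task. */
#include <stdio.h>
#include <sys/io.h>

int main(void)
{
	/* Request access to 8 ports starting at 0x378 (the classic
	 * parallel-port base; purely illustrative). */
	if (ioperm(0x378, 8, 1) != 0) {
		perror("ioperm");
		return 1;
	}
	/* The first ioperm() call allocated thread.io_bitmap_ptr;
	 * IN/OUT on those ports no longer fault in this process. */
	ioperm(0x378, 8, 0);	/* drop access again */
	return 0;
}

Every other process keeps io_bitmap_base at INVALID_IO_BITMAP_OFFSET, so a port IO instruction there takes the controlled SIGSEGV described in the comment above.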
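
do_arch_prctl() above is reachable from userspace through the arch_prctl(2) system call. A minimal sketch of the round trip, assuming an x86-64 Linux host with <asm/prctl.h>; the file name and the 0x1000 test base are illustrative only:

/* arch_prctl_demo.c -- sketch (not part of process.c) exercising
 * the ARCH_GET_FS / ARCH_SET_GS / ARCH_GET_GS paths above. */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>		/* ARCH_SET_GS, ARCH_GET_FS, ... */

int main(void)
{
	unsigned long fsbase = 0, gsbase = 0;

	/* ARCH_GET_FS stores the FS base at 'addr' -- the put_user()
	 * branch above.  glibc keeps its TLS block there. */
	if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase) != 0) {
		perror("ARCH_GET_FS");
		return 1;
	}
	printf("FS base: %#lx\n", fsbase);

	/* GS is unused by 64-bit glibc, so it is safe to set here.
	 * A base <= 0xffffffff takes the set_32bit_tls()/GDT fast
	 * path in the kernel code above. */
	if (syscall(SYS_arch_prctl, ARCH_SET_GS, 0x1000ul) != 0) {
		perror("ARCH_SET_GS");
		return 1;
	}
	if (syscall(SYS_arch_prctl, ARCH_GET_GS, &gsbase) != 0) {
		perror("ARCH_GET_GS");
		return 1;
	}
	printf("GS base after ARCH_SET_GS: %#lx\n", gsbase);
	return 0;
}

A base above 0xffffffff would instead clear gsindex and go through MSR_KERNEL_GS_BASE; ARCH_GET_GS reads back whichever representation is active, exactly mirroring the two branches in do_arch_prctl().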