📄 process.c
			goto out;

		idx = info.entry_number;
		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
			goto out;

		desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}

	err = 0;
 out:
	if (err && p->thread.io_bitmap_ptr)
		kfree(p->thread.io_bitmap_ptr);
	return err;
}

/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	int i;

/* changed the size calculations - should hopefully work better. lbt */
	dump->magic = CMAGIC;
	dump->start_code = 0;
	dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;
	for (i = 0; i < 8; i++)
		dump->u_debugreg[i] = current->thread.debugreg[i];

	if (dump->start_stack < TASK_SIZE)
		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

	dump->regs.ebx = regs->ebx;
	dump->regs.ecx = regs->ecx;
	dump->regs.edx = regs->edx;
	dump->regs.esi = regs->esi;
	dump->regs.edi = regs->edi;
	dump->regs.ebp = regs->ebp;
	dump->regs.eax = regs->eax;
	dump->regs.ds = regs->xds;
	dump->regs.es = regs->xes;
	savesegment(fs,dump->regs.fs);
	savesegment(gs,dump->regs.gs);
	dump->regs.orig_eax = regs->orig_eax;
	dump->regs.eip = regs->eip;
	dump->regs.cs = regs->xcs;
	dump->regs.eflags = regs->eflags;
	dump->regs.esp = regs->esp;
	dump->regs.ss = regs->xss;

	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
}

/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs ptregs;

	ptregs = *(struct pt_regs *)
		((unsigned long)tsk->thread_info+THREAD_SIZE - sizeof(ptregs));
	ptregs.xcs &= 0xffff;
	ptregs.xds &= 0xffff;
	ptregs.xes &= 0xffff;
	ptregs.xss &= 0xffff;

	elf_core_copy_regs(regs, &ptregs);

	return 1;
}

/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread,register) \
	__asm__("movl %0,%%db" #register \
		: /* no output */ \
		:"r" (thread->debugreg[register]))

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPU's, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %eax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = init_tss + cpu;

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	__unlazy_fpu(prev_p);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_esp0(tss, next);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Save away %fs and %gs. No need to save %es and %ds, as
	 * those are always kernel segments while inside the kernel.
	 */
	asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
	asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));

	/*
	 * Restore %fs and %gs if needed.
	 */
	if (unlikely(prev->fs | prev->gs | next->fs | next->gs)) {
		loadsegment(fs, next->fs);
		loadsegment(gs, next->gs);
	}

	/*
	 * Now maybe reload the debug registers
	 */
	if (unlikely(next->debugreg[7])) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
		if (next->io_bitmap_ptr) {
			/*
			 * 4 cachelines copy ... not good, but not that
			 * bad either. Anyone got something better?
			 * This only affects processes which use ioperm().
			 * [Putting the TSSs into 4k-tlb mapped regions
			 *  and playing VM tricks to switch the IO bitmap
			 *  is not really acceptable.]
			 */
			memcpy(tss->io_bitmap, next->io_bitmap_ptr,
				IO_BITMAP_BYTES);
			tss->io_bitmap_base = IO_BITMAP_OFFSET;
		} else
			/*
			 * a bitmap offset pointing outside of the TSS limit
			 * causes a nicely controllable SIGSEGV if a process
			 * tries to use a port IO instruction. The first
			 * sys_ioperm() call sets up the bitmap properly.
			 */
			tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
	}

	return prev_p;
}

asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.ebx;
	newsp = regs.ecx;
	parent_tidptr = (int __user *)regs.edx;
	child_tidptr = (int __user *)regs.edi;
	if (!newsp)
		newsp = regs.esp;
	return do_fork(clone_flags & ~CLONE_IDLETASK, newsp, &regs, 0, parent_tidptr, child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char * filename;

	filename = getname((char __user *) regs.ebx);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			(char __user * __user *) regs.ecx,
			(char __user * __user *) regs.edx,
			&regs);
	if (error == 0) {
		current->ptrace &= ~PT_DTRACE;
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
	putname(filename);
out:
	return error;
}

#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ebp, esp, eip;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)p->thread_info;
	esp = p->thread.esp;
	if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
		return 0;
	/* include/asm-i386/system.h:switch_to() pushes ebp last. */
	ebp = *(unsigned long *) esp;
	do {
		if (ebp < stack_page || ebp > top_ebp+stack_page)
			return 0;
		eip = *(unsigned long *) (ebp+4);
		if (!in_sched_functions(eip))
			return eip;
		ebp = *(unsigned long *) ebp;
	} while (count++ < 16);
	return 0;
}

/*
 * sys_alloc_thread_area: get a yet unused TLS descriptor index.
 */
static int get_free_idx(void)
{
	struct thread_struct *t = &current->thread;
	int idx;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (desc_empty(t->tls_array + idx))
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}

/*
 * Set a given TLS descriptor:
 */
asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
{
	struct thread_struct *t = &current->thread;
	struct user_desc info;
	struct desc_struct *desc;
	int cpu, idx;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;
	idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and
	 * allocate an empty descriptor:
	 */
	if (idx == -1) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;

	/*
	 * We must not get preempted while modifying the TLS.
	 */
	cpu = get_cpu();

	if (LDT_empty(&info)) {
		desc->a = 0;
		desc->b = 0;
	} else {
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}
	load_TLS(t, cpu);

	put_cpu();

	return 0;
}

/*
 * Get the current Thread-Local Storage area:
 */

#define GET_BASE(desc) ( \
	(((desc)->a >> 16) & 0x0000ffff) | \
	(((desc)->b << 16) & 0x00ff0000) | \
	( (desc)->b        & 0xff000000)   )

#define GET_LIMIT(desc) ( \
	((desc)->a & 0x0ffff) | \
	 ((desc)->b & 0xf0000) )

#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)

asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
{
	struct user_desc info;
	struct desc_struct *desc;
	int idx;

	if (get_user(idx, &u_info->entry_number))
		return -EFAULT;
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

	info.entry_number = idx;
	info.base_addr = GET_BASE(desc);
	info.limit = GET_LIMIT(desc);
	info.seg_32bit = GET_32BIT(desc);
	info.contents = GET_CONTENTS(desc);
	info.read_exec_only = !GET_WRITABLE(desc);
	info.limit_in_pages = GET_LIMIT_PAGES(desc);
	info.seg_not_present = !GET_PRESENT(desc);
	info.useable = GET_USEABLE(desc);

	if (copy_to_user(u_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}
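
A note on sys_clone() above: it pulls clone_flags, the new stack pointer and the TID pointers straight out of the trapped registers, and falls back to the parent's %esp when newsp is 0. The sketch below reaches the same entry point from user space through the glibc clone() wrapper, which (unlike the raw syscall) insists on an explicit child stack; the 64 KiB stack size and the child_fn name are arbitrary choices for the demo, not anything the kernel requires.

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

static int child_fn(void *arg)
{
	printf("child sees flag=%d\n", *(int *)arg);	/* shares the parent's VM */
	return 0;
}

int main(void)
{
	const size_t stack_size = 64 * 1024;		/* arbitrary demo size */
	char *stack = malloc(stack_size);
	int flag = 42;

	if (!stack)
		return 1;
	/* the stack grows down on x86, so pass the top of the allocation */
	pid_t pid = clone(child_fn, stack + stack_size, CLONE_VM | SIGCHLD, &flag);
	if (pid < 0) {
		perror("clone");
		return 1;
	}
	waitpid(pid, NULL, 0);
	free(stack);
	return 0;
}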
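
sys_set_thread_area() and sys_get_thread_area() are the kernel side of the set_thread_area(2)/get_thread_area(2) syscalls. Below is a minimal sketch of the round trip, assuming a 32-bit x86 build (the syscall numbers only exist there); passing entry_number = -1 exercises the get_free_idx() path above, and the base/limit values are arbitrary demo inputs.

#include <asm/ldt.h>		/* struct user_desc */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct user_desc in, out;

	memset(&in, 0, sizeof(in));
	in.entry_number = -1;		/* let the kernel pick a free TLS slot */
	in.base_addr = 0x1000;		/* arbitrary demo base */
	in.limit = 0xfffff;
	in.seg_32bit = 1;
	in.limit_in_pages = 1;
	in.useable = 1;

	if (syscall(SYS_set_thread_area, &in) != 0) {
		perror("set_thread_area");
		return 1;
	}
	printf("kernel allocated GDT TLS entry %u\n", in.entry_number);

	/* read the descriptor back; the kernel decodes it with GET_BASE()/GET_LIMIT() */
	memset(&out, 0, sizeof(out));
	out.entry_number = in.entry_number;
	if (syscall(SYS_get_thread_area, &out) != 0) {
		perror("get_thread_area");
		return 1;
	}
	printf("base=%#x limit=%#x seg_32bit=%u\n",
	       out.base_addr, out.limit, out.seg_32bit);
	return 0;
}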
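
The GET_* macros undo the i386 descriptor layout that LDT_entry_a()/LDT_entry_b() build: the 32-bit base is scattered across both descriptor words and the 20-bit limit is split 16/4 between them. The standalone check below round-trips just those two fields with the same masks; pack() is a hypothetical helper written for this demo, not kernel code, and it leaves the access/type bits zero since only base and limit are inspected.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct demo_desc { uint32_t a, b; };

/* scatter base/limit into the two descriptor words the way the GDT expects */
static struct demo_desc pack(uint32_t base, uint32_t limit)
{
	struct demo_desc d;
	d.a = (limit & 0x0ffff) | (base << 16);			/* limit 15..0 | base 15..0  */
	d.b = ((base >> 16) & 0xff) | (limit & 0xf0000) |	/* base 23..16 | limit 19..16 */
	      (base & 0xff000000);				/* base 31..24 */
	return d;
}

/* same masks as GET_BASE()/GET_LIMIT() in the listing above */
static uint32_t get_base(struct demo_desc d)
{
	return ((d.a >> 16) & 0x0000ffff) | ((d.b << 16) & 0x00ff0000) | (d.b & 0xff000000);
}

static uint32_t get_limit(struct demo_desc d)
{
	return (d.a & 0x0ffff) | (d.b & 0xf0000);
}

int main(void)
{
	struct demo_desc d = pack(0x12345678, 0xabcde);

	assert(get_base(d) == 0x12345678);
	assert(get_limit(d) == 0xabcde);
	printf("base=%#x limit=%#x\n", get_base(d), get_limit(d));
	return 0;
}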