📄 process.c
字号:
/*
 * NOTE(review): this chunk begins in the middle of do_copy_regs() — the
 * new-unwind coredump callback.  Its signature and the declarations of
 * sp, pt, krbs, ndirty, ar_bsp, addr, val, i, mask, nat, nat_bits and ip
 * are above this view.  It fills an elf_gregset_t (dst) with user-level
 * register state by unwinding back to the user frame.
 */
	memset(dst, 0, sizeof(elf_gregset_t));	/* don't leak any kernel bits to user-level */

	if (unw_unwind_to_user(info) < 0)
		return;

	unw_get_sp(info, &sp);
	/* pt_regs sits 16 bytes above the user-level sp of the unwound frame */
	pt = (struct pt_regs *) (sp + 16);

	/* kernel register backing store for this task */
	krbs = (unsigned long *) current + IA64_RBS_OFFSET/8;

	/* loadrs>>19 = number of bytes of dirty partition (RSE) still on the kernel RBS */
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
	/* user-visible bsp = user bspstore advanced past the dirty registers */
	ar_bsp = (unsigned long) ia64_rse_skip_regs((long *) pt->ar_bspstore, ndirty);

	/*
	 * Write portion of RSE backing store living on the kernel
	 * stack to the VM of the process.
	 */
	for (addr = pt->ar_bspstore; addr < ar_bsp; addr += 8)
		if (ia64_peek(pt, current, addr, &val) == 0)
			access_process_vm(current, addr, &val, sizeof(val), 1);

	/*
	 * coredump format:
	 *	r0-r31
	 *	NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *	predicate registers (p0-p63)
	 *	b0-b7
	 *	ip cfm user-mask
	 *	ar.rsc ar.bsp ar.bspstore ar.rnat
	 *	ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* r0 is zero */
	for (i = 1, mask = (1UL << i); i < 32; ++i) {
		unw_get_gr(info, i, &dst[i], &nat);
		if (nat)
			nat_bits |= mask;
		mask <<= 1;
	}
	dst[32] = nat_bits;
	unw_get_pr(info, &dst[33]);

	for (i = 0; i < 8; ++i)
		unw_get_br(info, i, &dst[34 + i]);

	unw_get_rp(info, &ip);
	dst[42] = ip + ia64_psr(pt)->ri;	/* ip + restart instruction (slot) number */
	dst[43] = pt->cr_ifs & 0x3fffffffff;	/* cfm = low 38 bits of cr.ifs */
	dst[44] = pt->cr_ipsr & IA64_PSR_UM;	/* user mask bits of psr */

	unw_get_ar(info, UNW_AR_RSC, &dst[45]);
	/*
	 * For bsp and bspstore, unw_get_ar() would return the kernel
	 * addresses, but we need the user-level addresses instead:
	 */
	dst[46] = ar_bsp;
	dst[47] = pt->ar_bspstore;
	unw_get_ar(info, UNW_AR_RNAT, &dst[48]);
	unw_get_ar(info, UNW_AR_CCV, &dst[49]);
	unw_get_ar(info, UNW_AR_UNAT, &dst[50]);
	unw_get_ar(info, UNW_AR_FPSR, &dst[51]);
	dst[52] = pt->ar_pfs;	/* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */
	unw_get_ar(info, UNW_AR_LC, &dst[53]);
	unw_get_ar(info, UNW_AR_EC, &dst[54]);
}

/*
 * Unwind callback: fill *arg (an elf_fpregset_t) with the user-level
 * floating-point register state for a core dump.
 */
void
do_dump_fpu (struct unw_frame_info *info, void *arg)
{
	elf_fpreg_t *dst = arg;
	int i;

	memset(dst, 0, sizeof(elf_fpregset_t));	/* don't leak any "random" bits */

	if (unw_unwind_to_user(info) < 0)
		return;

	/* f0 is 0.0, f1 is 1.0 */
	for (i = 2; i < 32; ++i)
		unw_get_fr(info, i, dst + i);

	/* f32-f127 (fph) are kept in thread state; only copy if they are valid */
	ia64_flush_fph(current);
	if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0)
		memcpy(dst + 32, current->thread.fph, 96*16);
}

#endif /* CONFIG_IA64_NEW_UNWIND */

/*
 * Copy the user-level register state of PT into DST for an ELF core
 * dump.  With the new unwinder this delegates to do_copy_regs(); the
 * old path reads pt_regs plus the preceding switch_stack directly.
 */
void
ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
{
#ifdef CONFIG_IA64_NEW_UNWIND
	unw_init_running(do_copy_regs, dst);
#else
	/* switch_stack is saved immediately below pt_regs on the kernel stack */
	struct switch_stack *sw = ((struct switch_stack *) pt) - 1;
	unsigned long ar_ec, cfm, ar_bsp, ndirty, *krbs, addr;

	/*
	 * NOTE(review): ar_ec and cfm are computed here but never used below
	 * (dst[43] reads pt->cr_ifs directly and dst[54] recomputes the ec
	 * field).  Possibly dst[43] was meant to be cfm — verify against the
	 * new-unwind path, which masks cr_ifs to 38 bits.
	 */
	ar_ec = (sw->ar_pfs >> 52) & 0x3f;

	cfm = pt->cr_ifs & ((1UL << 63) - 1);
	if ((pt->cr_ifs & (1UL << 63)) == 0) {
		/* if cr_ifs isn't valid, we got here through a syscall or a break */
		cfm = sw->ar_pfs & ((1UL << 38) - 1);
	}

	krbs = (unsigned long *) current + IA64_RBS_OFFSET/8;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
	ar_bsp = (unsigned long) ia64_rse_skip_regs((long *) pt->ar_bspstore, ndirty);

	/*
	 * Write portion of RSE backing store living on the kernel
	 * stack to the VM of the process.
	 */
	for (addr = pt->ar_bspstore; addr < ar_bsp; addr += 8) {
		long val;
		if (ia64_peek(pt, current, addr, &val) == 0)
			access_process_vm(current, addr, &val, sizeof(val), 1);
	}

	/*
	 * coredump format (same as the new-unwind path):
	 *	r0-r31
	 *	NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *	predicate registers (p0-p63)
	 *	b0-b7
	 *	ip cfm user-mask
	 *	ar.rsc ar.bsp ar.bspstore ar.rnat
	 *	ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/*
	 * NOTE(review): dst is an array-typed parameter and therefore decays
	 * to a pointer, so sizeof(dst) is sizeof(elf_greg_t *) — this clears
	 * only 8 bytes, not the whole register set.  do_copy_regs() above
	 * uses sizeof(elf_gregset_t); this likely should too.
	 */
	memset(dst, 0, sizeof(dst));	/* don't leak any "random" bits */

	/* r0 is zero */
	dst[ 1] = pt->r1;
	dst[ 2] = pt->r2;
	dst[ 3] = pt->r3;
	dst[ 4] = sw->r4;	/* r4-r7 are preserved registers, saved in switch_stack */
	dst[ 5] = sw->r5;
	dst[ 6] = sw->r6;
	dst[ 7] = sw->r7;
	dst[ 8] = pt->r8;
	dst[ 9] = pt->r9;
	dst[10] = pt->r10;
	dst[11] = pt->r11;
	dst[12] = pt->r12;
	dst[13] = pt->r13;
	dst[14] = pt->r14;
	dst[15] = pt->r15;
	memcpy(dst + 16, &pt->r16, 16*8);	/* r16-r31 are contiguous */

	dst[32] = ia64_get_nat_bits(pt, sw);
	dst[33] = pt->pr;

	/* branch regs: */
	dst[34] = pt->b0;
	dst[35] = sw->b1;
	dst[36] = sw->b2;
	dst[37] = sw->b3;
	dst[38] = sw->b4;
	dst[39] = sw->b5;
	dst[40] = pt->b6;
	dst[41] = pt->b7;

	dst[42] = pt->cr_iip + ia64_psr(pt)->ri;	/* ip + restart slot number */
	dst[43] = pt->cr_ifs;
	dst[44] = pt->cr_ipsr & IA64_PSR_UM;
	dst[45] = pt->ar_rsc;
	dst[46] = ar_bsp;		/* user-level bsp, not the kernel one */
	dst[47] = pt->ar_bspstore;
	dst[48] = pt->ar_rnat;
	dst[49] = pt->ar_ccv;
	dst[50] = pt->ar_unat;
	dst[51] = sw->ar_fpsr;
	dst[52] = pt->ar_pfs;
	dst[53] = sw->ar_lc;
	dst[54] = (sw->ar_pfs >> 52) & 0x3f;	/* ar.ec field of ar.pfs */
#endif /* !CONFIG_IA64_NEW_UNWIND */
}

/*
 * Dump the floating-point register state of PT into DST for an ELF
 * core dump.  Returns 1 because f0-f31 are always valid on ia64.
 */
int
dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
{
#ifdef CONFIG_IA64_NEW_UNWIND
	unw_init_running(do_dump_fpu, dst);
#else
	struct switch_stack *sw = ((struct switch_stack *) pt) - 1;

	/*
	 * NOTE(review): same sizeof-on-array-parameter pitfall as in
	 * ia64_elf_core_copy_regs() — sizeof (dst) is pointer size here, so
	 * this clears only 8 bytes of the fpregset.  Verify intent.
	 */
	memset(dst, 0, sizeof (dst));	/* don't leak any "random" bits */

	/* f0 is 0.0 */
	/* f1 is 1.0 */
	dst[2] = sw->f2;	/* f2-f5 are preserved, saved in switch_stack */
	dst[3] = sw->f3;
	dst[4] = sw->f4;
	dst[5] = sw->f5;
	dst[6] = pt->f6;	/* f6-f9 are scratch, saved in pt_regs */
	dst[7] = pt->f7;
	dst[8] = pt->f8;
	dst[9] = pt->f9;
	memcpy(dst + 10, &sw->f10, 22*16);	/* f10-f31 are contiguous */

	ia64_flush_fph(current);
	if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0)
		memcpy(dst + 32, current->thread.fph, 96*16);
#endif
	return 1;	/* f0-f31 are always valid so we always return 1 */
}

/*
 * execve() system call entry point: resolve the user filename and hand
 * off to the generic do_execve().
 */
asmlinkage long
sys_execve (char *filename, char **argv, char **envp, struct pt_regs *regs)
{
	int error;

	filename = getname(filename);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
out:
	return error;
}

/*
 * Create a kernel thread running fn(arg).  clone() returns in both the
 * parent and the child; the child detects itself because its `current'
 * differs from the parent task pointer captured before the call, runs
 * fn() and exits.  The parent returns the child's tid.
 */
pid_t
kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
{
	struct task_struct *parent = current;
	int result, tid;

	tid = clone(flags | CLONE_VM, 0);
	if (parent != current) {
		/* we are in the child process... */
		result = (*fn)(arg);
		_exit(result);
	}
	return tid;
}

/*
 * Flush thread state.  This is called when a thread does an execve().
 */
void
flush_thread (void)
{
	/* drop floating-point and debug-register state if it exists: */
	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
#ifndef CONFIG_SMP
	/* on UP, lazily-switched FPU state may still name us as owner — release it */
	if (ia64_get_fpu_owner() == current)
		ia64_set_fpu_owner(0);
#endif
}

/*
 * Clean up state associated with current thread.  This is called when
 * the thread calls exit().
 */
void
exit_thread (void)
{
#ifndef CONFIG_SMP
	if (ia64_get_fpu_owner() == current)
		ia64_set_fpu_owner(0);
#endif
#ifdef CONFIG_PERFMON
	/* stop monitoring */
	if ((current->thread.flags & IA64_THREAD_PM_VALID) != 0) {
		/*
		 * we cannot rely on switch_to() to save the PMU
		 * context for the last time.  There is a possible race
		 * condition in SMP mode between the child and the
		 * parent.  by explicitly saving the PMU context here
		 * we guarantee no race.  this call also stops
		 * monitoring
		 */
		ia64_save_pm_regs(current);
		/*
		 * make sure that switch_to() will not save context again
		 */
		current->thread.flags &= ~IA64_THREAD_PM_VALID;
	}
#endif
}

/*
 * Return the first instruction pointer outside the scheduler that task
 * p is blocked in, or 0 if it cannot be determined (used for wchan).
 */
unsigned long
get_wchan (struct task_struct *p)
{
	struct unw_frame_info info;
	unsigned long ip;
	int count = 0;
	/*
	 * These bracket the sleeping functions..
	 */
	extern void scheduling_functions_start_here(void);
	extern void scheduling_functions_end_here(void);
#	define first_sched	((unsigned long) scheduling_functions_start_here)
#	define last_sched	((unsigned long) scheduling_functions_end_here)

	/*
	 * Note: p may not be a blocked task (it could be current or
	 * another process running on some other CPU.  Rather than
	 * trying to determine if p is really blocked, we just assume
	 * it's blocked and rely on the unwind routines to fail
	 * gracefully if the process wasn't really blocked after all.
	 * --davidm 99/12/15
	 */
	unw_init_from_blocked_task(&info, p);
	do {
		if (unw_unwind(&info) < 0)
			return 0;
		unw_get_ip(&info, &ip);
		/* first frame outside [first_sched, last_sched) is the answer */
		if (ip < first_sched || ip >= last_sched)
			return ip;
	} while (count++ < 16);	/* give up after 16 frames */
	return 0;
#	undef first_sched
#	undef last_sched
}

/* Reboot the machine via the EFI runtime reset service (warm reset). */
void
machine_restart (char *restart_cmd)
{
	(*efi.reset_system)(EFI_RESET_WARM, 0, 0, 0);
}

/* Halt is not implemented yet: fall back to a restart. */
void
machine_halt (void)
{
	printk("machine_halt: need PAL or ACPI version here!!\n");
	machine_restart(0);
}

/* Power-off is not implemented yet: fall back to halt. */
void
machine_power_off (void)
{
	printk("machine_power_off: unimplemented (need ACPI version here)\n");
	machine_halt ();
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -