📄 process.c
字号:
	ia32_save_state(p);
#endif
#ifdef CONFIG_PERFMON
	/* Child inherits the parent's perfmon context, if it has one. */
	if (p->thread.pfm_context)
		retval = pfm_inherit(p, child_ptregs);
#endif
	return retval;
}

/*
 * Unwind-callback that copies the current task's user-level register
 * state into an elf_gregset_t (pointed to by @arg) in ELF coredump
 * order.  Bails out silently if the unwinder cannot reach the
 * user-level frame or the user RBS cannot be synced.
 */
void
do_copy_regs (struct unw_frame_info *info, void *arg)
{
	unsigned long mask, sp, nat_bits = 0, ip, ar_rnat, urbs_end, cfm;
	elf_greg_t *dst = arg;
	struct pt_regs *pt;
	char nat;
	int i;

	memset(dst, 0, sizeof(elf_gregset_t));	/* don't leak any kernel bits to user-level */

	if (unw_unwind_to_user(info) < 0)
		return;

	unw_get_sp(info, &sp);
	/* the user-level pt_regs live just above the kernel stack pointer */
	pt = (struct pt_regs *) (sp + 16);

	urbs_end = ia64_get_user_rbs_end(current, pt, &cfm);

	/* flush the dirty kernel-resident portion of the register backing store
	   out to user memory so the values below are readable there */
	if (ia64_sync_user_rbs(current, info->sw, pt->ar_bspstore, urbs_end) < 0)
		return;

	/* fetch the RNaT collection word that corresponds to the end of the urbs */
	ia64_peek(current, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end),
		  &ar_rnat);

	/*
	 * coredump format:
	 *	r0-r31
	 *	NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *	predicate registers (p0-p63)
	 *	b0-b7
	 *	ip cfm user-mask
	 *	ar.rsc ar.bsp ar.bspstore ar.rnat
	 *	ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* r0 is zero */
	for (i = 1, mask = (1UL << i); i < 32; ++i) {
		unw_get_gr(info, i, &dst[i], &nat);
		if (nat)
			nat_bits |= mask;	/* bit i set iff r<i> holds a NaT */
		mask <<= 1;
	}
	dst[32] = nat_bits;
	unw_get_pr(info, &dst[33]);

	for (i = 0; i < 8; ++i)
		unw_get_br(info, i, &dst[34 + i]);

	unw_get_rp(info, &ip);
	/* fold the restart-instruction slot number into the reported ip */
	dst[42] = ip + ia64_psr(pt)->ri;
	dst[43] = cfm;
	dst[44] = pt->cr_ipsr & IA64_PSR_UM;

	unw_get_ar(info, UNW_AR_RSC, &dst[45]);
	/*
	 * For bsp and bspstore, unw_get_ar() would return the kernel
	 * addresses, but we need the user-level addresses instead:
	 */
	dst[46] = urbs_end;	/* note: by convention PT_AR_BSP points to the end of the urbs! */
	dst[47] = pt->ar_bspstore;
	dst[48] = ar_rnat;
	unw_get_ar(info, UNW_AR_CCV, &dst[49]);
	unw_get_ar(info, UNW_AR_UNAT, &dst[50]);
	unw_get_ar(info, UNW_AR_FPSR, &dst[51]);
	dst[52] = pt->ar_pfs;	/* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */
	unw_get_ar(info, UNW_AR_LC, &dst[53]);
	unw_get_ar(info, UNW_AR_EC, &dst[54]);
}

/*
 * Unwind-callback that copies the current task's floating-point
 * register state into an elf_fpregset_t (pointed to by @arg).
 */
void
do_dump_fpu (struct unw_frame_info *info, void *arg)
{
	elf_fpreg_t *dst = arg;
	int i;

	memset(dst, 0, sizeof(elf_fpregset_t));	/* don't leak any "random" bits */

	if (unw_unwind_to_user(info) < 0)
		return;

	/* f0 is 0.0, f1 is 1.0 */
	for (i = 2; i < 32; ++i)
		unw_get_fr(info, i, dst + i);

	/* make sure the high FP partition (f32-f127) is in thread.fph */
	ia64_flush_fph(current);
	if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0)
		/* f32-f127: 96 registers of 16 bytes each */
		memcpy(dst + 32, current->thread.fph, 96*16);
}

/* ELF coredump hook: dump the general registers of the current task. */
void
ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
{
	unw_init_running(do_copy_regs, dst);
}

/* ELF coredump hook: dump the FP registers of the current task. */
int
dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
{
	unw_init_running(do_dump_fpu, dst);
	return 1;	/* f0-f31 are always valid so we always return 1 */
}

/*
 * execve() system call: copy the pathname in from user space, then hand
 * off to the generic do_execve().  Returns 0 on success or a negative
 * errno (including any error from getname()).
 */
asmlinkage long
sys_execve (char *filename, char **argv, char **envp, struct pt_regs *regs)
{
	int error;

	filename = getname(filename);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
out:
	return error;
}

/*
 * Create a kernel thread running fn(arg).  clone() returns in both the
 * parent and the child; the child is detected by current having changed,
 * runs fn() and exits, while the parent returns the child's tid.
 */
pid_t
kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
{
	struct task_struct *parent = current;
	int result, tid;

	tid = clone(flags | CLONE_VM, 0);
	if (parent != current) {
		/* we're in the child: run the payload and never return */
		result = (*fn)(arg);
		_exit(result);
	}
	return tid;
}

/*
 * Flush thread state.  This is called when a thread does an execve().
 */
void
flush_thread (void)
{
	/* drop floating-point and debug-register state if it exists: */
	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
#ifndef CONFIG_SMP
	/* on UP the FPU is lazily owned; give it up so stale state isn't reused */
	if (ia64_get_fpu_owner() == current)
		ia64_set_fpu_owner(0);
#endif
}

#ifdef CONFIG_PERFMON
/*
 * By the time we get here, the task is detached from the tasklist.
 * This is important
 * because it means that no other tasks can ever find it as a notified task, therefore
 * there is no race condition between this code and let's say a pfm_context_create().
 * Conversely, the pfm_cleanup_notifiers() cannot try to access a task's pfm context if
 * this other task is in the middle of its own pfm_context_exit() because it would already
 * be out of the task list.  Note that this case is very unlikely between a direct child
 * and its parent (if it is the notified process) because of the way the exit is notified
 * via SIGCHLD.
 */
void
release_thread (struct task_struct *task)
{
	/* tear down the task's own perfmon context, if any */
	if (task->thread.pfm_context)
		pfm_context_exit(task);

	/* drop any tasks still registered to notify this one */
	if (atomic_read(&task->thread.pfm_notifiers_check) > 0)
		pfm_cleanup_notifiers(task);
}
#endif

/*
 * Clean up state associated with current thread.  This is called when
 * the thread calls exit().
 */
void
exit_thread (void)
{
#ifndef CONFIG_SMP
	/* release lazy FPU ownership so nobody saves into a dead task */
	if (ia64_get_fpu_owner() == current)
		ia64_set_fpu_owner(0);
#endif
#ifdef CONFIG_PERFMON
       /* stop monitoring */
	if ((current->thread.flags & IA64_THREAD_PM_VALID) != 0) {
		/*
		 * we cannot rely on switch_to() to save the PMU
		 * context for the last time.  There is a possible race
		 * condition in SMP mode between the child and the
		 * parent.  By explicitly saving the PMU context here
		 * we guarantee no race.  This call also stops
		 * monitoring.
		 */
		pfm_flush_regs(current);
		/*
		 * make sure that switch_to() will not save context again
		 */
		current->thread.flags &= ~IA64_THREAD_PM_VALID;
	}
#endif
}

/*
 * Return the instruction pointer at which task @p is sleeping, or 0 if
 * it cannot be determined (unwind failure or too many frames inside the
 * scheduler).  Frames whose ip lies inside the scheduling functions are
 * skipped so the caller sees where the task blocked, not how.
 */
unsigned long
get_wchan (struct task_struct *p)
{
	struct unw_frame_info info;
	unsigned long ip;
	int count = 0;
	/*
	 * These bracket the sleeping functions..
	 */
	extern void scheduling_functions_start_here(void);
	extern void scheduling_functions_end_here(void);
#	define first_sched	((unsigned long) scheduling_functions_start_here)
#	define last_sched	((unsigned long) scheduling_functions_end_here)

	/*
	 * Note: p may not be a blocked task (it could be current or
	 * another process running on some other CPU.  Rather than
	 * trying to determine if p is really blocked, we just assume
	 * it's blocked and rely on the unwind routines to fail
	 * gracefully if the process wasn't really blocked after all.
	 * --davidm 99/12/15
	 */
	unw_init_from_blocked_task(&info, p);
	do {
		if (unw_unwind(&info) < 0)
			return 0;
		unw_get_ip(&info, &ip);
		if (ip < first_sched || ip >= last_sched)
			return ip;
	} while (count++ < 16);	/* give up after 16 frames to bound the walk */
	return 0;
#	undef first_sched
#	undef last_sched
}

/*
 * Halt the CPU in its lowest-power halt state, as reported by PAL.
 * Scans the (up to 8) power-management states for the lowest-consumption
 * implemented one and then spins in ia64_pal_halt() forever.
 */
void
cpu_halt (void)
{
	pal_power_mgmt_info_u_t power_info[8];
	unsigned long min_power;
	int i, min_power_state;

	if (ia64_pal_halt_info(power_info) != 0)
		return;

	min_power_state = 0;
	min_power = power_info[0].pal_power_mgmt_info_s.power_consumption;
	for (i = 1; i < 8; ++i)
		/* only consider states the implementation actually supports (im bit) */
		if (power_info[i].pal_power_mgmt_info_s.im
		    && power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) {
			min_power = power_info[i].pal_power_mgmt_info_s.power_consumption;
			min_power_state = i;
		}

	while (1)
		ia64_pal_halt(min_power_state);
}

/* Reboot the machine via a warm EFI reset; restart_cmd is unused on ia64. */
void
machine_restart (char *restart_cmd)
{
	(*efi.reset_system)(EFI_RESET_WARM, 0, 0, 0);
}

void
machine_halt (void)
{
	cpu_halt();
}

/* Power off via the platform hook if registered, otherwise just halt. */
void
machine_power_off (void)
{
	if (pm_power_off)
		pm_power_off();
	machine_halt();
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -