ptrace.c
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
			return -EIO;
	}
	return 0;
}

/*
 * Simulate user-level "flushrs".  Note: we can't just add pt->loadrs>>16 to
 * pt->ar_bspstore because the kernel backing store and the user-level backing store may
 * have different alignments (and therefore a different number of intervening rnat slots).
 */
static void
user_flushrs (struct task_struct *task, struct pt_regs *pt)
{
	unsigned long *krbs;
	long ndirty;

	krbs = (unsigned long *) task + IA64_RBS_OFFSET/8;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
	pt->ar_bspstore = (unsigned long) ia64_rse_skip_regs((unsigned long *) pt->ar_bspstore,
							     ndirty);
	pt->loadrs = 0;
}

/*
 * Synchronize the RSE backing store of CHILD and all tasks that share the address space
 * with it.  CHILD_URBS_END is the address of the end of the register backing store of
 * CHILD.  If MAKE_WRITABLE is set, a user-level "flushrs" is simulated such that the VM
 * can be written via ptrace() and the tasks will pick up the newly written values.  It
 * would be OK to unconditionally simulate a "flushrs", but this would be more intrusive
 * than strictly necessary (e.g., it would make it impossible to obtain the original value
 * of ar.bspstore).
 */
static void
threads_sync_user_rbs (struct task_struct *child, unsigned long child_urbs_end, int make_writable)
{
	struct switch_stack *sw;
	unsigned long urbs_end;
	struct task_struct *p;
	struct mm_struct *mm;
	struct pt_regs *pt;
	long multi_threaded;

	task_lock(child);
	{
		mm = child->mm;
		multi_threaded = mm && (atomic_read(&mm->mm_users) > 1);
	}
	task_unlock(child);

	if (!multi_threaded) {
		sw = (struct switch_stack *) (child->thread.ksp + 16);
		pt = ia64_task_regs(child);
		ia64_sync_user_rbs(child, sw, pt->ar_bspstore, child_urbs_end);
		if (make_writable)
			user_flushrs(child, pt);
	} else {
		read_lock(&tasklist_lock);
		{
			for_each_task(p) {
				if (p->mm == mm && p->state != TASK_RUNNING) {
					sw = (struct switch_stack *) (p->thread.ksp + 16);
					pt = ia64_task_regs(p);
					urbs_end = ia64_get_user_rbs_end(p, pt, NULL);
					ia64_sync_user_rbs(p, sw, pt->ar_bspstore, urbs_end);
					if (make_writable)
						user_flushrs(p, pt);
				}
			}
		}
		read_unlock(&tasklist_lock);
	}
	child->thread.flags |= IA64_THREAD_KRBS_SYNCED;	/* set the flag in the child thread only */
}
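/*
 * [Illustrative sketch -- not part of the original file.]  user_flushrs()
 * above can't just add loadrs>>16 bytes to ar.bspstore because the RSE
 * interleaves one RNaT collection slot with every 63 register slots: an
 * 8-byte slot whose address has bits 3-8 all set holds collected NaT bits,
 * not a stacked register.  Assuming the <asm/rse.h> conventions, the
 * classification looks like this:
 */
static inline int
example_is_rnat_slot (unsigned long *slot)	/* hypothetical name; rse.h provides ia64_rse_is_rnat_slot() */
{
	/* bits 3-8 of the byte address all one => RNaT collection slot */
	return ((unsigned long) slot & 0x1f8) == 0x1f8;
}
/*
 * Because the kernel and user backing stores may start at different offsets
 * within a 64-slot group, the same number of registers can span a different
 * number of RNaT slots -- hence the ia64_rse_num_regs()/ia64_rse_skip_regs()
 * arithmetic instead of plain pointer addition.
 */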
/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
#ifdef CONFIG_SMP
	struct task_struct *fpu_owner = current;
#else
	struct task_struct *fpu_owner = ia64_get_fpu_owner();
#endif

	if (task == fpu_owner && psr->mfh) {
		psr->mfh = 0;
		ia64_save_fpu(&task->thread.fph[0]);
		task->thread.flags |= IA64_THREAD_FPH_VALID;
	}
}

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
#ifndef CONFIG_SMP
	if (ia64_get_fpu_owner() == task)
		ia64_set_fpu_owner(0);
#endif
	psr->dfh = 1;
}

static int
access_fr (struct unw_frame_info *info, int regnum, int hi,
	   unsigned long *data, int write_access)
{
	struct ia64_fpreg fpval;
	int ret;

	ret = unw_get_fr(info, regnum, &fpval);
	if (ret < 0)
		return ret;

	if (write_access) {
		fpval.u.bits[hi] = *data;
		ret = unw_set_fr(info, regnum, fpval);
	} else
		*data = fpval.u.bits[hi];
	return ret;
}
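/*
 * [Illustrative sketch -- not part of the original file.]  IA-64 floating
 * point registers are 128 bits wide, but the ptrace user area is addressed
 * in 8-byte words, so every fN spans two consecutive offsets and
 * access_fr()'s HI argument picks the half.  Reading both halves of f16,
 * for example, would take two accesses 8 bytes apart:
 *
 *	unsigned long lo, hi;
 *	access_uarea(child, PT_F16,     &lo, 0);	// bits 0..63
 *	access_uarea(child, PT_F16 + 8, &hi, 0);	// bits 64..127
 *
 * which is why the dispatch in access_uarea() below derives HI from
 * (addr & 8) != 0.
 */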
static int
access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data, int write_access)
{
	unsigned long *ptr, regnum, urbs_end, rnat_addr;
	struct switch_stack *sw;
	struct unw_frame_info info;
	struct pt_regs *pt;

	pt = ia64_task_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}

	if (addr < PT_F127 + 16) {
		/* accessing fph */
		if (write_access)
			ia64_sync_fph(child);
		else
			ia64_flush_fph(child);
		ptr = (unsigned long *) ((unsigned long) &child->thread.fph + addr);
	} else if (addr >= PT_F10 && addr < PT_F15 + 16) {
		/* scratch registers untouched by kernel (saved in switch_stack) */
		ptr = (unsigned long *) ((long) sw + addr - PT_NAT_BITS);
	} else if (addr < PT_AR_LC + 8) {
		/* preserved state: */
		unsigned long nat_bits, scratch_unat, dummy = 0;
		struct unw_frame_info info;
		char nat = 0;
		int ret;

		unw_init_from_blocked_task(&info, child);
		if (unw_unwind_to_user(&info) < 0)
			return -1;

		switch (addr) {
		      case PT_NAT_BITS:
			if (write_access) {
				nat_bits = *data;
				scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
				if (unw_set_ar(&info, UNW_AR_UNAT, scratch_unat) < 0) {
					dprintk("ptrace: failed to set ar.unat\n");
					return -1;
				}
				for (regnum = 4; regnum <= 7; ++regnum) {
					unw_get_gr(&info, regnum, &dummy, &nat);
					unw_set_gr(&info, regnum, dummy, (nat_bits >> regnum) & 1);
				}
			} else {
				if (unw_get_ar(&info, UNW_AR_UNAT, &scratch_unat) < 0) {
					dprintk("ptrace: failed to read ar.unat\n");
					return -1;
				}
				nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
				for (regnum = 4; regnum <= 7; ++regnum) {
					unw_get_gr(&info, regnum, &dummy, &nat);
					nat_bits |= (nat != 0) << regnum;
				}
				*data = nat_bits;
			}
			return 0;

		      case PT_R4: case PT_R5: case PT_R6: case PT_R7:
			if (write_access) {
				/* read NaT bit first: */
				ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, data, &nat);
				if (ret < 0)
					return ret;
			}
			return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data, &nat, write_access);

		      case PT_B1: case PT_B2: case PT_B3: case PT_B4: case PT_B5:
			return unw_access_br(&info, (addr - PT_B1)/8 + 1, data, write_access);

		      case PT_AR_EC:
			return unw_access_ar(&info, UNW_AR_EC, data, write_access);

		      case PT_AR_LC:
			return unw_access_ar(&info, UNW_AR_LC, data, write_access);

		      default:
			if (addr >= PT_F2 && addr < PT_F5 + 16)
				return access_fr(&info, (addr - PT_F2)/16 + 2, (addr & 8) != 0,
						 data, write_access);
			else if (addr >= PT_F16 && addr < PT_F31 + 16)
				return access_fr(&info, (addr - PT_F16)/16 + 16, (addr & 8) != 0,
						 data, write_access);
			else {
				dprintk("ptrace: rejecting access to register address 0x%lx\n",
					addr);
				return -1;
			}
		}
	} else if (addr < PT_F9+16) {
		/* scratch state */
		switch (addr) {
		      case PT_AR_BSP:
			/*
			 * By convention, we use PT_AR_BSP to refer to the end of the
			 * user-level backing store.  Use ia64_rse_skip_regs(PT_AR_BSP,
			 * -CFM.sof) to get the real value of ar.bsp at the time the
			 * kernel was entered.
			 */
			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
			if (write_access) {
				if (*data != urbs_end) {
					if (ia64_sync_user_rbs(child, sw,
							       pt->ar_bspstore, urbs_end) < 0)
						return -1;
					/* simulate user-level write of ar.bsp: */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;

		      case PT_CFM:
			if ((long) pt->cr_ifs < 0) {
				if (write_access)
					pt->cr_ifs = ((pt->cr_ifs & ~0x3fffffffffUL)
						      | (*data & 0x3fffffffffUL));
				else
					*data = pt->cr_ifs & 0x3fffffffffUL;
			} else {
				/* kernel was entered through a system call */
				unsigned long cfm;

				unw_init_from_blocked_task(&info, child);
				if (unw_unwind_to_user(&info) < 0)
					return -1;

				unw_get_cfm(&info, &cfm);
				if (write_access)
					unw_set_cfm(&info, ((cfm & ~0x3fffffffffU)
							    | (*data & 0x3fffffffffUL)));
				else
					*data = cfm;
			}
			return 0;

		      case PT_CR_IPSR:
			if (write_access)
				pt->cr_ipsr = ((*data & IPSR_WRITE_MASK)
					       | (pt->cr_ipsr & ~IPSR_WRITE_MASK));
			else
				*data = (pt->cr_ipsr & IPSR_READ_MASK);
			return 0;

		      case PT_AR_RNAT:
			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
			rnat_addr = (long) ia64_rse_rnat_addr((long *) urbs_end);
			if (write_access)
				return ia64_poke(child, sw, urbs_end, rnat_addr, *data);
			else
				return ia64_peek(child, sw, urbs_end, rnat_addr, data);

		      case PT_R1:  case PT_R2:  case PT_R3:
		      case PT_R8:  case PT_R9:  case PT_R10: case PT_R11:
		      case PT_R12: case PT_R13: case PT_R14: case PT_R15:
		      case PT_R16: case PT_R17: case PT_R18: case PT_R19:
		      case PT_R20: case PT_R21: case PT_R22: case PT_R23:
		      case PT_R24: case PT_R25: case PT_R26: case PT_R27:
		      case PT_R28: case PT_R29: case PT_R30: case PT_R31:
		      case PT_B0:  case PT_B6:  case PT_B7:
		      case PT_F6:  case PT_F6+8: case PT_F7: case PT_F7+8:
		      case PT_F8:  case PT_F8+8: case PT_F9: case PT_F9+8:
		      case PT_AR_BSPSTORE:
		      case PT_AR_RSC: case PT_AR_UNAT: case PT_AR_PFS:
		      case PT_AR_CCV: case PT_AR_FPSR:
		      case PT_CR_IIP: case PT_PR:
			/* scratch register */
			ptr = (unsigned long *) ((long) pt + addr - PT_CR_IPSR);
			break;

		      default:
			/* disallow accessing anything else... */
			dprintk("ptrace: rejecting access to register address 0x%lx\n",
				addr);
			return -1;
		}
	} else {
		/* access debug registers */

		if (addr >= PT_IBR) {
			regnum = (addr - PT_IBR) >> 3;
			ptr = &child->thread.ibr[0];
		} else {
			regnum = (addr - PT_DBR) >> 3;
			ptr = &child->thread.dbr[0];
		}

		if (regnum >= 8) {
			dprintk("ptrace: rejecting access to register address 0x%lx\n", addr);
			return -1;
		}
#ifdef CONFIG_PERFMON
		/*
		 * Check if debug registers are used by perfmon.  This test must be done
		 * once we know that we can do the operation, i.e. the arguments are all
		 * valid, but before we start modifying the state.
		 *
		 * Perfmon needs to keep a count of how many processes are trying to
		 * modify the debug registers for system wide monitoring sessions.
		 *
		 * We also include read access here, because they may cause the
		 * PMU-installed debug register state (dbr[], ibr[]) to be reset.  The two
		 * arrays are also used by perfmon, but we do not use
		 * IA64_THREAD_DBG_VALID.  The registers are restored by the PMU context
		 * switch code.
		 */
		if (pfm_use_debug_registers(child))
			return -1;
#endif
		if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
			child->thread.flags |= IA64_THREAD_DBG_VALID;
			memset(child->thread.dbr, 0, sizeof(child->thread.dbr));
			memset(child->thread.ibr, 0, sizeof(child->thread.ibr));
		}

		ptr += regnum;

		if (write_access)
			/* don't let the user set kernel-level breakpoints... */
			*ptr = *data & ~(7UL << 56);
		else
			*data = *ptr;
		return 0;
	}

	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
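/*
 * [Illustrative sketch -- not part of the original file.]  access_uarea()
 * is what ultimately backs PTRACE_PEEKUSR/PTRACE_POKEUSR on ia64, so a
 * debugger names registers by the PT_* byte offsets from
 * <asm/ptrace_offsets.h>.  A minimal user-level read (error handling
 * omitted; "pid" is assumed to be a stopped, traced child):
 *
 *	#include <sys/ptrace.h>
 *	#include <asm/ptrace_offsets.h>
 *
 *	long ip  = ptrace(PTRACE_PEEKUSER, pid, PT_CR_IIP, 0);	// resume address
 *	long bsp = ptrace(PTRACE_PEEKUSER, pid, PT_AR_BSP, 0);	// end of user RBS
 *
 * Note that PT_AR_BSP reads back urbs_end (see the PT_AR_BSP case above),
 * not the raw value of ar.bsp at the time of kernel entry.
 */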
static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs *ppr)
{
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval;
	struct unw_frame_info info;
	char nat = 0;
	int i;

	retval = verify_area(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs));
	if (retval != 0) {
		return -EIO;
	}

	pt = ia64_task_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	retval = 0;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= access_uarea(child, PT_CR_IPSR, &ppr->cr_ipsr, 0);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= access_uarea(child, PT_AR_EC, &ppr->ar[PT_AUR_EC], 0);
	retval |= access_uarea(child, PT_AR_LC, &ppr->ar[PT_AUR_LC], 0);

	retval |= access_uarea(child, PT_AR_RNAT, &ppr->ar[PT_AUR_RNAT], 0);
	retval |= access_uarea(child, PT_AR_BSP, &ppr->ar[PT_AUR_BSP], 0);

	retval |= access_uarea(child, PT_CFM, &ppr->cfm, 0);

	/* gr1-gr3 */