ptrace.c
/*
 * Write (i.e., synchronize) the kernel-resident RSE backing store of CHILD back to the
 * user-level backing store in CHILD's address space.  USER_RBS_START and USER_RBS_END
 * delimit the user-level backing store to be written.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
			return -EIO;
	}
	return 0;
}

/*
 * Simulate user-level "flushrs".  Note: we can't just add pt->loadrs>>16 to
 * pt->ar_bspstore because the kernel backing store and the user-level backing store may
 * have different alignments (and therefore a different number of intervening rnat slots).
 */
static void
user_flushrs (struct task_struct *task, struct pt_regs *pt)
{
	unsigned long *krbs;
	long ndirty;

	krbs = (unsigned long *) task + IA64_RBS_OFFSET/8;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
	pt->ar_bspstore = (unsigned long) ia64_rse_skip_regs((unsigned long *) pt->ar_bspstore,
							     ndirty);
	pt->loadrs = 0;
}

static inline void
sync_user_rbs_one_thread (struct task_struct *p, int make_writable)
{
	struct switch_stack *sw;
	unsigned long urbs_end;
	struct pt_regs *pt;

	sw = (struct switch_stack *) (p->thread.ksp + 16);
	pt = ia64_task_regs(p);
	urbs_end = ia64_get_user_rbs_end(p, pt, NULL);
	ia64_sync_user_rbs(p, sw, pt->ar_bspstore, urbs_end);
	if (make_writable)
		user_flushrs(p, pt);
}

struct task_list {
	struct task_list *next;
	struct task_struct *task;
};

#ifdef CONFIG_SMP

static inline void
collect_task (struct task_list **listp, struct task_struct *p, int make_writable)
{
	struct task_list *e;

	e = kmalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		/* oops, can't collect more: finish at least what we collected so far... */
		return;

	get_task_struct(p);
	e->task = p;
	e->next = *listp;
	*listp = e;
}

static inline struct task_list *
finish_task (struct task_list *list, int make_writable)
{
	struct task_list *next = list->next;

	sync_user_rbs_one_thread(list->task, make_writable);
	free_task_struct(list->task);
	kfree(list);
	return next;
}

#else
# define collect_task(list, p, make_writable)	sync_user_rbs_one_thread(p, make_writable)
# define finish_task(list, make_writable)	(NULL)
#endif
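/*
 * Illustrative sketch (not part of the original file; names are
 * hypothetical): the RSE slot arithmetic that user_flushrs() above relies
 * on.  The register backing store contains a NaT-collection (rnat) slot in
 * every 64th doubleword, i.e. whenever bits 3-8 of the address are all
 * ones, so the number of rnat slots between two addresses depends on their
 * alignment.  The real helpers live in <asm/rse.h>; the versions below
 * only sketch the idea.
 */
static inline unsigned long
example_rse_slot_num (unsigned long *addr)
{
	/* slot index (0..63) of ADDR within its current rnat group */
	return (((unsigned long) addr) >> 3) & 0x3f;
}

static inline unsigned long
example_rse_num_regs (unsigned long *bspstore, unsigned long *bsp)
{
	/* number of actual registers in [bspstore, bsp), excluding rnat slots */
	unsigned long slots = bsp - bspstore;

	return slots - (example_rse_slot_num(bspstore) + slots)/0x40;
}

static inline unsigned long *
example_rse_skip_regs (unsigned long *addr, long num_regs)
{
	/* ADDR advanced by NUM_REGS registers, stepping over intervening rnat slots */
	long delta = example_rse_slot_num(addr) + num_regs;

	if (num_regs < 0)
		delta -= 0x3e;
	return addr + num_regs + delta/0x3f;
}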
/*
 * Synchronize the RSE backing store of CHILD and all tasks that share the address space
 * with it.  CHILD_URBS_END is the address of the end of the register backing store of
 * CHILD.  If MAKE_WRITABLE is set, a user-level "flushrs" is simulated such that the VM
 * can be written via ptrace() and the tasks will pick up the newly written values.  It
 * would be OK to unconditionally simulate a "flushrs", but this would be more intrusive
 * than strictly necessary (e.g., it would make it impossible to obtain the original value
 * of ar.bspstore).
 */
static void
threads_sync_user_rbs (struct task_struct *child, unsigned long child_urbs_end,
		       int make_writable)
{
	struct switch_stack *sw;
	struct task_struct *p;
	struct mm_struct *mm;
	struct pt_regs *pt;
	long multi_threaded;

	task_lock(child);
	{
		mm = child->mm;
		multi_threaded = mm && (atomic_read(&mm->mm_users) > 1);
	}
	task_unlock(child);

	if (!multi_threaded) {
		sw = (struct switch_stack *) (child->thread.ksp + 16);
		pt = ia64_task_regs(child);
		ia64_sync_user_rbs(child, sw, pt->ar_bspstore, child_urbs_end);
		if (make_writable)
			user_flushrs(child, pt);
	} else {
		/*
		 * Note: we can't call ia64_sync_user_rbs() while holding the
		 * tasklist_lock because that may cause a dead-lock: ia64_sync_user_rbs()
		 * may indirectly call tlb_flush_all(), which triggers an IPI.
		 * Furthermore, tasklist_lock is acquired by fork() with interrupts
		 * disabled, so with the right timing, the IPI never completes, hence
		 * tasklist_lock never gets released, hence fork() never completes...
		 */
		struct task_list *list = NULL;

		read_lock(&tasklist_lock);
		{
			for_each_task(p) {
				if (p->mm == mm && p->state != TASK_RUNNING)
					collect_task(&list, p, make_writable);
			}
		}
		read_unlock(&tasklist_lock);

		while (list)
			list = finish_task(list, make_writable);
	}
	child->thread.flags |= IA64_THREAD_KRBS_SYNCED;	/* set the flag in the child thread only */
}

/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));

	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
}

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}
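/*
 * Illustrative sketch (not part of the original file; the helper name is
 * hypothetical): the read/write discipline for f32-f127 that access_uarea()
 * below follows.  A write must first make thread.fph the authoritative copy
 * (ia64_sync_fph), whereas a read only needs any live, modified fph state
 * spilled back (ia64_flush_fph).
 */
static inline unsigned long *
example_fph_addr (struct task_struct *task, unsigned long addr, int write_access)
{
	if (write_access)
		ia64_sync_fph(task);	/* thread.fph becomes the master copy; task reloads it later */
	else
		ia64_flush_fph(task);	/* spill f32-f127 to thread.fph if modified */
	return (unsigned long *) ((unsigned long) &task->thread.fph + addr);
}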
static int
access_fr (struct unw_frame_info *info, int regnum, int hi,
	   unsigned long *data, int write_access)
{
	struct ia64_fpreg fpval;
	int ret;

	ret = unw_get_fr(info, regnum, &fpval);
	if (ret < 0)
		return ret;

	if (write_access) {
		fpval.u.bits[hi] = *data;
		ret = unw_set_fr(info, regnum, fpval);
	} else
		*data = fpval.u.bits[hi];
	return ret;
}
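/*
 * Illustrative sketch (not part of the original file; the helper name is
 * hypothetical): reading the upper 64 bits of the preserved register f2 of
 * a blocked task through the unwinder, the same way access_uarea() below
 * handles PT_F2..PT_F5.
 */
static inline int
example_read_f2_hi (struct task_struct *child, unsigned long *val)
{
	struct unw_frame_info info;

	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0)
		return -1;
	/* regnum 2, hi=1 selects bits 64-127 of the 128-bit register, read access */
	return access_fr(&info, 2, 1, val, 0);
}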
static int
access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data,
	      int write_access)
{
	unsigned long *ptr, regnum, urbs_end, rnat_addr;
	struct switch_stack *sw;
	struct unw_frame_info info;
	struct pt_regs *pt;

	pt = ia64_task_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}

	if (addr < PT_F127 + 16) {
		/* accessing fph */
		if (write_access)
			ia64_sync_fph(child);
		else
			ia64_flush_fph(child);
		ptr = (unsigned long *) ((unsigned long) &child->thread.fph + addr);
	} else if ((addr >= PT_F10) && (addr < PT_F11 + 16)) {
		/* scratch registers untouched by kernel (saved in pt_regs) */
		ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, f10)
					 + addr - PT_F10);
	} else if (addr >= PT_F12 && addr < PT_F15 + 16) {
		/* scratch registers untouched by kernel (saved in switch_stack) */
		ptr = (unsigned long *) ((long) sw + (addr - PT_NAT_BITS - 32));
	} else if (addr < PT_AR_LC + 8) {
		/* preserved state: */
		unsigned long nat_bits, scratch_unat, dummy = 0;
		struct unw_frame_info info;
		char nat = 0;
		int ret;

		unw_init_from_blocked_task(&info, child);
		if (unw_unwind_to_user(&info) < 0)
			return -1;

		switch (addr) {
		case PT_NAT_BITS:
			if (write_access) {
				nat_bits = *data;
				scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
				if (unw_set_ar(&info, UNW_AR_UNAT, scratch_unat) < 0) {
					dprintk("ptrace: failed to set ar.unat\n");
					return -1;
				}
				for (regnum = 4; regnum <= 7; ++regnum) {
					unw_get_gr(&info, regnum, &dummy, &nat);
					unw_set_gr(&info, regnum, dummy,
						   (nat_bits >> regnum) & 1);
				}
			} else {
				if (unw_get_ar(&info, UNW_AR_UNAT, &scratch_unat) < 0) {
					dprintk("ptrace: failed to read ar.unat\n");
					return -1;
				}
				nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
				for (regnum = 4; regnum <= 7; ++regnum) {
					unw_get_gr(&info, regnum, &dummy, &nat);
					nat_bits |= (nat != 0) << regnum;
				}
				*data = nat_bits;
			}
			return 0;

		case PT_R4: case PT_R5: case PT_R6: case PT_R7:
			if (write_access) {
				/* read NaT bit first: */
				unsigned long dummy;

				ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, &dummy, &nat);
				if (ret < 0)
					return ret;
			}
			return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data, &nat,
					     write_access);

		case PT_B1: case PT_B2: case PT_B3: case PT_B4: case PT_B5:
			return unw_access_br(&info, (addr - PT_B1)/8 + 1, data, write_access);

		case PT_AR_EC:
			return unw_access_ar(&info, UNW_AR_EC, data, write_access);

		case PT_AR_LC:
			return unw_access_ar(&info, UNW_AR_LC, data, write_access);

		default:
			if (addr >= PT_F2 && addr < PT_F5 + 16)
				return access_fr(&info, (addr - PT_F2)/16 + 2,
						 (addr & 8) != 0, data, write_access);
			else if (addr >= PT_F16 && addr < PT_F31 + 16)
				return access_fr(&info, (addr - PT_F16)/16 + 16,
						 (addr & 8) != 0, data, write_access);
			else {
				dprintk("ptrace: rejecting access to register address 0x%lx\n",
					addr);
				return -1;
			}
		}
	} else if (addr < PT_F9 + 16) {
		/* scratch state */
		switch (addr) {
		case PT_AR_BSP:
			/*
			 * By convention, we use PT_AR_BSP to refer to the end of the
			 * user-level backing store.  Use
			 * ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof) to get the real value
			 * of ar.bsp at the time the kernel was entered.
			 */
			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
			if (write_access) {
				if (*data != urbs_end) {
					if (ia64_sync_user_rbs(child, sw,
							       pt->ar_bspstore, urbs_end) < 0)
						return -1;
					/* simulate user-level write of ar.bsp: */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;

		case PT_CFM:
			if ((long) pt->cr_ifs < 0) {
				if (write_access)
					pt->cr_ifs = ((pt->cr_ifs & ~0x3fffffffffUL)
						      | (*data & 0x3fffffffffUL));
				else
					*data = pt->cr_ifs & 0x3fffffffffUL;
			} else {
				/* kernel was entered through a system call */
				unsigned long cfm;

				unw_init_from_blocked_task(&info, child);
				if (unw_unwind_to_user(&info) < 0)
					return -1;

				unw_get_cfm(&info, &cfm);
				if (write_access)
					unw_set_cfm(&info, ((cfm & ~0x3fffffffffUL)
							    | (*data & 0x3fffffffffUL)));
				else
					*data = cfm;
			}
			return 0;

		case PT_CR_IPSR:
			if (write_access)
				pt->cr_ipsr = ((*data & IPSR_WRITE_MASK)
					       | (pt->cr_ipsr & ~IPSR_WRITE_MASK));
			else
				*data = (pt->cr_ipsr & IPSR_READ_MASK);
			return 0;

		case PT_AR_RNAT:
			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
			rnat_addr = (long) ia64_rse_rnat_addr((long *) urbs_end);
			if (write_access)
				return ia64_poke(child, sw, urbs_end, rnat_addr, *data);
			else
				return ia64_peek(child, sw, urbs_end, rnat_addr, data);

		case PT_R1:
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r1));
			break;
		case PT_R2: case PT_R3:
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r2)
						 + addr - PT_R2);
			break;
		case PT_R8: case PT_R9: case PT_R10: case PT_R11:
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r8)
						 + addr - PT_R8);
			break;
		case PT_R12: case PT_R13:
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r12)
						 + addr - PT_R12);
			break;
		case PT_R14:
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r14));
			break;
		case PT_R15:
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r15));
			break;
		case PT_R16: case PT_R17: case PT_R18: case PT_R19:
		case PT_R20: case PT_R21: case PT_R22: case PT_R23:
		case PT_R24: case PT_R25: case PT_R26: case PT_R27:
		case PT_R28: case PT_R29: case PT_R30: case PT_R31:
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r16)
						 + addr - PT_R16);
			break;
		case PT_B0:
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b0));
			break;
		case PT_B6:
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b6));
			break;
		case PT_B7:
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b7));
			break;
		case PT_F6: case PT_F6+8: case PT_F7: case PT_F7+8:
		case PT_F8: case PT_F8+8: case PT_F9: case PT_F9+8:
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, f6)
						 + addr - PT_F6);
			break;
		case PT_AR_BSPSTORE:
			ptr = (unsigned long *) ((long) pt
						 + offsetof(struct pt_regs, ar_bspstore));
			break;
		case PT_AR_RSC:
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_rsc));
			break;
		case PT_AR_UNAT:
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_unat));
			break;
		case PT_AR_PFS:
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_pfs));
			break;
		case PT_AR_CCV:
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_ccv));
			break;
		case PT_AR_FPSR:
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_fpsr));
			break;
		case PT_CR_IIP:
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, cr_iip));
			break;
		case PT_PR:
			/* scratch register */
			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, pr));
			break;

		default:
			/* disallow accessing anything else... */
			dprintk("ptrace: rejecting access to register address 0x%lx\n", addr);
			return -1;
		}
	} else if (addr <= PT_AR_SSD) {
		ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_csd)
					 + addr - PT_AR_CSD);
	} else {
		/* access debug registers */

		if (addr >= PT_IBR) {
			regnum = (addr - PT_IBR) >> 3;
			ptr = &child->thread.ibr[0];
		} else {
			regnum = (addr - PT_DBR) >> 3;
			ptr = &child->thread.dbr[0];
		}

		if (regnum >= 8) {
			dprintk("ptrace: rejecting access to register address 0x%lx\n", addr);
			return -1;
		}
#ifdef CONFIG_PERFMON
		/*
		 * Check if debug registers are used by perfmon.  This test must be done
		 * once we know that we can do the operation, i.e. the arguments are all
		 * valid, but before we start modifying the state.
		 *
		 * Perfmon needs to keep a count of how many processes are trying to
		 * modify the debug registers for system wide monitoring sessions.
		 *
		 * We also include read access here, because they may cause the
		 * PMU-installed debug register state (dbr[], ibr[]) to be reset.  The two
		 * arrays are also used by perfmon, but we do not use
		 * IA64_THREAD_DBG_VALID.  The registers are restored by the PMU context
		 * switch code.
		 */
		if (pfm_use_debug_registers(child))
			return -1;
#endif
		if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
			child->thread.flags |= IA64_THREAD_DBG_VALID;
			memset(child->thread.dbr, 0, sizeof(child->thread.dbr));
			memset(child->thread.ibr, 0, sizeof(child->thread.ibr));
		}

		ptr += regnum;

		if (write_access)
			/* don't let the user set kernel-level breakpoints... */
			*ptr = *data & ~(7UL << 56);
		else
			*data = *ptr;
		return 0;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
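/*
 * Illustrative sketch (not part of the original file; the helper name is
 * hypothetical): per the PT_AR_BSP convention documented in access_uarea()
 * above, a debugger that wants the real ar.bsp at the time of kernel entry
 * must back up over the current frame, i.e. compute
 * ia64_rse_skip_regs(PT_AR_BSP value, -CFM.sof).
 */
static inline unsigned long
example_true_ar_bsp (unsigned long urbs_end, unsigned long cfm)
{
	long sof = cfm & 0x7f;	/* size-of-frame field in bits 0-6 of CFM */

	return (unsigned long) ia64_rse_skip_regs((unsigned long *) urbs_end, -sof);
}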