⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ptrace.c

📁 Linux内核源代码 为压缩文件 是<<Linux内核>>一书中的源代码
💻 C
📖 第 1 页 / 共 3 页
字号:
			regnum = ia64_rse_num_regs(bspstore, laddr);
			laddr = ia64_rse_skip_regs(krbs, regnum);
			if (regnum < krbs_num_regs) {
				*laddr = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val)) {
		return -EIO;
	}
	return 0;
}

/*
 * Synchronize (i.e., write) the RSE backing store living in kernel
 * space to the VM of the indicated child process.
 *
 * If new_bsp is non-zero, the bsp will (effectively) be updated to
 * the new value upon resumption of the child process.  This is
 * accomplished by setting the loadrs value to zero and the bspstore
 * value to the new bsp value.
 *
 * When new_bsp and force_loadrs_to_zero are both 0, the register
 * backing store in kernel space is written to user space and the
 * loadrs and bspstore values are left alone.
 *
 * When new_bsp is zero and force_loadrs_to_zero is 1 (non-zero),
 * loadrs is set to 0, and the bspstore value is set to the old bsp
 * value.  This will cause the stacked registers (r32 and up) to be
 * obtained entirely from the child's memory space rather than
 * from the kernel.  (This makes it easier to write code for
 * modifying the stacked registers in multi-threaded programs.)
 *
 * Note:  I had originally written this function without the
 * force_loadrs_to_zero parameter; it was written so that loadrs would
 * always be set to zero.  But I had problems with certain system
 * calls apparently causing a portion of the RBS to be zeroed.  (I
 * still don't understand why this was happening.) Anyway, it'd
 * definitely be less intrusive to leave loadrs and bspstore alone if
 * possible.
*/
static long
sync_kernel_register_backing_store (struct task_struct *child,
                                    long new_bsp,
                                    int force_loadrs_to_zero)
{
	unsigned long *krbs, bspstore, *kbspstore, bsp, rbs_end, addr, val;
	long ndirty, ret = 0;
	struct pt_regs *child_regs = ia64_task_regs(child);
#ifdef CONFIG_IA64_NEW_UNWIND
	struct unw_frame_info info;
	unsigned long cfm, sof;

	/* Unwind to the last user-level frame so the unwinder can tell us
	   where the user portion of the backing store ends.  */
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0)
		return -1;
	unw_get_bsp(&info, (unsigned long *) &kbspstore);
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	/* loadrs >> 19 converts the saved ar.rsc.loadrs field into the byte
	   size of the dirty partition on the kernel RBS — NOTE(review):
	   confirm against the ar.rsc field layout in the Itanium SDM.  */
	ndirty = ia64_rse_num_regs(krbs, krbs + (child_regs->loadrs >> 19));
	bspstore = child_regs->ar_bspstore;
	bsp = (long) ia64_rse_skip_regs((long *)bspstore, ndirty);
	cfm = child_regs->cr_ifs;
	/* cr_ifs bit 63 clear means the kernel was entered via a system call,
	   so cr_ifs carries no valid frame marker; ask the unwinder for the
	   CFM instead.  */
	if (!(cfm & (1UL << 63)))
		unw_get_cfm(&info, &cfm);
	sof = (cfm & 0x7f);	/* size-of-frame field of the CFM */
	rbs_end = (long) ia64_rse_skip_regs((long *)bspstore, sof);
#else
	struct switch_stack *child_stack;
	unsigned long krbs_num_regs;

	/* Without the new unwinder, derive the kernel bspstore from the
	   switch_stack that sits just below the child's pt_regs.  */
	child_stack = (struct switch_stack *) child_regs - 1;
	kbspstore = (unsigned long *) child_stack->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	ndirty = ia64_rse_num_regs(krbs, krbs + (child_regs->loadrs >> 19));
	bspstore = child_regs->ar_bspstore;
	bsp = (long) ia64_rse_skip_regs((long *)bspstore, ndirty);
	krbs_num_regs = ia64_rse_num_regs(krbs, kbspstore);
	rbs_end = (long) ia64_rse_skip_regs((long *)bspstore, krbs_num_regs);
#endif

	/* Return early if nothing to do */
	if (bsp == new_bsp)
		return 0;

	/* Write portion of backing store living on kernel stack to the child's VM. */
	for (addr = bspstore; addr < rbs_end; addr += 8) {
		ret = ia64_peek(child_regs, child, addr, &val);
		if (ret != 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
			return -EIO;
	}

	/* An explicit new bsp implies loadrs must be cleared so the child
	   reloads its stacked registers from user memory on resumption.  */
	if (new_bsp != 0) {
		force_loadrs_to_zero = 1;
		bsp = new_bsp;
	}

	if (force_loadrs_to_zero) {
		child_regs->loadrs = 0;
		child_regs->ar_bspstore = bsp;
	}

	return ret;
}

/*
 * Write out the kernel register backing store of every non-running
 * task sharing `mm' (see sync_kernel_register_backing_store above),
 * then mark the traced child as having its kernel RBS synced.
 */
static void
sync_thread_rbs (struct task_struct *child, struct mm_struct *mm, int make_writable)
{
	struct task_struct *p;

	read_lock(&tasklist_lock);
	{
		for_each_task(p) {
			if (p->mm == mm && p->state != TASK_RUNNING)
				sync_kernel_register_backing_store(p, 0, make_writable);
		}
	}
	read_unlock(&tasklist_lock);
	child->thread.flags |= IA64_THREAD_KRBS_SYNCED;
}

/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
#ifdef CONFIG_SMP
	struct task_struct *fpu_owner = current;
#else
	struct task_struct *fpu_owner = ia64_get_fpu_owner();
#endif

	/* psr.mfh set means the high FP partition was modified since the last
	   save; write it back and record that thread.fph is now valid.  */
	if (task == fpu_owner && psr->mfh) {
		psr->mfh = 0;
		ia64_save_fpu(&task->thread.fph[0]);
		task->thread.flags |= IA64_THREAD_FPH_VALID;
	}
}

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
*/
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		/* fph never used before: start from a zeroed image */
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
#ifndef CONFIG_SMP
	if (ia64_get_fpu_owner() == task)
		ia64_set_fpu_owner(0);
#endif
	/* disable fph access so the task reloads from thread.fph when it runs */
	psr->dfh = 1;
}

#ifdef CONFIG_IA64_NEW_UNWIND

#include <asm/unwind.h>

/*
 * Read (write_access == 0) or write one 64-bit half of floating-point
 * register f<regnum> via the unwind info.  `hi' selects the upper (1)
 * or lower (0) 64 bits.  Returns the unwinder's status (negative on
 * failure).
 */
static int
access_fr (struct unw_frame_info *info, int regnum, int hi, unsigned long *data, int write_access)
{
	struct ia64_fpreg fpval;
	int ret;

	ret = unw_get_fr(info, regnum, &fpval);
	if (ret < 0)
		return ret;

	if (write_access) {
		fpval.u.bits[hi] = *data;
		ret = unw_set_fr(info, regnum, fpval);
	} else
		*data = fpval.u.bits[hi];
	return ret;
}

/*
 * Read or write one 64-bit word of the child's "user area" at offset
 * `addr', dispatching on which register class the offset falls into:
 * fph (f32-f127), switch_stack scratch registers, unwinder-managed
 * preserved state, pt_regs scratch state, or the debug registers.
 * Returns 0 on success, -1 for rejected/unaligned accesses; unwinder
 * helpers propagate their own return values.
 */
static int
access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data, int write_access)
{
	unsigned long *ptr, *rbs, *bspstore, ndirty, regnum;
	struct switch_stack *sw;
	struct unw_frame_info info;
	struct pt_regs *pt;

	pt = ia64_task_regs(child);
	/* switch_stack lives just above the saved kernel stack pointer */
	sw = (struct switch_stack *) (child->thread.ksp + 16);

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}

	if (addr < PT_F127 + 16) {
		/* accessing fph */
		if (write_access)
			ia64_sync_fph(child);
		else
			ia64_flush_fph(child);
		ptr = (unsigned long *) ((unsigned long) &child->thread.fph + addr);
	} else if (addr >= PT_F10 && addr < PT_F15 + 16) {
		/* scratch registers untouched by kernel (saved in switch_stack) */
		ptr = (unsigned long *) ((long) sw + addr - PT_NAT_BITS);
	} else if (addr < PT_AR_LC + 8) {
		/* preserved state: must go through the unwinder */
		unsigned long nat_bits, scratch_unat, dummy = 0;
		struct unw_frame_info info;
		char nat = 0;
		int ret;

		unw_init_from_blocked_task(&info, child);
		if (unw_unwind_to_user(&info) < 0)
			return -1;

		switch (addr) {
		      case PT_NAT_BITS:
			if (write_access) {
				nat_bits = *data;
				/* scatter the scratch-register NaT bits into ar.unat
				   format, then push r4-r7's NaT bits individually */
				scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
				if (unw_set_ar(&info, UNW_AR_UNAT, scratch_unat) < 0) {
					dprintk("ptrace: failed to set ar.unat\n");
					return -1;
				}
				for (regnum = 4; regnum <= 7; ++regnum) {
					unw_get_gr(&info, regnum, &dummy, &nat);
					unw_set_gr(&info, regnum, dummy, (nat_bits >> regnum) & 1);
				}
			} else {
				if (unw_get_ar(&info, UNW_AR_UNAT, &scratch_unat) < 0) {
					dprintk("ptrace: failed to read ar.unat\n");
					return -1;
				}
				nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
				for (regnum = 4; regnum <= 7; ++regnum) {
					unw_get_gr(&info, regnum, &dummy, &nat);
					nat_bits |= (nat != 0) << regnum;
				}
				*data = nat_bits;
			}
			return 0;

		      case PT_R4: case PT_R5: case PT_R6: case PT_R7:
			if (write_access) {
				/* read NaT bit first: */
				ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, data, &nat);
				if (ret < 0)
					return ret;
			}
			return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data, &nat,
					     write_access);

		      case PT_B1: case PT_B2: case PT_B3: case PT_B4: case PT_B5:
			return unw_access_br(&info, (addr - PT_B1)/8 + 1, data, write_access);

		      case PT_AR_EC:
			return unw_access_ar(&info, UNW_AR_EC, data, write_access);

		      case PT_AR_LC:
			return unw_access_ar(&info, UNW_AR_LC, data, write_access);

		      default:
			if (addr >= PT_F2 && addr < PT_F5 + 16)
				return access_fr(&info, (addr - PT_F2)/16 + 2, (addr & 8) != 0,
						 data, write_access);
			else if (addr >= PT_F16 && addr < PT_F31 + 16)
				return access_fr(&info, (addr - PT_F16)/16 + 16, (addr & 8) != 0,
						 data, write_access);
			else {
				dprintk("ptrace: rejecting access to register address 0x%lx\n",
					addr);
				return -1;
			}
		}
	} else if (addr < PT_F9+16) {
		/* scratch state */
		switch (addr) {
		      case PT_AR_BSP:
			if (write_access)
				/* FIXME? Account for lack of ``cover'' in the syscall case */
				return sync_kernel_register_backing_store(child, *data, 1);
			else {
				rbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
				bspstore = (unsigned long *) pt->ar_bspstore;
				ndirty = ia64_rse_num_regs(rbs, rbs + (pt->loadrs >> 19));
				/*
				 * If we're in a system call, no ``cover'' was done.  So to
				 * make things uniform, we'll add the appropriate displacement
				 * onto bsp if we're in a system call.
				 */
				if (!(pt->cr_ifs & (1UL << 63))) {
					struct unw_frame_info info;
					unsigned long cfm;

					unw_init_from_blocked_task(&info, child);
					if (unw_unwind_to_user(&info) < 0)
						return -1;
					unw_get_cfm(&info, &cfm);
					ndirty += cfm & 0x7f;
				}
				*data = (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
				return 0;
			}

		      case PT_CFM:
			/* the low 38 bits are the CFM proper */
			if (pt->cr_ifs & (1UL << 63)) {
				if (write_access)
					pt->cr_ifs = ((pt->cr_ifs & ~0x3fffffffffUL)
						      | (*data & 0x3fffffffffUL));
				else
					*data = pt->cr_ifs & 0x3fffffffffUL;
			} else {
				/* kernel was entered through a system call */
				unsigned long cfm;

				unw_init_from_blocked_task(&info, child);
				if (unw_unwind_to_user(&info) < 0)
					return -1;
				unw_get_cfm(&info, &cfm);
				if (write_access)
					/* NOTE(review): mask literal uses `U' while its
					   siblings use `UL'; harmless — the constant is
					   too wide for unsigned int and promotes to
					   unsigned long — but inconsistent.  */
					unw_set_cfm(&info, ((cfm & ~0x3fffffffffU)
							    | (*data & 0x3fffffffffUL)));
				else
					*data = cfm;
			}
			return 0;

		      case PT_CR_IPSR:
			/* only the writable/readable PSR fields are exposed */
			if (write_access)
				pt->cr_ipsr = ((*data & IPSR_WRITE_MASK)
					       | (pt->cr_ipsr & ~IPSR_WRITE_MASK));
			else
				*data = (pt->cr_ipsr & IPSR_READ_MASK);
			return 0;

		      case PT_R1:  case PT_R2:  case PT_R3:
		      case PT_R8:  case PT_R9:  case PT_R10: case PT_R11:
		      case PT_R12: case PT_R13: case PT_R14: case PT_R15:
		      case PT_R16: case PT_R17: case PT_R18: case PT_R19:
		      case PT_R20: case PT_R21: case PT_R22: case PT_R23:
		      case PT_R24: case PT_R25: case PT_R26: case PT_R27:
		      case PT_R28: case PT_R29: case PT_R30: case PT_R31:
		      case PT_B0:  case PT_B6:  case PT_B7:
		      case PT_F6:  case PT_F6+8: case PT_F7: case PT_F7+8:
		      case PT_F8:  case PT_F8+8: case PT_F9: case PT_F9+8:
		      case PT_AR_BSPSTORE:
		      case PT_AR_RSC: case PT_AR_UNAT: case PT_AR_PFS: case PT_AR_RNAT:
		      case PT_AR_CCV: case PT_AR_FPSR: case PT_CR_IIP: case PT_PR:
			/* scratch register: lives directly in pt_regs */
			ptr = (unsigned long *) ((long) pt + addr - PT_CR_IPSR);
			break;

		      default:
			/* disallow accessing anything else... */
			dprintk("ptrace: rejecting access to register address 0x%lx\n",
				addr);
			return -1;
		}
	} else {
		/* access debug registers */
		if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
			/* first touch: initialize the debug registers to zero */
			child->thread.flags |= IA64_THREAD_DBG_VALID;
			memset(child->thread.dbr, 0, sizeof(child->thread.dbr));
			memset(child->thread.ibr, 0, sizeof( child->thread.ibr));
		}
		if (addr >= PT_IBR) {
			regnum = (addr - PT_IBR) >> 3;
			ptr = &child->thread.ibr[0];
		} else {
			regnum = (addr - PT_DBR) >> 3;
			ptr = &child->thread.dbr[0];
		}
		if (regnum >= 8) {
			dprintk("ptrace: rejecting access to register address 0x%lx\n", addr);
			return -1;
		}
		ptr += regnum;
		if (write_access)
			/* don't let the user set kernel-level breakpoints... */
			*ptr = *data & ~(7UL << 56);
		else
			*data = *ptr;
		return 0;
	}
	/* fall-through cases that set `ptr' above: plain memory access */
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

#else /* !CONFIG_IA64_NEW_UNWIND */

static int
access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data, int write_access)
{
	unsigned long *ptr = NULL, *rbs, *bspstore, ndirty, regnum;
	struct switch_stack *sw;
	struct pt_regs *pt;

	if ((addr & 0x7) != 0)

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -