⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 process-linux-xen.c

📁 xen 3.2.2 源码
💻 C
📖 第 1 页 / 共 2 页
字号:
/* NOTE(review): this span begins mid-function — it is the tail of copy_thread(),
 * whose signature lies above the visible region.  Code below is unchanged; only
 * the line structure (lost in extraction) and comments have been restored. */
#endif
	stack = ((struct switch_stack *) regs) - 1;
	child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
	child_stack = (struct switch_stack *) child_ptregs - 1;

	/* copy parent's switch_stack & pt_regs to child: */
	memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));

	rbs = (unsigned long) current + IA64_RBS_OFFSET;
	child_rbs = (unsigned long) p + IA64_RBS_OFFSET;
	rbs_size = stack->ar_bspstore - rbs;

	/* copy the parent's register backing store to the child: */
	memcpy((void *) child_rbs, (void *) rbs, rbs_size);

	if (likely(user_mode(child_ptregs))) {
		/* user-level child: set up TLS and (optionally) its user stack */
		if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs))
			child_ptregs->r13 = regs->r16;	/* see sys_clone2() in entry.S */
		if (user_stack_base) {
			child_ptregs->r12 = user_stack_base + user_stack_size - 16;
			child_ptregs->ar_bspstore = user_stack_base;
			child_ptregs->ar_rnat = 0;
			child_ptregs->loadrs = 0;
		}
	} else {
		/*
		 * Note: we simply preserve the relative position of
		 * the stack pointer here.  There is no need to
		 * allocate a scratch area here, since that will have
		 * been taken care of by the caller of sys_clone()
		 * already.
		 */
		child_ptregs->r12 = (unsigned long) child_ptregs - 16; /* kernel sp */
		child_ptregs->r13 = (unsigned long) p;	/* set `current' pointer */
	}
	child_stack->ar_bspstore = child_rbs + rbs_size;
	if (IS_IA32_PROCESS(regs))
		child_stack->b0 = (unsigned long) &ia32_ret_from_clone;
	else
		child_stack->b0 = (unsigned long) &ia64_ret_from_clone;

	/* copy parts of thread_struct: */
	p->thread.ksp = (unsigned long) child_stack - 16;

	/* stop some PSR bits from being inherited.
	 * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
	 * therefore we must specify them explicitly here and not include them in
	 * IA64_PSR_BITS_TO_CLEAR.
	 */
	child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
				 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));

	/*
	 * NOTE: The calling convention considers all floating point
	 * registers in the high partition (fph) to be scratch.  Since
	 * the only way to get to this point is through a system call,
	 * we know that the values in fph are all dead.  Hence, there
	 * is no need to inherit the fph state from the parent to the
	 * child and all we have to do is to make sure that
	 * IA64_THREAD_FPH_VALID is cleared in the child.
	 *
	 * XXX We could push this optimization a bit further by
	 * clearing IA64_THREAD_FPH_VALID on ANY system call.
	 * However, it's not clear this is worth doing.  Also, it
	 * would be a slight deviation from the normal Linux system
	 * call behavior where scratch registers are preserved across
	 * system calls (unless used by the system call itself).
	 */
#	define THREAD_FLAGS_TO_CLEAR	(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID \
					 | IA64_THREAD_PM_VALID)
#	define THREAD_FLAGS_TO_SET	0
	p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
			   | THREAD_FLAGS_TO_SET);
	ia64_drop_fpu(p);	/* don't pick up stale state from a CPU's fph */

#ifdef CONFIG_IA32_SUPPORT
	/*
	 * If we're cloning an IA32 task then save the IA32 extra
	 * state from the current task to the new task
	 */
	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
		ia32_save_state(p);
		if (clone_flags & CLONE_SETTLS)
			retval = ia32_clone_tls(p, child_ptregs);

		/* Copy partially mapped page list */
		if (!retval)
			retval = ia32_copy_partial_page_list(p, clone_flags);
	}
#endif

#ifdef CONFIG_PERFMON
	if (current->thread.pfm_context)
		pfm_inherit(p, child_ptregs);
#endif
	return retval;
}
#endif /* !XEN */

/*
 * Unwind to the task's user-level frame and capture its general registers
 * (and related ar/br/predicate state) into the elf_gregset_t pointed to by
 * `arg', in the coredump layout documented below.  Returns early (leaving
 * the destination zeroed) if the unwind cannot reach user level.
 */
static void
do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
	unsigned long mask, sp, nat_bits = 0, ip, ar_rnat, urbs_end, cfm;
	elf_greg_t *dst = arg;
	struct pt_regs *pt;
	char nat;
	int i;

	memset(dst, 0, sizeof(elf_gregset_t));	/* don't leak any kernel bits to user-level */

	if (unw_unwind_to_user(info) < 0)
		return;

	unw_get_sp(info, &sp);
	pt = (struct pt_regs *) (sp + 16);

#ifndef XEN
	/* FIXME: Is this needed by XEN when it makes its crash notes
	 * during kdump? */
	urbs_end = ia64_get_user_rbs_end(task, pt, &cfm);

	if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0)
		return;

	ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end),
		  &ar_rnat);
#else
	/* NOTE(review): in this XEN branch `urbs_end' and `cfm' are read below
	 * without ever being assigned (the !XEN branch sets them via
	 * ia64_get_user_rbs_end()).  Looks like a latent uninitialized-use —
	 * confirm against how Xen builds/uses this path for crash notes. */
	ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((u64 *) urbs_end),
		  (long *)&ar_rnat);
#endif

	/*
	 * coredump format:
	 *	r0-r31
	 *	NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *	predicate registers (p0-p63)
	 *	b0-b7
	 *	ip cfm user-mask
	 *	ar.rsc ar.bsp ar.bspstore ar.rnat
	 *	ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* r0 is zero */
	for (i = 1, mask = (1UL << i); i < 32; ++i) {
		unw_get_gr(info, i, &dst[i], &nat);
		if (nat)
			nat_bits |= mask;
		mask <<= 1;
	}
	dst[32] = nat_bits;
	unw_get_pr(info, &dst[33]);

	for (i = 0; i < 8; ++i)
		unw_get_br(info, i, &dst[34 + i]);

	unw_get_rp(info, &ip);
	dst[42] = ip + ia64_psr(pt)->ri;	/* slot number folded into the IP */
	dst[43] = cfm;
	dst[44] = pt->cr_ipsr & IA64_PSR_UM;

	unw_get_ar(info, UNW_AR_RSC, &dst[45]);
	/*
	 * For bsp and bspstore, unw_get_ar() would return the kernel
	 * addresses, but we need the user-level addresses instead:
	 */
	dst[46] = urbs_end;	/* note: by convention PT_AR_BSP points to the end of the urbs! */
	dst[47] = pt->ar_bspstore;
	dst[48] = ar_rnat;
	unw_get_ar(info, UNW_AR_CCV, &dst[49]);
	unw_get_ar(info, UNW_AR_UNAT, &dst[50]);
	unw_get_ar(info, UNW_AR_FPSR, &dst[51]);
	dst[52] = pt->ar_pfs;	/* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */
	unw_get_ar(info, UNW_AR_LC, &dst[53]);
	unw_get_ar(info, UNW_AR_EC, &dst[54]);
	unw_get_ar(info, UNW_AR_CSD, &dst[55]);
	unw_get_ar(info, UNW_AR_SSD, &dst[56]);
}

#ifndef XEN
/*
 * Capture the task's floating-point registers f2-f31 (and, when valid, the
 * high partition f32-f127 from thread.fph) into the elf_fpregset_t at `arg'.
 * f0/f1 are architectural constants (0.0/1.0) and are left zeroed here.
 */
void
do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
	elf_fpreg_t *dst = arg;
	int i;

	memset(dst, 0, sizeof(elf_fpregset_t));	/* don't leak any "random" bits */

	if (unw_unwind_to_user(info) < 0)
		return;

	/* f0 is 0.0, f1 is 1.0 */
	for (i = 2; i < 32; ++i)
		unw_get_fr(info, i, dst + i);

	ia64_flush_fph(task);
	if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0)
		memcpy(dst + 32, task->thread.fph, 96*16);	/* f32-f127: 96 regs, 16 bytes each */
}
#endif /* !XEN */

/* unw_init_running() callback: dump the *current* task's registers. */
void
do_copy_regs (struct unw_frame_info *info, void *arg)
{
	do_copy_task_regs(current, info, arg);
}

#ifndef XEN
/* unw_init_running() callback: dump the *current* task's FP registers. */
void
do_dump_fpu (struct unw_frame_info *info, void *arg)
{
	do_dump_task_fpu(current, info, arg);
}

/*
 * Fill `regs' with `task's general registers for a core dump.  For the
 * current task we must unwind the live frame via unw_init_running();
 * any other task is assumed blocked (see get_wchan() note below).
 * Always reports success (returns 1).
 */
int
dump_task_regs(struct task_struct *task, elf_gregset_t *regs)
{
	struct unw_frame_info tcore_info;

	if (current == task) {
		unw_init_running(do_copy_regs, regs);
	} else {
		memset(&tcore_info, 0, sizeof(tcore_info));
		unw_init_from_blocked_task(&tcore_info, task);
		do_copy_task_regs(task, &tcore_info, regs);
	}
	return 1;
}
#endif /* !XEN */

/* ELF core-dump hook: capture the current task's registers into `dst'.
 * The `pt' argument is unused; the state comes from a live unwind. */
void
ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
{
	unw_init_running(do_copy_regs, dst);
}

#ifndef XEN
/* Same split as dump_task_regs(), but for floating-point state. */
int
dump_task_fpu (struct task_struct *task, elf_fpregset_t *dst)
{
	struct unw_frame_info tcore_info;

	if (current == task) {
		unw_init_running(do_dump_fpu, dst);
	} else {
		memset(&tcore_info, 0, sizeof(tcore_info));
		unw_init_from_blocked_task(&tcore_info, task);
		do_dump_task_fpu(task, &tcore_info, dst);
	}
	return 1;
}

int
dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
{
	unw_init_running(do_dump_fpu, dst);
	return 1;	/* f0-f31 are always valid so we always return 1 */
}

/*
 * execve() system call: copy the filename in from user space, run
 * do_execve() on it, and release the kernel copy of the name.
 */
long
sys_execve (char __user *filename, char __user * __user *argv, char __user * __user *envp,
	    struct pt_regs *regs)
{
	char *fname;
	int error;

	fname = getname(filename);
	error = PTR_ERR(fname);
	if (IS_ERR(fname))
		goto out;
	error = do_execve(fname, argv, envp, regs);
	putname(fname);
out:
	return error;
}

/*
 * Create a kernel thread running fn(arg).  Builds a synthetic
 * switch_stack + pt_regs pair that makes the child resume in
 * start_kernel_thread (whose function descriptor supplies IP and GP),
 * then hands it to do_fork() with CLONE_VM | CLONE_UNTRACED forced on.
 */
pid_t
kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
{
	extern void start_kernel_thread (void);
	/* IA-64 function pointers are descriptors: [0] = entry IP, [1] = GP */
	unsigned long *helper_fptr = (unsigned long *) &start_kernel_thread;
	struct {
		struct switch_stack sw;
		struct pt_regs pt;
	} regs;

	memset(&regs, 0, sizeof(regs));
	regs.pt.cr_iip = helper_fptr[0];	/* set entry point (IP) */
	regs.pt.r1 = helper_fptr[1];		/* set GP */
	regs.pt.r9 = (unsigned long) fn;	/* 1st argument */
	regs.pt.r11 = (unsigned long) arg;	/* 2nd argument */
	/* Preserve PSR bits, except for bits 32-34 and 37-45, which we can't read.  */
	regs.pt.cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
	regs.pt.cr_ifs = 1UL << 63;		/* mark as valid, empty frame */
	regs.sw.ar_fpsr = regs.pt.ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR);
	regs.sw.ar_bspstore = (unsigned long) current + IA64_RBS_OFFSET;
	regs.sw.pr = (1 << PRED_KERNEL_STACK);
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs.pt, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

/* This gets called from kernel_thread() via ia64_invoke_thread_helper().  */
int
kernel_thread_helper (int (*fn)(void *), void *arg)
{
#ifdef CONFIG_IA32_SUPPORT
	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
		/* A kernel thread is always a 64-bit process. */
		current->thread.map_base  = DEFAULT_MAP_BASE;
		current->thread.task_size = DEFAULT_TASK_SIZE;
		ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
		ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
	}
#endif
	return (*fn)(arg);
}

/*
 * Flush thread state.  This is called when a thread does an execve().
 */
void
flush_thread (void)
{
	/*
	 * Remove function-return probe instances associated with this task
	 * and put them back on the free list. Do not insert an exit probe for
	 * this function, it will be disabled by kprobe_flush_task if you do.
	 */
	kprobe_flush_task(current);

	/* drop floating-point and debug-register state if it exists: */
	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
	ia64_drop_fpu(current);
	if (IS_IA32_PROCESS(ia64_task_regs(current)))
		ia32_drop_partial_page_list(current);
}

/*
 * Clean up state associated with current thread.  This is called when
 * the thread calls exit().
 */
void
exit_thread (void)
{
	/*
	 * Remove function-return probe instances associated with this task
	 * and put them back on the free list. Do not insert an exit probe for
	 * this function, it will be disabled by kprobe_flush_task if you do.
	 */
	kprobe_flush_task(current);

	ia64_drop_fpu(current);
#ifdef CONFIG_PERFMON
       /* if needed, stop monitoring and flush state to perfmon context */
	if (current->thread.pfm_context)
		pfm_exit_thread(current);

	/* free debug register resources */
	if (current->thread.flags & IA64_THREAD_DBG_VALID)
		pfm_release_debug_registers(current);
#endif
	if (IS_IA32_PROCESS(ia64_task_regs(current)))
		ia32_drop_partial_page_list(current);
}

/*
 * Return the first IP in `p's call chain that is not a scheduler-internal
 * function ("where channel"), or 0 if the unwind fails or bottoms out
 * within 16 frames.
 */
unsigned long
get_wchan (struct task_struct *p)
{
	struct unw_frame_info info;
	unsigned long ip;
	int count = 0;

	/*
	 * Note: p may not be a blocked task (it could be current or
	 * another process running on some other CPU.  Rather than
	 * trying to determine if p is really blocked, we just assume
	 * it's blocked and rely on the unwind routines to fail
	 * gracefully if the process wasn't really blocked after all.
	 * --davidm 99/12/15
	 */
	unw_init_from_blocked_task(&info, p);
	do {
		if (unw_unwind(&info) < 0)
			return 0;
		unw_get_ip(&info, &ip);
		if (!in_sched_functions(ip))
			return ip;
	} while (count++ < 16);	/* bound the walk: give up after 16 frames */
	return 0;
}
#endif // !XEN

/*
 * Halt this CPU: query PAL for the available low-power halt states,
 * pick the implemented one with the lowest power consumption, and spin
 * entering it forever.  Falls straight through (returns) only if PAL
 * cannot report halt info.
 */
void
cpu_halt (void)
{
	pal_power_mgmt_info_u_t power_info[8];
	unsigned long min_power;
	int i, min_power_state;

	if (ia64_pal_halt_info(power_info) != 0)
		return;

	min_power_state = 0;
	min_power = power_info[0].pal_power_mgmt_info_s.power_consumption;
	for (i = 1; i < 8; ++i)
		if (power_info[i].pal_power_mgmt_info_s.im	/* state is implemented */
		    && power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) {
			min_power = power_info[i].pal_power_mgmt_info_s.power_consumption;
			min_power_state = i;
		}

	while (1)
		ia64_pal_halt(min_power_state);
}

#ifndef XEN
/* Warm-reset the machine through the EFI runtime service.
 * `restart_cmd' is accepted for interface compatibility but unused. */
void
machine_restart (char *restart_cmd)
{
	(*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL);
}

void
machine_halt (void)
{
	cpu_halt();
}

void
machine_power_off (void)
{
	if (pm_power_off)
		pm_power_off();
	machine_halt();
}
#endif // !XEN

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -