
process.c

Category: low-level driver development
Language: C
Page 1 of 2

/*
 *  arch/ppc/kernel/process.c
 *
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>

extern unsigned long _get_SP(void);

struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_spe = NULL;

static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);

/* this is 8kB-aligned so we can get to the thread_info struct
   at the base of it from the stack pointer with 1 integer instruction. */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };

/* initial task structure */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

/* only used to get secondary processor up */
struct task_struct *current_set[NR_CPUS] = {&init_task, };
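
/*
 * Sketch (not part of the original file): the 8kB alignment noted above is
 * what lets the kernel recover the current thread_info from the stack
 * pointer with a single mask instruction.  The real helper lives in
 * <asm/thread_info.h>; a minimal C approximation, assuming ppc32's
 * THREAD_SIZE of 8192, would be:
 *
 *	static inline struct thread_info *current_thread_info(void)
 *	{
 *		register unsigned long sp asm("r1");
 *		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
 *	}
 */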

#undef SHOW_TASK_SWITCHES
#undef CHECK_STACK

#if defined(CHECK_STACK)
unsigned long
kernel_stack_top(struct task_struct *tsk)
{
	return ((unsigned long)tsk) + sizeof(union task_union);
}

unsigned long
task_top(struct task_struct *tsk)
{
	return ((unsigned long)tsk) + sizeof(struct thread_info);
}

/* check to make sure the kernel stack is healthy */
int check_stack(struct task_struct *tsk)
{
	unsigned long stack_top = kernel_stack_top(tsk);
	unsigned long tsk_top = task_top(tsk);
	int ret = 0;

#if 0
	/* check thread magic */
	if ( tsk->thread.magic != THREAD_MAGIC )
	{
		ret |= 1;
		printk("thread.magic bad: %08x\n", tsk->thread.magic);
	}
#endif

	if ( !tsk )
		printk("check_stack(): tsk bad tsk %p\n",tsk);

	/* check if stored ksp is bad */
	if ( (tsk->thread.ksp > stack_top) || (tsk->thread.ksp < tsk_top) )
	{
		printk("stack out of bounds: %s/%d\n"
		       " tsk_top %08lx ksp %08lx stack_top %08lx\n",
		       tsk->comm,tsk->pid,
		       tsk_top, tsk->thread.ksp, stack_top);
		ret |= 2;
	}

	/* check if stack ptr RIGHT NOW is bad */
	if ( (tsk == current) && ((_get_SP() > stack_top ) || (_get_SP() < tsk_top)) )
	{
		printk("current stack ptr out of bounds: %s/%d\n"
		       " tsk_top %08lx sp %08lx stack_top %08lx\n",
		       current->comm,current->pid,
		       tsk_top, _get_SP(), stack_top);
		ret |= 4;
	}

#if 0
	/* check amount of free stack */
	for ( i = (unsigned long *)task_top(tsk) ; i < kernel_stack_top(tsk) ; i++ )
	{
		if ( !i )
			printk("check_stack(): i = %p\n", i);
		if ( *i != 0 )
		{
			/* only notify if it's less than 900 bytes */
			if ( (i - (unsigned long *)task_top(tsk))  < 900 )
				printk("%d bytes free on stack\n",
				       i - task_top(tsk));
			break;
		}
	}
#endif

	if (ret)
	{
		panic("bad kernel stack");
	}
	return(ret);
}
#endif /* defined(CHECK_STACK) */

#ifdef CONFIG_ALTIVEC
int
dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
{
	if (regs->msr & MSR_VEC)
		giveup_altivec(current);
	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
	return 1;
}

void
enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SPE
int
dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
{
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
	return 1;
}

void
enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);
#endif /* CONFIG_SPE */

void
enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
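
/*
 * Usage sketch (not in the original listing): enable_kernel_fp() and its
 * AltiVec/SPE counterparts hand the unit's register state to the kernel,
 * which is why they warn when called preemptibly.  A hypothetical caller
 * would bracket its use of the unit like this:
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	(issue floating-point instructions here)
 *	preempt_enable();
 *
 * Any live user FP state was saved into the owning task's thread_struct
 * by giveup_fpu(), and that task faults it back in on its next FP use.
 */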

int
dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
	preempt_disable();
	if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
		giveup_fpu(tsk);
	preempt_enable();
	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
	return 1;
}

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long s;
	struct task_struct *last;

	local_irq_save(s);
#ifdef CHECK_STACK
	check_stack(prev);
	check_stack(new);
#endif

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */

	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

	new_thread = &new->thread;
	old_thread = &current->thread;
	last = _switch(old_thread, new_thread);
	local_irq_restore(s);
	return last;
}
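
/*
 * Sketch of the reload path the lazy-save comment above relies on (not
 * part of this file): giveup_fpu() clears MSR_FP in the thread's saved
 * MSR, so the next FP instruction that task issues raises the 0x800
 * FP-unavailable exception.  The low-level handler (load_up_fpu in the
 * arch/ppc entry code) then, roughly:
 *
 *	loads fpr[] and fpscr from the task's thread_struct into the FPU;
 *	sets MSR_FP in the task's saved MSR so it owns the unit again;
 *	on UP, records the task in last_task_used_math for lazy switching.
 *
 * Restore cost is thus paid only by tasks that actually touch the FPU.
 */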

void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx    %s\n",
	       regs->nip, regs->link, regs->gpr[1], regs, regs->trap,
	       print_tainted());
	printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
	       regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
	       regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
	       regs->msr&MSR_IR ? 1 : 0,
	       regs->msr&MSR_DR ? 1 : 0);
	trap = TRAP(regs);
	if (trap == 0x300 || trap == 0x600)
		printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
	printk("TASK = %p[%d] '%s' THREAD: %p\n",
	       current, current->pid, current->comm, current->thread_info);
	printk("Last syscall: %ld ", current->thread.last_syscall);

#ifdef CONFIG_SMP
	printk(" CPU: %d", smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0;  i < 32;  i++) {
		long r;
		if ((i % 8) == 0)
			printk("\n" KERN_INFO "GPR%02d: ", i);
		if (__get_user(r, &regs->gpr[i]))
			break;
		printk("%08lX ", r);
		if (i == 12 && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP [%08lx] ", regs->nip);
	print_symbol("%s\n", regs->nip);
	printk("LR [%08lx] ", regs->link);
	print_symbol("%s\n", regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
}

void exit_thread(void)
{
	if (last_task_used_math == current)
		last_task_used_math = NULL;
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
}

void flush_thread(void)
{
	if (last_task_used_math == current)
		last_task_used_math = NULL;
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	struct pt_regs *regs = tsk->thread.regs;
