process.c
/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/syscalls.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}

void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
	if (!tsk->thread.regs)
		return 0;
	flush_fp_to_thread(current);

	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));

	return 1;
}

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);
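/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * kernel code that needs the FPU or AltiVec unit is expected to claim it
 * with the enable_kernel_*() helpers above and to hold off preemption for
 * the whole section, since nothing saves the kernel's own register use
 * across a context switch.  Roughly:
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	// ... FP instructions ...
 *	preempt_enable();
 *
 * The surrounding call site is an assumption for illustration; only
 * enable_kernel_fp()/enable_kernel_altivec() come from this file.
 */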
/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}

int dump_task_altivec(struct task_struct *tsk, elf_vrregset_t *vrregs)
{
	/* ELF_NVRREG includes the VSCR and VRSAVE which we need to save
	 * separately, see below */
	const int nregs = ELF_NVRREG - 2;
	elf_vrreg_t *reg;
	u32 *dest;

	if (tsk == current)
		flush_altivec_to_thread(tsk);

	reg = (elf_vrreg_t *)vrregs;

	/* copy the 32 vr registers */
	memcpy(reg, &tsk->thread.vr[0], nregs * sizeof(*reg));
	reg += nregs;

	/* copy the vscr */
	memcpy(reg, &tsk->thread.vscr, sizeof(*reg));
	reg++;

	/* vrsave is stored in the high 32bit slot of the final 128bits */
	memset(reg, 0, sizeof(*reg));
	dest = (u32 *)reg;
	*dest = tsk->thread.vrsave;

	return 1;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}

int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
{
	flush_spe_to_thread(current);

	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);

	return 1;
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */

int set_dabr(unsigned long dabr)
{
#ifdef CONFIG_PPC_MERGE		/* XXX for now */
	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr);
#endif

	/* XXX should we have a CPU_FTR_HAS_DABR ? */
#if defined(CONFIG_PPC64) || defined(CONFIG_6xx)
	mtspr(SPRN_DABR, dabr);
#endif
	return 0;
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

static DEFINE_PER_CPU(unsigned long, current_dabr);
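/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * hardware watchpoint on writes to a doubleword-aligned address would be
 * armed through set_dabr() above with the low-order DABR control bits set,
 * e.g.:
 *
 *	set_dabr((addr & ~7UL) | DABR_TRANSLATION | DABR_DATA_WRITE);
 *
 * The DABR_* constant names are assumptions here; the bit layout (address
 * in the upper bits, translation/read/write enables in the low three bits)
 * is architectural.  __switch_to() below reloads the DABR lazily, only
 * when the incoming task's value differs from this CPU's cached copy.
 */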
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */

	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
		set_dabr(new->thread.dabr);
		__get_cpu_var(current_dabr) = new->thread.dabr;
	}

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		unsigned long start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif

	local_irq_save(flags);

	account_system_vtime(current);
	account_process_vtime(current);
	calculate_steal_time();

	last = _switch(old_thread, new_thread);

	local_irq_restore(flags);

	return last;
}
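/*
 * Note on the lazy (non-SMP) path in __switch_to() above: leaving
 * MSR_FP/MSR_VEC/MSR_SPE clear in the new task's saved MSR means its first
 * FP/vector instruction takes an "unavailable" exception, whose handler
 * restores the thread's registers and re-sets the MSR bit.  A rough sketch
 * of that handler's effect (handler and helper names are assumptions for
 * illustration):
 *
 *	// FP-unavailable exception; task's state sits in thread_struct:
 *	load_up_fpu();			// reload thread.fpr[] into the FPU
 *	regs->msr |= MSR_FP;		// run FP natively until next switch
 */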
static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk("XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk("<%08x> ", instr);
			else
				printk("%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}

static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{0,		NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
#endif
	printk("TASK = %p[%d] '%s' THREAD: %p",
	       current, task_pid_nr(current), current->comm,
	       task_thread_info(current));

#ifdef CONFIG_SMP
	printk(" CPU: %d", smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0;  i < 32;  i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\n" KERN_INFO "GPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] ", regs->nip);
	print_symbol("%s\n", regs->nip);
	printk("LR ["REG"] ", regs->link);
	print_symbol("%s\n", regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
#ifdef CONFIG_PPC64
	struct thread_info *t = current_thread_info();

	if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
		clear_ti_thread_flag(t, TIF_ABI_PENDING);
		if (test_ti_thread_flag(t, TIF_32BIT))
			clear_ti_thread_flag(t, TIF_32BIT);
		else
			set_ti_thread_flag(t, TIF_32BIT);
	}
#endif

	discard_lazy_cpu_state();

	if (current->thread.dabr) {
		current->thread.dabr = 0;
		set_dabr(0);
	}
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
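/*
 * A minimal sketch, assuming the flush helpers defined above, of what such
 * a pre-copy hook does: push any live register state back into the
 * thread_struct so the copy sees current values.  Illustrative only:
 *
 *	void prepare_to_copy(struct task_struct *tsk)
 *	{
 *		flush_fp_to_thread(current);
 *		flush_altivec_to_thread(current);
 *		flush_spe_to_thread(current);
 *	}
 */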