
fault.c

linux-2.6.15.6
C
Page 1 of 2
/*
 *  linux/arch/m32r/mm/fault.c
 *
 *  Copyright (c) 2001, 2002  Hitoshi Yamamoto, and H. Kondo
 *  Copyright (c) 2004  Naoto Sugai, NIIBE Yutaka
 *
 *  Some code taken from i386 version.
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>              /* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/m32r.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

extern void die(const char *, struct pt_regs *, long);

#ifndef CONFIG_SMP
asmlinkage unsigned int tlb_entry_i_dat;
asmlinkage unsigned int tlb_entry_d_dat;
#define tlb_entry_i tlb_entry_i_dat
#define tlb_entry_d tlb_entry_d_dat
#else
unsigned int tlb_entry_i_dat[NR_CPUS];
unsigned int tlb_entry_d_dat[NR_CPUS];
#define tlb_entry_i tlb_entry_i_dat[smp_processor_id()]
#define tlb_entry_d tlb_entry_d_dat[smp_processor_id()]
#endif

extern void init_tlb(void);

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out
 */
void bust_spinlocks(int yes)
{
        int loglevel_save = console_loglevel;

        if (yes) {
                oops_in_progress = 1;
                return;
        }
#ifdef CONFIG_VT
        unblank_screen();
#endif
        oops_in_progress = 0;
        /*
         * OK, the message is on the console.  Now we call printk()
         * without oops_in_progress set so that printk will give klogd
         * a poke.  Hold onto your hats...
         */
        console_loglevel = 15;          /* NMI oopser may have shut the console up */
        printk(" ");
        console_loglevel = loglevel_save;
}

/*======================================================================*
 * do_page_fault()
 *======================================================================*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * ARGUMENT:
 *  regs       : M32R SP reg.
 *  error_code : See below
 *  address    : M32R MMU MDEVA reg. (Operand ACE)
 *             : M32R BPC reg. (Instruction ACE)
 *
 * error_code :
 *  bit 0 == 0 means no page found, 1 means protection fault
 *  bit 1 == 0 means read, 1 means write
 *  bit 2 == 0 means kernel, 1 means user-mode
 *  bit 3 == 0 means data, 1 means instruction
 *======================================================================*/
#define ACE_PROTECTION          1
#define ACE_WRITE               2
#define ACE_USERMODE            4
#define ACE_INSTRUCTION         8

asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
  unsigned long address)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long page, addr;
        int write;
        siginfo_t info;

        /*
         * If the BPSW IE bit is enabled --> set the PSW IE bit
         */
        if (regs->psw & M32R_PSW_BIE)
                local_irq_enable();

        tsk = current;

        info.si_code = SEGV_MAPERR;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * This verifies that the fault happens in kernel space
         * (error_code & ACE_USERMODE) == 0, and that the fault was not a
         * protection error (error_code & ACE_PROTECTION) == 0.
         */
        if (address >= TASK_SIZE && !(error_code & ACE_USERMODE))
                goto vmalloc_fault;

        mm = tsk->mm;

        /*
         * If we're in an interrupt or have no user context or are running in an
         * atomic region then we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto bad_area_nosemaphore;

        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
         * erroneous fault occurring in a code path which already holds mmap_sem
         * we will deadlock attempting to validate the fault against the
         * address space.  Luckily the kernel only validly references user
         * space from well defined areas of code, which are listed in the
         * exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a deadlock.
         * Attempt to lock the address space, if we cannot we then validate the
         * source.  If this is invalid we can skip the address space check,
         * thus avoiding the deadlock.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if ((error_code & ACE_USERMODE) == 0 &&
                    !search_exception_tables(regs->psw))
                        goto bad_area_nosemaphore;
                down_read(&mm->mmap_sem);
        }

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
#if 0
        if (error_code & ACE_USERMODE) {
                /*
                 * accessing the stack below "spu" is always a bug.
                 * The "+ 4" is there due to the push instruction
                 * doing pre-decrement on the stack and that
                 * doesn't show up until later..
                 */
                if (address + 4 < regs->spu)
                        goto bad_area;
        }
#endif
        if (expand_stack(vma, address))
                goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        info.si_code = SEGV_ACCERR;
        write = 0;
        switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
                default:        /* 3: write, present */
                        /* fall through */
                case ACE_WRITE: /* write, not present */
                        if (!(vma->vm_flags & VM_WRITE))
                                goto bad_area;
                        write++;
                        break;
                case ACE_PROTECTION:    /* read, present */
                case 0:         /* read, not present */
                        if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                                goto bad_area;
        }

        /*
         * For instruction access exception, check if the area is executable
         */
        if ((error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC))
                goto bad_area;

survive:
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        addr = (address & PAGE_MASK);
        set_thread_fault_code(error_code);
        switch (handle_mm_fault(mm, vma, addr, write)) {
                case VM_FAULT_MINOR:
                        tsk->min_flt++;
                        break;
                case VM_FAULT_MAJOR:
                        tsk->maj_flt++;
                        break;
                case VM_FAULT_SIGBUS:
                        goto do_sigbus;
                case VM_FAULT_OOM:
                        goto out_of_memory;
                default:
                        BUG();
        }
        set_thread_fault_code(0);
        up_read(&mm->mmap_sem);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (error_code & ACE_USERMODE) {
                tsk->thread.address = address;
                tsk->thread.error_code = error_code | (address >= TASK_SIZE);
                tsk->thread.trap_no = 14;
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
                info.si_addr = (void __user *)address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault?
         */
        if (fixup_exception(regs))
                return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n",address);
        printk(KERN_ALERT " printing bpc:\n");
        printk("%08lx\n", regs->bpc);
        page = *(unsigned long *)MPTB;
        page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
        printk(KERN_ALERT "*pde = %08lx\n", page);
        if (page & _PAGE_PRESENT) {
                page &= PAGE_MASK;
                address &= 0x003ff000;
                page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
                printk(KERN_ALERT "*pte = %08lx\n", page);
        }
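
The error_code bit layout documented in the do_page_fault() header comment drives every branch of the handler. As a quick illustration only, here is a minimal user-space sketch, not part of the kernel source, that decodes a fault code using the same ACE_* mask values from the listing (decode_ace() is a hypothetical helper):

#include <stdio.h>

/* Same mask values as in the listing above. */
#define ACE_PROTECTION   1   /* bit 0: 0 = no page found, 1 = protection fault */
#define ACE_WRITE        2   /* bit 1: 0 = read,          1 = write            */
#define ACE_USERMODE     4   /* bit 2: 0 = kernel,        1 = user mode        */
#define ACE_INSTRUCTION  8   /* bit 3: 0 = data,          1 = instruction      */

/* Hypothetical helper: print a human-readable decoding of error_code. */
static void decode_ace(unsigned long error_code)
{
        printf("%s %s access from %s mode: %s\n",
               (error_code & ACE_WRITE)       ? "write" : "read",
               (error_code & ACE_INSTRUCTION) ? "instruction" : "data",
               (error_code & ACE_USERMODE)    ? "user" : "kernel",
               (error_code & ACE_PROTECTION)  ? "protection fault"
                                              : "page not present");
}

int main(void)
{
        /* ACE_WRITE | ACE_USERMODE: a user-mode write to a missing page,
         * the common case that ends up in handle_mm_fault(). */
        decode_ace(ACE_WRITE | ACE_USERMODE);
        return 0;
}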
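
The down_read_trylock() sequence is the subtle part of the handler: a kernel-mode fault raised from code that already holds mmap_sem would self-deadlock on a plain down_read(), so the handler first tries the lock non-blocking and only blocks after vetting the faulting code against the exception tables. Below is a rough user-space analogue of that pattern, sketched with POSIX rwlocks purely for illustration; lock_address_space() and validate_source() are hypothetical stand-ins, not kernel APIs, and the analogy is loose since reader locks behave differently from the kernel's mmap_sem:

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Hypothetical stand-in for search_exception_tables(): returns true
 * if the faulting code is known to touch user memory safely. */
static bool validate_source(void)
{
        return false;   /* stub: pretend the source is never whitelisted */
}

/* Mirrors the kernel's pattern: try the cheap non-blocking lock first;
 * only if that fails, vet the source, then block on the lock. */
static bool lock_address_space(bool user_mode)
{
        if (pthread_rwlock_tryrdlock(&map_lock) != 0) {
                /* Lock is contended.  A kernel-mode fault from unvetted
                 * code could mean we hold the lock ourselves, so bail
                 * out instead of risking a deadlock. */
                if (!user_mode && !validate_source())
                        return false;
                pthread_rwlock_rdlock(&map_lock);
        }
        return true;
}

int main(void)
{
        if (lock_address_space(true)) {
                /* ... fault handling would happen here ... */
                pthread_rwlock_unlock(&map_lock);
        }
        return 0;
}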
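
The pde/pte dump at the end of the listing walks a classic two-level 32-bit page table: the top bits of the address index the page directory (address >> PGDIR_SHIFT), and the mask 0x003ff000 keeps the 10 bits that index the page table. A standalone sketch of the same address split follows; the shift values are assumed from the masks in the listing (PGDIR_SHIFT = 22, PAGE_SHIFT = 12), not taken from the M32R headers:

#include <stdio.h>

/* Assumed from the listing: a 10-bit directory index, a 10-bit table
 * index (mask 0x003ff000), and a 12-bit page offset. */
#define PGDIR_SHIFT 22
#define PAGE_SHIFT  12

int main(void)
{
        unsigned long address = 0x12345678UL;

        unsigned long pgd_index = address >> PGDIR_SHIFT;
        unsigned long pte_index = (address & 0x003ff000UL) >> PAGE_SHIFT;
        unsigned long offset    = address & 0x00000fffUL;

        printf("pgd index = %lu, pte index = %lu, offset = 0x%03lx\n",
               pgd_index, pte_index, offset);
        return 0;
}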
