
📄 fault.c

📁 Linux kernel source code, provided as a compressed archive; this is the source code from the book <<Linux内核>>.
💻 C
/*
 *  linux/arch/i386/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>

extern void die(const char *,struct pt_regs *,long);

/*
 * Ugly, ugly, but the goto's result in better assembly..
 */
int __verify_write(const void * addr, unsigned long size)
{
	struct vm_area_struct * vma;
	unsigned long start = (unsigned long) addr;

	if (!size)
		return 1;

	vma = find_vma(current->mm, start);
	if (!vma)
		goto bad_area;
	if (vma->vm_start > start)
		goto check_stack;

good_area:
	if (!(vma->vm_flags & VM_WRITE))
		goto bad_area;
	/*
	 * Convert the byte range into whole pages: after these three lines,
	 * size is the number of pages the range touches beyond the first one.
	 */
	size--;
	size += start & ~PAGE_MASK;
	size >>= PAGE_SHIFT;
	start &= PAGE_MASK;

	for (;;) {
		if (handle_mm_fault(current->mm, vma, start, 1) <= 0)
			goto bad_area;
		if (!size)
			break;
		size--;
		start += PAGE_SIZE;
		if (start < vma->vm_end)
			continue;
		vma = vma->vm_next;
		if (!vma || vma->vm_start != start)
			goto bad_area;
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	}
	return 1;

check_stack:
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, start) == 0)
		goto good_area;

bad_area:
	return 0;
}

extern spinlock_t console_lock, timerlist_lock;

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out (timerlist_lock is acquired through the
 * console unblank code)
 */
void bust_spinlocks(void)
{
	spin_lock_init(&console_lock);
	spin_lock_init(&timerlist_lock);
}

asmlinkage void do_invalid_op(struct pt_regs *, unsigned long);
extern unsigned long idt;

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long address;
	unsigned long page;
	unsigned long fixup;
	int write;
	siginfo_t info;

	/* get the address */
	__asm__("movl %%cr2,%0":"=r" (address));

	tsk = current;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	mm = tsk->mm;
	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_interrupt() || !mm)
		goto no_context;

	down(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & 4) {
		/*
		 * accessing the stack below %esp is always a bug.
		 * The "+ 32" is there due to some instructions (like
		 * pusha) doing post-decrement on the stack and that
		 * doesn't show up until later..
		 */
		if (address + 32 < regs->esp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & 3) {
		default:	/* 3: write, present */
#ifdef TEST_VERIFY_AREA
			if (regs->cs == KERNEL_CS)
				printk("WP fault at %08lx\n", regs->eip);
#endif
			/* fall through */
		case 2:		/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
			write++;
			break;
		case 1:		/* read, present */
			goto bad_area;
		case 0:		/* read, not present */
			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
				goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, write)) {
	case 1:			/* counted as a minor fault */
		tsk->min_flt++;
		break;
	case 2:			/* counted as a major fault */
		tsk->maj_flt++;
		break;
	case 0:			/* could not handle the fault: raise SIGBUS */
		goto do_sigbus;
	default:		/* negative return: out of memory */
		goto out_of_memory;
	}

	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
	if (regs->eflags & VM_MASK) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
	up(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & 4) {
		tsk->thread.cr2 = address;
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = 14;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

	/*
	 * Pentium F0 0F C7 C8 bug workaround.
	 */
	if (boot_cpu_data.f00f_bug) {
		unsigned long nr;

		nr = (address - idt) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return;
		}
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if ((fixup = search_exception_table(regs->eip)) != 0) {
		regs->eip = fixup;
		return;
	}

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	bust_spinlocks();

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n",address);
	printk(" printing eip:\n");
	printk("%08lx\n", regs->eip);
	asm("movl %%cr3,%0":"=r" (page));
	page = ((unsigned long *) __va(page))[address >> 22];
	printk(KERN_ALERT "*pde = %08lx\n", page);
	if (page & 1) {
		page &= PAGE_MASK;
		address &= 0x003ff000;
		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
		printk(KERN_ALERT "*pte = %08lx\n", page);
	}
	die("Oops", regs, error_code);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up(&mm->mmap_sem);
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & 4)
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & 4))
		goto no_context;
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			set_pgd(pgd, *pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		set_pmd(pmd, *pmd_k);
		return;
	}
}
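The three lines of arithmetic at the top of __verify_write's good_area path turn a byte count into the number of pages the range touches beyond the first. Below is a stand-alone user-space sketch of the same calculation, assuming 4 KB pages as on i386; the helper name extra_pages is made up for this illustration.

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed: 4 KB pages, as on i386 */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Mirrors the arithmetic in __verify_write: how many pages beyond the
 * first does the byte range [start, start + size) touch? */
static unsigned long extra_pages(unsigned long start, unsigned long size)
{
	size--;
	size += start & ~PAGE_MASK;	/* add the offset into the first page */
	return size >> PAGE_SHIFT;
}

int main(void)
{
	/* 8192 bytes starting 16 bytes into a page straddle three pages,
	 * i.e. two pages beyond the first. */
	printf("%lu\n", extra_pages(0x1010, 8192));	/* prints 2 */
	return 0;
}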
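The comment above do_page_fault documents the three error_code bits the handler switches on. As a quick illustration, here is a minimal stand-alone sketch that decodes those bits the same way; describe_error_code is a hypothetical helper name, not part of the kernel source.

#include <stdio.h>

/* Hypothetical helper: decodes the x86 page-fault error_code bits as
 * do_page_fault interprets them (bit 0: present/protection,
 * bit 1: read/write, bit 2: kernel/user). */
static void describe_error_code(unsigned long error_code)
{
	printf("%s fault on a %s access from %s mode\n",
	       (error_code & 1) ? "protection" : "not-present",
	       (error_code & 2) ? "write" : "read",
	       (error_code & 4) ? "user" : "kernel");
}

int main(void)
{
	describe_error_code(6);	/* write to an unmapped page from user mode */
	return 0;
}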
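On the bad_area path the handler fills a siginfo_t (si_code is SEGV_MAPERR or SEGV_ACCERR, si_addr is the faulting address) and delivers SIGSEGV with force_sig_info(). The small user-space sketch below, assuming a Linux/glibc environment, installs a SA_SIGINFO handler and prints what arrives; note that printf is not async-signal-safe and is used here only for demonstration.

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Report the faulting address and si_code the kernel delivered, then
 * exit; returning from the handler would re-execute the faulting insn. */
static void segv_handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)ctx;
	printf("SIGSEGV at %p, si_code=%d (%s)\n", info->si_addr, info->si_code,
	       info->si_code == SEGV_MAPERR ? "address not mapped" :
	       info->si_code == SEGV_ACCERR ? "access not permitted" : "other");
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)0 = 1;		/* NULL write: handler reports SEGV_MAPERR */
	return 0;
}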
