
fault_64.c

Linux kernel source code
Language: C
Page 1 of 2
	mm = tsk->mm;
	prefetchw(&mm->mmap_sem);

	/* get the address */
	address = read_cr2();

	info.si_code = SEGV_MAPERR;


	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(address >= TASK_SIZE64)) {
		/*
		 * Don't check for the module range here: its PML4
		 * is always initialized because it's shared with the main
		 * kernel text. Only vmalloc may need PML4 syncups.
		 */
		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
		      ((address >= VMALLOC_START && address < VMALLOC_END))) {
			if (vmalloc_fault(address) >= 0)
				return;
		}
		if (notify_page_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}

	if (notify_page_fault(regs))
		return;

	if (likely(regs->eflags & X86_EFLAGS_IF))
		local_irq_enable();

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(address, regs, error_code);

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (unlikely(in_atomic() || !mm))
		goto bad_area_nosemaphore;

	/*
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet.
	 */
	if (user_mode_vm(regs))
		error_code |= PF_USER;

 again:
	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->rip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & 4) {
		/* Allow userspace just enough access below the stack pointer
		 * to let the 'enter' instruction work.
		 */
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & (PF_PROT|PF_WRITE)) {
		default:	/* 3: write, present */
			/* fall through */
		case PF_WRITE:		/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
			write++;
			break;
		case PF_PROT:		/* read, present */
			goto bad_area;
		case 0:			/* read, not present */
			if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
				goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {

		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		if (is_prefetch(regs, address, error_code))
			return;

		/* Work around K8 erratum #100 K8 in compat mode
		   occasionally jumps to illegal addresses >4GB.  We
		   catch this here in the page fault handler because
		   these addresses are not reachable. Just detect this
		   case and return.  Any code segment in LDT is
		   compatibility mode. */
		if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
		    (address >> 32))
			return;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk(
		       "%s%s[%d]: segfault at %lx rip %lx rsp %lx error %lx\n",
					tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
					tsk->comm, tsk->pid, address, regs->rip,
					regs->rsp, error_code);
		}

		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:

	/* Are we prepared to handle this kernel fault?  */
	fixup = search_exception_tables(regs->rip);
	if (fixup) {
		regs->rip = fixup->fixup;
		return;
	}

	/*
	 * Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, address, error_code))
		return;

	if (is_errata93(regs, address))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	flags = oops_begin();

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at %016lx RIP: \n" KERN_ALERT,address);
	printk_address(regs->rip);
	dump_pagetable(address);
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	__die("Oops", regs, error_code);
	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);
	oops_end(flags);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(current)) {
		yield();
		goto again;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & 4)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & PF_USER))
		goto no_context;

	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGBUS, &info, tsk);
	return;
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

void vmalloc_sync_all(void)
{
	/* Note that races in the updates of insync and start aren't
	   problematic:
	   insync can only get set bits added, and updates to start are only
	   improving performance (without affecting correctness if undone). */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = VMALLOC_START & PGDIR_MASK;
	unsigned long address;

	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			const pgd_t *pgd_ref = pgd_offset_k(address);
			struct page *page;

			if (pgd_none(*pgd_ref))
				continue;
			spin_lock(&pgd_lock);
			list_for_each_entry(page, &pgd_list, lru) {
				pgd_t *pgd;
				pgd = (pgd_t *)page_address(page) + pgd_index(address);
				if (pgd_none(*pgd))
					set_pgd(pgd, *pgd_ref);
				else
					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
			}
			spin_unlock(&pgd_lock);
			set_bit(pgd_index(address), insync);
		}
		if (address == start)
			start = address + PGDIR_SIZE;
	}
	/* Check that there is no need to do the same for the modules area. */
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
			(__START_KERNEL & PGDIR_MASK)));
}
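
Note on the error_code tests above: the PF_* masks decode the hardware error code that the CPU pushes for a page fault (vector 14). The bit values used below are the standard x86 definitions; the macro names mirror the ones used by do_page_fault(), which are defined near the top of fault_64.c outside this excerpt, so treat this standalone decoder as an illustrative sketch rather than a copy of the kernel's own definitions.

#include <stdio.h>

/* Architectural x86 page-fault error-code bits (names assumed to match the
 * PF_* masks used by the handler above). */
#define PF_PROT		(1UL << 0)	/* 0: page not present, 1: protection violation */
#define PF_WRITE	(1UL << 1)	/* 0: read access,      1: write access */
#define PF_USER		(1UL << 2)	/* 0: kernel mode,      1: user mode */
#define PF_RSVD		(1UL << 3)	/* reserved bit set in a paging-structure entry */
#define PF_INSTR	(1UL << 4)	/* fault caused by an instruction fetch */

static void decode(unsigned long error_code)
{
	printf("error %#lx: %s, %s, %s mode%s%s\n", error_code,
	       (error_code & PF_PROT)  ? "protection violation" : "page not present",
	       (error_code & PF_WRITE) ? "write" : "read",
	       (error_code & PF_USER)  ? "user" : "kernel",
	       (error_code & PF_RSVD)  ? ", reserved bit set" : "",
	       (error_code & PF_INSTR) ? ", instruction fetch" : "");
}

int main(void)
{
	decode(0x6);	/* user-mode write to a not-present page: the case PF_WRITE branch */
	decode(0x4);	/* user-mode read of a not-present page:  the case 0 branch */
	decode(0x0);	/* kernel-mode read of a not-present page: vmalloc_fault() candidate */
	return 0;
}

Read this way, the switch (error_code & (PF_PROT|PF_WRITE)) in good_area separates writes to not-present pages, protection faults on present pages, and plain reads of not-present pages, while the PF_USER and PF_RSVD bits steer the earlier checks in the handler.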

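On the user-mode path, bad_area_nosemaphore fills a siginfo with SEGV_MAPERR (no VMA covers the address) or SEGV_ACCERR (a VMA exists but lacks the required permission) and delivers SIGSEGV via force_sig_info(). The following minimal user-space sketch, assuming an ordinary Linux/glibc build environment and not taken from the kernel tree, shows what arrives on the receiving side. Because it installs a handler, unhandled_signal() is false and the ratelimited "segfault at ... rip ... rsp ... error ..." line above is not logged; the same fault without a handler would produce it (when show_unhandled_signals is enabled).

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Print the fault address and si_code that do_page_fault() placed in siginfo. */
static void segv_handler(int sig, siginfo_t *si, void *ctx)
{
	const char *code =
		si->si_code == SEGV_MAPERR ? "SEGV_MAPERR (no mapping)" :
		si->si_code == SEGV_ACCERR ? "SEGV_ACCERR (permission)" : "other";

	(void)sig;
	(void)ctx;
	/* printf() is not async-signal-safe; acceptable for a throwaway demo. */
	printf("SIGSEGV at %p, si_code = %s\n", si->si_addr, code);
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	/* User-mode write to an unmapped page: find_vma() fails in the handler
	 * above, so it reaches bad_area with si_code still SEGV_MAPERR. */
	*(volatile int *)0x10 = 42;
	return 0;
}

Building with gcc -Wall and running it should print something like "SIGSEGV at 0x10, si_code = SEGV_MAPERR (no mapping)", matching the si_addr and si_code assignments in the handler above.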