fault.c

来自「linux 内核源代码」· C语言 代码 · 共 603 行 · 第 1/2 页

C
603
字号
/*
 * NOTE(review): the lines below are the tail of a page-fault handler
 * (do_page_fault-style) whose head, including the `survive:` and
 * `no_context:` labels and the `address`/`writeaccess`/`mm`/`tsk`
 * variables, lies outside this chunk — confirm against the full file.
 */

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	/* pc is 64-bit here; print it as two 32-bit halves. */
	printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	if (is_global_init(current)) {
		/*
		 * NOTE(review): panic() does not return, so the yield() and
		 * goto below are dead code, and the second is_global_init()
		 * retry block further down is unreachable for init. Looks
		 * like a partially applied upstream change — verify intent.
		 */
		panic("INIT out of memory\n");
		yield();
		goto survive;
	}
	printk("fault:Out of memory\n");
	/* Drop mmap_sem before sleeping/killing; it was taken by the fault path. */
	up_read(&mm->mmap_sem);
	if (is_global_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	printk("fault:Do sigbus\n");
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
	tsk->thread.trap_no = 14;	/* trap 14: page fault */
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

/* Forward declaration; definition appears later in this file. */
void flush_tlb_all(void);

/*
 * update_mmu_cache - arch hook called after a PTE is established.
 *
 * Deliberately a no-op on this port (aside from the optional profiling
 * counter); see the rationale in the comment body below.
 */
void update_mmu_cache(struct vm_area_struct * vma,
			unsigned long address, pte_t pte)
{
#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_update_mmu_cache;
#endif

	/*
	 * This appears to get called once for every pte entry that gets
	 * established => I don't think it's efficient to try refilling the
	 * TLBs with the pages - some may not get accessed even.  Also, for
	 * executable pages, it is impossible to determine reliably here which
	 * TLB they should be mapped into (or both even).
	 *
	 * So, just do nothing here and handle faults on demand.  In the
	 * TLBMISS handling case, the refill is now done anyway after the pte
	 * has been fixed up, so that deals with most useful cases.
	 */
}

/*
 * __flush_tlb_page - invalidate any ITLB/DTLB slot matching (ASID, page).
 *
 * Scans the hardware TLB entries with `getcfg`, comparing each PTEH word
 * against a match value built from the mm's ASID, the valid bit and the
 * sign-extended page address. Stops at the first hit in each TLB since a
 * page can occupy at most one slot per TLB.
 * Caller must disable interrupts (see flush_tlb_page below).
 */
static void __flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long long match, pteh=0, lpage;
	unsigned long tlb;
	struct mm_struct *mm;

	mm = vma->vm_mm;

	/* No ASID assigned yet => nothing of this mm can be in the TLB. */
	if (mm->context == NO_CONTEXT)
		return;

	/*
	 * Sign-extend based on neff.
	 */
	lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page;
	match = ((mm->context & MMU_CONTEXT_ASID_MASK) << PTEH_ASID_SHIFT) | PTEH_VALID;
	match |= lpage;

	/* Do ITLB : don't bother for pages in non-executable VMAs */
	if (vma->vm_flags & VM_EXEC) {
		for_each_itlb_entry(tlb) {
			/* Read the PTEH config word of this TLB slot. */
			asm volatile ("getcfg	%1, 0, %0"
				      : "=r" (pteh)
				      : "r" (tlb) );

			if (pteh == match) {
				__flush_tlb_slot(tlb);
				break;
			}
		}
	}

	/* Do DTLB : any page could potentially be in here. */
	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}
}

/*
 * flush_tlb_page - public single-page flush entry point.
 *
 * Page-aligns the address and runs __flush_tlb_page with interrupts
 * disabled so the TLB scan cannot race an interrupt-driven refill.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long flags;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_flush_tlb_page;
#endif

	if (vma->vm_mm) {
		page &= PAGE_MASK;
		local_irq_save(flags);
		__flush_tlb_page(vma, page);
		local_irq_restore(flags);
	}
}

/*
 * flush_tlb_range - invalidate all ITLB/DTLB entries of this mm whose
 * page address falls within [start, end].
 *
 * Matches on (ASID | valid) in the low PTEH bits, then range-checks the
 * effective page number. Unlike __flush_tlb_page it does not break on a
 * hit, since many entries can fall inside the range.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long flags;
	unsigned long long match, pteh=0, pteh_epn, pteh_low;
	unsigned long tlb;
	struct mm_struct *mm;

	mm = vma->vm_mm;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_flush_tlb_range;
	/* Profiling only: histogram the flushed range size in pages. */
	{
		unsigned long size = (end - 1) - start;
		size >>= 12; /* divide by PAGE_SIZE */
		size++; /* end=start+4096 => 1 page */
		switch (size) {
		  case  1        : flush_tlb_range_1++;     break;
		  case  2        : flush_tlb_range_2++;     break;
		  case  3 ...  4 : flush_tlb_range_3_4++;   break;
		  case  5 ...  7 : flush_tlb_range_5_7++;   break;
		  case  8 ... 11 : flush_tlb_range_8_11++;  break;
		  case 12 ... 15 : flush_tlb_range_12_15++; break;
		  default        : flush_tlb_range_16_up++; break;
		}
	}
#endif

	if (mm->context == NO_CONTEXT)
		return;

	local_irq_save(flags);

	start &= PAGE_MASK;
	end &= PAGE_MASK;

	match = ((mm->context & MMU_CONTEXT_ASID_MASK) << PTEH_ASID_SHIFT) | PTEH_VALID;

	/* Flush ITLB */
	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		/*
		 * NOTE(review): `<= end` after masking end down to a page
		 * boundary makes the check inclusive of the page at `end`,
		 * even though flush_tlb_range's end is conventionally
		 * exclusive. Over-flushing a TLB is harmless, so this is at
		 * worst one spurious flush — confirm against callers.
		 */
		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	/* Flush DTLB */
	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	local_irq_restore(flags);
}

/*
 * flush_tlb_mm - invalidate the whole address space of one mm.
 *
 * Done lazily: drop the mm's ASID so its stale TLB entries can no longer
 * match, and re-activate (getting a fresh context) only if this mm is
 * the one currently running.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long flags;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_flush_tlb_mm;
#endif

	if (mm->context == NO_CONTEXT)
		return;

	local_irq_save(flags);

	mm->context=NO_CONTEXT;
	if(mm==current->mm)
		activate_context(mm);

	local_irq_restore(flags);
}

/*
 * flush_tlb_all - invalidate every variable ITLB and DTLB entry.
 */
void flush_tlb_all(void)
{
	/* Invalidate all, including shared pages, excluding fixed TLBs */
	unsigned long flags, tlb;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_flush_tlb_all;
#endif

	local_irq_save(flags);

	/* Flush each ITLB entry */
	for_each_itlb_entry(tlb) {
		__flush_tlb_slot(tlb);
	}

	/* Flush each DTLB entry */
	for_each_dtlb_entry(tlb) {
		__flush_tlb_slot(tlb);
	}

	local_irq_restore(flags);
}

/*
 * flush_tlb_kernel_range - flush kernel mappings in [start, end).
 * Currently implemented as a full flush; the arguments are ignored.
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* FIXME: Optimize this later.. */
	flush_tlb_all();
}

#if defined(CONFIG_SH64_PROC_TLB)
/* Procfs interface to read the performance information */

/*
 * tlb_proc_info - /proc read handler (legacy create_proc_read_entry
 * signature) that formats all TLB profiling counters into `buf`.
 * Returns the number of bytes written and marks EOF.
 */
static int
tlb_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
{
  int len=0;
  len += sprintf(buf+len, "do_fast_page_fault   called %12lld times\n", calls_to_do_fast_page_fault);
  len += sprintf(buf+len, "do_slow_page_fault   called %12lld times\n", calls_to_do_slow_page_fault);
  len += sprintf(buf+len, "update_mmu_cache     called %12lld times\n", calls_to_update_mmu_cache);
  len += sprintf(buf+len, "flush_tlb_page       called %12lld times\n", calls_to_flush_tlb_page);
  len += sprintf(buf+len, "flush_tlb_range      called %12lld times\n", calls_to_flush_tlb_range);
  len += sprintf(buf+len, "flush_tlb_mm         called %12lld times\n", calls_to_flush_tlb_mm);
  len += sprintf(buf+len, "flush_tlb_all        called %12lld times\n", calls_to_flush_tlb_all);
  len += sprintf(buf+len, "flush_tlb_range_sizes\n"
                          " 1      : %12lld\n"
                          " 2      : %12lld\n"
                          " 3 -  4 : %12lld\n"
                          " 5 -  7 : %12lld\n"
                          " 8 - 11 : %12lld\n"
                          "12 - 15 : %12lld\n"
                          "16+     : %12lld\n",
                          flush_tlb_range_1, flush_tlb_range_2, flush_tlb_range_3_4,
                          flush_tlb_range_5_7, flush_tlb_range_8_11, flush_tlb_range_12_15,
                          flush_tlb_range_16_up);
  len += sprintf(buf+len, "page not present           %12lld times\n", page_not_present);
  *eof = 1;
  return len;
}

/* Register /proc/tlb at boot time. */
static int __init register_proc_tlb(void)
{
  create_proc_read_entry("tlb", 0, NULL, tlb_proc_info, NULL);
  return 0;
}

__initcall(register_proc_tlb);
#endif

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?