⭐ 虫虫下载站

📄 htab.c

📁 This Linux source code is quite comprehensive and essentially complete. It is written in C. Due to time constraints I have not tested it myself, but even as reference material it is very good.
💻 C
📖 Page 1 of 3
/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>

#include <asm/ppcdebug.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/naca.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/hvcall.h>
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvCallHpt.h>

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 */

HTAB htab_data = {NULL, 0, 0, 0, 0};

extern unsigned long _SDR1;
extern unsigned long klimit;

void make_pte(HPTE *htab, unsigned long va, unsigned long pa,
              int mode, unsigned long hash_mask, int large);

long plpar_pte_enter(unsigned long flags,
                     unsigned long ptex,
                     unsigned long new_pteh, unsigned long new_ptel,
                     unsigned long *old_pteh_ret, unsigned long *old_ptel_ret);

static long hpte_remove(unsigned long hpte_group);
static long rpa_lpar_hpte_remove(unsigned long hpte_group);
static long iSeries_hpte_remove(unsigned long hpte_group);

static spinlock_t pSeries_tlbie_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t pSeries_lpar_tlbie_lock = SPIN_LOCK_UNLOCKED;
spinlock_t hash_table_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

#define KB (1024)
#define MB (1024*KB)

/* Spin forever; used after a terminate message since we cannot return. */
static inline void
loop_forever(void)
{
        volatile unsigned long x = 1;
        for (; x; x |= 1)
                ;
}

static inline void
create_pte_mapping(unsigned long start, unsigned long end,
                   unsigned long mode, unsigned long mask, int large)
{
        unsigned long addr;
        HPTE *htab = (HPTE *)__v2a(htab_data.htab);
        unsigned int step;

        if (large)
                step = 16*MB;
        else
                step = 4*KB;

        for (addr = start; addr < end; addr += step) {
                unsigned long vsid = get_kernel_vsid(addr);
                unsigned long va = (vsid << 28) | (addr & 0xfffffff);
                make_pte(htab, va, (unsigned long)__v2a(addr),
                         mode, mask, large);
        }
}

void
htab_initialize(void)
{
        unsigned long table, htab_size_bytes;
        unsigned long pteg_count;
        unsigned long mode_rw, mask;

#if 0
        /* Can't really do the call below since it calls the normal RTAS
         * entry point and we're still relocate off at the moment.
         * Temporarily disabling until it can call through the relocate off
         * RTAS entry point.  -Peter
         */
        ppc64_boot_msg(0x05, "htab init");
#endif
        /*
         * Calculate the required size of the htab.  We want the number of
         * PTEGs to equal one half the number of real pages.
         */
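        /*
         * Each HPTE is 16 bytes and a PTEG holds 8 HPTEs, so a PTEG is
         * 128 bytes: the shift by 7 below converts the table size in
         * bytes (2^pftSize) into a PTEG count, and __ilog2(pteg_count) - 11
         * further down is the encoded HTABSIZE that SDR1 expects.
         */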
        htab_size_bytes = 1UL << naca->pftSize;
        pteg_count = htab_size_bytes >> 7;

        /* For debug, make the HTAB 1/8 as big as it normally would be. */
        ifppcdebug(PPCDBG_HTABSIZE) {
                pteg_count >>= 3;
                htab_size_bytes = pteg_count << 7;
        }

        htab_data.htab_num_ptegs = pteg_count;
        htab_data.htab_hash_mask = pteg_count - 1;

        if (naca->platform == PLATFORM_PSERIES) {
                /* Find storage for the HPT.  Must be contiguous in
                 * the absolute address space.
                 */
                table = lmb_alloc(htab_size_bytes, htab_size_bytes);
                if (!table) {
                        ppc64_terminate_msg(0x20, "hpt space");
                        loop_forever();
                }
                htab_data.htab = (HPTE *)__a2v(table);

                /* htab absolute addr + encoded htabsize */
                _SDR1 = table + __ilog2(pteg_count) - 11;

                /* Initialize the HPT with no entries */
                memset((void *)table, 0, htab_size_bytes);
        } else {
                /* Using a hypervisor which owns the htab */
                htab_data.htab = NULL;
                _SDR1 = 0;
        }

        mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
        mask = pteg_count - 1;

        /* XXX we currently map kernel text rw, should fix this */
        if ((naca->platform & PLATFORM_PSERIES) &&
            cpu_has_largepage() && (naca->physicalMemorySize > 256*MB)) {
                create_pte_mapping((unsigned long)KERNELBASE,
                                   KERNELBASE + 256*MB, mode_rw, mask, 0);
                create_pte_mapping((unsigned long)KERNELBASE + 256*MB,
                                   KERNELBASE + (naca->physicalMemorySize),
                                   mode_rw, mask, 1);
        } else {
                create_pte_mapping((unsigned long)KERNELBASE,
                                   KERNELBASE + (naca->physicalMemorySize),
                                   mode_rw, mask, 0);
        }

#if 0
        /* Can't really do the call below since it calls the normal RTAS
         * entry point and we're still relocate off at the moment.
         * Temporarily disabling until it can call through the relocate off
         * RTAS entry point.  -Peter
         */
        ppc64_boot_msg(0x06, "htab done");
#endif
}

#undef KB
#undef MB
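/*
 * make_pte below computes vpn = va >> PAGE_SHIFT (or LARGE_PAGE_SHIFT
 * for 16MB pages), hashes it with hpt_hash(), and indexes the primary
 * PTEG as (hash & hash_mask) * HPTES_PER_GROUP.  Insertion is then
 * platform specific: a direct store on bare-metal pSeries, the
 * plpar_pte_enter() hcall on pSeries LPAR, and HvCallHpt on iSeries.
 */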
/*
 * Create a pte.  Used during initialization only.
 * We assume the PTE will fit in the primary PTEG.
 */
void make_pte(HPTE *htab, unsigned long va, unsigned long pa,
              int mode, unsigned long hash_mask, int large)
{
        HPTE *hptep, local_hpte, rhpte;
        unsigned long hash, vpn, flags, lpar_rc;
        unsigned long i, dummy1, dummy2;
        long slot;

        if (large)
                vpn = va >> LARGE_PAGE_SHIFT;
        else
                vpn = va >> PAGE_SHIFT;

        hash = hpt_hash(vpn, large);

        local_hpte.dw1.dword1 = pa | mode;
        local_hpte.dw0.dword0 = 0;
        local_hpte.dw0.dw0.avpn = va >> 23;
        local_hpte.dw0.dw0.bolted = 1;          /* bolted */
        if (large) {
                local_hpte.dw0.dw0.l = 1;       /* large page */
                local_hpte.dw0.dw0.avpn &= ~0x1UL;
        }
        local_hpte.dw0.dw0.v = 1;

        if (naca->platform == PLATFORM_PSERIES) {
                hptep = htab + ((hash & hash_mask) * HPTES_PER_GROUP);

                for (i = 0; i < 8; ++i, ++hptep) {
                        if (hptep->dw0.dw0.v == 0) {            /* !valid */
                                *hptep = local_hpte;
                                return;
                        }
                }
        } else if (naca->platform == PLATFORM_PSERIES_LPAR) {
                slot = ((hash & hash_mask) * HPTES_PER_GROUP);

                /* Set CEC cookie to 0                   */
                /* Zero page = 0                         */
                /* I-cache Invalidate = 0                */
                /* I-cache synchronize = 0               */
                /* Exact = 0 - modify any entry in group */
                flags = 0;

                lpar_rc = plpar_pte_enter(flags, slot, local_hpte.dw0.dword0,
                                          local_hpte.dw1.dword1,
                                          &dummy1, &dummy2);
                if (lpar_rc != H_Success) {
                        ppc64_terminate_msg(0x21, "hpte enter");
                        loop_forever();
                }
                return;
        } else if (naca->platform == PLATFORM_ISERIES_LPAR) {
                slot = HvCallHpt_findValid(&rhpte, vpn);
                if (slot < 0) {
                        /* Must find space in primary group */
                        panic("hash_page: hpte already exists\n");
                }
                HvCallHpt_addValidate(slot, 0, (HPTE *)&local_hpte);
                return;
        }

        /* We should _never_ get here, and it is too early to call xmon. */
        ppc64_terminate_msg(0x22, "hpte platform");
        loop_forever();
}

/*
 * find_linux_pte returns the address of a linux pte for a given
 * effective address and directory.  If not found, it returns zero.
 */
pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
{
        pgd_t *pg;
        pmd_t *pm;
        pte_t *pt = NULL;
        pte_t pte;

        pg = pgdir + pgd_index(ea);
        if (!pgd_none(*pg)) {
                pm = pmd_offset(pg, ea);
                if (!pmd_none(*pm)) {
                        pt = pte_offset(pm, ea);
                        pte = *pt;
                        if (!pte_present(pte))
                                pt = NULL;
                }
        }

        return pt;
}

/*
 * Compute the HPTE PP (page protection) bits from a Linux pte:
 * PP=0 is supervisor-only, PP=2 user read/write, PP=3 user read-only.
 * A user page that is writable but not yet DIRTY is given PP=3 so
 * the first store faults and the software dirty-bit management in
 * __hash_page() can mark it DIRTY.
 */
static inline unsigned long computeHptePP(unsigned long pte)
{
        return (pte & _PAGE_USER) |
                (((pte & _PAGE_USER) >> 1) &
                 ((~((pte >> 2) &       /* _PAGE_RW */
                     (pte >> 7))) &     /* _PAGE_DIRTY */
                  1));
}
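/*
 * The HPTE slot chosen for a pte is recorded back into the Linux pte:
 * _PAGE_SECONDARY notes whether the secondary hash was used and
 * _PAGE_GROUP_IX holds the index within the PTEG, so the entry can
 * be located again when it must be updated or invalidated.
 */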
/*
 * Handle a fault by adding an HPTE.  If the address can't be determined
 * to be valid via Linux page tables, return 1.  If handled, return 0.
 */
int __hash_page(unsigned long ea, unsigned long access,
                unsigned long vsid, pte_t *ptep)
{
        unsigned long va, vpn;
        unsigned long newpp, prpn;
        unsigned long hpteflags;
        long slot;
        pte_t old_pte, new_pte;

        /* Search the Linux page table for a match with va */
        va = (vsid << 28) | (ea & 0x0fffffff);
        vpn = va >> PAGE_SHIFT;

        /* Acquire the hash table lock to guarantee that the linux
         * pte we fetch will not change
         */
        spin_lock(&hash_table_lock);

        /*
         * Check the user's access rights to the page.  If access should be
         * prevented then send the problem up to do_page_fault.
         */
        access |= _PAGE_PRESENT;
        if (unlikely(access & ~(pte_val(*ptep)))) {
                spin_unlock(&hash_table_lock);
                return 1;
        }

        /*
         * We have found a pte (which was present).  The spinlocks
         * prevent this status from changing: the hash_table_lock
         * keeps _PAGE_HASHPTE (and RPN, DIRTY and ACCESSED) stable,
         * and the page_table_lock prevents the pte from being
         * invalidated or modified.
         */

        /*
         * At this point, we have a pte (old_pte) which can be used to build
         * or update an HPTE.  There are 2 cases:
         *
         * 1. There is a valid (present) pte with no associated HPTE (this is
         *      the most common case)
         * 2. There is a valid (present) pte with an associated HPTE.  The
         *      current values of the pp bits in the HPTE prevent access
         *      because we are doing software DIRTY bit management and the
         *      page is currently not DIRTY.
         */
        old_pte = *ptep;
        new_pte = old_pte;

        /* If the attempted access was a store */
        if (access & _PAGE_RW)
                pte_val(new_pte) |= _PAGE_ACCESSED | _PAGE_DIRTY;
        else
                pte_val(new_pte) |= _PAGE_ACCESSED;

        newpp = computeHptePP(pte_val(new_pte));

        /* Check if pte already has an hpte (case 2) */
        if (unlikely(pte_val(old_pte) & _PAGE_HASHPTE)) {
                /* There MIGHT be an HPTE for this pte */
                unsigned long hash, slot, secondary;

                /* XXX fix large pte flag */
                hash = hpt_hash(vpn, 0);
                secondary = (pte_val(old_pte) & _PAGE_SECONDARY) >> 15;
                if (secondary)
                        hash = ~hash;
                slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
                slot += (pte_val(old_pte) & _PAGE_GROUP_IX) >> 12;

                /* XXX fix large pte flag */
                if (ppc_md.hpte_updatepp(slot, secondary,
                                         newpp, va, 0) == -1) {
                        pte_val(old_pte) &= ~_PAGE_HPTEFLAGS;
                } else {
                        if (!pte_same(old_pte, new_pte)) {
                                *ptep = new_pte;
                        }
                }
        }

        if (likely(!(pte_val(old_pte) & _PAGE_HASHPTE))) {
                /* Update the linux pte with the HPTE slot */
                pte_val(new_pte) &= ~_PAGE_HPTEFLAGS;
                pte_val(new_pte) |= _PAGE_HASHPTE;
                prpn = pte_val(old_pte) >> PTE_SHIFT;

                /* copy appropriate flags from linux pte */
                hpteflags = (pte_val(new_pte) & 0x1f8) | newpp;

                slot = ppc_md.hpte_insert(vpn, prpn, hpteflags, 0, 0);

                pte_val(new_pte) |= ((slot << 12) &
                                     (_PAGE_GROUP_IX | _PAGE_SECONDARY));

                *ptep = new_pte;
        }

        spin_unlock(&hash_table_lock);

        return 0;
}

/*
 * Handle a fault by adding an HPTE.  If the address can't be determined
 * to be valid via Linux page tables, return 1.  If handled, return 0.
 */
int hash_page(unsigned long ea, unsigned long access)
{
        void *pgdir;
        unsigned long vsid;
        struct mm_struct *mm;
        pte_t *ptep;
        int ret;

        /* Check for invalid addresses. */
        if (!IS_VALID_EA(ea))
                return 1;

        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
                mm = current->mm;
                if (mm == NULL)
                        return 1;
                vsid = get_vsid(mm->context, ea);
                break;
        case IO_REGION_ID:
                mm = &ioremap_mm;
                vsid = get_kernel_vsid(ea);
                break;
        case VMALLOC_REGION_ID:
                mm = &init_mm;
                vsid = get_kernel_vsid(ea);
                break;
        case IO_UNMAPPED_REGION_ID:
                udbg_printf("EEH Error ea = 0x%lx\n", ea);
                PPCDBG_ENTER_DEBUGGER();
                panic("EEH Error ea = 0x%lx\n", ea);
                break;
        case KERNEL_REGION_ID:
                /*
                 * As htab_initialize is now, we shouldn't ever get here since
                 * we're bolting the entire 0xC0... region.
                 */
                udbg_printf("Little faulted on kernel address 0x%lx\n", ea);
                PPCDBG_ENTER_DEBUGGER();
                panic("Little faulted on kernel address 0x%lx\n", ea);
                break;
        default:
                /* Not a valid range, send the problem up to do_page_fault */
                return 1;
        }
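The listing continues on page 2. To make the primary/secondary PTEG selection in __hash_page() above easier to follow, here is a minimal standalone sketch. hpt_hash_demo() is a hypothetical, simplified stand-in for the kernel's hpt_hash() (the real function masks the VSID and page-index fields differently and has a large-page variant); the mask-and-multiply slot arithmetic and the ones-complement secondary hash mirror the listing.

#include <stdio.h>

#define HPTES_PER_GROUP 8

/* Hypothetical, simplified stand-in for the kernel's hpt_hash(). */
static unsigned long hpt_hash_demo(unsigned long vpn)
{
        unsigned long vsid = vpn >> 16;         /* segment-derived bits */
        unsigned long page = vpn & 0xffff;      /* page index within segment */
        return vsid ^ page;
}

int main(void)
{
        /* htab_hash_mask = pteg_count - 1, as set up in htab_initialize(). */
        unsigned long htab_hash_mask = (1UL << 20) - 1; /* e.g. 2^20 PTEGs */
        unsigned long vpn  = 0x123456789UL;             /* arbitrary example vpn */
        unsigned long hash = hpt_hash_demo(vpn);

        /* Primary group: first slot of the PTEG selected by the hash. */
        unsigned long primary   = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        /* Secondary group: same arithmetic on the ones-complement hash,
         * exactly as "if (secondary) hash = ~hash;" does in __hash_page(). */
        unsigned long secondary = (~hash & htab_hash_mask) * HPTES_PER_GROUP;

        printf("primary slot %lu, secondary slot %lu\n", primary, secondary);
        return 0;
}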
