init.c
linux-2.6.15.6 (C source)
Page 1 of 3
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
			tag = spitfire_get_dtlb_tag(i);
			if (((tag & ~(PAGE_MASK)) == 0) &&
			    ((tag &  (PAGE_MASK)) >= prom_reserved_base)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		for (i = 0; i < 512; i++) {
			unsigned long tag = cheetah_get_dtlb_tag(i, 2);

			if ((tag & ~PAGE_MASK) == 0 &&
			    (tag & PAGE_MASK) >= prom_reserved_base) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_dtlb_data(i, 0x0UL, 2);
			}

			if (tlb_type != cheetah_plus)
				continue;

			tag = cheetah_get_dtlb_tag(i, 3);

			if ((tag & ~PAGE_MASK) == 0 &&
			    (tag & PAGE_MASK) >= prom_reserved_base) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_dtlb_data(i, 0x0UL, 3);
			}
		}
	} else {
		/* Implement me :-) */
		BUG();
	}
}

static int prom_ditlb_set;

struct prom_tlb_entry {
	int		tlb_ent;
	unsigned long	tlb_tag;
	unsigned long	tlb_data;
};
struct prom_tlb_entry prom_itlb[16], prom_dtlb[16];

void prom_world(int enter)
{
	unsigned long pstate;
	int i;

	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	if (!prom_ditlb_set)
		return;

	/* Make sure the following runs atomically. */
	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	if (enter) {
		/* Kick out nucleus VPTEs. */
		__flush_nucleus_vptes();

		/* Install PROM world. */
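		/* A sketch of what each reinstall below amounts to (pseudo
		 * names, not a real MMU API): the saved tag is staged into
		 * the TLB_TAG_ACCESS register with stxa to the D-MMU or
		 * I-MMU ASI, then the saved data word is written into the
		 * locked slot by the chip-specific put routine; roughly:
		 *
		 *	mmu[TLB_TAG_ACCESS] = prom_dtlb[i].tlb_tag;
		 *	dtlb[prom_dtlb[i].tlb_ent] = prom_dtlb[i].tlb_data;
		 */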
		for (i = 0; i < 16; i++) {
			if (prom_dtlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "membar #Sync"
					: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
					"i" (ASI_DMMU));
				if (tlb_type == spitfire)
					spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
							       prom_dtlb[i].tlb_data);
				else if (tlb_type == cheetah || tlb_type == cheetah_plus)
					cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
							       prom_dtlb[i].tlb_data);
			}
			if (prom_itlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "membar #Sync"
						     : : "r" (prom_itlb[i].tlb_tag),
						     "r" (TLB_TAG_ACCESS),
						     "i" (ASI_IMMU));
				if (tlb_type == spitfire)
					spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
							       prom_itlb[i].tlb_data);
				else if (tlb_type == cheetah || tlb_type == cheetah_plus)
					cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
							       prom_itlb[i].tlb_data);
			}
		}
	} else {
		for (i = 0; i < 16; i++) {
			if (prom_dtlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
					: : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				if (tlb_type == spitfire)
					spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
				else
					cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
			}
			if (prom_itlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS),
						     "i" (ASI_IMMU));
				if (tlb_type == spitfire)
					spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
				else
					cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL);
			}
		}
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}

void inherit_locked_prom_mappings(int save_p)
{
	int i;
	int dtlb_seen = 0;
	int itlb_seen = 0;

	/* Fucking losing PROM has more mappings in the TLB, but
	 * it (conveniently) fails to mention any of these in the
	 * translations property.  The only ones that matter are
	 * the locked PROM tlb entries, so we impose the following
	 * irrecoverable rule on the PROM: it is allowed 8 locked
	 * entries in the ITLB and 8 in the DTLB.
	 *
	 * Supposedly the upper 16GB of the address space is
	 * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
	 * SOMEWHERE!!!!!!!!!!!!!!!!!  Furthermore the entire interface
	 * used between the client program and the firmware on sun5
	 * systems to coordinate mmu mappings is also COMPLETELY
	 * UNDOCUMENTED!!!!!! Thanks S(t)un!
	 */
	if (save_p) {
		for (i = 0; i < 16; i++) {
			prom_itlb[i].tlb_ent = -1;
			prom_dtlb[i].tlb_ent = -1;
		}
	}
	if (tlb_type == spitfire) {
		int high = sparc64_highest_unlocked_tlb_ent;
		for (i = 0; i <= high; i++) {
			unsigned long data;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no cheetah+
			 *       page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
			data = spitfire_get_dtlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				/* Spitfire Errata #32 workaround */
				/* NOTE: Always runs on spitfire, so no
				 *       cheetah+ page size encodings.
				 */
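				/* The workaround is the dummy store just
				 * below: context 0 is written to the D-MMU
				 * PRIMARY_CONTEXT register and flushed ahead
				 * of each diagnostic TLB read, presumably so
				 * that the read which follows returns stable
				 * data.
				 */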
				__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
						     "flush	%%g6"
						     : /* No outputs */
						     : "r" (0),
						     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
				tag = spitfire_get_dtlb_tag(i);
				if (save_p) {
					prom_dtlb[dtlb_seen].tlb_ent = i;
					prom_dtlb[dtlb_seen].tlb_tag = tag;
					prom_dtlb[dtlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);

				dtlb_seen++;
				if (dtlb_seen > 15)
					break;
			}
		}

		for (i = 0; i < high; i++) {
			unsigned long data;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
			data = spitfire_get_itlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				/* Spitfire Errata #32 workaround */
				/* NOTE: Always runs on spitfire, so no
				 *       cheetah+ page size encodings.
				 */
				__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
						     "flush	%%g6"
						     : /* No outputs */
						     : "r" (0),
						     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
				tag = spitfire_get_itlb_tag(i);
				if (save_p) {
					prom_itlb[itlb_seen].tlb_ent = i;
					prom_itlb[itlb_seen].tlb_tag = tag;
					prom_itlb[itlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);

				itlb_seen++;
				if (itlb_seen > 15)
					break;
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		int high = sparc64_highest_unlocked_tlb_ent;

		for (i = 0; i <= high; i++) {
			unsigned long data;

			data = cheetah_get_ldtlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				tag = cheetah_get_ldtlb_tag(i);
				if (save_p) {
					prom_dtlb[dtlb_seen].tlb_ent = i;
					prom_dtlb[dtlb_seen].tlb_tag = tag;
					prom_dtlb[dtlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_ldtlb_data(i, 0x0UL);

				dtlb_seen++;
				if (dtlb_seen > 15)
					break;
			}
		}

		for (i = 0; i < high; i++) {
			unsigned long data;

			data = cheetah_get_litlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				tag = cheetah_get_litlb_tag(i);
				if (save_p) {
					prom_itlb[itlb_seen].tlb_ent = i;
					prom_itlb[itlb_seen].tlb_tag = tag;
					prom_itlb[itlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				cheetah_put_litlb_data(i, 0x0UL);

				itlb_seen++;
				if (itlb_seen > 15)
					break;
			}
		}
	} else {
		/* Implement me :-) */
		BUG();
	}
	if (save_p)
		prom_ditlb_set = 1;
}

/* Give PROM back his world, done during reboots... */
void prom_reload_locked(void)
{
	int i;

	for (i = 0; i < 16; i++) {
		if (prom_dtlb[i].tlb_ent != -1) {
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
				: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
				"i" (ASI_DMMU));
			if (tlb_type == spitfire)
				spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
			else if (tlb_type == cheetah || tlb_type == cheetah_plus)
				cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
		}

		if (prom_itlb[i].tlb_ent != -1) {
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
					     : : "r" (prom_itlb[i].tlb_tag),
					     "r" (TLB_TAG_ACCESS),
					     "i" (ASI_IMMU));
			if (tlb_type == spitfire)
				spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
			else
				cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
		}
	}
}

#ifdef DCACHE_ALIASING_POSSIBLE
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
#endif /* DCACHE_ALIASING_POSSIBLE */

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
			if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
			if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) nor ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;

	spin_lock(&ctx_alloc_lock);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock(&ctx_alloc_lock);
}

#ifndef CONFIG_SMP
struct pgtable_cache_struct pgt_quicklists;
#endif

/* OK, we have to color these pages. The page tables are accessed
 * by non-Dcache enabled mapping in the VPTE area by the dtlb_backend.S
 * code, as well as by PAGE_OFFSET range direct-mapped addresses by
 * other parts of the kernel. By coloring, we make sure that the tlbmiss
 * fast handlers do not get data from old/garbage dcache lines that
 * correspond to an old/stale virtual address (user/kernel) that
 * previously mapped the pagetable page while accessing vpte range
 * addresses. The idea is that if the vpte color and PAGE_OFFSET range
 * color is the same, then when the kernel initializes the pagetable
 * using the later address range, accesses with the first address
 * range will see the newly initialized data rather than the garbage.
 */
#ifdef DCACHE_ALIASING_POSSIBLE
#define DC_ALIAS_SHIFT	1
#else
#define DC_ALIAS_SHIFT	0
#endif
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	struct page *page;
	unsigned long color;

	{
		pte_t *ptep = pte_alloc_one_fast(mm, address);
		if (ptep)
			return ptep;
	}

	color = VPTE_COLOR(address);
	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT);
	if (page) {
		unsigned long *to_free;
		unsigned long paddr;
		pte_t *pte;

#ifdef DCACHE_ALIASING_POSSIBLE
		set_page_count(page, 1);
		ClearPageCompound(page);

		set_page_count((page + 1), 1);
		ClearPageCompound(page + 1);
#endif
		paddr = (unsigned long) page_address(page);
		memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));

		if (!color) {
			pte = (pte_t *) paddr;
			to_free = (unsigned long *) (paddr + PAGE_SIZE);
		} else {
			pte = (pte_t *) (paddr + PAGE_SIZE);
			to_free = (unsigned long *) paddr;
		}

#ifdef DCACHE_ALIASING_POSSIBLE
		/* Now free the other one up, adjust cache size. */
		preempt_disable();
		*to_free = (unsigned long) pte_quicklist[color ^ 0x1];
		pte_quicklist[color ^ 0x1] = to_free;
		pgtable_cache_size++;
		preempt_enable();
#endif

		return pte;
	}
	return NULL;
}

void sparc_ultra_dump_itlb(void)
{
	int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of itlb: ");
		for (slot = 0; slot < 14; slot++) printk ("    ");
		printk ("%2x:%016lx,%016lx\n",
			0,
			spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
		for (slot = 1; slot < 64; slot+=3) {
