
📄 init.c

📁 Linux 2.4.20 kernel source; can be patched with RTLinux 3.2 to form a real-time Linux system (for kernel compilation)
💻 C
📖 Page 1 of 4
			}
		}
	} else {
		/* Implement me :-) */
		BUG();
	}

	if (save_p)
		prom_ditlb_set = 1;
}

/* Give PROM back his world, done during reboots... */
void prom_reload_locked(void)
{
	int i;

	for (i = 0; i < 16; i++) {
		if (prom_dtlb[i].tlb_ent != -1) {
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
				: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
				"i" (ASI_DMMU));
			if (tlb_type == spitfire)
				spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
			else if (tlb_type == cheetah)
				cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
		}

		if (prom_itlb[i].tlb_ent != -1) {
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
					     : : "r" (prom_itlb[i].tlb_tag),
					     "r" (TLB_TAG_ACCESS),
					     "i" (ASI_IMMU));
			if (tlb_type == spitfire)
				spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
			else
				cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
		}
	}
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}

void __flush_cache_all(void)
{
	/* Cheetah should be fine here too. */
	if (tlb_type == spitfire) {
		unsigned long va;

		flushw_all();
		for (va = 0; va < (PAGE_SIZE << 1); va += 32)
			spitfire_put_icache_tag(va, 0x0);
		__asm__ __volatile__("flush %g6");
	}
}

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
			if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
			if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}
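/* ------------------------------------------------------------------
 * Editor's sketch (not part of the original init.c): the "if not
 * locked, zap it" policy of __flush_tlb_all() above, modeled on a toy
 * TLB.  The DEMO_* names and the lock-bit position are assumptions for
 * the demo; the real code tests _PAGE_L in each entry's data word so
 * pinned translations (e.g. the kernel's locked 4MB mapping) survive
 * the flush.
 */
#include <stdio.h>

#define DEMO_TLB_ENTRIES	64
#define DEMO_PAGE_L		(1UL << 6)	/* assumed lock-bit position */

int main(void)
{
	unsigned long tlb[DEMO_TLB_ENTRIES] = { [0] = DEMO_PAGE_L | 1UL };
	int i, zapped = 0;

	for (i = 0; i < DEMO_TLB_ENTRIES; i++) {
		if (tlb[i] & DEMO_PAGE_L)	/* locked: leave it alone */
			continue;
		tlb[i] = 0UL;			/* unlocked: zap it */
		zapped++;
	}
	printf("zapped %d of %d entries\n", zapped, DEMO_TLB_ENTRIES);
	return 0;
}
/* ------------------------------------------------------------------ */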
/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;

	spin_lock(&ctx_alloc_lock);
	ctx = CTX_HWBITS(tlb_context_cache + 1);
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1UL << CTX_VERSION_SHIFT, ctx);
	if (new_ctx >= (1UL << CTX_VERSION_SHIFT)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;

			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	spin_unlock(&ctx_alloc_lock);
	mm->context = new_ctx;
}

#ifndef CONFIG_SMP
struct pgtable_cache_struct pgt_quicklists;
#endif
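/* ------------------------------------------------------------------
 * Editor's sketch (not part of the original init.c): how the context
 * word handed out above splits into a hardware context number (low
 * bits, programmed into the MMU) and a version number (high bits, used
 * only for stale-context detection).  DEMO_CTX_VERSION_SHIFT is an
 * assumed value; the real constants live in asm/mmu_context.h.
 */
#include <stdio.h>

#define DEMO_CTX_VERSION_SHIFT	21
#define DEMO_CTX_HWMASK		((1UL << DEMO_CTX_VERSION_SHIFT) - 1UL)

static void show(const char *what, unsigned long ctx)
{
	printf("%-16s ctx=%#010lx hw=%#lx version=%#lx\n", what, ctx,
	       ctx & DEMO_CTX_HWMASK, ctx >> DEMO_CTX_VERSION_SHIFT);
}

int main(void)
{
	/* Hardware context 0 (the nucleus) and version 0 are never
	 * handed out, so an mm still carrying an old version is always
	 * caught by the version-mismatch test and sent back through
	 * get_new_mmu_context().
	 */
	show("first user ctx", (1UL << DEMO_CTX_VERSION_SHIFT) | 1UL);
	show("after rollover", (2UL << DEMO_CTX_VERSION_SHIFT) | 1UL);
	return 0;
}
/* ------------------------------------------------------------------ */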
/* OK, we have to color these pages. The page tables are accessed
 * by non-Dcache enabled mappings in the VPTE area by the dtlb_backend.S
 * code, as well as by PAGE_OFFSET range direct-mapped addresses by
 * other parts of the kernel. By coloring, we make sure that the tlbmiss
 * fast handlers do not get data from old/garbage dcache lines that
 * correspond to an old/stale virtual address (user/kernel) that
 * previously mapped the pagetable page while accessing vpte range
 * addresses. The idea is that if the vpte color and the PAGE_OFFSET range
 * color are the same, then when the kernel initializes the pagetable
 * using the latter address range, accesses with the first address
 * range will see the newly initialized data rather than the garbage.
 */
#if (L1DCACHE_SIZE > PAGE_SIZE)			/* is there a D$ aliasing problem? */
#define DC_ALIAS_SHIFT	1
#else
#define DC_ALIAS_SHIFT	0
#endif

pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page = alloc_pages(GFP_KERNEL, DC_ALIAS_SHIFT);
	unsigned long color = VPTE_COLOR(address);

	if (page) {
		unsigned long *to_free;
		unsigned long paddr;
		pte_t *pte;

#if (L1DCACHE_SIZE > PAGE_SIZE)			/* is there a D$ aliasing problem? */
		set_page_count((page + 1), 1);
#endif
		paddr = (unsigned long) page_address(page);
		memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));

		if (!color) {
			pte = (pte_t *) paddr;
			to_free = (unsigned long *) (paddr + PAGE_SIZE);
		} else {
			pte = (pte_t *) (paddr + PAGE_SIZE);
			to_free = (unsigned long *) paddr;
		}

#if (L1DCACHE_SIZE > PAGE_SIZE)			/* is there a D$ aliasing problem? */
		/* Now free the other one up, adjust cache size. */
		*to_free = (unsigned long) pte_quicklist[color ^ 0x1];
		pte_quicklist[color ^ 0x1] = to_free;
		pgtable_cache_size++;
#endif

		return pte;
	}
	return NULL;
}
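/* ------------------------------------------------------------------
 * Editor's sketch (not part of the original init.c): the two-color
 * D-cache trick used by pte_alloc_one() in miniature.  Assuming 8K
 * pages and a 16K direct-mapped D-cache (L1DCACHE_SIZE twice
 * PAGE_SIZE), one address bit decides which cache color a page gets;
 * demo_vpte_color() is a stand-in for the kernel's VPTE_COLOR().
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT	13			/* assumed 8K pages */
#define DEMO_PAGE_SIZE	(1UL << DEMO_PAGE_SHIFT)

/* Color = the page-index bit that selects the D-cache half. */
static unsigned long demo_vpte_color(unsigned long addr)
{
	return (addr >> DEMO_PAGE_SHIFT) & 1UL;
}

int main(void)
{
	/* A double page (as from alloc_pages(..., 1)) holds one page of
	 * each color; hand back the half whose color matches the
	 * requesting VPTE address, as the code above does.
	 */
	unsigned long pair_base = 0x100000UL;	/* assumed, double-page aligned */
	unsigned long vpte_addr = 0x2000UL;	/* color 1 in this scheme */
	unsigned long pick = demo_vpte_color(vpte_addr)
		? pair_base + DEMO_PAGE_SIZE : pair_base;

	printf("vpte color %lu -> use page at %#lx, free its partner\n",
	       demo_vpte_color(vpte_addr), pick);
	return 0;
}
/* ------------------------------------------------------------------ */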
void sparc_ultra_dump_itlb(void)
{
	int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of itlb: ");
		for (slot = 0; slot < 14; slot++) printk ("    ");
		printk ("%2x:%016lx,%016lx\n",
			0,
			spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
		for (slot = 1; slot < 64; slot+=3) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
				slot+1,
				spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
				slot+2,
				spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
		}
	} else if (tlb_type == cheetah) {
		printk ("Contents of itlb0:\n");
		for (slot = 0; slot < 16; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
				slot+1,
				cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
		}
		printk ("Contents of itlb2:\n");
		for (slot = 0; slot < 128; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
				slot+1,
				cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
		}
	}
}

void sparc_ultra_dump_dtlb(void)
{
	int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of dtlb: ");
		for (slot = 0; slot < 14; slot++) printk ("    ");
		printk ("%2x:%016lx,%016lx\n", 0,
			spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
		for (slot = 1; slot < 64; slot+=3) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
				slot+1,
				spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
				slot+2,
				spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
		}
	} else if (tlb_type == cheetah) {
		printk ("Contents of dtlb0:\n");
		for (slot = 0; slot < 16; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
				slot+1,
				cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
		}
		printk ("Contents of dtlb2:\n");
		for (slot = 0; slot < 512; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_dtlb_tag(slot), cheetah_get_dtlb_data(slot),
				slot+1,
				cheetah_get_dtlb_tag(slot+1), cheetah_get_dtlb_data(slot+1));
		}
	}
}

extern unsigned long cmdline_memory_size;

unsigned long __init bootmem_init(unsigned long *pages_avail)
{
	unsigned long bootmap_size, start_pfn, end_pfn;
	unsigned long end_of_phys_memory = 0UL;
	unsigned long bootmap_pfn, bytes_avail, size;
	int i;

	bytes_avail = 0UL;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		end_of_phys_memory = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
		bytes_avail += sp_banks[i].num_bytes;
		if (cmdline_memory_size) {
			if (bytes_avail > cmdline_memory_size) {
				unsigned long slack = bytes_avail - cmdline_memory_size;

				bytes_avail -= slack;
				end_of_phys_memory -= slack;

				sp_banks[i].num_bytes -= slack;
				if (sp_banks[i].num_bytes == 0) {
					sp_banks[i].base_addr = 0xdeadbeef;
				} else {
					sp_banks[i+1].num_bytes = 0;
					sp_banks[i+1].base_addr = 0xdeadbeef;
				}
				break;
			}
		}
	}

	*pages_avail = bytes_avail >> PAGE_SHIFT;

	/* Start with page aligned address of last symbol in kernel
	 * image.  The kernel is hard mapped below PAGE_OFFSET in a
	 * 4MB locked TLB translation.
	 */
	start_pfn  = PAGE_ALIGN((unsigned long) &_end) -
		((unsigned long) KERNBASE);

	/* Adjust up to the physical address where the kernel begins. */
	start_pfn += phys_base;

	/* Now shift down to get the real physical page frame number. */
	start_pfn >>= PAGE_SHIFT;

	bootmap_pfn = start_pfn;

	end_pfn = end_of_phys_memory >> PAGE_SHIFT;

#ifdef CONFIG_BLK_DEV_INITRD
	/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
	if (sparc_ramdisk_image) {
		if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE)
			sparc_ramdisk_image -= KERNBASE;
		initrd_start = sparc_ramdisk_image + phys_base;
		initrd_end = initrd_start + sparc_ramdisk_size;
		if (initrd_end > end_of_phys_memory) {
			printk(KERN_CRIT "initrd extends beyond end of memory "
					 "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
			       initrd_end, end_of_phys_memory);
			initrd_start = 0;
		}
		if (initrd_start) {
			if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
			    initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
				bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
		}
	}
#endif
	/* Initialize the boot-time allocator. */
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, phys_base>>PAGE_SHIFT, end_pfn);

	/* Now register the available physical memory with the
	 * allocator.
	 */
	for (i = 0; sp_banks[i].num_bytes != 0; i++)
		free_bootmem(sp_banks[i].base_addr,
			     sp_banks[i].num_bytes);

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		size = initrd_end - initrd_start;

		/* Reserve the initrd image area. */
		reserve_bootmem(initrd_start, size);
		*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
	/* Reserve the kernel text/data/bss. */
	size = (start_pfn << PAGE_SHIFT) - phys_base;
	reserve_bootmem(phys_base, size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* Reserve the bootmem map.   We do not account for it
	 * in pages_avail because we will release that memory
	 * in free_all_bootmem.
	 */
	size = bootmap_size;
	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	return end_pfn;
}

/* paging_init() sets up the page tables */
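/* ------------------------------------------------------------------
 * Editor's sketch (not part of the original init.c): the bookkeeping
 * bootmem_init() performs, reduced to arithmetic.  All bank/kernel
 * sizes are invented demo values; the flow mirrors the code above:
 * free every bank into the allocator, then re-reserve the kernel image
 * and the bootmem bitmap and subtract them from the available count.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT	13				/* assumed 8K pages */
#define DEMO_PAGE_SIZE	(1UL << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_ALIGN(x) \
	(((x) + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1))

int main(void)
{
	unsigned long bytes_avail = 64UL << 20;		/* one 64MB bank (demo) */
	unsigned long pages_avail = bytes_avail >> DEMO_PAGE_SHIFT;
	unsigned long kernel_size = 3UL << 20;		/* text/data/bss (demo) */
	unsigned long bitmap_size = pages_avail / 8;	/* one bit per page */

	pages_avail -= DEMO_PAGE_ALIGN(kernel_size) >> DEMO_PAGE_SHIFT;
	pages_avail -= DEMO_PAGE_ALIGN(bitmap_size) >> DEMO_PAGE_SHIFT;

	printf("%lu pages free after boot-time reservations\n", pages_avail);
	return 0;
}
/* ------------------------------------------------------------------ */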
