init.c

来自「LINUX 2.6.17.4的源码」· C语言 代码 · 共 1,842 行 · 第 1/4 页

C
1,842
字号
static void __init read_obp_translations(void){	int n, node, ents, first, last, i;	node = prom_finddevice("/virtual-memory");	n = prom_getproplen(node, "translations");	if (unlikely(n == 0 || n == -1)) {		prom_printf("prom_mappings: Couldn't get size.\n");		prom_halt();	}	if (unlikely(n > sizeof(prom_trans))) {		prom_printf("prom_mappings: Size %Zd is too big.\n", n);		prom_halt();	}	if ((n = prom_getproperty(node, "translations",				  (char *)&prom_trans[0],				  sizeof(prom_trans))) == -1) {		prom_printf("prom_mappings: Couldn't get property.\n");		prom_halt();	}	n = n / sizeof(struct linux_prom_translation);	ents = n;	sort(prom_trans, ents, sizeof(struct linux_prom_translation),	     cmp_ptrans, NULL);	/* Now kick out all the non-OBP entries.  */	for (i = 0; i < ents; i++) {		if (in_obp_range(prom_trans[i].virt))			break;	}	first = i;	for (; i < ents; i++) {		if (!in_obp_range(prom_trans[i].virt))			break;	}	last = i;	for (i = 0; i < (last - first); i++) {		struct linux_prom_translation *src = &prom_trans[i + first];		struct linux_prom_translation *dest = &prom_trans[i];		*dest = *src;	}	for (; i < ents; i++) {		struct linux_prom_translation *dest = &prom_trans[i];		dest->virt = dest->size = dest->data = 0x0UL;	}	prom_trans_ents = last - first;	if (tlb_type == spitfire) {		/* Clear diag TTE bits. 
*/		for (i = 0; i < prom_trans_ents; i++)			prom_trans[i].data &= ~0x0003fe0000000000UL;	}}static void __init hypervisor_tlb_lock(unsigned long vaddr,				       unsigned long pte,				       unsigned long mmu){	register unsigned long func asm("%o5");	register unsigned long arg0 asm("%o0");	register unsigned long arg1 asm("%o1");	register unsigned long arg2 asm("%o2");	register unsigned long arg3 asm("%o3");	func = HV_FAST_MMU_MAP_PERM_ADDR;	arg0 = vaddr;	arg1 = 0;	arg2 = pte;	arg3 = mmu;	__asm__ __volatile__("ta	0x80"			     : "=&r" (func), "=&r" (arg0),			       "=&r" (arg1), "=&r" (arg2),			       "=&r" (arg3)			     : "0" (func), "1" (arg0), "2" (arg1),			       "3" (arg2), "4" (arg3));	if (arg0 != 0) {		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "			    "errors with %lx\n", vaddr, 0, pte, mmu, arg0);		prom_halt();	}}static unsigned long kern_large_tte(unsigned long paddr);static void __init remap_kernel(void){	unsigned long phys_page, tte_vaddr, tte_data;	int tlb_ent = sparc64_highest_locked_tlbent();	tte_vaddr = (unsigned long) KERNBASE;	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;	tte_data = kern_large_tte(phys_page);	kern_locked_tte_data = tte_data;	/* Now lock us into the TLBs via Hypervisor or OBP. 
*/	if (tlb_type == hypervisor) {		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);		if (bigkernel) {			tte_vaddr += 0x400000;			tte_data += 0x400000;			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);		}	} else {		prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);		prom_itlb_load(tlb_ent, tte_data, tte_vaddr);		if (bigkernel) {			tlb_ent -= 1;			prom_dtlb_load(tlb_ent,				       tte_data + 0x400000, 				       tte_vaddr + 0x400000);			prom_itlb_load(tlb_ent,				       tte_data + 0x400000, 				       tte_vaddr + 0x400000);		}		sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;	}	if (tlb_type == cheetah_plus) {		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |					    CTX_CHEETAH_PLUS_NUC);		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;	}}static void __init inherit_prom_mappings(void){	read_obp_translations();	/* Now fixup OBP's idea about where we really are mapped. */	prom_printf("Remapping the kernel... ");	remap_kernel();	prom_printf("done.\n");}void prom_world(int enter){	if (!enter)		set_fs((mm_segment_t) { get_thread_current_ds() });	__asm__ __volatile__("flushw");}#ifdef DCACHE_ALIASING_POSSIBLEvoid __flush_dcache_range(unsigned long start, unsigned long end){	unsigned long va;	if (tlb_type == spitfire) {		int n = 0;		for (va = start; va < end; va += 32) {			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);			if (++n >= 512)				break;		}	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {		start = __pa(start);		end = __pa(end);		for (va = start; va < end; va += 32)			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"					     "membar #Sync"					     : /* no outputs */					     : "r" (va),					       "i" (ASI_DCACHE_INVALIDATE));	}}#endif /* DCACHE_ALIASING_POSSIBLE *//* Caller does TLB context flushing on local CPU if necessary. 
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	/* Preserve the mm's page-size bits across the context change. */
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	/* Search for a free context number starting just past the
	 * last one handed out.
	 */
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		/* Wrapped past the top of the bitmap: retry from bit 1
		 * (bit 0 is the nucleus context, never handed out).
		 */
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			/* Bitmap exhausted: bump the version field and
			 * restart allocation from scratch.
			 */
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			/* Version wrapped to zero (value 1 after the add);
			 * skip it so NO_CONTEXT stays distinguishable.
			 */
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			/* Bits 0 and 1 set: context 0 (nucleus) stays
			 * reserved and context 1 is the one we are about
			 * to hand out — NOTE(review): inferred from the
			 * CTX_FIRST_VERSION assignment above; confirm
			 * against mmu_context.h.
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	/* Claim the context number and tag it with the current version. */
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	/* On a version rollover every other CPU must pick up the new
	 * version; do it outside the lock.
	 */
	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

/* Dump every instruction-TLB entry (tag and data) to the kernel log,
 * using the spitfire accessors or the cheetah itlb0/itlb2 accessors
 * depending on tlb_type.  Debug aid only.
 */
void sparc_ultra_dump_itlb(void)
{
        int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of itlb: ");
		/* Padding so slot 0 lines up with the rows below. */
		for (slot = 0; slot < 14; slot++) printk ("    ");
		printk ("%2x:%016lx,%016lx\n",
			0,
			spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
		/* Remaining 63 entries, three per output line. */
		for (slot = 1; slot < 64; slot+=3) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", 
				slot,
				spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
				slot+1,
				spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
				slot+2,
				spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		printk ("Contents of itlb0:\n");
		for (slot = 0; slot < 16; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
				slot+1,
				cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
		}
		printk ("Contents of itlb2:\n");
		for (slot = 0; slot < 128; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
				slot+1,
				cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
		}
	}
}

/* Dump every data-TLB entry (tag and data) to the kernel log:
 * spitfire's single dtlb, or cheetah's dtlb0/dtlb2 (plus dtlb3 on
 * cheetah_plus).  Debug aid only.
 */
void sparc_ultra_dump_dtlb(void)
{
        int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of dtlb: ");
		/* Padding so slot 0 lines up with the rows below. */
		for (slot = 0; slot < 14; slot++) printk ("    ");
		printk ("%2x:%016lx,%016lx\n", 0,
			spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
		for (slot = 1; slot < 64; slot+=3) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", 
				slot,
				spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
				slot+1,
				spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
				slot+2,
				spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		printk ("Contents of dtlb0:\n");
		for (slot = 0; slot < 16; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
				slot+1,
				cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
		}
		printk ("Contents of dtlb2:\n");
		for (slot = 0; slot < 512; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
				slot+1,
				cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
		}
		if (tlb_type == cheetah_plus) {
			printk ("Contents of dtlb3:\n");
			for (slot = 0; slot < 512; slot+=2) {
				printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
					slot,
					cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
					slot+1,
					cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
			}
		}
	}
}

/* "mem=" command-line limit, defined elsewhere. */
extern unsigned long cmdline_memory_size;

/* Find a free area for the bootmem map, avoiding the kernel image
 * and the initial ramdisk.
 */
static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn,
					       unsigned long end_pfn)
{
	unsigned long avoid_start, avoid_end, bootmap_size;
	int i;

	/* One bit per pfn in [start_pfn, end_pfn), rounded up to a
	 * whole number of longs.
	 */
	bootmap_size = ((end_pfn - start_pfn) + 7) / 8;
	bootmap_size = ALIGN(bootmap_size, sizeof(long));

	avoid_start = avoid_end = 0;
#ifdef CONFIG_BLK_DEV_INITRD
	avoid_start = initrd_start;
	avoid_end = PAGE_ALIGN(initrd_end);
#endif

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("choose_bootmap_pfn: kern[%lx:%lx] avoid[%lx:%lx]\n",
		    kern_base, PAGE_ALIGN(kern_base + kern_size),
		    avoid_start, avoid_end);
#endif

	/* Walk each available physical memory region, sliding a
	 * candidate start address forward past the kernel image and
	 * the initrd until bootmap_size bytes fit clear of both.
	 */
	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		while (start < end) {
			/* Candidate begins inside the kernel image. */
			if (start >= kern_base &&
			    start < PAGE_ALIGN(kern_base + kern_size)) {
				start = PAGE_ALIGN(kern_base + kern_size);
				continue;
			}
			/* Candidate begins inside the initrd. */
			if (start >= avoid_start && start < avoid_end) {
				start = avoid_end;
				continue;
			}

			/* Not enough room left in this region. */
			if ((end - start) < bootmap_size)
				break;

			/* Candidate would run into the kernel image. */
			if (start < kern_base &&
			    (start + bootmap_size) > kern_base) {
				start = PAGE_ALIGN(kern_base + kern_size);
				continue;
			}

			/* Candidate would run into the initrd. */
			if (start < avoid_start &&
			    (start + bootmap_size) > avoid_start) {
				start = avoid_end;
				continue;
			}

			/* OK, it doesn't overlap anything, use it.  */
#ifdef CONFIG_DEBUG_BOOTMEM
			prom_printf("choose_bootmap_pfn: Using %lx [%lx]\n",
				    start >> PAGE_SHIFT, start);
#endif
			return start >> PAGE_SHIFT;
		}
	}

	/* No region can hold the bootmap: fatal at boot time. */
	prom_printf("Cannot find free area for bootmap, aborting.\n");
	prom_halt();
}

/* Set up the boot-time allocator from the pavail[] regions,
 * honouring the "mem=" limit and the initrd location.
 * NOTE(review): this chunk ends mid-function; the remainder of
 * bootmem_init() continues on the next page of the file.
 */
static unsigned long __init bootmem_init(unsigned long *pages_avail,
					 unsigned long phys_base)
{
	unsigned long bootmap_size, end_pfn;
	unsigned long end_of_phys_memory = 0UL;
	unsigned long bootmap_pfn, bytes_avail, size;
	int i;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("bootmem_init: Scan pavail, ");
#endif

	/* Sum available memory and find its end, truncating at the
	 * "mem=" command-line limit if one was given.
	 */
	bytes_avail = 0UL;
	for (i = 0; i < pavail_ents; i++) {
		end_of_phys_memory = pavail[i].phys_addr +
			pavail[i].reg_size;
		bytes_avail += pavail[i].reg_size;
		if (cmdline_memory_size) {
			if (bytes_avail > cmdline_memory_size) {
				unsigned long slack = bytes_avail - cmdline_memory_size;

				bytes_avail -= slack;
				end_of_phys_memory -= slack;

				pavail[i].reg_size -= slack;
				if ((long)pavail[i].reg_size <= 0L) {
					/* Region fully trimmed away;
					 * poison it and drop it.
					 */
					pavail[i].phys_addr = 0xdeadbeefUL;
					pavail[i].reg_size = 0UL;
					pavail_ents = i;
				} else {
					/* Region partially kept; poison
					 * the following slot.
					 */
					pavail[i+1].reg_size = 0Ul;
					pavail[i+1].phys_addr = 0xdeadbeefUL;
					pavail_ents = i + 1;
				}
				break;
			}
		}
	}

	*pages_avail = bytes_avail >> PAGE_SHIFT;

	end_pfn = end_of_phys_memory >> PAGE_SHIFT;

#ifdef CONFIG_BLK_DEV_INITRD
	/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image = sparc_ramdisk_image ?
			sparc_ramdisk_image : sparc_ramdisk_image64;
		/* Translate the ramdisk's boot-time virtual address
		 * into a physical address.
		 */
		ramdisk_image -= KERNBASE;
		initrd_start = ramdisk_image + phys_base;
		initrd_end = initrd_start + sparc_ramdisk_size;
		if (initrd_end > end_of_phys_memory) {
			printk(KERN_CRIT "initrd extends beyond end of memory "
		                 	 "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
			       initrd_end, end_of_phys_memory);
			initrd_start = 0;
			initrd_end = 0;
		}
	}
#endif	
	/* Initialize the boot-time allocator. */
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	bootmap_pfn = choose_bootmap_pfn(min_low_pfn, end_pfn);

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?