
📄 init.c

📁 Describes the Linux initialization process
💻 C
📖 Page 1 of 3
	start = 0;
	size = 0;
	for (i = 0; i < phys_avail.n_regions; ++i) {
		unsigned long a = phys_avail.regions[i].address;
		unsigned long s = phys_avail.regions[i].size;
		if (s <= size)
			continue;
		start = a;
		size = s;
		if (s >= 33 * PAGE_SIZE)
			break;
	}
	start = PAGE_ALIGN(start);
	boot_mapsize = init_bootmem(start >> PAGE_SHIFT,
				    total_lowmem >> PAGE_SHIFT);

	/* remove the bootmem bitmap from the available memory */
	mem_pieces_remove(&phys_avail, start, boot_mapsize, 1);

	/* add everything in phys_avail into the bootmem map */
	for (i = 0; i < phys_avail.n_regions; ++i)
		free_bootmem(phys_avail.regions[i].address,
			     phys_avail.regions[i].size);

	init_bootmem_done = 1;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES], i;

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = pte_offset(pmd_offset(pgd_offset_k(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
	kmap_pte = pte_offset(pmd_offset(pgd_offset_k(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	/*
	 * Grab some memory for bad_page and bad_pagetable to use.
	 */
	empty_bad_page = alloc_bootmem_pages(PAGE_SIZE);
	empty_bad_page_table = alloc_bootmem_pages(PAGE_SIZE);

	/*
	 * All pages are DMA-able so we put them all in the DMA zone.
	 */
	zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;

#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
#endif /* CONFIG_HIGHMEM */

	free_area_init(zones_size);
}

void __init mem_init(void)
{
	extern char *sysmap;
	extern unsigned long sysmap_size;
	unsigned long addr;
	int codepages = 0;
	int datapages = 0;
	int initpages = 0;
#ifdef CONFIG_HIGHMEM
	unsigned long highmem_mapnr;

	highmem_mapnr = total_lowmem >> PAGE_SHIFT;
	highmem_start_page = mem_map + highmem_mapnr;
	max_mapnr = total_memory >> PAGE_SHIFT;
	totalram_pages += max_mapnr - highmem_mapnr;
#else
	max_mapnr = max_low_pfn;
#endif /* CONFIG_HIGHMEM */

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	num_physpages = max_mapnr;	/* RAM is assumed contiguous */

	totalram_pages += free_all_bootmem();

#ifdef CONFIG_BLK_DEV_INITRD
	/* if we are booted from BootX with an initial ramdisk,
	   make sure the ramdisk pages aren't reserved.
	 */
	if (initrd_start) {
		for (addr = initrd_start; addr < initrd_end; addr += PAGE_SIZE)
			clear_bit(PG_reserved, &virt_to_page(addr)->flags);
	}
#endif /* CONFIG_BLK_DEV_INITRD */

#if defined(CONFIG_ALL_PPC)
	/* mark the RTAS pages as reserved */
	if ( rtas_data )
		for (addr = rtas_data; addr < PAGE_ALIGN(rtas_data+rtas_size) ;
		     addr += PAGE_SIZE)
			SetPageReserved(virt_to_page(addr));
#endif /* defined(CONFIG_ALL_PPC) */
	if ( sysmap_size )
		for (addr = (unsigned long)sysmap;
		     addr < PAGE_ALIGN((unsigned long)sysmap+sysmap_size) ;
		     addr += PAGE_SIZE)
			SetPageReserved(virt_to_page(addr));

	for (addr = PAGE_OFFSET; addr < (unsigned long)end_of_DRAM;
	     addr += PAGE_SIZE) {
		if (!PageReserved(virt_to_page(addr)))
			continue;
		if (addr < (ulong) etext)
			codepages++;
		else if (addr >= (unsigned long)&__init_begin
			 && addr < (unsigned long)&__init_end)
			initpages++;
		else if (addr < (ulong) klimit)
			datapages++;
	}

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn;

		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = mem_map + pfn;

			ClearPageReserved(page);
			set_bit(PG_highmem, &page->flags);
			atomic_set(&page->count, 1);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
	}
#endif /* CONFIG_HIGHMEM */

	printk("Memory: %luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
	       (unsigned long)nr_free_pages()<< (PAGE_SHIFT-10),
	       codepages<< (PAGE_SHIFT-10), datapages<< (PAGE_SHIFT-10),
	       initpages<< (PAGE_SHIFT-10),
	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));

	mem_init_done = 1;
}

#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx)
#if defined(CONFIG_ALL_PPC)
/*
 * On systems with Open Firmware, collect information about
 * physical RAM and which pieces are already in use.
 * At this point, we have (at least) the first 8MB mapped with a BAT.
 * Our text, data, bss use something over 1MB, starting at 0.
 * Open Firmware may be using 1MB at the 4MB point.
 */
unsigned long __init pmac_find_end_of_memory(void)
{
	unsigned long a, total;
	unsigned long ram_limit = 0xe0000000 - KERNELBASE;

	memory_node = find_devices("memory");
	if (memory_node == NULL) {
		printk(KERN_ERR "can't find memory node\n");
		abort();
	}

	/*
	 * Find out where physical memory is, and check that it
	 * starts at 0 and is contiguous.  It seems that RAM is
	 * always physically contiguous on Power Macintoshes,
	 * because MacOS can't cope if it isn't.
	 *
	 * Supporting discontiguous physical memory isn't hard,
	 * it just makes the virtual <-> physical mapping functions
	 * more complicated (or else you end up wasting space
	 * in mem_map).
	 */
	get_mem_prop("reg", &phys_mem);
	if (phys_mem.n_regions == 0)
		panic("No RAM??");
	a = phys_mem.regions[0].address;
	if (a != 0)
		panic("RAM doesn't start at physical address 0");
	if (__max_memory == 0 || __max_memory > ram_limit)
		__max_memory = ram_limit;
	if (phys_mem.regions[0].size >= __max_memory) {
		phys_mem.regions[0].size = __max_memory;
		phys_mem.n_regions = 1;
	}
	total = phys_mem.regions[0].size;

	if (phys_mem.n_regions > 1) {
		printk("RAM starting at 0x%x is not contiguous\n",
		       phys_mem.regions[1].address);
		printk("Using RAM from 0 to 0x%lx\n", total-1);
		phys_mem.n_regions = 1;
	}

	set_phys_avail(&phys_mem);
	return total;
}
#endif /* CONFIG_ALL_PPC */

#if defined(CONFIG_ALL_PPC)
/*
 * This finds the amount of physical ram and does necessary
 * setup for prep.  This is pretty architecture specific so
 * this will likely stay separate from the pmac.
 * -- Cort
 */
unsigned long __init prep_find_end_of_memory(void)
{
	unsigned long total;

	total = res->TotalMemory;

	if (total == 0 )
	{
		/*
		 * I need a way to probe the amount of memory if the residual
		 * data doesn't contain it. -- Cort
		 */
		printk("Ramsize from residual data was 0 -- Probing for value\n");
		total = 0x02000000;
		printk("Ramsize default to be %ldM\n", total>>20);
	}
	mem_pieces_append(&phys_mem, 0, total);
	set_phys_avail(&phys_mem);

	return (total);
}
#endif /* defined(CONFIG_ALL_PPC) */

#if defined(CONFIG_GEMINI)
unsigned long __init gemini_find_end_of_memory(void)
{
	unsigned long total;
	unsigned char reg;

	reg = readb(GEMINI_MEMCFG);
	total = ((1<<((reg & 0x7) - 1)) *
		 (8<<((reg >> 3) & 0x7)));
	total *= (1024*1024);
	phys_mem.regions[0].address = 0;
	phys_mem.regions[0].size = total;
	phys_mem.n_regions = 1;

	set_phys_avail(&phys_mem);
	return phys_mem.regions[0].size;
}
#endif /* defined(CONFIG_GEMINI) */

#ifdef CONFIG_8260
/*
 * Same hack as 8xx.
 */
unsigned long __init m8260_find_end_of_memory(void)
{
	bd_t	*binfo;
	extern unsigned char __res[];

	binfo = (bd_t *)__res;

	phys_mem.regions[0].address = 0;
	phys_mem.regions[0].size = binfo->bi_memsize;
	phys_mem.n_regions = 1;

	set_phys_avail(&phys_mem);

	return phys_mem.regions[0].size;
}
#endif /* CONFIG_8260 */

#ifdef CONFIG_APUS
#define HARDWARE_MAPPED_SIZE (512*1024)
unsigned long __init apus_find_end_of_memory(void)
{
	int shadow = 0;

	/* The memory size reported by ADOS excludes the 512KB
	   reserved for PPC exception registers and possibly 512KB
	   containing a shadow of the ADOS ROM. */
	{
		unsigned long size = memory[0].size;

		/* If 2MB aligned, size was probably user
		   specified. We can't tell anything about shadowing
		   in this case so skip shadow assignment. */
		if (0 != (size & 0x1fffff)){
			/* Align to 512KB to ensure correct handling
			   of both memfile and system specified
			   sizes. */
			size = ((size+0x0007ffff) & 0xfff80000);
			/* If memory is 1MB aligned, assume
			   shadowing. */
			shadow = !(size & 0x80000);
		}

		/* Add the chunk that ADOS does not see, by aligning
		   the size to the nearest 2MB limit upwards. */
		memory[0].size = ((size+0x001fffff) & 0xffe00000);
	}

	/* Now register the memory block. */
	mem_pieces_append(&phys_mem, memory[0].addr, memory[0].size);
	set_phys_avail(&phys_mem);

	/* Remove the memory chunks that are controlled by special
	   Phase5 hardware. */
	{
		unsigned long top = memory[0].addr + memory[0].size;

		/* Remove the upper 512KB if it contains a shadow of
		   the ADOS ROM. FIXME: It might be possible to
		   disable this shadow HW. Check the booter
		   (ppc_boot.c) */
		if (shadow)
		{
			top -= HARDWARE_MAPPED_SIZE;
			mem_pieces_remove(&phys_avail, top,
					  HARDWARE_MAPPED_SIZE, 0);
		}

		/* Remove the upper 512KB where the PPC exception
		   vectors are mapped. */
		top -= HARDWARE_MAPPED_SIZE;
#if 0
		/* This would be neat, but it breaks on A3000 machines!? */
		mem_pieces_remove(&phys_avail, top, 16384, 0);
#else
		mem_pieces_remove(&phys_avail, top, HARDWARE_MAPPED_SIZE, 0);
#endif
	}

	/* Linux/APUS only handles one block of memory -- the one on
	   the PowerUP board. Other system memory is horribly slow in
	   comparison. The user can use other memory for swapping
	   using the z2ram device. */
	return memory[0].addr + memory[0].size;
}
#endif /* CONFIG_APUS */

/*
 * Initialize the hash table and patch the instructions in head.S.
 */
static void __init hash_init(void)
{
	int Hash_bits, mb, mb2;
	unsigned int hmask, ramsize, h;
	extern unsigned int hash_page_patch_A[], hash_page_patch_B[],
		hash_page_patch_C[], hash_page[];

	ramsize = (ulong)end_of_DRAM - KERNELBASE;

#ifdef CONFIG_PPC64BRIDGE
	/* The hash table has already been allocated and initialized
	   in prom.c */
	Hash_mask = (Hash_size >> 7) - 1;
	hmask = Hash_mask >> 9;
	Hash_bits = __ilog2(Hash_size) - 7;
	mb = 25 - Hash_bits;
	if (Hash_bits > 16)
		Hash_bits = 16;
	mb2 = 25 - Hash_bits;
#else /* CONFIG_PPC64BRIDGE */

	if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
	/*
	 * Allow 64k of hash table for every 16MB of memory,
	 * up to a maximum of 2MB.
	 */
	for (h = 64<<10; h < ramsize / 256 && h < (2<<20); h *= 2)
		;
	Hash_size = h;
	Hash_mask = (h >> 6) - 1;
	hmask = Hash_mask >> 10;
	Hash_bits = __ilog2(h) - 6;
	mb = 26 - Hash_bits;
	if (Hash_bits > 16)
		Hash_bits = 16;
	mb2 = 26 - Hash_bits;

	/* shrink the htab since we don't use it on 603's -- Cort */
	switch (_get_PVR()>>16) {
	case 3: /* 603 */
	case 6: /* 603e */
	case 7: /* 603ev */
	case 0x0081: /* 82xx */
		Hash_size = 0;
		Hash_mask = 0;
		break;
	default:
		/* on 601/4 let things be */
		break;
	}

	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
	/* Find some memory for the hash table. */
	if ( Hash_size ) {
		Hash = mem_pieces_find(Hash_size, Hash_size);
		cacheable_memzero(Hash, Hash_size);
	} else
		Hash = 0;
#endif /* CONFIG_PPC64BRIDGE */

	printk("Total memory = %dMB; using %ldkB for hash table (at %p)\n",
	       ramsize >> 20, Hash_size >> 10, Hash);
	if ( Hash_size )
	{
		if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
		Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);

		/*
		 * Patch up the instructions in head.S:hash_page
		 */
		hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
			| (__pa(Hash) >> 16);
		hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0)
			| (mb << 6);
		hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0)
			| (mb2 << 6);
		hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff)
			| hmask;
		hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff)
			| hmask;
#if 0	/* see hash_page in head.S, note also patch_C ref below */
		hash_page_patch_D[0] = (hash_page_patch_D[0] & ~0xffff)
			| hmask;
#endif
		/*
		 * Ensure that the locations we've patched have been written
		 * out from the data cache and invalidated in the instruction
		 * cache, on those machines with split caches.
		 */
		flush_icache_range((unsigned long) &hash_page_patch_A[0],
				   (unsigned long) &hash_page_patch_C[1]);
	}
	else {
		Hash_end = 0;
		/*
		 * Put a blr (procedure return) instruction at the
		 * start of hash_page, since we can still get DSI
		 * exceptions on a 603.
		 */
		hash_page[0] = 0x4e800020;
		flush_icache_range((unsigned long) &hash_page[0],
				   (unsigned long) &hash_page[1]);
	}

	if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
}
#elif defined(CONFIG_8xx)
/*
 * This is a big hack right now, but it may turn into something real
 * someday.
 *
 * For the 8xx boards (at this time anyway), there is nothing to initialize
 * associated with the PROM.  Rather than include all of the prom.c
 * functions in the image just to get prom_init, all we really need right
 * now is the initialization of the physical memory region.
 */
unsigned long __init m8xx_find_end_of_memory(void)
{
	bd_t	*binfo;
	extern unsigned char __res[];

	binfo = (bd_t *)__res;

	phys_mem.regions[0].address = 0;
	phys_mem.regions[0].size = binfo->bi_memsize;
	phys_mem.n_regions = 1;

	set_phys_avail(&phys_mem);

	return phys_mem.regions[0].address + phys_mem.regions[0].size;
}
#endif /* !CONFIG_4xx && !CONFIG_8xx */

#ifdef CONFIG_OAK
/*
 * Return the virtual address representing the top of physical RAM
 * on the Oak board.
 */
unsigned long __init
oak_find_end_of_memory(void)
{
	extern unsigned char __res[];

	unsigned long *ret;
	bd_t *bip = (bd_t *)__res;

	phys_mem.regions[0].address = 0;
	phys_mem.regions[0].size = bip->bi_memsize;
	phys_mem.n_regions = 1;

	set_phys_avail(&phys_mem);
	return (phys_mem.regions[0].address + phys_mem.regions[0].size);
}
#endif

/*
 * Set phys_avail to phys_mem less the kernel text/data/bss.
 */
void __init
set_phys_avail(struct mem_pieces *mp)
{
	unsigned long kstart, ksize;

	/*
	 * Initially, available physical memory is equivalent to all
	 * physical memory.
	 */
	phys_avail = *mp;

	/*
	 * Map out the kernel text/data/bss from the available physical
	 * memory.
	 */
	kstart = __pa(_stext);	/* should be 0 */
	ksize = PAGE_ALIGN(klimit - _stext);

	mem_pieces_remove(&phys_avail, kstart, ksize, 0);
	mem_pieces_remove(&phys_avail, 0, 0x4000, 0);

#if defined(CONFIG_BLK_DEV_INITRD)
	/* Remove the init RAM disk from the available memory. */
	if (initrd_start) {
		mem_pieces_remove(&phys_avail, __pa(initrd_start),
				  initrd_end - initrd_start, 1);
	}
#endif /* CONFIG_BLK_DEV_INITRD */

#ifdef CONFIG_ALL_PPC
	/* remove the RTAS pages from the available memory */
	if (rtas_data)
		mem_pieces_remove(&phys_avail, rtas_data, rtas_size, 1);
#endif /* CONFIG_ALL_PPC */

#ifdef CONFIG_PPC64BRIDGE
	/* Remove the hash table from the available memory */
	if (Hash)
		mem_pieces_remove(&phys_avail, __pa(Hash), Hash_size, 1);
#endif /* CONFIG_PPC64BRIDGE */
}
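
The sizing loop in hash_init() above implements the rule stated in its comment: the hash table gets roughly 64 KB per 16 MB of RAM, rounded to a power of two no smaller than 64 KB and capped at 2 MB. The snippet below is a minimal user-space sketch, not part of the kernel listing, that reruns that loop for a few RAM sizes; the helper hash_size_for() and the main() driver are our own illustration, and only the doubling loop is copied from the code above.

/*
 * Standalone sketch (assumption: plain user-space C, not kernel code).
 * hash_size_for() is a hypothetical helper wrapping the sizing loop
 * from hash_init() so the 64KB-per-16MB rule can be checked in isolation.
 */
#include <stdio.h>

static unsigned int hash_size_for(unsigned int ramsize)
{
	unsigned int h;

	/* Same loop as in hash_init(): double h until it reaches
	   ramsize/256 bytes (64KB per 16MB) or the 2MB cap. */
	for (h = 64 << 10; h < ramsize / 256 && h < (2 << 20); h *= 2)
		;
	return h;
}

int main(void)
{
	unsigned int mb;

	/* Print the resulting hash table size for 16MB..1GB of RAM. */
	for (mb = 16; mb <= 1024; mb *= 2)
		printf("%4u MB RAM -> %4u KB hash table\n",
		       mb, hash_size_for(mb << 20) >> 10);
	return 0;
}

Running this shows, for example, that a 128 MB machine gets a 512 KB hash table and that anything from 512 MB upwards hits the 2 MB cap.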
