
📄 init.c

📁 Source code of the linux-2.4.29 operating system
💻 C
📖 Page 1 of 2
	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	if (POW2(mapped_space_bits) >= pta)
		panic("mm/init: overlap between virtually mapped linear page table and "
		      "mapped kernel space!");
	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef CONFIG_IA64_MCA
	cpu = smp_processor_id();

	/* mca handler uses cr.lid as key to pick the right entry */
	ia64_mca_tlb_list[cpu].cr_lid = ia64_get_lid();

	/* insert this percpu data information into our list for MCA recovery purposes */
	ia64_mca_tlb_list[cpu].percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));

	/* Also save per-cpu tlb flush recipe for use in physical mode mca handler */
	ia64_mca_tlb_list[cpu].ptce_base = local_cpu_data->ptce_base;
	ia64_mca_tlb_list[cpu].ptce_count[0] = local_cpu_data->ptce_count[0];
	ia64_mca_tlb_list[cpu].ptce_count[1] = local_cpu_data->ptce_count[1];
	ia64_mca_tlb_list[cpu].ptce_stride[0] = local_cpu_data->ptce_stride[0];
	ia64_mca_tlb_list[cpu].ptce_stride[1] = local_cpu_data->ptce_stride[1];
#endif
}

static int
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page, next_blk_page;
	unsigned long blk_start;
	struct page *map_start, *map_end;
	int node = 0;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	/* should we use platform_map_nr here? */
	map_start = vmem_map + MAP_NR_DENSE(start);
	map_end   = vmem_map + MAP_NR_DENSE(end);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);

	/* force the first iteration to get node id */
	blk_start = start;
	next_blk_page = 0;

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		/* if we went across a node boundary, get new nid */
		if (address >= next_blk_page) {
			struct page *map_next_blk;

			node = paddr_to_nid(__pa(blk_start));

			/* get end addr of this memblk as next blk_start */
			blk_start = (unsigned long) __va(min(end, memblk_endpaddr(__pa(blk_start))));
			map_next_blk = vmem_map + MAP_NR_DENSE(blk_start);
			next_blk_page = PAGE_ALIGN((unsigned long) map_next_blk);
		}

		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pgd, address);
		if (pmd_none(*pmd))
			pmd_populate(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset(pmd, address);
		if (pte_none(*pte))
			set_pte(pte, mk_pte_phys(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)),
						 PAGE_KERNEL));
	}
	return 0;
}
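create_mem_map_page_table() only maps whole pages of the virtual mem_map, so the struct page range is first widened to page boundaries with PAGE_MASK and PAGE_ALIGN. Below is a minimal standalone sketch of that rounding, assuming a hypothetical 16KB page size (the real PAGE_SIZE is a kernel configuration choice):

#include <stdio.h>

#define PAGE_SHIFT	14	/* assumed: 16KB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* hypothetical addresses of the first/last struct page to map */
	unsigned long map_start = 0xa000000000123456UL;
	unsigned long map_end   = 0xa000000000234567UL;

	unsigned long start_page = map_start & PAGE_MASK;	/* round down */
	unsigned long end_page   = PAGE_ALIGN(map_end);		/* round up */

	printf("map  [0x%lx, 0x%lx)\n", map_start, map_end);
	printf("page [0x%lx, 0x%lx)\n", start_page, end_page);
	return 0;
}

Rounding outward rather than inward is what makes the mapping loop safe: every byte of the struct page array, including the partial pages at either end, ends up backed by a mapped page.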
struct memmap_init_callback_data {
	memmap_init_callback_t *memmap_init;
	struct page *start;
	struct page *end;
	int zone;
	int highmem;
};

struct memmap_count_callback_data {
	int node;
	unsigned long num_physpages;
	unsigned long num_dma_physpages;
	unsigned long min_pfn;
	unsigned long max_pfn;
} cdata;

static int
virtual_memmap_init (u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;

	/* Should we use platform_map_nr here? */
	map_start = mem_map + MAP_NR_DENSE(start);
	map_end   = mem_map + MAP_NR_DENSE(end);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements
	 * that fit completely on the same pages that were allocated
	 * for the "in bounds" elements because they may be referenced
	 * later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		(*args->memmap_init)(map_start, map_end, args->zone,
				     page_to_phys(map_start), args->highmem);
	return 0;
}

unsigned long
arch_memmap_init (memmap_init_callback_t *memmap_init, struct page *start,
		  struct page *end, int zone, unsigned long start_paddr, int highmem)
{
	if (!vmem_map)
		memmap_init(start, end, zone, page_to_phys(start), highmem);
	else {
		struct memmap_init_callback_data args;

		args.memmap_init = memmap_init;
		args.start = start;
		args.end = end;
		args.zone = zone;
		args.highmem = highmem;
		efi_memmap_walk(virtual_memmap_init, &args);
	}
	return page_to_phys(end - 1) + PAGE_SIZE;
}

int
ia64_page_valid (struct page *page)
{
	char byte;

	return     (__get_user(byte, (char *) page) == 0)
		&& (__get_user(byte, (char *) (page + 1) - 1) == 0);
}

#define GRANULEROUNDDOWN(n)	((n) & ~(IA64_GRANULE_SIZE-1))
#define GRANULEROUNDUP(n)	(((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
#define ORDERROUNDDOWN(n)	((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))

static int
count_pages (u64 start, u64 end, int node)
{
	start = __pa(start);
	end = __pa(end);

	if (node == cdata.node) {
		cdata.num_physpages += (end - start) >> PAGE_SHIFT;
		if (start <= __pa(MAX_DMA_ADDRESS))
			cdata.num_dma_physpages += (min(end, __pa(MAX_DMA_ADDRESS)) - start) >> PAGE_SHIFT;
		/* start and end are already physical at this point */
		start = GRANULEROUNDDOWN(start);
		start = ORDERROUNDDOWN(start);
		end = GRANULEROUNDUP(end);
		cdata.max_pfn = max(cdata.max_pfn, end >> PAGE_SHIFT);
		cdata.min_pfn = min(cdata.min_pfn, start >> PAGE_SHIFT);
	}
	return 0;
}

static int
find_largest_hole (u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;
	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */
	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}
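count_pages() leans on the granule-rounding macros defined just above; both are the usual power-of-two alignment tricks. A quick standalone check of their behavior, assuming a 16MB granule (the actual IA64_GRANULE_SIZE depends on the kernel configuration):

#include <stdio.h>

#define IA64_GRANULE_SIZE	(1UL << 24)	/* assumed: 16MB granule */
#define GRANULEROUNDDOWN(n)	((n) & ~(IA64_GRANULE_SIZE-1))
#define GRANULEROUNDUP(n)	(((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))

int main(void)
{
	unsigned long addr = 0x4123456UL;

	printf("down: 0x%lx\n", GRANULEROUNDDOWN(addr));	/* 0x4000000 */
	printf("up:   0x%lx\n", GRANULEROUNDUP(addr));		/* 0x5000000 */
	return 0;
}

Because both masks clear the low bits, min_pfn and max_pfn always land on granule boundaries, which is what the zone sizing in paging_init() below expects.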
/*
 * Set up the page tables.
 */
void
paging_init (void)
{
	unsigned long max_dma;
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long max_gap;
	int node;

	/* initialize mem_map[] */
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	max_gap = 0;
	efi_memmap_walk(find_largest_hole, (u64 *) &max_gap);

	for (node = 0; node < numnodes; node++) {
		memset(zones_size, 0, sizeof(zones_size));
		memset(zholes_size, 0, sizeof(zholes_size));
		memset(&cdata, 0, sizeof(cdata));

		cdata.node = node;
		cdata.min_pfn = ~0;

		efi_memmap_walk(filter_rsvd_memory, count_pages);
		num_dma_physpages += cdata.num_dma_physpages;
		num_physpages += cdata.num_physpages;

		if (cdata.min_pfn >= max_dma) {
			/* node lies entirely above the DMA limit */
			zones_size[ZONE_NORMAL] = cdata.max_pfn - cdata.min_pfn;
			zholes_size[ZONE_NORMAL] = cdata.max_pfn - cdata.min_pfn - cdata.num_physpages;
		} else if (cdata.max_pfn < max_dma) {
			/* node fits entirely below the DMA limit */
			zones_size[ZONE_DMA] = cdata.max_pfn - cdata.min_pfn;
			zholes_size[ZONE_DMA] = cdata.max_pfn - cdata.min_pfn - cdata.num_dma_physpages;
		} else {
			/* node straddles the DMA limit */
			zones_size[ZONE_DMA] = max_dma - cdata.min_pfn;
			zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] - cdata.num_dma_physpages;
			zones_size[ZONE_NORMAL] = cdata.max_pfn - max_dma;
			zholes_size[ZONE_NORMAL] = zones_size[ZONE_NORMAL] -
						   (cdata.num_physpages - cdata.num_dma_physpages);
		}

		if (numnodes == 1 && max_gap < LARGE_GAP) {
			/* memory is contiguous enough for a dense mem_map */
			vmem_map = (struct page *) 0;
			zones_size[ZONE_DMA] += cdata.min_pfn;
			zholes_size[ZONE_DMA] += cdata.min_pfn;
			free_area_init_core(0, NODE_DATA(node), &mem_map, zones_size, 0, zholes_size, NULL);
		} else {
			/* allocate virtual mem_map */
			if (node == 0) {
				unsigned long map_size;

				map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
				vmalloc_end -= map_size;
				mem_map = vmem_map = (struct page *) vmalloc_end;
				efi_memmap_walk(create_mem_map_page_table, 0);
				printk(KERN_INFO "Virtual mem_map starts at 0x%p\n", mem_map);
			}

			free_area_init_node(node, NODE_DATA(node), vmem_map + cdata.min_pfn, zones_size,
					    cdata.min_pfn << PAGE_SHIFT, zholes_size);
		}
	}

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}

static int
count_reserved_pages (u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	struct page *pg;

	for (pg = virt_to_page((void *) start); pg < virt_to_page((void *) end); ++pg)
		if (PageReserved(pg))
			++num_reserved;
	reserved_pages += num_reserved;
	return 0;
}
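paging_init() above splits each node's pfn span at the DMA boundary, and a zone's zholes_size is simply its pfn span minus the pages actually present in it. A sketch of the same three-way case analysis, with all numbers made up:

#include <stdio.h>

int main(void)
{
	/* hypothetical node: pfns 0x100-0x5000 present, 0x3800 of them real */
	unsigned long min_pfn = 0x100, max_pfn = 0x5000;
	unsigned long num_phys = 0x3800, dma_phys = 0xe00;
	unsigned long max_dma = 0x1000;		/* pfn of MAX_DMA_ADDRESS */
	unsigned long zones[2] = { 0, 0 }, holes[2] = { 0, 0 };	/* [0]=DMA, [1]=NORMAL */

	if (min_pfn >= max_dma) {		/* entirely above the DMA limit */
		zones[1] = max_pfn - min_pfn;
		holes[1] = zones[1] - num_phys;
	} else if (max_pfn < max_dma) {		/* entirely below the DMA limit */
		zones[0] = max_pfn - min_pfn;
		holes[0] = zones[0] - dma_phys;
	} else {				/* straddles the DMA limit */
		zones[0] = max_dma - min_pfn;
		holes[0] = zones[0] - dma_phys;
		zones[1] = max_pfn - max_dma;
		holes[1] = zones[1] - (num_phys - dma_phys);
	}
	printf("DMA 0x%lx (holes 0x%lx), NORMAL 0x%lx (holes 0x%lx)\n",
	       zones[0], holes[0], zones[1], holes[1]);
	return 0;
}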
void
mem_init (void)
{
	extern char __start_gate_section[];
	long codesize, datasize, initsize;
	unsigned long num_pgt_pages;
	pg_data_t *pgdat;

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_pci_dma_init();
#endif

	if (!mem_map)
		BUG();

	max_mapnr = max_low_pfn;
	high_memory = __va(max_low_pfn * PAGE_SIZE);

	for_each_pgdat(pgdat)
		totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(filter_rsvd_memory, count_reserved_pages);

	codesize =  (unsigned long) &_etext - (unsigned long) &_stext;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, %luk data, %luk init)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

	/*
	 * Allow for enough (cached) page table pages so that we can map the entire memory
	 * at least once.  Each task also needs a couple of page tables pages, so add in a
	 * fudge factor for that (don't use "threads-max" here; that would be wrong!).
	 * Don't allow the cache to be more than 10% of total memory, though.
	 */
#	define NUM_TASKS	500	/* typical number of tasks */
	num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
	if (num_pgt_pages > nr_free_pages() / 10)
		num_pgt_pages = nr_free_pages() / 10;
	if (num_pgt_pages > pgt_cache_water[1])
		pgt_cache_water[1] = num_pgt_pages;

	/* install the gate page in the global page table: */
	put_gate_page(virt_to_page(ia64_imva(__start_gate_section)), GATE_ADDR);

#ifdef CONFIG_IA32_SUPPORT
	ia32_gdt_init();
#endif
}
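The num_pgt_pages computation at the end of mem_init() is a simple heuristic: cache enough page-table pages to map all of memory once plus a per-task fudge factor, but never more than 10% of free memory. The same arithmetic in isolation (PTRS_PER_PGD = 2048 matches 16KB pages with 8-byte entries; all numbers are illustrative):

#include <stdio.h>

#define PTRS_PER_PGD	2048	/* assumed: 16KB pages, 8-byte entries */
#define NUM_TASKS	500	/* typical number of tasks (from mem_init) */

int main(void)
{
	unsigned long free_pages = 262144;	/* hypothetical: 4GB of 16KB pages */
	unsigned long num_pgt_pages = free_pages / PTRS_PER_PGD + NUM_TASKS;

	/* cap the cache at 10% of free memory */
	if (num_pgt_pages > free_pages / 10)
		num_pgt_pages = free_pages / 10;

	printf("pgt cache high-water mark: %lu pages\n", num_pgt_pages);	/* 628 */
	return 0;
}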
