⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 init.c

📁 linux2.6.16版本
💻 C
📖 第 1 页 / 共 2 页
字号:
#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/* Pin mapping for percpu area into TLB */
	psr = ia64_clear_ic();
	ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
		 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
		 PERCPU_PAGE_SHIFT);

	ia64_set_psr(psr);
	ia64_srlz_i();

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
	 * which must fit into "vmlpt_bits - pte_bits" slots. Second half of
	 * the test makes sure that our mapped space doesn't overlap the
	 * unimplemented hole in the middle of the region.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP

/*
 * efi_memmap_walk() callback: make sure the kernel page-table pages
 * backing the vmem_map entries for physical range [start, end) exist,
 * allocating any missing pgd/pud/pmd/pte levels (and the final data
 * pages) from the bootmem allocator on the node owning that memory.
 * Always returns 0 so the memmap walk continues.
 */
int
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* struct page range inside vmem_map that describes [start, end) */
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}

/* Parameters passed from memmap_init() to virtual_memmap_init(). */
struct memmap_init_callback_data {
	struct page *start;	/* first struct page of the zone being initialized */
	struct page *end;	/* one past the last struct page of the zone */
	int nid;
	unsigned long zone;
};

/*
 * efi_memmap_walk() callback: initialize the struct page entries of
 * vmem_map that correspond to physical range [start, end), clipped to
 * the zone bounds passed in via *arg.  Always returns 0 so the walk
 * continues.
 */
static int
virtual_memmap_init (u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	/* clip to the zone currently being initialized */
	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start));
	return 0;
}

/*
 * Arch-specific replacement for the generic memmap_init_zone() call:
 * when a virtual mem_map is in use, only the pieces of the zone that
 * are backed by actual memory (per the EFI memory map) are initialized.
 */
void
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

/*
 * Check whether the struct page for @pfn is actually backed by memory,
 * by probing the first and (if it lives on a different page) the last
 * byte of the entry with __get_user(), which fails gracefully on an
 * unmapped hole in vmem_map rather than faulting.
 */
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

/*
 * efi_memmap_walk() callback: track the largest gap between successive
 * memory descriptors; the result accumulates in *(u64 *)arg.  Uses a
 * static to remember where the previous descriptor ended, so a walk
 * must run to completion before this is reused.
 */
int
find_largest_hole (u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;
	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}
#endif /* CONFIG_VIRTUAL_MEM_MAP */

/*
 * efi_memmap_walk() callback: count the PageReserved pages in physical
 * range [start, end) and add the total to *(unsigned long *)arg.
 */
static int
count_reserved_pages (u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */
static int nolwsys;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

/*
 * Late boot memory initialization: hand all bootmem pages to the page
 * allocator, register /proc/kcore ranges, print the memory summary,
 * and patch the fsyscall table for entries without a light-weight
 * handler.
 */
void
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	pg_data_t *pgdat;
	int i;
	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;

	/* each page-table level must occupy exactly one page */
	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	if (!mem_map)
		BUG();
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, _stext, _end - _stext);

	/* release bootmem to the buddy allocator, node by node */
	for_each_pgdat(pgdat)
		if (pgdat->bdata->node_bootmem_map)
			totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize =  (unsigned long) _etext - (unsigned long) _stext;
	datasize =  (unsigned long) _edata - (unsigned long) _etext;
	initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

	/*
	 * For fsyscall entrypoints with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();

#ifdef CONFIG_IA32_SUPPORT
	ia32_mem_init();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Hand a newly hot-added page to the page allocator and update the
 * global memory accounting.
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	set_page_count(page, 1);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

/*
 * Hot-add the physical range [start, start + size) to ZONE_NORMAL of
 * node 0.  Returns 0 on success or the negative error from
 * __add_pages().
 * NOTE(review): node 0 / ZONE_NORMAL are hard-wired here — presumably
 * adequate for the platforms this supports; confirm before relying on
 * multi-node hotplug.
 */
int add_memory(u64 start, u64 size)
{
	pg_data_t *pgdat;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(0);

	zone = pgdat->node_zones + ZONE_NORMAL;
	ret = __add_pages(zone, start_pfn, nr_pages);

	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __FUNCTION__,  ret);

	return ret;
}

/* Memory hot-remove is not supported on this architecture. */
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
#endif

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -