
📄 init.c

📁 linux-2.6.15.6
💻 C
	} else {
		/*
		 * For AP's, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing local flush tlb all
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}

/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
	   unsigned long start_pfn, unsigned long end_pfn)
{
	int i;
	unsigned long w;

	for (i = 0; i < MAX_NR_ZONES; i++)
		z[i] = 0;

	if (start_pfn < MAX_DMA_PFN)
		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
	if (start_pfn < MAX_DMA32_PFN) {
		unsigned long dma32_pfn = MAX_DMA32_PFN;
		if (dma32_pfn > end_pfn)
			dma32_pfn = end_pfn;
		z[ZONE_DMA32] = dma32_pfn - start_pfn;
	}
	z[ZONE_NORMAL] = end_pfn - start_pfn;

	/* Remove lower zones from higher ones. */
	w = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (z[i])
			z[i] -= w;
		w += z[i];
	}

	/* Compute holes */
	w = start_pfn;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		unsigned long s = w;
		w += z[i];
		h[i] = e820_hole_size(s, w);
	}

	/* Add the space needed for mem_map to the holes too. */
	for (i = 0; i < MAX_NR_ZONES; i++)
		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

	/* The 16MB DMA zone has the kernel and other misc mappings.
	   Account them too */
	if (h[ZONE_DMA]) {
		h[ZONE_DMA] += dma_reserve;
		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
			printk(KERN_WARNING
				"Kernel too large and filling up ZONE_DMA?\n");
			h[ZONE_DMA] = z[ZONE_DMA];
		}
	}
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];

	size_zones(zones, holes, 0, end_pfn);
	free_area_init_node(0, NODE_DATA(0), zones,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;
	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
		"clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

#ifdef CONFIG_SWIOTLB
	if (!iommu_aperture &&
	    (end_pfn >= 0xffffffff>>PAGE_SHIFT || force_iommu))
		swiotlb = 1;
	if (swiotlb)
		swiotlb_init();
#endif

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	num_physpages = end_pfn;
	high_memory = (void *) __va(end_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

	after_bootmem = 1;

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

#ifdef CONFIG_SMP
	/*
	 * Sync boot_level4_pgt mappings with the init_level4_pgt
	 * except for the low identity mappings which are already zapped
	 * in init_level4_pgt. This sync-up is essential for AP's bringup
	 */
	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
	printk("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < (unsigned long)&_end)
		return;
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
		dma_reserve += len / PAGE_SIZE;
}

int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access for the vsyscall page.  This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */
static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context.  It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}
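
The least obvious step in size_zones() above is the "remove lower zones from higher ones" loop: z[] is first filled with cumulative spans measured from start_pfn, and subtracting the running total w turns each slot into a per-zone page count. A minimal standalone sketch of that arithmetic, separate from init.c; the MAX_DMA_PFN/MAX_DMA32_PFN values (16MB and 4GB in 4KB pages) and the 8GB end_pfn are assumed for illustration:

#include <stdio.h>

/* Assumed constants: 16MB and 4GB expressed in 4KB pages, as on x86_64. */
#define MAX_DMA_PFN	0x1000UL
#define MAX_DMA32_PFN	0x100000UL

enum { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, MAX_NR_ZONES };

int main(void)
{
	unsigned long z[MAX_NR_ZONES] = { 0 };
	unsigned long start_pfn = 0, end_pfn = 0x200000;	/* hypothetical 8GB node */
	unsigned long w = 0;
	int i;

	/* Cumulative spans, as size_zones() fills them in. */
	if (start_pfn < MAX_DMA_PFN)
		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
	if (start_pfn < MAX_DMA32_PFN)
		z[ZONE_DMA32] = (end_pfn < MAX_DMA32_PFN ? end_pfn : MAX_DMA32_PFN) - start_pfn;
	z[ZONE_NORMAL] = end_pfn - start_pfn;

	/* "Remove lower zones from higher ones": subtract the running total
	   so each slot holds only its own pages. */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (z[i])
			z[i] -= w;
		w += z[i];
	}

	for (i = 0; i < MAX_NR_ZONES; i++)
		printf("zone %d: %lu pages\n", i, z[i]);
	return 0;	/* prints 4096, 1044480, 1048576 */
}

In the kernel itself, size_zones() then subtracts the e820 holes and the mem_map overhead from these counts before paging_init() passes them to free_area_init_node().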
