init.c

来自「linux2.6.16版本」· C语言 代码 · 共 746 行 · 第 1/2 页

C
746
字号
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/dma-mapping.h>
#include <asm/swiotlb.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops* dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
 * physical space so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

/*
 * Dump a summary of memory usage (free areas, swap, and per-node page
 * accounting) to the kernel log.
 *
 * FIX: the counters were declared signed `long` but printed with "%lu";
 * a format/argument type mismatch is undefined behavior.  Declaring them
 * `unsigned long` matches the "%lu" format strings (counts can never be
 * negative anyway).
 */
void show_mem(void)
{
	unsigned long i, total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				/* page_count includes our reference; the rest are shares */
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n", reserved);
	printk(KERN_INFO "%lu pages shared\n", shared);
	printk(KERN_INFO "%lu pages swap cached\n", cached);
}

/* References to section boundaries */

int after_bootmem;

/*
 * Allocate one zeroed, page-aligned page for an intermediate page table.
 * Uses the bootmem allocator before the page allocator is up, GFP_ATOMIC
 * afterwards.  Panics on failure or misalignment — callers cannot recover.
 */
static void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

/*
 * Install a single kernel PTE mapping vaddr -> phys with protection prot,
 * allocating intermediate PUD/PMD levels on demand.  The PGD entry must
 * already exist (set up in head.S for the fixmap region).
 */
static void set_pte_phys(unsigned long vaddr,
			 unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		/* sanity check: the PMD we installed must be the one we read back */
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	/* complain if we silently overwrite a different, live mapping */
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

/* pfn range [table_start, table_end) holds the early page-table pages */
unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

/*
 * Temporary 2MB mappings (at 40MB and 42MB virtual) used to write early
 * page-table pages before the direct mapping exists.
 */
static  struct temp_map {
	pmd_t *pmd;
	void  *address;
	int    allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};

/*
 * Hand out the next zeroed page-table page.  After bootmem is up this is a
 * plain GFP_ATOMIC page; before that, the next pfn from the reserved
 * [table_start, table_end) window is mapped through a free temp_mappings
 * slot.  *index receives the slot (for unmap_low_page) and *phys the
 * physical address of the page.
 */
static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");

	/* find the first slot not currently allocated */
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	/* map the 2MB-aligned region containing pfn with a large-page PMD */
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();

	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	memset(adr, 0, PAGE_SIZE);
	*index = i;
	*phys  = pfn * PAGE_SIZE;
	return adr;
}

/* Release temp_mappings slot i acquired by alloc_low_page (no-op after bootmem). */
static __meminit void unmap_low_page(int i)
{
	struct temp_map *ti;

	if (after_bootmem)
		return;

	ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}

/*
 * Fill one PMD page with 2MB large-page entries mapping [address, end);
 * entries beyond `end` are cleared.
 */
static void __meminit
phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
{
	int i;

	for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
		unsigned long entry;

		if (address > end) {
			/* zero out the remainder of the PMD page */
			for (; i < PTRS_PER_PMD; i++, pmd++)
				set_pmd(pmd, __pmd(0));
			break;
		}
		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}

/*
 * Hotplug path: populate a not-yet-present PMD under an existing PUD,
 * taking the init_mm page-table lock.
 */
static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));

	if (pmd_none(*pmd)) {
		spin_lock(&init_mm.page_table_lock);
		phys_pmd_init(pmd, address, end);
		spin_unlock(&init_mm.page_table_lock);
		__flush_tlb_all();
	}
}

/*
 * Populate PUD entries covering [address, end), allocating one PMD page
 * per 1GB region.  Regions not present in the e820 map are skipped at
 * boot time.
 */
static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i = pud_index(address);

	pud = pud + i;

	/* memory hotplug: extend an already-present PUD entry */
	if (after_bootmem && pud_val(*pud)) {
		phys_pmd_update(pud, address, end);
		return;
	}

	for (; i < PTRS_PER_PUD; pud++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
		if (paddr >= end)
			break;

		if (!after_bootmem && !e820_mapped(paddr, paddr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, paddr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(map);
	}
	__flush_tlb();
}

/*
 * Reserve a physical window (found via e820) big enough for all the
 * PUD/PMD pages needed to direct-map memory up to `end`, recording it as
 * pfns in table_start/table_end.
 */
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 0.5KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
}

/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped.  Unfortunately this is done currently before the nodes are
	 * discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		/*
		 * FIX: initialize `map`.  On the after_bootmem path it was
		 * passed to unmap_low_page() without ever being assigned;
		 * reading an uninitialized variable is undefined behavior
		 * (unmap_low_page ignores it then, but the read itself is UB).
		 */
		int map = 0;
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset_k(pgd, __PAGE_OFFSET);
		else
			pud = alloc_low_page(&map, &pud_phys);
		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}

void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For AP's, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing local flush tlb all

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?