
📄 init_64.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 2
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
 * physical space so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
        long i, total = 0, reserved = 0;
        long shared = 0, cached = 0;
        pg_data_t *pgdat;
        struct page *page;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap:       %6ldkB\n",
               nr_swap_pages << (PAGE_SHIFT - 10));

        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        /* this loop can take a while with 256 GB and 4k pages
                           so update the NMI watchdog */
                        if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) {
                                touch_nmi_watchdog();
                        }
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pfn_to_page(pgdat->node_start_pfn + i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk(KERN_INFO "%lu pages of RAM\n", total);
        printk(KERN_INFO "%lu reserved pages\n", reserved);
        printk(KERN_INFO "%lu pages shared\n", shared);
        printk(KERN_INFO "%lu pages swap cached\n", cached);
}

int after_bootmem;

static __init void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);
        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
                panic("set_pte_phys: cannot allocate page data %s\n",
                      after_bootmem ? "after bootmem" : "");

        Dprintk("spp_getpage %p\n", ptr);
        return ptr;
}

static __init void set_pte_phys(unsigned long vaddr,
                                unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, new_pte;

        Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                pmd = (pmd_t *) spp_getpage();
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
                if (pmd != pmd_offset(pud, 0)) {
                        printk("PAGETABLE BUG #01! %p <-> %p\n",
                               pmd, pmd_offset(pud, 0));
                        return;
                }
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                pte = (pte_t *) spp_getpage();
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
                if (pte != pte_offset_kernel(pmd, 0)) {
                        printk("PAGETABLE BUG #02!\n");
                        return;
                }
        }
        new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(*pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk("Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, prot);
}

unsigned long __meminitdata table_start, table_end;

static __meminit void *alloc_low_page(unsigned long *phys)
{
        unsigned long pfn = table_end++;
        void *adr;

        if (after_bootmem) {
                adr = (void *)get_zeroed_page(GFP_ATOMIC);
                *phys = __pa(adr);
                return adr;
        }

        if (pfn >= end_pfn)
                panic("alloc_low_page: ran out of memory");

        adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        *phys = pfn * PAGE_SIZE;
        return adr;
}

static __meminit void unmap_low_page(void *adr)
{
        if (after_bootmem)
                return;

        early_iounmap(adr, PAGE_SIZE);
}

/* Must run before zap_low_mappings */
__meminit void *early_ioremap(unsigned long addr, unsigned long size)
{
        unsigned long vaddr;
        pmd_t *pmd, *last_pmd;
        int i, pmds;

        pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
        vaddr = __START_KERNEL_map;
        pmd = level2_kernel_pgt;
        last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
        for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
                for (i = 0; i < pmds; i++) {
                        if (pmd_present(pmd[i]))
                                goto next;
                }
                vaddr += addr & ~PMD_MASK;
                addr &= PMD_MASK;
                for (i = 0; i < pmds; i++, addr += PMD_SIZE)
                        set_pmd(pmd + i, __pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
                __flush_tlb();
                return (void *)vaddr;
        next:
                ;
        }
        printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
        return NULL;
}

/* To avoid virtual aliases later */
__meminit void early_iounmap(void *addr, unsigned long size)
{
        unsigned long vaddr;
        pmd_t *pmd;
        int i, pmds;

        vaddr = (unsigned long)addr;
        pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
        pmd = level2_kernel_pgt + pmd_index(vaddr);
        for (i = 0; i < pmds; i++)
                pmd_clear(pmd + i);
        __flush_tlb();
}

static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
        int i = pmd_index(address);

        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                unsigned long entry;
                pmd_t *pmd = pmd_page + pmd_index(address);

                if (address >= end) {
                        if (!after_bootmem)
                                for (; i < PTRS_PER_PMD; i++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                        break;
                }

                if (pmd_val(*pmd))
                        continue;

                entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
                entry &= __supported_pte_mask;
                set_pmd(pmd, __pmd(entry));
        }
}

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
        pmd_t *pmd = pmd_offset(pud, 0);

        spin_lock(&init_mm.page_table_lock);
        phys_pmd_init(pmd, address, end);
        spin_unlock(&init_mm.page_table_lock);
        __flush_tlb_all();
}

static void __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
        int i = pud_index(addr);

        for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
                unsigned long pmd_phys;
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;

                if (addr >= end)
                        break;

                if (!after_bootmem && !e820_any_mapped(addr, addr + PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                if (pud_val(*pud)) {
                        phys_pmd_update(pud, addr, end);
                        continue;
                }

                pmd = alloc_low_page(&pmd_phys);
                spin_lock(&init_mm.page_table_lock);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
                phys_pmd_init(pmd, addr, end);
                spin_unlock(&init_mm.page_table_lock);
                unmap_low_page(pmd);
        }
        __flush_tlb();
}

static void __init find_early_table_space(unsigned long end)
{
        unsigned long puds, pmds, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
                 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

        /* RED-PEN putting page tables only on node 0 could
           cause a hotspot and fill up ZONE_DMA. The page tables
           need roughly 0.5KB per GB. */
        start = 0x8000;
        table_start = find_e820_area(start, end, tables);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;

        early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
                     end, table_start << PAGE_SHIFT,
                     (table_start << PAGE_SHIFT) + tables);
}

/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
{
        unsigned long next;

        Dprintk("init_memory_mapping\n");

        /*
         * Find space for the kernel direct mapping tables.
         * Later we should allocate these tables in the local node of the memory
         * mapped.  Unfortunately this is done currently before the nodes are
         * discovered.
         */
        if (!after_bootmem)
                find_early_table_space(end);

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                unsigned long pud_phys;
                pgd_t *pgd = pgd_offset_k(start);
                pud_t *pud;

                if (after_bootmem)
                        pud = pud_offset(pgd, start & PGDIR_MASK);
                else
                        pud = alloc_low_page(&pud_phys);

                next = start + PGDIR_SIZE;
                if (next > end)
                        next = end;
                phys_pud_init(pud, __pa(start), __pa(next));
                if (!after_bootmem)
                        set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
                unmap_low_page(pud);
        }

        if (!after_bootmem)
                mmu_cr4_features = read_cr4();
        __flush_tlb_all();
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
