
mmu.c

linux kernel source code

C

Page 1 of 2
	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
			    type->prot_pte_ext);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
				      unsigned long end, unsigned long phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pgd, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

		if (addr & SECTION_SIZE)
			pmd++;

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}

static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long phys, addr, length, end;
	pgd_t *pgd;

	addr = md->virtual;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for "
		       "0x%08llx at 0x%08lx invalid alignment\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
{
	unsigned long phys, addr, length, end;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}

	addr = md->virtual & PAGE_MASK;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_section(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}

static inline void prepare_page_table(struct meminfo *mi)
{
	unsigned long addr;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */
	for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
	     addr < VMALLOC_END; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));
}

/*
 * Reserve the various regions of node 0
 */
void __init reserve_node_zero(pg_data_t *pgdat)
{
	unsigned long res_size = 0;

	/*
	 * Register the kernel text and data with bootmem.
	 * Note that this can only be in node 0.
	 */
#ifdef CONFIG_XIP_KERNEL
	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
#else
	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
#endif

	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
			     PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Hmm... This should go elsewhere, but we really really need to
	 * stop things allocating the low memory; ideally we need a better
	 * implementation of GFP_DMA which does not assume that DMA-able
	 * memory starts at zero.
	 */
	if (machine_is_integrator() || machine_is_cintegrator())
		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;

	/*
	 * These should likewise go elsewhere.  They pre-reserve the
	 * screen memory region at the start of main system memory.
	 */
	if (machine_is_edb7211())
		res_size = 0x00020000;
	if (machine_is_p720t())
		res_size = 0x00014000;

	/* H1940 and RX3715 need to reserve this for suspend */
	if (machine_is_h1940() || machine_is_rx3715()) {
		reserve_bootmem_node(pgdat, 0x30003000, 0x1000);
		reserve_bootmem_node(pgdat, 0x30081000, 0x1000);
	}

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
#endif
	if (res_size)
		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
	BUG_ON(!vectors);

	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULE_START;
	map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	void *zero_page;

	build_mem_type_table();
	prepare_page_table(mi);
	bootmem_init(mi);
	devicemaps_init(mdesc);

	top_pmd = pmd_off_k(0xffff0000);

	/*
	 * allocate the zero page.  Note that we count on this going ok.
	 */
	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
	memzero(zero_page, PAGE_SIZE);
	empty_zero_page = virt_to_page(zero_page);
	flush_dcache_page(empty_zero_page);
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}
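
The mapping machinery in this file is normally driven by a static table of struct map_desc entries that a board's map_io callback hands to iotable_init(), which devicemaps_init() invokes via mdesc->map_io(). The sketch below is only an illustration of that calling pattern, not part of this file: the MYBOARD_UART_PHYS / MYBOARD_UART_VIRT addresses and the myboard_* names are hypothetical placeholders.

/*
 * Minimal sketch of a board's static I/O mapping table (hypothetical
 * addresses).  iotable_init() calls create_mapping() for each entry;
 * create_mapping() then chooses section, supersection or page mappings
 * depending on alignment and the mem_type's prot_l1/prot_sect settings.
 */
static struct map_desc myboard_io_desc[] __initdata = {
	{
		.virtual	= MYBOARD_UART_VIRT,	/* hypothetical VA */
		.pfn		= __phys_to_pfn(MYBOARD_UART_PHYS),
		.length		= SZ_1M,
		.type		= MT_DEVICE,	/* device memory, uncached */
	},
};

/* Registered as mdesc->map_io in the (hypothetical) machine descriptor. */
static void __init myboard_map_io(void)
{
	iotable_init(myboard_io_desc, ARRAY_SIZE(myboard_io_desc));
}

With a table like this, a 1MB-aligned virtual address, physical address and length let create_mapping() use a single section entry; anything unaligned falls back to alloc_init_pte() and individual L2 page table entries.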
