
mm-armv.c

linux-2.6.15.6
C
Page 1 of 2
/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);

pmd_t *top_pmd;

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= 0,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= PTE_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= PTE_CACHEABLE,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}
};
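The table above is matched by prefix against the kernel command line's cachepolicy= argument (see early_cachepolicy() below). A minimal stand-alone sketch of that lookup, with illustrative names, not taken from the kernel source:

#include <stdio.h>
#include <string.h>

/* Illustrative copy of the policy names from cache_policies[] above. */
static const char *policies[] = {
	"uncached", "buffered", "writethrough", "writeback", "writealloc",
};

int main(void)
{
	const char *arg = "writethrough";	/* as in cachepolicy=writethrough */
	size_t i;

	/* Same prefix test early_cachepolicy() uses: memcmp over the
	 * length of each known policy name. */
	for (i = 0; i < sizeof(policies) / sizeof(policies[0]); i++) {
		if (memcmp(arg, policies[i], strlen(policies[i])) == 0) {
			printf("selected policy %zu: %s\n", i, policies[i]);
			return 0;
		}
	}
	printf("ERROR: unknown or unsupported cache policy\n");
	return 1;
}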
/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			*p += len;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	flush_cache_all();
	set_cr(cr_alignment);
}

static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_ecc(char **p)
{
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
		*p += 2;
	} else if (memcmp(*p, "off", 3) == 0) {
		ecc_mask = 0;
		*p += 3;
	}
}

__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}

__setup("noalign", noalign_setup);

#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
	return pmd_offset(pgd, virt);
}

static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_off(pgd_offset_k(virt), virt);
}

/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte(new_pte, *init_pte);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}

void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_page_state(nr_page_table_pages);
	pte_lock_deinit(pte);
	pte_free(pte);
	pmd_free(pmd);
free:
	free_pages((unsigned long) pgd, 2);
}
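The order-2 allocations in get_pgd_slow() and free_pgd_slow() above follow from the ARM first-level table geometry: 4096 descriptors of 4 bytes each, i.e. 16 KB, or four 4 KB pages. A stand-alone arithmetic check of that sizing, a sketch rather than kernel code:

#include <stdio.h>

int main(void)
{
	/* One first-level descriptor per 1 MB of the 4 GB address space,
	 * 4 bytes per descriptor. */
	unsigned long entries = 1UL << (32 - 20);		/* 4096 */
	unsigned long table_bytes = entries * 4;		/* 16 KB */

	/* __get_free_pages(GFP_KERNEL, 2) hands back 2^2 = 4 contiguous
	 * 4 KB pages, matching the table size. */
	printf("%lu entries -> %lu KB; order-2 pages -> %d KB\n",
	       entries, table_bytes >> 10, 4 << 2);
	return 0;
}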
/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  This operates on half-
 * pgdir entry increments.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_off_k(virt);

	if (virt & (1 << 20))
		pmdp++;

	*pmdp = __pmd(phys | prot);
	flush_pmd_entry(pmdp);
}

/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
 */
static inline void
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
{
	int i;

	for (i = 0; i < 16; i += 1) {
		alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);

		virt += (PGDIR_SIZE / 2);
	}
}

/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
	pmd_t *pmdp = pmd_off_k(virt);
	pte_t *ptep;

	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
					       sizeof(pte_t));

		__pmd_populate(pmdp, __pa(ptep) | prot_l1);
	}
	ptep = pte_offset_kernel(pmdp, virt);

	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}

struct mem_types {
	unsigned int	prot_pte;
	unsigned int	prot_l1;
	unsigned int	prot_sect;
	unsigned int	domain;
};

static struct mem_types mem_types[] __initdata = {
	[MT_DEVICE] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
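On ARM, Linux's PGDIR_SIZE is 2 MB while a hardware section maps 1 MB, so each pgd entry is handled as a pair of section descriptors; this is why alloc_init_section() above advances pmdp when bit 20 of the virtual address is set, and why alloc_init_supersection() steps by PGDIR_SIZE/2 sixteen times to cover one 16 MB supersection. A stand-alone sketch of that arithmetic, assuming the 2 MB PGDIR_SIZE, not kernel code:

#include <stdio.h>

#define PGDIR_SIZE	(1UL << 21)	/* 2 MB per Linux pgd entry on ARM */

int main(void)
{
	unsigned long virt = 0x00300000;	/* illustrative address */

	/* alloc_init_section() bumps pmdp to the second descriptor of
	 * the pair when bit 20 of the virtual address is set. */
	printf("bit 20 %s -> %s 1 MB half of the pgd entry\n",
	       (virt & (1UL << 20)) ? "set" : "clear",
	       (virt & (1UL << 20)) ? "upper" : "lower");

	/* alloc_init_supersection() writes 16 section entries, stepping
	 * virt by PGDIR_SIZE/2 = 1 MB each time: a 16 MB supersection. */
	printf("supersection span: %lu MB\n",
	       (16 * (PGDIR_SIZE / 2)) >> 20);
	return 0;
}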
