
📄 srmmu.c

📁 Linux kernel source code (compressed archive); the source accompanying the book <<Linux内核>>
💻 C
📖 Page 1 of 5
/* Cypress has a unified L2 VIPT cache, in which both instructions and data
 * are stored.  It does not have an onboard icache of any sort, therefore
 * no flush is necessary.
 */
static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void cypress_flush_tlb_all(void)
{
        srmmu_flush_whole_tlb();
}

static void cypress_flush_tlb_mm(struct mm_struct *mm)
{
        FLUSH_BEGIN(mm)
        __asm__ __volatile__("
        lda     [%0] %3, %%g5
        sta     %2, [%0] %3
        sta     %%g0, [%1] %4
        sta     %%g5, [%0] %3"
        : /* no outputs */
        : "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
          "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
        : "g5");
        FLUSH_END
}

static void cypress_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
        unsigned long size;

        FLUSH_BEGIN(mm)
        start &= SRMMU_PGDIR_MASK;
        size = SRMMU_PGDIR_ALIGN(end) - start;
        __asm__ __volatile__("
                lda     [%0] %5, %%g5
                sta     %1, [%0] %5
        1:      subcc   %3, %4, %3
                bne     1b
                 sta    %%g0, [%2 + %3] %6
                sta     %%g5, [%0] %5"
        : /* no outputs */
        : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
          "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
          "i" (ASI_M_FLUSH_PROBE)
        : "g5", "cc");
        FLUSH_END
}

static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        FLUSH_BEGIN(mm)
        __asm__ __volatile__("
        lda     [%0] %3, %%g5
        sta     %1, [%0] %3
        sta     %%g0, [%2] %4
        sta     %%g5, [%0] %3"
        : /* no outputs */
        : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
          "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
        : "g5");
        FLUSH_END
}

/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct mm_struct *mm, unsigned long start,
                                     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma,
                                    unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                                   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                                   unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);
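/*
 * Illustrative sketch, not from srmmu.c: the pattern the
 * cypress_flush_tlb_*() inline assembly above follows is "save the
 * current context register, switch to the victim mm's context, store
 * to the flush/probe ASI, restore the saved context".  The mmu_*
 * helpers and the mock context register below are stand-ins for the
 * lda/sta accesses through ASI_M_MMUREGS and ASI_M_FLUSH_PROBE, not
 * kernel APIs; the snippet builds standalone.
 */
#include <stdio.h>

static unsigned long mock_ctx_reg;              /* stands in for the SRMMU context register */

static unsigned long mmu_get_context(void) { return mock_ctx_reg; }
static void mmu_set_context(unsigned long ctx) { mock_ctx_reg = ctx; }

/* stands in for "sta %g0, [addr] ASI_M_FLUSH_PROBE" */
static void mmu_flush_probe(unsigned long addr)
{
        printf("flush/probe store at %#lx while in context %lu\n", addr, mock_ctx_reg);
}

static void sketch_flush_tlb_mm(unsigned long target_ctx)
{
        unsigned long saved = mmu_get_context(); /* lda [SRMMU_CTX_REG] ASI_M_MMUREGS, %g5 */

        mmu_set_context(target_ctx);             /* sta ctx, [SRMMU_CTX_REG] ASI_M_MMUREGS */
        mmu_flush_probe(0x300);                  /* 0x300 requests a whole-context flush   */
        mmu_set_context(saved);                  /* sta %g5, [SRMMU_CTX_REG] ASI_M_MMUREGS */
}

int main(void)
{
        mock_ctx_reg = 1;                        /* pretend the CPU was running in context 1 */
        sketch_flush_tlb_mm(7);                  /* flush all TLB entries owned by context 7 */
        return 0;
}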
/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

void __init early_pgtable_allocfail(char *type)
{
        prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
        prom_halt();
}

void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end)
{
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;

        while(start < end) {
                pgdp = pgd_offset_k(start);
                if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
                        pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
                        if (pmdp == NULL)
                                early_pgtable_allocfail("pmd");
                        memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
                        srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
                }
                pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
                if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
                        ptep = (pte_t *)__srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE);
                        if (ptep == NULL)
                                early_pgtable_allocfail("pte");
                        memset(__nocache_fix(ptep), 0, SRMMU_PTE_TABLE_SIZE);
                        srmmu_pmd_set(__nocache_fix(pmdp), ptep);
                }
                start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
        }
}

void __init srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
{
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;

        while(start < end) {
                pgdp = pgd_offset_k(start);
                if(srmmu_pgd_none(*pgdp)) {
                        pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
                        if (pmdp == NULL)
                                early_pgtable_allocfail("pmd");
                        memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
                        srmmu_pgd_set(pgdp, pmdp);
                }
                pmdp = srmmu_pmd_offset(pgdp, start);
                if(srmmu_pmd_none(*pmdp)) {
                        ptep = (pte_t *)__srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE);
                        if (ptep == NULL)
                                early_pgtable_allocfail("pte");
                        memset(ptep, 0, SRMMU_PTE_TABLE_SIZE);
                        srmmu_pmd_set(pmdp, ptep);
                }
                start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
        }
}
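/*
 * Illustrative sketch, not from srmmu.c: how a virtual address indexes
 * the pgd/pmd/pte levels walked by the two skeleton allocators above,
 * assuming the classic SRMMU layout (256-entry level-1 table, 64-entry
 * level-2, 64-entry level-3, 4 KB pages, so SRMMU_PGDIR_SIZE is 16 MB
 * and SRMMU_PMD_SIZE is 256 KB).  The SKETCH_* constants are local
 * stand-ins for the values defined in <asm/pgtsrmmu.h>; the snippet
 * builds standalone.
 */
#include <stdio.h>

#define SKETCH_PGDIR_SHIFT      24      /* each pgd entry covers 16 MB  */
#define SKETCH_PMD_SHIFT        18      /* each pmd entry covers 256 KB */
#define SKETCH_PAGE_SHIFT       12      /* each pte covers a 4 KB page  */

static void split_vaddr(unsigned long va)
{
        unsigned long pgd_idx = (va >> SKETCH_PGDIR_SHIFT) & 0xff;      /* 8 bits  */
        unsigned long pmd_idx = (va >> SKETCH_PMD_SHIFT) & 0x3f;        /* 6 bits  */
        unsigned long pte_idx = (va >> SKETCH_PAGE_SHIFT) & 0x3f;       /* 6 bits  */
        unsigned long offset = va & ((1UL << SKETCH_PAGE_SHIFT) - 1);   /* 12 bits */

        printf("va %#010lx -> pgd %3lu, pmd %2lu, pte %2lu, offset %#lx\n",
               va, pgd_idx, pmd_idx, pte_idx, offset);
}

int main(void)
{
        split_vaddr(0xf0004000UL);                              /* a typical kernel virtual address */
        split_vaddr(0xf0004000UL + (1UL << SKETCH_PMD_SHIFT));  /* the next 256 KB chunk            */
        return 0;
}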
/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
void __init srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
{
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;
        int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
        unsigned long prompte;

        while(start <= end) {
                if (start == 0)
                        break; /* probably wrap around */
                if(start == 0xfef00000)
                        start = KADB_DEBUGGER_BEGVM;
                if(!(prompte = srmmu_hwprobe(start))) {
                        start += PAGE_SIZE;
                        continue;
                }

                /* A red snapper, see what it really is. */
                what = 0;

                if(!(start & ~(SRMMU_PMD_MASK))) {
                        if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PMD_SIZE) == prompte)
                                what = 1;
                }

                if(!(start & ~(SRMMU_PGDIR_MASK))) {
                        if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
                           prompte)
                                what = 2;
                }

                pgdp = pgd_offset_k(start);
                if(what == 2) {
                        *(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
                        start += SRMMU_PGDIR_SIZE;
                        continue;
                }
                if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
                        pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
                        if (pmdp == NULL)
                                early_pgtable_allocfail("pmd");
                        memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
                        srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
                }
                pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
                if(what == 1) {
                        *(pmd_t *)__nocache_fix(pmdp) = __pmd(prompte);
                        start += SRMMU_PMD_SIZE;
                        continue;
                }
                if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
                        ptep = (pte_t *)__srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE);
                        if (ptep == NULL)
                                early_pgtable_allocfail("pte");
                        memset(__nocache_fix(ptep), 0, SRMMU_PTE_TABLE_SIZE);
                        srmmu_pmd_set(__nocache_fix(pmdp), ptep);
                }
                ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
                *(pte_t *)__nocache_fix(ptep) = __pte(prompte);
                start += PAGE_SIZE;
        }
}

#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
        pgd_t *pgdp = pgd_offset_k(vaddr);
        unsigned long big_pte;

        big_pte = KERNEL_PTE(phys_base >> 4);
        *(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
        unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
        unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
        unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);

        while(vstart < vend) {
                do_large_mapping(vstart, pstart);
                vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
        }
        return vstart;
}

static inline void memprobe_error(char *msg)
{
        prom_printf(msg);
        prom_printf("Halting now...\n");
        prom_halt();
}

static inline void map_kernel(void)
{
        int i;

        if (phys_base > 0) {
                do_large_mapping(PAGE_OFFSET, phys_base);
        }

        for (i = 0; sp_banks[i].num_bytes != 0; i++) {
                map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
        }

        init_mm.mmap->vm_start = PAGE_OFFSET;

        BTFIXUPSET_SIMM13(user_ptrs_per_pgd, PAGE_OFFSET / SRMMU_PGDIR_SIZE);
}
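/*
 * Illustrative sketch, not from srmmu.c: why do_large_mapping() above
 * passes phys_base >> 4 to KERNEL_PTE().  An SRMMU page table entry
 * keeps the physical page number in bits 31..8, i.e. (paddr >> 12) << 8,
 * which for a page-aligned address is simply paddr >> 4.  The SKETCH_*
 * flag values mirror what SRMMU_VALID/SRMMU_PRIV/SRMMU_CACHE are
 * assumed to be in <asm/pgtsrmmu.h>; the snippet builds standalone.
 */
#include <stdio.h>

#define SKETCH_VALID    0x02    /* entry type = PTE                         */
#define SKETCH_PRIV     0x1c    /* access = supervisor read/write/execute   */
#define SKETCH_CACHE    0x80    /* cacheable bit                            */

int main(void)
{
        unsigned long phys_base = 0x50000000UL; /* hypothetical RAM base address */
        unsigned long big_pte = (phys_base >> 4) | SKETCH_CACHE | SKETCH_PRIV | SKETCH_VALID;

        /* Installed at pgd level, one such entry maps a 16 MB chunk. */
        printf("phys %#010lx -> pgd-level PTE %#010lx\n", phys_base, big_pte);
        return 0;
}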
/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);
extern int linux_num_cpus;

void (*poke_srmmu)(void) __initdata = NULL;

extern void bootmem_init(void);
extern void sun_serial_setup(void);

void __init srmmu_paging_init(void)
{
        int i, cpunode;
        char node_str[128];
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */

        if (sparc_cpu_model == sun4d)
                num_contexts = 65536; /* We know it is Viking */
        else {
                /* Find the number of contexts on the srmmu. */
                cpunode = prom_getchild(prom_root_node);
                num_contexts = 0;
                while(cpunode != 0) {
                        prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
                        if(!strcmp(node_str, "cpu")) {
                                num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
                                break;
                        }
                        cpunode = prom_getsibling(cpunode);
                }
        }

        if(!num_contexts) {
                prom_printf("Something wrong, can't find cpu node in paging_init.\n");
                prom_halt();
        }

        bootmem_init();
        srmmu_nocache_init();
        srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
        map_kernel();

        /* ctx table has to be physically aligned to its size */
        srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
        srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

        for(i = 0; i < num_contexts; i++)
                srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

        flush_cache_all();
        srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
        flush_tlb_all();
        poke_srmmu();

#if CONFIG_SUN_IO
        srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
        srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
#endif

        srmmu_allocate_ptable_skeleton(FIX_KMAP_BEGIN, FIX_KMAP_END);
        srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_BASE_END);

        pgd = pgd_offset_k(PKMAP_BASE);
        pmd = pmd_offset(pgd, PKMAP_BASE);
        pte = pte_offset(pmd, PKMAP_BASE);
        pkmap_page_table = pte;

        flush_cache_all();
        flush_tlb_all();

        empty_bad_pmd_table = (pte_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
        empty_bad_pte_table = (pte_t *)srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE);

        /*
         * This does not logically belong here, but we need to
         * call it at the moment we are able to use the bootmem
         * allocator.
         */
        sun_serial_setup();

        sparc_context_init(num_contexts);

        kmap_init();

        {
                unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0};

                zones_size[ZONE_DMA] = max_low_pfn;
                zones_size[ZONE_HIGHMEM] = highend_pfn - max_low_pfn;
                free_area_init(zones_size);
        }
}

static int srmmu_mmu_info(char *buf)
{
        return sprintf(buf,
                "MMU type\t: %s\n"
                "contexts\t: %d\n"
                "nocache total\t: %ld\n"
                "nocache used\t: %d\n"
                , srmmu_name,
                num_contexts,
                SRMMU_NOCACHE_SIZE,
                (srmmu_nocache_used << SRMMU_NOCACHE_BITMAP_SHIFT)
        );
}

static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
}

static void srmmu_destroy_context(struct mm_struct *mm)
{
        if(mm->context != NO_CONTEXT) {
                flush_cache_mm(mm);
                srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
                flush_tlb_mm(mm);
                spin_lock(&srmmu_context_spinlock);
                free_context(mm->context);
                spin_unlock(&srmmu_context_spinlock);
                mm->context = NO_CONTEXT;
        }
}

/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
        prom_printf("Could not determine SRMMU chip type.\n");
        prom_halt();
}

static void __init init_vac_layout(void)
{
        int nd, cache_lines;
        char node_str[128];
#ifdef CONFIG_SMP
