📄 srmmu.c
	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, &contig_page_data, zones_size,
				    pfn_base, zholes_size);
	}
}

static void srmmu_mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}

static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
}

static void srmmu_destroy_context(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock(&srmmu_context_spinlock);
		free_context(mm->context);
		spin_unlock(&srmmu_context_spinlock);
		mm->context = NO_CONTEXT;
	}
}

/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}

static void __init init_vac_layout(void)
{
	int nd, cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while ((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if (!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, "
					    "halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if (vac_cache_size > max_size)
				max_size = vac_cache_size;
			if (vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			cpu++;
			if (cpu >= NR_CPUS || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if (nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}

static void __init poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}

static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;

	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);

	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}

static void __init poke_cypress(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long faddr, tagval;
	volatile unsigned long cypress_sucks;
	volatile unsigned long clear;

	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	if (!(mreg & CYPRESS_CENABLE)) {
		for (faddr = 0x0; faddr < 0x10000; faddr += 20) {
			__asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
					     "sta %%g0, [%0] %2\n\t" : :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));
		}
	} else {
		for (faddr = 0; faddr < 0x10000; faddr += 0x20) {
			__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
					     "=r" (tagval) :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));

			/* If modified and valid, kick it. */
			if ((tagval & 0x60) == 0x60)
				cypress_sucks = *(unsigned long *)
						(0xf0020000 + faddr);
		}
	}

	/* And one more, for our good neighbor, Mr. Broken Cypress. */
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
	srmmu_set_mmureg(mreg);
}

static void __init init_cypress_common(void)
{
	init_vac_layout();

	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);

	poke_srmmu = poke_cypress;
}

static void __init init_cypress_604(void)
{
	srmmu_name = "ROSS Cypress-604(UP)";
	srmmu_modtype = Cypress;
	init_cypress_common();
}

static void __init init_cypress_605(unsigned long mrev)
{
	srmmu_name = "ROSS Cypress-605(MP)";
	if (mrev == 0xe) {
		srmmu_modtype = Cypress_vE;
		hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
	} else {
		if (mrev == 0xd) {
			srmmu_modtype = Cypress_vD;
			hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
		} else {
			srmmu_modtype = Cypress;
		}
	}
	init_cypress_common();
}

static void __init poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, it can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}

#define SWIFT_MASKID_ADDR  0x10003018
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch (swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamage stuff going
		 * on here.  However I think we can find a way to avoid
		 * all of the workaround overhead under Linux.  Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes).  Aha, sounds pretty
		 * horrible eh?  But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all.  This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts"
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	};

	BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);

	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time?  Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people.  I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}

static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_hwprobe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}
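A note on the dispatch pattern above: each init_*() routine binds the chip-specific cache/TLB routines through BTFIXUPSET_CALL(). On sparc32 these are boot-time fixups: the kernel patches each call site once at startup, either into a direct call (BTFIXUPCALL_NORM) or into a no-op (BTFIXUPCALL_NOP, used e.g. for flush_page_for_dma on HyperSparc, which needs no per-page DMA flush). Similarly, the bare FLUSH_BEGIN(mm)/FLUSH_END pairs in the TurboSparc routines are macros defined earlier in this file that wrap the body in an if ((mm)->context != NO_CONTEXT) { ... } guard, so flushes are skipped for address spaces that were never assigned an MMU context. The sketch below shows the same select-once-at-init idea using ordinary function pointers; it is an illustrative analogy under stated assumptions, not the kernel's actual mechanism, and every name in it (mmu_ops, the *_demo functions) is hypothetical.

#include <stdio.h>

/* Hypothetical dispatch table standing in for the BTFIXUP call sites.
 * Each SRMMU chip driver fills these slots in exactly once during init. */
struct mmu_ops {
	void (*flush_cache_all)(void);
	void (*flush_tlb_all)(void);
};

/* Stand-ins for the chip-specific routines (e.g. hypersparc_flush_cache_all). */
static void hypersparc_flush_cache_all_demo(void)
{
	printf("hypersparc: flush entire virtually-addressed cache\n");
}

static void hypersparc_flush_tlb_all_demo(void)
{
	printf("hypersparc: flush whole TLB\n");
}

static struct mmu_ops ops;

/* Analogue of init_hypersparc(): bind the routines for the detected chip. */
static void init_hypersparc_demo(void)
{
	ops.flush_cache_all = hypersparc_flush_cache_all_demo;
	ops.flush_tlb_all = hypersparc_flush_tlb_all_demo;
}

int main(void)
{
	init_hypersparc_demo();
	ops.flush_cache_all();	/* kernel code just calls flush_cache_all() */
	ops.flush_tlb_all();
	return 0;
}

The real kernel goes one step further than this sketch: instead of loading a pointer and jumping through it, the BTFIXUP machinery rewrites the branch instructions at the call sites, so every cache/TLB operation costs a direct call with no table lookup, which mattered on CPUs of this class.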