srmmu.c
        mreg &= ~TSUNAMI_ITD;
        mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
        srmmu_set_mmureg(mreg);
}

static void __init init_tsunami(void)
{
        /*
         * Tsunami's pretty sane, Sun and TI actually got it
         * somewhat right this time.  Fujitsu should have
         * taken some lessons from them.
         */

        srmmu_name = "TI Tsunami";
        srmmu_modtype = Tsunami;

        BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);

        BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);

        BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
        BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);

        poke_srmmu = poke_tsunami;

        tsunami_setup_blockops();
}

static void __init poke_viking(void)
{
        unsigned long mreg = srmmu_get_mmureg();
        static int smp_catch = 0;

        if(viking_mxcc_present) {
                unsigned long mxcc_control = mxcc_get_creg();

                mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
                mxcc_control &= ~(MXCC_CTL_RRC);
                mxcc_set_creg(mxcc_control);

                /*
                 * We don't need memory parity checks.
                 * XXX This is a mess, have to dig out later. ecd.
                 * viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
                 */

                /* We do cache ptables on MXCC. */
                mreg |= VIKING_TCENABLE;
        } else {
                unsigned long bpreg;

                mreg &= ~(VIKING_TCENABLE);
                if(smp_catch++) {
                        /* Must disable mixed-cmd mode here for other cpu's. */
                        bpreg = viking_get_bpreg();
                        bpreg &= ~(VIKING_ACTION_MIX);
                        viking_set_bpreg(bpreg);

                        /* Just in case PROM does something funny. */
                        msi_set_sync();
                }
        }

        mreg |= VIKING_SPENABLE;
        mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
        mreg |= VIKING_SBENABLE;
        mreg &= ~(VIKING_ACENABLE);
        srmmu_set_mmureg(mreg);

#ifdef CONFIG_SMP
        /* Avoid unnecessary cross calls. */
        BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
        BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
        BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
        BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
        BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram);
        BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
        BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
        btfixup();
#endif
}
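/*
 * Editorial sketch (not part of the original source): each poke_*()
 * routine above is the per-chip "enable the MMU features we want" hook,
 * and they all follow the same read-modify-write pattern on the SRMMU
 * control register.  A minimal hypothetical example, where
 * EXAMPLE_ENABLE/EXAMPLE_DISABLE stand in for whatever chip-specific
 * bits apply:
 *
 *        unsigned long mreg = srmmu_get_mmureg();
 *        mreg |= EXAMPLE_ENABLE;         (e.g. I/D cache enable bits)
 *        mreg &= ~EXAMPLE_DISABLE;       (e.g. table-walk or parity checking)
 *        srmmu_set_mmureg(mreg);
 */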
static void __init init_viking(void)
{
        unsigned long mreg = srmmu_get_mmureg();

        /* Ahhh, the viking.  SRMMU VLSI abortion number two... */
        if(mreg & VIKING_MMODE) {
                srmmu_name = "TI Viking";
                viking_mxcc_present = 0;
                msi_set_sync();

                BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);

                /*
                 * We need this to make sure old viking takes no hits
                 * on its cache for dma snoops to workaround the
                 * "load from non-cacheable memory" interrupt bug.
                 * This is only necessary because of the new way in
                 * which we use the IOMMU.
                 */
                BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);

                flush_page_for_dma_global = 0;
        } else {
                srmmu_name = "TI Viking/MXCC";
                viking_mxcc_present = 1;
                srmmu_cache_pagetables = 1;

                /* MXCC vikings lack the DMA snooping bug. */
                BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
        }

        BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);

#ifdef CONFIG_SMP
        if (sparc_cpu_model == sun4d) {
                BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
        } else
#endif
        {
                BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
        }

        BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
        BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);

        poke_srmmu = poke_viking;
}

/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
        unsigned long mreg, psr;
        unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

        srmmu_modtype = SRMMU_INVAL_MOD;
        hwbug_bitmask = 0;

        mreg = srmmu_get_mmureg();
        psr = get_psr();
        mod_typ = (mreg & 0xf0000000) >> 28;
        mod_rev = (mreg & 0x0f000000) >> 24;
        psr_typ = (psr >> 28) & 0xf;
        psr_vers = (psr >> 24) & 0xf;

        /* First, check for HyperSparc or Cypress. */
        if(mod_typ == 1) {
                switch(mod_rev) {
                case 7:
                        /* UP or MP Hypersparc */
                        init_hypersparc();
                        break;
                case 0:
                case 2:
                        /* Uniprocessor Cypress */
                        init_cypress_604();
                        break;
                case 10:
                case 11:
                case 12:
                        /* _REALLY OLD_ Cypress MP chips... */
                case 13:
                case 14:
                case 15:
                        /* MP Cypress mmu/cache-controller */
                        init_cypress_605(mod_rev);
                        break;
                default:
                        /* Some other Cypress revision, assume a 605. */
                        init_cypress_605(mod_rev);
                        break;
                };
                return;
        }

        /*
         * Now Fujitsu TurboSparc. It might happen that it is
         * in Swift emulation mode, so we will check later...
         */
        if (psr_typ == 0 && psr_vers == 5) {
                init_turbosparc();
                return;
        }

        /* Next check for Fujitsu Swift. */
        if(psr_typ == 0 && psr_vers == 4) {
                int cpunode;
                char node_str[128];

                /* Look if it is not a TurboSparc emulating Swift... */
                cpunode = prom_getchild(prom_root_node);
                while((cpunode = prom_getsibling(cpunode)) != 0) {
                        prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
                        if(!strcmp(node_str, "cpu")) {
                                if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
                                    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
                                        init_turbosparc();
                                        return;
                                }
                                break;
                        }
                }

                init_swift();
                return;
        }

        /* Now the Viking family of srmmu. */
        if(psr_typ == 4 &&
           ((psr_vers == 0) ||
            ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
                init_viking();
                return;
        }

        /* Finally the Tsunami. */
        if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
                init_tsunami();
                return;
        }

        /* Oh well */
        srmmu_is_bad();
}
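/*
 * Editorial note (sketch, not in the original source): get_srmmu_type()
 * above keys off two 4-bit fields in each of the MMU control register
 * and the PSR, exactly as extracted in the code:
 *
 *        mod_typ  = (mreg >> 28) & 0xf;        MMU implementation
 *        mod_rev  = (mreg >> 24) & 0xf;        MMU revision
 *        psr_typ  = (psr  >> 28) & 0xf;        CPU implementation
 *        psr_vers = (psr  >> 24) & 0xf;        CPU version
 *
 * For example, psr_typ == 0 with psr_vers == 5 selects the Fujitsu
 * TurboSparc path, while psr_typ == 4 leads into the Viking/Tsunami checks.
 */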
/* don't laugh, static pagetables */
static int srmmu_check_pgt_cache(int low, int high)
{
        return 0;
}

extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
        tsetup_mmu_patchme, rtrap_mmu_patchme;

extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
        tsetup_srmmu_stackchk, srmmu_rett_stackchk;

extern unsigned long srmmu_fault;

#define PATCH_BRANCH(insn, dest) do { \
        iaddr = &(insn); \
        daddr = &(dest); \
        *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
} while(0);

static void __init patch_window_trap_handlers(void)
{
        unsigned long *iaddr, *daddr;

        PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
        PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
        PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
        PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
        PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
        PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
        PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
}

#ifdef CONFIG_SMP
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
        local_flush_page_for_dma(page);
}

#endif
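/*
 * Editorial sketch (assumption, not original code): PATCH_BRANCH()
 * above overwrites one instruction word at the patch site with an
 * unconditional branch to the SRMMU-specific routine, so a hypothetical
 * additional patch site would read
 *
 *        PATCH_BRANCH(example_mmu_patchme, srmmu_example_stackchk);
 *
 * where both names are instruction addresses exported from assembly and
 * SPARC_BRANCH() computes the PC-relative branch encoding from the
 * destination address and the address of the patched instruction.
 */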
/* Load up routines and constants for sun4m and sun4d mmu */
void __init ld_mmu_srmmu(void)
{
        extern void ld_mmu_iommu(void);
        extern void ld_mmu_iounit(void);
        extern void ___xchg32_sun4md(void);

        /* First the constants */
        BTFIXUPSET_SIMM13(pmd_shift, SRMMU_PMD_SHIFT);
        BTFIXUPSET_SETHI(pmd_size, SRMMU_PMD_SIZE);
        BTFIXUPSET_SETHI(pmd_mask, SRMMU_PMD_MASK);
        BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT);
        BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE);
        BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK);

        BTFIXUPSET_SIMM13(ptrs_per_pte, SRMMU_PTRS_PER_PTE);
        BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD);
        BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);

        BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
        BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED));
        BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
        BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
        BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
        page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
        pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;

        /* Functions */
#ifndef CONFIG_SMP
        BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
#endif
        BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1);
        BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM);

        BTFIXUPSET_CALL(pte_page, srmmu_pte_page, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM);

        BTFIXUPSET_SETHI(none_mask, 0xF0000000);        /* XXX P3: is it used? */

        BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);

        BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0);

        BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0);

        BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
        BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
        BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pte_offset, srmmu_pte_offset, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pte_alloc_one_fast, srmmu_pte_alloc_one_fast, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(free_pmd_fast, srmmu_free_pmd_fast, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pmd_alloc_one_fast, srmmu_pmd_alloc_one_fast, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM);

        BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE);
        BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY);
        BTFIXUPSET_HALF(pte_youngi, SRMMU_REF);
        BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE);
        BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY);
        BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF);
        BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE));
        BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY));
        BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF));
        BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
        BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);

        BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);

        /* Task struct and kernel stack allocating/freeing. */
        BTFIXUPSET_CALL(alloc_task_struct, srmmu_alloc_task_struct, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(free_task_struct, srmmu_free_task_struct, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(get_task_struct, srmmu_get_task_struct, BTFIXUPCALL_NORM);

        get_srmmu_type();
        patch_window_trap_handlers();
#ifdef CONFIG_SMP
        /* El switcheroo... */
        BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
        BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
        BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
        BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
        BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
        BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
        BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
        BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
        BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram);
        BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
        BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);

        BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
        if (sparc_cpu_model != sun4d) {
                BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
        }
        BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
#endif

        if (sparc_cpu_model == sun4d)
                ld_mmu_iounit();
        else
                ld_mmu_iommu();
#ifdef CONFIG_SMP
        if (sparc_cpu_model == sun4d)
                sun4d_init_smp();
        else
                sun4m_init_smp();
#endif
}
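/*
 * Editorial note (a sketch of the surrounding btfixup scheme, assuming
 * the generic sparc32 behaviour rather than anything specific to this
 * file): the BTFIXUPSET_*() calls in ld_mmu_srmmu() only record which
 * routine or constant each hook should resolve to.  Ordinary callers
 * keep using the generic entry points, e.g.
 *
 *        flush_cache_mm(mm);
 *        flush_tlb_page(vma, addr);
 *
 * and the boot-time fixup pass (btfixup()) patches those sites to the
 * per-chip viking/tsunami/hypersparc/... variants chosen above, so the
 * selection costs nothing per call once the kernel is running.
 */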