📄 tlbex.c
		/* Tail of iPTE_SW(): write the updated pte back to the page table. */
		i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no i_nop needed */
		i_lw(p, pte, 0, ptr);
	} else
		i_nop(p);
# else
	i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_sd(p, pte, 0, ptr);
	else
# endif
		i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		i_ori(p, pte, pte, hwmode);
		i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}

/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __init
build_pte_present(u32 **p, struct label **l, struct reloc **r,
		  unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}

/* Make PTE valid, store result in PTR. */
static void __init
build_make_valid(u32 **p, struct reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __init
build_pte_writable(u32 **p, struct label **l, struct reloc **r,
		   unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}

/*
 * Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __init
build_make_write(u32 **p, struct reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __init
build_pte_modifiable(u32 **p, struct label **l, struct reloc **r,
		     unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_WRITE);
	il_beqz(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}

/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __init
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	i_tlbwi(p);
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */
}
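/*
 * Illustrative sketch only (not emitted by this file as written):
 * with the K0/K1 arguments that the callers below pass for pte/tmp,
 * build_r3000_pte_reload_tlbwi() above emits roughly this R3000
 * assembly, one instruction per builder call:
 *
 *	mtc0	k0, c0_entrylo0
 *	mfc0	k1, c0_epc		# runs in the cp0 delay slot
 *	tlbwi
 *	jr	k1
 *	 rfe				# branch delay slot
 */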
/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate. This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill. Then it returns.
 */
static void __init
build_r3000_tlb_reload_write(u32 **p, struct label **l, struct reloc **r,
			     unsigned int pte, unsigned int tmp)
{
	i_mfc0(p, tmp, C0_INDEX);
	i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	i_mfc0(p, tmp, C0_EPC); /* branch delay */
	i_tlbwi(p); /* cp0 delay */
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */

	l_r3000_write_probe_fail(l, *p);
	i_tlbwr(p); /* cp0 delay */
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */
}

static void __init
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	i_mfc0(p, pte, C0_BADVADDR);
	i_lui(p, ptr, rel_hi(pgdc)); /* cp0 delay */
	i_lw(p, ptr, rel_lo(pgdc), ptr);
	i_srl(p, pte, pte, 22); /* load delay */
	i_sll(p, pte, pte, 2);
	i_addu(p, ptr, ptr, pte);
	i_mfc0(p, pte, C0_CONTEXT);
	i_lw(p, ptr, 0, ptr); /* cp0 delay */
	i_andi(p, pte, pte, 0xffc); /* load delay */
	i_addu(p, ptr, ptr, pte);
	i_lw(p, pte, 0, ptr);
	i_tlbp(p); /* load delay */
}

static void __init build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct label *l = labels;
	struct reloc *r = relocs;
	int i;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	l_nopage_tlbl(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB load handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbl));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (p - handle_tlbl); i++)
		pr_debug("\t.word 0x%08x\n", handle_tlbl[i]);
	pr_debug("\t.set pop\n");
}

static void __init build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct label *l = labels;
	struct reloc *r = relocs;
	int i;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	l_nopage_tlbs(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB store handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbs));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (p - handle_tlbs); i++)
		pr_debug("\t.word 0x%08x\n", handle_tlbs[i]);
	pr_debug("\t.set pop\n");
}
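/*
 * Worked example for build_r3000_tlbchange_handler_head() above (a
 * sketch, assuming 4 KB pages and 4-byte ptes): for a faulting
 * address of 0x12345678, "srl pte, 22" yields pgd index 0x48 and
 * "sll pte, 2" turns that into byte offset 0x120 of the pgd slot;
 * the low bits of C0_CONTEXT, masked with 0xffc, then supply the
 * byte offset of the pte within that page table, so the final i_lw
 * fetches the pte for the faulting page just before the tlbp probe.
 * All three R3000 handlers share this walk.
 */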
exceeded"); resolve_relocs(relocs, labels); pr_info("Synthesized TLB modify handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbm)); pr_debug("\t.set push\n"); pr_debug("\t.set noreorder\n"); for (i = 0; i < (p - handle_tlbm); i++) pr_debug("\t.word 0x%08x\n", handle_tlbm[i]); pr_debug("\t.set pop\n");}/* * R4000 style TLB load/store/modify handlers. */static void __initbuild_r4000_tlbchange_handler_head(u32 **p, struct label **l, struct reloc **r, unsigned int pte, unsigned int ptr){#ifdef CONFIG_64BIT build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */#else build_get_pgde32(p, pte, ptr); /* get pgd in ptr */#endif i_MFC0(p, pte, C0_BADVADDR); i_LW(p, ptr, 0, ptr); i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2); i_ADDU(p, ptr, ptr, pte);#ifdef CONFIG_SMP l_smp_pgtable_change(l, *p);# endif iPTE_LW(p, l, pte, ptr); /* get even pte */ if (!m4kc_tlbp_war()) build_tlb_probe_entry(p);}static void __initbuild_r4000_tlbchange_handler_tail(u32 **p, struct label **l, struct reloc **r, unsigned int tmp, unsigned int ptr){ i_ori(p, ptr, ptr, sizeof(pte_t)); i_xori(p, ptr, ptr, sizeof(pte_t)); build_update_entries(p, tmp, ptr); build_tlb_write_entry(p, l, r, tlb_indexed); l_leave(l, *p); i_eret(p); /* return from trap */#ifdef CONFIG_64BIT build_get_pgd_vmalloc64(p, l, r, tmp, ptr);#endif}static void __init build_r4000_tlb_load_handler(void){ u32 *p = handle_tlbl; struct label *l = labels; struct reloc *r = relocs; int i; memset(handle_tlbl, 0, sizeof(handle_tlbl)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); if (bcm1250_m3_war()) { i_MFC0(&p, K0, C0_BADVADDR); i_MFC0(&p, K1, C0_ENTRYHI); i_xor(&p, K0, K0, K1); i_SRL(&p, K0, K0, PAGE_SHIFT + 1); il_bnez(&p, &r, K0, label_leave); /* No need for i_nop */ } build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl); if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); build_make_valid(&p, &r, K0, K1); build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); l_nopage_tlbl(&l, p); i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); i_nop(&p); if ((p - handle_tlbl) > FASTPATH_SIZE) panic("TLB load handler fastpath space exceeded"); resolve_relocs(relocs, labels); pr_info("Synthesized TLB load handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbl)); pr_debug("\t.set push\n"); pr_debug("\t.set noreorder\n"); for (i = 0; i < (p - handle_tlbl); i++) pr_debug("\t.word 0x%08x\n", handle_tlbl[i]); pr_debug("\t.set pop\n");}static void __init build_r4000_tlb_store_handler(void){ u32 *p = handle_tlbs; struct label *l = labels; struct reloc *r = relocs; int i; memset(handle_tlbs, 0, sizeof(handle_tlbs)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs); if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); build_make_write(&p, &r, K0, K1); build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); l_nopage_tlbs(&l, p); i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); i_nop(&p); if ((p - handle_tlbs) > FASTPATH_SIZE) panic("TLB store handler fastpath space exceeded"); resolve_relocs(relocs, labels); pr_info("Synthesized TLB store handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbs)); pr_debug("\t.set push\n"); pr_debug("\t.set noreorder\n"); for (i = 0; i < (p - handle_tlbs); i++) pr_debug("\t.word 0x%08x\n", handle_tlbs[i]); 
pr_debug("\t.set pop\n");}static void __init build_r4000_tlb_modify_handler(void){ u32 *p = handle_tlbm; struct label *l = labels; struct reloc *r = relocs; int i; memset(handle_tlbm, 0, sizeof(handle_tlbm)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm); if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); /* Present and writable bits set, set accessed and dirty bits. */ build_make_write(&p, &r, K0, K1); build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); l_nopage_tlbm(&l, p); i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); i_nop(&p); if ((p - handle_tlbm) > FASTPATH_SIZE) panic("TLB modify handler fastpath space exceeded"); resolve_relocs(relocs, labels); pr_info("Synthesized TLB modify handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbm)); pr_debug("\t.set push\n"); pr_debug("\t.set noreorder\n"); for (i = 0; i < (p - handle_tlbm); i++) pr_debug("\t.word 0x%08x\n", handle_tlbm[i]); pr_debug("\t.set pop\n");}void __init build_tlb_refill_handler(void){ /* * The refill handler is generated per-CPU, multi-node systems * may have local storage for it. The other handlers are only * needed once. */ static int run_once = 0; switch (current_cpu_type()) { case CPU_R2000: case CPU_R3000: case CPU_R3000A: case CPU_R3081E: case CPU_TX3912: case CPU_TX3922: case CPU_TX3927: build_r3000_tlb_refill_handler(); if (!run_once) { build_r3000_tlb_load_handler(); build_r3000_tlb_store_handler(); build_r3000_tlb_modify_handler(); run_once++; } break; case CPU_R6000: case CPU_R6000A: panic("No R6000 TLB refill handler yet"); break; case CPU_R8000: panic("No R8000 TLB refill handler yet"); break; default: build_r4000_tlb_refill_handler(); if (!run_once) { build_r4000_tlb_load_handler(); build_r4000_tlb_store_handler(); build_r4000_tlb_modify_handler(); run_once++; } }}void __init flush_tlb_handlers(void){ flush_icache_range((unsigned long)handle_tlbl, (unsigned long)handle_tlbl + sizeof(handle_tlbl)); flush_icache_range((unsigned long)handle_tlbs, (unsigned long)handle_tlbs + sizeof(handle_tlbs)); flush_icache_range((unsigned long)handle_tlbm, (unsigned long)handle_tlbm + sizeof(handle_tlbm));}