init.c
                                       prom_dtlb[i].tlb_data);
                membar("#Sync");
            }
            if (prom_itlb[i].tlb_ent != -1) {
                __asm__ __volatile__("stxa %0, [%1] %2"
                                     : : "r" (prom_itlb[i].tlb_tag),
                                         "r" (TLB_TAG_ACCESS),
                                         "i" (ASI_IMMU));
                membar("#Sync");
                spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
                                       prom_itlb[i].tlb_data);
                membar("#Sync");
            }
        }
    } else {
        for (i = 0; i < 8; i++) {
            if (prom_dtlb[i].tlb_ent != -1) {
                __asm__ __volatile__("stxa %%g0, [%0] %1"
                                     : : "r" (TLB_TAG_ACCESS),
                                         "i" (ASI_DMMU));
                membar("#Sync");
                spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
                membar("#Sync");
            }
            if (prom_itlb[i].tlb_ent != -1) {
                __asm__ __volatile__("stxa %%g0, [%0] %1"
                                     : : "r" (TLB_TAG_ACCESS),
                                         "i" (ASI_IMMU));
                membar("#Sync");
                spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
                membar("#Sync");
            }
        }
    }
    __asm__ __volatile__("wrpr %0, 0, %%pstate"
                         : : "r" (pstate));
}

void inherit_locked_prom_mappings(int save_p)
{
    int i;
    int dtlb_seen = 0;
    int itlb_seen = 0;

    /* The PROM has more mappings in the TLB, but it (conveniently)
     * fails to mention any of these in the translations property.
     * The only ones that matter are the locked PROM tlb entries, so
     * we impose the following irrecoverable rule on the PROM: it is
     * allowed 8 locked entries in the ITLB and 8 in the DTLB.
     *
     * Supposedly the upper 16GB of the address space is reserved for
     * OBP, but this is not documented anywhere. Furthermore, the
     * entire interface used between the client program and the
     * firmware on sun5 systems to coordinate MMU mappings is also
     * completely undocumented.
     */
    if (save_p) {
        for (i = 0; i < 8; i++) {
            prom_dtlb[i].tlb_ent = -1;
            prom_itlb[i].tlb_ent = -1;
        }
    }
    for (i = 0; i < 63; i++) {
        unsigned long data;

        /* Spitfire Errata #32 workaround */
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "flush %%g6"
                             : /* No outputs */
                             : "r" (0), "r" (PRIMARY_CONTEXT),
                               "i" (ASI_DMMU));

        data = spitfire_get_dtlb_data(i);
        if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
            unsigned long tag;

            /* Spitfire Errata #32 workaround */
            __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                                 "flush %%g6"
                                 : /* No outputs */
                                 : "r" (0), "r" (PRIMARY_CONTEXT),
                                   "i" (ASI_DMMU));

            tag = spitfire_get_dtlb_tag(i);
            if (save_p) {
                prom_dtlb[dtlb_seen].tlb_ent = i;
                prom_dtlb[dtlb_seen].tlb_tag = tag;
                prom_dtlb[dtlb_seen].tlb_data = data;
            }
            __asm__ __volatile__("stxa %%g0, [%0] %1"
                                 : : "r" (TLB_TAG_ACCESS),
                                     "i" (ASI_DMMU));
            membar("#Sync");
            spitfire_put_dtlb_data(i, 0x0UL);
            membar("#Sync");

            dtlb_seen++;
            if (dtlb_seen > 7)
                break;
        }
    }
    for (i = 0; i < 63; i++) {
        unsigned long data;

        /* Spitfire Errata #32 workaround */
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "flush %%g6"
                             : /* No outputs */
                             : "r" (0), "r" (PRIMARY_CONTEXT),
                               "i" (ASI_DMMU));

        data = spitfire_get_itlb_data(i);
        if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
            unsigned long tag;

            /* Spitfire Errata #32 workaround */
            __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                                 "flush %%g6"
                                 : /* No outputs */
                                 : "r" (0), "r" (PRIMARY_CONTEXT),
                                   "i" (ASI_DMMU));

            tag = spitfire_get_itlb_tag(i);
            if (save_p) {
                prom_itlb[itlb_seen].tlb_ent = i;
                prom_itlb[itlb_seen].tlb_tag = tag;
                prom_itlb[itlb_seen].tlb_data = data;
            }
            __asm__ __volatile__("stxa %%g0, [%0] %1"
                                 : : "r" (TLB_TAG_ACCESS),
                                     "i" (ASI_IMMU));
            membar("#Sync");
            spitfire_put_itlb_data(i, 0x0UL);
            membar("#Sync");

            itlb_seen++;
            if (itlb_seen > 7)
                break;
        }
    }
    if (save_p)
        prom_ditlb_set = 1;
}
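/* Illustration (not part of the original file): a user-space sketch of
 * the lock-bit scan in inherit_locked_prom_mappings() above. An entry
 * is saved only when BOTH the lock and valid bits are set, and the
 * scan gives up after 8 hits. fake_tlb[], saved[] and the DEMO_* bit
 * positions are invented for the demo; the real _PAGE_L/_PAGE_VALID
 * bit positions differ.
 */
#include <stdio.h>

#define DEMO_PAGE_L     (1UL << 6)    /* assumed lock bit (demo only) */
#define DEMO_PAGE_VALID (1UL << 63)   /* assumed valid bit (demo only) */

int main(void)
{
    unsigned long fake_tlb[64] = { 0 };
    unsigned long saved[8];
    int i, seen = 0;

    fake_tlb[5]  = DEMO_PAGE_L | DEMO_PAGE_VALID;  /* locked+valid: kept */
    fake_tlb[9]  = DEMO_PAGE_VALID;                /* valid only: skipped */
    fake_tlb[12] = DEMO_PAGE_L | DEMO_PAGE_VALID;

    for (i = 0; i < 63; i++) {
        unsigned long data = fake_tlb[i];

        if ((data & (DEMO_PAGE_L | DEMO_PAGE_VALID)) ==
            (DEMO_PAGE_L | DEMO_PAGE_VALID)) {
            saved[seen] = data;   /* the kernel also saves tag and slot */
            fake_tlb[i] = 0;      /* then zaps the live entry */
            if (++seen > 7)
                break;
        }
    }
    printf("saved %d locked entries, first data %#lx\n", seen, saved[0]);
    return 0;
}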
/* Give the PROM back its world; done during reboots. */
void prom_reload_locked(void)
{
    int i;

    for (i = 0; i < 8; i++) {
        if (prom_dtlb[i].tlb_ent != -1) {
            __asm__ __volatile__("stxa %0, [%1] %2"
                                 : : "r" (prom_dtlb[i].tlb_tag),
                                     "r" (TLB_TAG_ACCESS),
                                     "i" (ASI_DMMU));
            membar("#Sync");
            spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
                                   prom_dtlb[i].tlb_data);
            membar("#Sync");
        }
        if (prom_itlb[i].tlb_ent != -1) {
            __asm__ __volatile__("stxa %0, [%1] %2"
                                 : : "r" (prom_itlb[i].tlb_tag),
                                     "r" (TLB_TAG_ACCESS),
                                     "i" (ASI_IMMU));
            membar("#Sync");
            spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
                                   prom_itlb[i].tlb_data);
            membar("#Sync");
        }
    }
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
    unsigned long va;
    int n = 0;

    for (va = start; va < end; va += 32) {
        spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
        if (++n >= 512)
            break;
    }
}

void __flush_cache_all(void)
{
    unsigned long va;

    flushw_all();
    for (va = 0; va < (PAGE_SIZE << 1); va += 32)
        spitfire_put_icache_tag(va, 0x0);
}

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
    unsigned long pstate;
    int i;

    __asm__ __volatile__("flushw\n\t"
                         "rdpr %%pstate, %0\n\t"
                         "wrpr %0, %1, %%pstate"
                         : "=r" (pstate)
                         : "i" (PSTATE_IE));
    for (i = 0; i < 64; i++) {
        /* Spitfire Errata #32 workaround */
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "flush %%g6"
                             : /* No outputs */
                             : "r" (0), "r" (PRIMARY_CONTEXT),
                               "i" (ASI_DMMU));
        if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
            __asm__ __volatile__("stxa %%g0, [%0] %1"
                                 : /* no outputs */
                                 : "r" (TLB_TAG_ACCESS),
                                   "i" (ASI_DMMU));
            membar("#Sync");
            spitfire_put_dtlb_data(i, 0x0UL);
            membar("#Sync");
        }
        /* Spitfire Errata #32 workaround */
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "flush %%g6"
                             : /* No outputs */
                             : "r" (0), "r" (PRIMARY_CONTEXT),
                               "i" (ASI_DMMU));
        if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
            __asm__ __volatile__("stxa %%g0, [%0] %1"
                                 : /* no outputs */
                                 : "r" (TLB_TAG_ACCESS),
                                   "i" (ASI_IMMU));
            membar("#Sync");
            spitfire_put_itlb_data(i, 0x0UL);
            membar("#Sync");
        }
    }
    __asm__ __volatile__("wrpr %0, 0, %%pstate"
                         : : "r" (pstate));
}
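/* Illustration (not part of the original file): __flush_dcache_range()
 * above steps through the range in 32-byte cache lines and indexes the
 * tag array with (va & 0x3fe0). That mask covers address bits 5..13,
 * i.e. 512 lines of 32 bytes = 16K of direct-mapped D-cache, and the
 * n >= 512 cutoff bounds the walk to one full pass over the cache.
 * A quick host-side check of that arithmetic, with an arbitrary demo
 * address:
 */
#include <stdio.h>

int main(void)
{
    unsigned long start = 0x123450a0UL;   /* arbitrary, demo only */
    unsigned long end = start + 4 * 32;   /* four cache lines */
    unsigned long va;
    int n = 0;

    for (va = start; va < end; va += 32) {
        printf("va %#lx -> D-cache line %3lu (tag index %#lx)\n",
               va, (va & 0x3fe0UL) >> 5, va & 0x3fe0UL);
        if (++n >= 512)   /* 512 lines * 32 bytes = 16K */
            break;
    }
    return 0;
}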
/* Caller does TLB context flushing on local CPU if necessary.
 *
 * We must be careful about boundary cases so that we never let the
 * user have CTX 0 (nucleus) and never use a CTX version of zero
 * (otherwise NO_CONTEXT would not be caught by the version
 * mismatch tests in mmu_context.h).
 */
void get_new_mmu_context(struct mm_struct *mm)
{
    unsigned long ctx, new_ctx;

    spin_lock(&ctx_alloc_lock);
    ctx = CTX_HWBITS(tlb_context_cache + 1);
    if (ctx == 0)
        ctx = 1;
    if (CTX_VALID(mm->context)) {
        unsigned long nr = CTX_HWBITS(mm->context);
        mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));
    }
    new_ctx = find_next_zero_bit(mmu_context_bmap,
                                 1UL << CTX_VERSION_SHIFT, ctx);
    if (new_ctx >= (1UL << CTX_VERSION_SHIFT)) {
        new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
        if (new_ctx >= ctx) {
            int i;

            new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
                      CTX_FIRST_VERSION;
            if (new_ctx == 1)
                new_ctx = CTX_FIRST_VERSION;

            /* Don't call memset, for 16 entries that's just
             * plain silly...
             */
            mmu_context_bmap[0] = 3;
            mmu_context_bmap[1] = 0;
            mmu_context_bmap[2] = 0;
            mmu_context_bmap[3] = 0;
            for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
                mmu_context_bmap[i + 0] = 0;
                mmu_context_bmap[i + 1] = 0;
                mmu_context_bmap[i + 2] = 0;
                mmu_context_bmap[i + 3] = 0;
            }
            goto out;
        }
    }
    mmu_context_bmap[new_ctx >> 6] |= (1UL << (new_ctx & 63));
    new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
    tlb_context_cache = new_ctx;
    spin_unlock(&ctx_alloc_lock);

    mm->context = new_ctx;
}
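/* Illustration (not part of the original file): a scaled-down,
 * user-space model of the version + bitmap scheme in
 * get_new_mmu_context() above. A context number is (version | hwbits);
 * the bitmap tracks live hardware contexts, and when it fills up the
 * version is bumped so every stale context fails the version check at
 * once. All DEMO_* names and the 64-context size are invented; the
 * locking and the real find_next_zero_bit() are elided.
 */
#include <stdio.h>

#define DEMO_HW_BITS     6                            /* 64 hw contexts */
#define DEMO_HW_MASK     ((1UL << DEMO_HW_BITS) - 1)
#define DEMO_VERSION_INC (1UL << DEMO_HW_BITS)

static unsigned long demo_cache = DEMO_VERSION_INC;   /* version 1, hw 0 */
static unsigned long demo_bmap = 1;                   /* ctx 0 = nucleus */

static unsigned long demo_new_context(void)
{
    unsigned long ctx = (demo_cache + 1) & DEMO_HW_MASK;
    unsigned long n;

    if (ctx == 0)
        ctx = 1;                       /* never hand out nucleus ctx 0 */
    for (n = ctx; n <= DEMO_HW_MASK; n++)
        if (!(demo_bmap & (1UL << n)))
            goto found;
    for (n = 1; n < ctx; n++)          /* wrap: retry from the bottom */
        if (!(demo_bmap & (1UL << n)))
            goto found;
    /* Exhausted: new version, only nucleus + this ctx marked used. */
    demo_cache += DEMO_VERSION_INC;
    demo_bmap = 1;
    n = 1;
found:
    demo_bmap |= 1UL << n;
    demo_cache = (demo_cache & ~DEMO_HW_MASK) | n;
    return demo_cache;
}

int main(void)
{
    int i;

    for (i = 0; i < 66; i++) {         /* forces one version rollover */
        unsigned long c = demo_new_context();

        if (i < 2 || i >= 61)
            printf("alloc %2d -> version %lu, hw ctx %2lu\n",
                   i, c >> DEMO_HW_BITS, c & DEMO_HW_MASK);
    }
    return 0;
}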
#ifndef CONFIG_SMP
struct pgtable_cache_struct pgt_quicklists;
#endif

/* For PMDs we don't care about the color; writes are only done via
 * the Dcache, which is write-through, so non-Dcache reads will always
 * see correct data.
 */
pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset)
{
    pmd_t *pmd;

    pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
    if (pmd) {
        memset(pmd, 0, PAGE_SIZE);
        pgd_set(pgd, pmd);
        return pmd + offset;
    }
    return NULL;
}

/* OK, we have to color these pages because during DTLB protection
 * faults we set the dirty bit via a non-Dcache enabled mapping in
 * the VPTE area. The kernel can end up missing the dirty bit,
 * resulting in processes crashing, _iff_ the VPTE mapping of the
 * ptes has a virtual address bit 13 that differs from bit 13 of
 * the physical address.
 *
 * The sequence is:
 * 1) DTLB protection fault, write dirty bit into pte via VPTE
 *    mappings.
 * 2) Swapper checks pte, does not see dirty bit, frees page.
 * 3) Process faults back in the page, the old pre-dirtied copy
 *    is provided, and here is the corruption.
 */
pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset, unsigned long color)
{
    struct page *page = alloc_pages(GFP_KERNEL, 1);

    if (page) {
        unsigned long *to_free;
        unsigned long paddr;
        pte_t *pte;

        set_page_count((page + 1), 1);
        paddr = (unsigned long) page_address(page);
        memset((char *)paddr, 0, (PAGE_SIZE << 1));

        if (!color) {
            pte = (pte_t *) paddr;
            to_free = (unsigned long *) (paddr + PAGE_SIZE);
        } else {
            pte = (pte_t *) (paddr + PAGE_SIZE);
            to_free = (unsigned long *) paddr;
        }

        /* Now free the other one up, adjust cache size. */
        *to_free = (unsigned long) pte_quicklist[color ^ 0x1];
        pte_quicklist[color ^ 0x1] = to_free;
        pgtable_cache_size++;

        pmd_set(pmd, pte);
        return pte + offset;
    }
    return NULL;
}

void sparc_ultra_dump_itlb(void)
{
    int slot;

    printk("Contents of itlb: ");
    for (slot = 0; slot < 14; slot++)
        printk(" ");
    printk("%2x:%016lx,%016lx\n", 0,
           spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
    for (slot = 1; slot < 64; slot += 3) {
        printk("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
               slot, spitfire_get_itlb_tag(slot),
               spitfire_get_itlb_data(slot),
               slot + 1, spitfire_get_itlb_tag(slot + 1),
               spitfire_get_itlb_data(slot + 1),
               slot + 2, spitfire_get_itlb_tag(slot + 2),
               spitfire_get_itlb_data(slot + 2));
    }
}

void sparc_ultra_dump_dtlb(void)
{
    int slot;

    printk("Contents of dtlb: ");
    for (slot = 0; slot < 14; slot++)
        printk(" ");
    printk("%2x:%016lx,%016lx\n", 0,
           spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
    for (slot = 1; slot < 64; slot += 3) {
        printk("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
               slot, spitfire_get_dtlb_tag(slot),
               spitfire_get_dtlb_data(slot),
               slot + 1, spitfire_get_dtlb_tag(slot + 1),
               spitfire_get_dtlb_data(slot + 1),
               slot + 2, spitfire_get_dtlb_tag(slot + 2),
               spitfire_get_dtlb_data(slot + 2));
    }
}

extern unsigned long cmdline_memory_size;

unsigned long __init bootmem_init(unsigned long *pages_avail)
{
    unsigned long bootmap_size, start_pfn, end_pfn;
    unsigned long end_of_phys_memory = 0UL;
    unsigned long bootmap_pfn, bytes_avail, size;
    int i;

    bytes_avail = 0UL;
    for (i = 0; sp_banks[i].num_bytes != 0; i++) {
        end_of_phys_memory = sp_banks[i].base_addr +
                             sp_banks[i].num_bytes;
        bytes_avail += sp_banks[i].num_bytes;
        if (cmdline_memory_size) {
            if (bytes_avail > cmdline_memory_size) {
                unsigned long slack = bytes_avail - cmdline_memory_size;

                bytes_avail -= slack;
                end_of_phys_memory -= slack;

                sp_banks[i].num_bytes -= slack;
                if (sp_banks[i].num_bytes == 0) {
                    sp_banks[i].base_addr = 0xdeadbeef;
                } else {
                    sp_banks[i + 1].num_bytes = 0;
                    sp_banks[i + 1].base_addr = 0xdeadbeef;
                }
                break;
            }
        }
    }

    *pages_avail = bytes_avail >> PAGE_SHIFT;

    /* Start with page aligned address of last symbol in kernel
     * image. The kernel is hard mapped below PAGE_OFFSET in a
     * 4MB locked TLB translation.
     */
    start_pfn = PAGE_ALIGN((unsigned long) &_end) -
                ((unsigned long) &empty_zero_page);

    /* Adjust up to the physical address where the kernel begins. */
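/* The listing is cut off above. Illustration (not part of the original
 * file): the sp_banks walk at the top of bootmem_init() trims physical
 * memory to a "mem=" command-line limit by accumulating bank sizes and
 * shaving the excess off the bank that crosses the limit. A user-space
 * model; the bank[] table and the 96M limit are invented, and the
 * 0xdeadbeef poisoning of dropped banks is elided.
 */
#include <stdio.h>

struct demo_bank {
    unsigned long base;
    unsigned long bytes;
};

int main(void)
{
    struct demo_bank bank[3] = {
        { 0x00000000UL, 64UL << 20 },   /* 64M bank */
        { 0x08000000UL, 64UL << 20 },   /* 64M bank */
        { 0, 0 },                       /* terminator, like sp_banks */
    };
    unsigned long limit = 96UL << 20;   /* pretend mem=96M was given */
    unsigned long avail = 0;
    int i;

    for (i = 0; bank[i].bytes != 0; i++) {
        avail += bank[i].bytes;
        if (limit && avail > limit) {
            unsigned long slack = avail - limit;

            avail -= slack;
            bank[i].bytes -= slack;     /* shrink the crossing bank */
            bank[i + 1].bytes = 0;      /* and drop everything after */
            break;
        }
    }
    printf("usable %luM: bank0 %luM at %#lx, bank1 %luM at %#lx\n",
           avail >> 20, bank[0].bytes >> 20, bank[0].base,
           bank[1].bytes >> 20, bank[1].base);
    return 0;
}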