/*
 * sun4c.c: sun4/sun4c MMU segment-map and context management.
 * (File-viewer banner from the scraping web page replaced with this header.)
 */
{ if (ARCH_SUN4) { switch (idprom->id_machtype) { case (SM_SUN4|SM_4_110): prom_printf("No support for 4100 yet\n"); prom_halt(); num_segmaps = 256; num_contexts = 8; break; case (SM_SUN4|SM_4_260): /* should be 512 segmaps. when it get fixed */ num_segmaps = 256; num_contexts = 16; break; case (SM_SUN4|SM_4_330): num_segmaps = 256; num_contexts = 16; break; case (SM_SUN4|SM_4_470): /* should be 1024 segmaps. when it get fixed */ num_segmaps = 256; num_contexts = 64; break; default: prom_printf("Invalid SUN4 model\n"); prom_halt(); }; } else { if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) || (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) { /* Hardcode these just to be safe, PROM on SS1 does * not have this info available in the root node. */ num_segmaps = 128; num_contexts = 8; } else { num_segmaps = prom_getintdefault(prom_root_node, "mmu-npmg", 128); num_contexts = prom_getintdefault(prom_root_node, "mmu-nctx", 0x8); } } patch_kernel_fault_handler();}volatile unsigned long *sun4c_memerr_reg = 0;void __init sun4c_probe_memerr_reg(void){ int node; struct linux_prom_registers regs[1]; if (ARCH_SUN4) { sun4c_memerr_reg = ioremap(sun4_memreg_physaddr, PAGE_SIZE); } else { node = prom_getchild(prom_root_node); node = prom_searchsiblings(prom_root_node, "memory-error"); if (!node) return; prom_getproperty(node, "reg", (char *)regs, sizeof(regs)); /* hmm I think regs[0].which_io is zero here anyways */ sun4c_memerr_reg = ioremap(regs[0].phys_addr, regs[0].reg_size); }}static inline void sun4c_init_ss2_cache_bug(void){ extern unsigned long start; if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) || (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) || (idprom->id_machtype == (SM_SUN4 | SM_4_330)) || (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) { /* Whee.. 
*/ printk("SS2 cache bug detected, uncaching trap table page\n"); sun4c_flush_page((unsigned int) &start); sun4c_put_pte(((unsigned long) &start), (sun4c_get_pte((unsigned long) &start) | _SUN4C_PAGE_NOCACHE)); }}/* Addr is always aligned on a page boundry for us already. */static void sun4c_map_dma_area(unsigned long va, u32 addr, int len){ unsigned long page, end; end = PAGE_ALIGN((addr + len)); while (addr < end) { page = va; sun4c_flush_page(page); page -= PAGE_OFFSET; page >>= PAGE_SHIFT; page |= (_SUN4C_PAGE_VALID | _SUN4C_PAGE_DIRTY | _SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_PRIV); sun4c_put_pte(addr, page); addr += PAGE_SIZE; va += PAGE_SIZE; }}static unsigned long sun4c_translate_dvma(unsigned long busa){ /* Fortunately for us, bus_addr == uncached_virt in sun4c. */ unsigned long pte = sun4c_get_pte(busa); return (pte << PAGE_SHIFT) + PAGE_OFFSET;}static void sun4c_unmap_dma_area(unsigned long busa, int len){ /* Fortunately for us, bus_addr == uncached_virt in sun4c. */ /* XXX Implement this */}/* TLB management. *//* Don't change this struct without changing entry.S. This is used * in the in-window kernel fault handler, and you don't want to mess * with that. (See sun4c_fault in entry.S). */struct sun4c_mmu_entry { struct sun4c_mmu_entry *next; struct sun4c_mmu_entry *prev; unsigned long vaddr; unsigned char pseg; unsigned char locked; /* For user mappings only, and completely hidden from kernel * TLB miss code. 
*/ unsigned char ctx; struct sun4c_mmu_entry *lru_next; struct sun4c_mmu_entry *lru_prev;};static struct sun4c_mmu_entry mmu_entry_pool[SUN4C_MAX_SEGMAPS];static void __init sun4c_init_mmu_entry_pool(void){ int i; for (i=0; i < SUN4C_MAX_SEGMAPS; i++) { mmu_entry_pool[i].pseg = i; mmu_entry_pool[i].next = 0; mmu_entry_pool[i].prev = 0; mmu_entry_pool[i].vaddr = 0; mmu_entry_pool[i].locked = 0; mmu_entry_pool[i].ctx = 0; mmu_entry_pool[i].lru_next = 0; mmu_entry_pool[i].lru_prev = 0; } mmu_entry_pool[invalid_segment].locked = 1;}static inline void fix_permissions(unsigned long vaddr, unsigned long bits_on, unsigned long bits_off){ unsigned long start, end; end = vaddr + SUN4C_REAL_PGDIR_SIZE; for (start = vaddr; start < end; start += PAGE_SIZE) if (sun4c_get_pte(start) & _SUN4C_PAGE_VALID) sun4c_put_pte(start, (sun4c_get_pte(start) | bits_on) & ~bits_off);}static inline void sun4c_init_map_kernelprom(unsigned long kernel_end){ unsigned long vaddr; unsigned char pseg, ctx;#ifdef CONFIG_SUN4 /* sun4/110 and 260 have no kadb. 
*/ if ((idprom->id_machtype != (SM_SUN4 | SM_4_260)) && (idprom->id_machtype != (SM_SUN4 | SM_4_110))) {#endif for (vaddr = KADB_DEBUGGER_BEGVM; vaddr < LINUX_OPPROM_ENDVM; vaddr += SUN4C_REAL_PGDIR_SIZE) { pseg = sun4c_get_segmap(vaddr); if (pseg != invalid_segment) { mmu_entry_pool[pseg].locked = 1; for (ctx = 0; ctx < num_contexts; ctx++) prom_putsegment(ctx, vaddr, pseg); fix_permissions(vaddr, _SUN4C_PAGE_PRIV, 0); } }#ifdef CONFIG_SUN4 }#endif for (vaddr = KERNBASE; vaddr < kernel_end; vaddr += SUN4C_REAL_PGDIR_SIZE) { pseg = sun4c_get_segmap(vaddr); mmu_entry_pool[pseg].locked = 1; for (ctx = 0; ctx < num_contexts; ctx++) prom_putsegment(ctx, vaddr, pseg); fix_permissions(vaddr, _SUN4C_PAGE_PRIV, _SUN4C_PAGE_NOCACHE); }}static void __init sun4c_init_lock_area(unsigned long start, unsigned long end){ int i, ctx; while (start < end) { for (i = 0; i < invalid_segment; i++) if (!mmu_entry_pool[i].locked) break; mmu_entry_pool[i].locked = 1; sun4c_init_clean_segmap(i); for (ctx = 0; ctx < num_contexts; ctx++) prom_putsegment(ctx, start, mmu_entry_pool[i].pseg); start += SUN4C_REAL_PGDIR_SIZE; }}/* Don't change this struct without changing entry.S. This is used * in the in-window kernel fault handler, and you don't want to mess * with that. (See sun4c_fault in entry.S). 
*/struct sun4c_mmu_ring { struct sun4c_mmu_entry ringhd; int num_entries;};static struct sun4c_mmu_ring sun4c_context_ring[SUN4C_MAX_CONTEXTS]; /* used user entries */static struct sun4c_mmu_ring sun4c_ufree_ring; /* free user entries */static struct sun4c_mmu_ring sun4c_ulru_ring; /* LRU user entries */struct sun4c_mmu_ring sun4c_kernel_ring; /* used kernel entries */struct sun4c_mmu_ring sun4c_kfree_ring; /* free kernel entries */static inline void sun4c_init_rings(void){ int i; for (i = 0; i < SUN4C_MAX_CONTEXTS; i++) { sun4c_context_ring[i].ringhd.next = sun4c_context_ring[i].ringhd.prev = &sun4c_context_ring[i].ringhd; sun4c_context_ring[i].num_entries = 0; } sun4c_ufree_ring.ringhd.next = sun4c_ufree_ring.ringhd.prev = &sun4c_ufree_ring.ringhd; sun4c_ufree_ring.num_entries = 0; sun4c_ulru_ring.ringhd.lru_next = sun4c_ulru_ring.ringhd.lru_prev = &sun4c_ulru_ring.ringhd; sun4c_ulru_ring.num_entries = 0; sun4c_kernel_ring.ringhd.next = sun4c_kernel_ring.ringhd.prev = &sun4c_kernel_ring.ringhd; sun4c_kernel_ring.num_entries = 0; sun4c_kfree_ring.ringhd.next = sun4c_kfree_ring.ringhd.prev = &sun4c_kfree_ring.ringhd; sun4c_kfree_ring.num_entries = 0;}static void add_ring(struct sun4c_mmu_ring *ring, struct sun4c_mmu_entry *entry){ struct sun4c_mmu_entry *head = &ring->ringhd; entry->prev = head; (entry->next = head->next)->prev = entry; head->next = entry; ring->num_entries++;}static __inline__ void add_lru(struct sun4c_mmu_entry *entry){ struct sun4c_mmu_ring *ring = &sun4c_ulru_ring; struct sun4c_mmu_entry *head = &ring->ringhd; entry->lru_next = head; (entry->lru_prev = head->lru_prev)->lru_next = entry; head->lru_prev = entry;}static void add_ring_ordered(struct sun4c_mmu_ring *ring, struct sun4c_mmu_entry *entry){ struct sun4c_mmu_entry *head = &ring->ringhd; unsigned long addr = entry->vaddr; while ((head->next != &ring->ringhd) && (head->next->vaddr < addr)) head = head->next; entry->prev = head; (entry->next = head->next)->prev = entry; head->next = entry; 
ring->num_entries++; add_lru(entry);}static __inline__ void remove_ring(struct sun4c_mmu_ring *ring, struct sun4c_mmu_entry *entry){ struct sun4c_mmu_entry *next = entry->next; (next->prev = entry->prev)->next = next; ring->num_entries--;}static void remove_lru(struct sun4c_mmu_entry *entry){ struct sun4c_mmu_entry *next = entry->lru_next; (next->lru_prev = entry->lru_prev)->lru_next = next;}static void free_user_entry(int ctx, struct sun4c_mmu_entry *entry){ remove_ring(sun4c_context_ring+ctx, entry); remove_lru(entry); add_ring(&sun4c_ufree_ring, entry);}static void free_kernel_entry(struct sun4c_mmu_entry *entry, struct sun4c_mmu_ring *ring){ remove_ring(ring, entry); add_ring(&sun4c_kfree_ring, entry);}static void __init sun4c_init_fill_kernel_ring(int howmany){ int i; while (howmany) { for (i = 0; i < invalid_segment; i++) if (!mmu_entry_pool[i].locked) break; mmu_entry_pool[i].locked = 1; sun4c_init_clean_segmap(i); add_ring(&sun4c_kfree_ring, &mmu_entry_pool[i]); howmany--; }}static void __init sun4c_init_fill_user_ring(void){ int i; for (i = 0; i < invalid_segment; i++) { if (mmu_entry_pool[i].locked) continue; sun4c_init_clean_segmap(i); add_ring(&sun4c_ufree_ring, &mmu_entry_pool[i]); }}static void sun4c_kernel_unmap(struct sun4c_mmu_entry *kentry){ int savectx, ctx; savectx = sun4c_get_context(); for (ctx = 0; ctx < num_contexts; ctx++) { sun4c_set_context(ctx); sun4c_put_segmap(kentry->vaddr, invalid_segment); } sun4c_set_context(savectx);}static void sun4c_kernel_map(struct sun4c_mmu_entry *kentry){ int savectx, ctx; savectx = sun4c_get_context(); for (ctx = 0; ctx < num_contexts; ctx++) { sun4c_set_context(ctx); sun4c_put_segmap(kentry->vaddr, kentry->pseg); } sun4c_set_context(savectx);}#define sun4c_user_unmap(__entry) \ sun4c_put_segmap((__entry)->vaddr, invalid_segment)static void sun4c_demap_context_hw(struct sun4c_mmu_ring *crp, unsigned char ctx){ struct sun4c_mmu_entry *head = &crp->ringhd; unsigned long flags; save_and_cli(flags); if 
(head->next != head) { struct sun4c_mmu_entry *entry = head->next; int savectx = sun4c_get_context(); flush_user_windows(); sun4c_set_context(ctx); sun4c_flush_context_hw(); do { struct sun4c_mmu_entry *next = entry->next; sun4c_user_unmap(entry); free_user_entry(ctx, entry); entry = next; } while (entry != head); sun4c_set_context(savectx); } restore_flags(flags);}static void sun4c_demap_context_sw(struct sun4c_mmu_ring *crp, unsigned char ctx){ struct sun4c_mmu_entry *head = &crp->ringhd; unsigned long flags; save_and_cli(flags); if (head->next != head) { struct sun4c_mmu_entry *entry = head->next; int savectx = sun4c_get_context(); flush_user_windows(); sun4c_set_context(ctx); sun4c_flush_context_sw(); do { struct sun4c_mmu_entry *next = entry->next; sun4c_user_unmap(entry); free_user_entry(ctx, entry); entry = next; } while (entry != head); sun4c_set_context(savectx); } restore_flags(flags);}static int sun4c_user_taken_entries = 0; /* This is how much we have. */static int max_user_taken_entries = 0; /* This limits us and prevents deadlock. */static struct sun4c_mmu_entry *sun4c_kernel_strategy(void){ struct sun4c_mmu_entry *this_entry; /* If some are free, return first one. */ if (sun4c_kfree_ring.num_entries) { this_entry = sun4c_kfree_ring.ringhd.next; return this_entry; } /* Else free one up. */ this_entry = sun4c_kernel_ring.ringhd.prev; if (sun4c_vacinfo.do_hwflushes) sun4c_flush_segment_hw(this_entry->vaddr); else sun4c_flush_segment_sw(this_entry->vaddr); sun4c_kernel_unmap(this_entry); free_kernel_entry(this_entry, &sun4c_kernel_ring); this_entry = sun4c_kfree_ring.ringhd.next; return this_entry;}/* Using this method to free up mmu entries eliminates a lot of * potential races since we have a kernel that incurs tlb * replacement faults. There may be performance penalties. * * NOTE: Must be called with interrupts disabled. 
*/static struct sun4c_mmu_entry *sun4c_user_strategy(void){ struct sun4c_mmu_entry *entry; unsigned char ctx; int savectx; /* If some are free, return first one. */ if (sun4c_ufree_ring.num_entries) { entry = sun4c_ufree_ring.ringhd.next; goto unlink_out; } if (sun4c_user_taken_entries) { entry = sun4c_kernel_strategy(); sun4c_user_taken_entries--; goto kunlink_out; } /* Grab from the beginning of the LRU list. */ entry = sun4c_ulru_ring.ringhd.lru_next; ctx = entry->ctx; savectx = sun4c_get_context(); flush_user_windows(); sun4c_set_context(ctx); if (sun4c_vacinfo.do_hwflushes) sun4c_flush_segment_hw(entry->vaddr); else sun4c_flush_segment_sw(entry->vaddr); sun4c_user_unmap(entry); remove_ring(sun4c_context_ring + ctx, entry);