sun4c.c
			begin += 512;
		}
	} else {
		while (begin < end) {
			__asm__ __volatile__(
				"ld [%0 + 0x00], %%g0\n\t"
				"ld [%0 + 0x10], %%g0\n\t"
				"ld [%0 + 0x20], %%g0\n\t"
				"ld [%0 + 0x30], %%g0\n\t"
				"ld [%0 + 0x40], %%g0\n\t"
				"ld [%0 + 0x50], %%g0\n\t"
				"ld [%0 + 0x60], %%g0\n\t"
				"ld [%0 + 0x70], %%g0\n\t"
				"ld [%0 + 0x80], %%g0\n\t"
				"ld [%0 + 0x90], %%g0\n\t"
				"ld [%0 + 0xa0], %%g0\n\t"
				"ld [%0 + 0xb0], %%g0\n\t"
				"ld [%0 + 0xc0], %%g0\n\t"
				"ld [%0 + 0xd0], %%g0\n\t"
				"ld [%0 + 0xe0], %%g0\n\t"
				"ld [%0 + 0xf0], %%g0\n"
				: : "r" (begin));
			begin += 256;
		}
	}
}

static void sun4c_flush_cache_mm(struct mm_struct *mm)
{
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		flush_user_windows();

		if (sun4c_context_ring[new_ctx].num_entries) {
			struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
			unsigned long flags;

			save_and_cli(flags);
			if (head->next != head) {
				struct sun4c_mmu_entry *entry = head->next;
				int savectx = sun4c_get_context();

				sun4c_set_context(new_ctx);
				sun4c_flush_context();
				do {
					struct sun4c_mmu_entry *next = entry->next;

					sun4c_user_unmap(entry);
					free_user_entry(new_ctx, entry);

					entry = next;
				} while (entry != head);
				sun4c_set_context(savectx);
			}
			restore_flags(flags);
		}
	}
}

static void sun4c_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		struct sun4c_mmu_entry *entry;
		unsigned long flags;

		flush_user_windows();

		save_and_cli(flags);
		/* All user segmap chains are ordered on entry->vaddr. */
		for (entry = head->next;
		     (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
		     entry = entry->next)
			;

		/* Tracing various job mixtures showed that this conditional
		 * only passes ~35% of the time for most worst case situations,
		 * therefore we avoid all of this gross overhead ~65% of the time.
		 */
		if ((entry != head) && (entry->vaddr < end)) {
			int octx = sun4c_get_context();

			sun4c_set_context(new_ctx);

			/* At this point, always, (start >= entry->vaddr) and
			 * (entry->vaddr < end), once the latter condition
			 * ceases to hold, or we hit the end of the list, we
			 * exit the loop.  The ordering of all user allocated
			 * segmaps makes this all work out so beautifully.
			 */
			do {
				struct sun4c_mmu_entry *next = entry->next;
				unsigned long realend;

				/* "realstart" is always >= entry->vaddr */
				realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE;
				if (end < realend)
					realend = end;
				if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
					unsigned long page = entry->vaddr;

					while (page < realend) {
						sun4c_flush_page(page);
						page += PAGE_SIZE;
					}
				} else {
					sun4c_flush_segment(entry->vaddr);
					sun4c_user_unmap(entry);
					free_user_entry(new_ctx, entry);
				}
				entry = next;
			} while ((entry != head) && (entry->vaddr < end));
			sun4c_set_context(octx);
		}
		restore_flags(flags);
	}
}

static void sun4c_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	int new_ctx = mm->context;

	/* Sun4c has no separate I/D caches so cannot optimize for non
	 * text page flushes.
	 */
	if (new_ctx != NO_CONTEXT) {
		int octx = sun4c_get_context();
		unsigned long flags;

		flush_user_windows();

		save_and_cli(flags);
		sun4c_set_context(new_ctx);
		sun4c_flush_page(page);
		sun4c_set_context(octx);
		restore_flags(flags);
	}
}

static void sun4c_flush_page_to_ram(unsigned long page)
{
	unsigned long flags;

	save_and_cli(flags);
	sun4c_flush_page(page);
	restore_flags(flags);
}

/* Sun4c cache is unified, both instructions and data live there, so
 * no need to flush the on-stack instructions for new signal handlers.
 */
static void sun4c_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

/* TLB flushing on the sun4c.  These routines count on the cache
 * flushing code to flush the user register windows so that we need
 * not do so when we get here.
 */
static void sun4c_flush_tlb_all(void)
{
	struct sun4c_mmu_entry *this_entry, *next_entry;
	unsigned long flags;
	int savectx, ctx;

	save_and_cli(flags);
	this_entry = sun4c_kernel_ring.ringhd.next;
	savectx = sun4c_get_context();
	flush_user_windows();
	while (sun4c_kernel_ring.num_entries) {
		next_entry = this_entry->next;
		sun4c_flush_segment(this_entry->vaddr);
		for (ctx = 0; ctx < num_contexts; ctx++) {
			sun4c_set_context(ctx);
			sun4c_put_segmap(this_entry->vaddr, invalid_segment);
		}
		free_kernel_entry(this_entry, &sun4c_kernel_ring);
		this_entry = next_entry;
	}
	sun4c_set_context(savectx);
	restore_flags(flags);
}

static void sun4c_flush_tlb_mm(struct mm_struct *mm)
{
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		unsigned long flags;

		save_and_cli(flags);
		if (head->next != head) {
			struct sun4c_mmu_entry *entry = head->next;
			int savectx = sun4c_get_context();

			sun4c_set_context(new_ctx);
			sun4c_flush_context();
			do {
				struct sun4c_mmu_entry *next = entry->next;

				sun4c_user_unmap(entry);
				free_user_entry(new_ctx, entry);

				entry = next;
			} while (entry != head);
			sun4c_set_context(savectx);
		}
		restore_flags(flags);
	}
}

static void sun4c_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		struct sun4c_mmu_entry *entry;
		unsigned long flags;

		save_and_cli(flags);
		/* See commentary in sun4c_flush_cache_range(). */
		for (entry = head->next;
		     (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
		     entry = entry->next)
			;

		if ((entry != head) && (entry->vaddr < end)) {
			int octx = sun4c_get_context();

			sun4c_set_context(new_ctx);
			do {
				struct sun4c_mmu_entry *next = entry->next;

				sun4c_flush_segment(entry->vaddr);
				sun4c_user_unmap(entry);
				free_user_entry(new_ctx, entry);

				entry = next;
			} while ((entry != head) && (entry->vaddr < end));
			sun4c_set_context(octx);
		}
		restore_flags(flags);
	}
}

static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		int savectx = sun4c_get_context();
		unsigned long flags;

		save_and_cli(flags);
		sun4c_set_context(new_ctx);
		page &= PAGE_MASK;
		sun4c_flush_page(page);
		sun4c_put_pte(page, 0);
		sun4c_set_context(savectx);
		restore_flags(flags);
	}
}

/* Build an I/O page table entry for physaddr and install it at virt_addr. */
void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
		     int bus_type, int rdonly)
{
	unsigned long page_entry;

	page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK);
	page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT));
	if (rdonly)
		page_entry &= ~_SUN4C_WRITEABLE;
	sun4c_put_pte(virt_addr, page_entry);
}

void sun4c_unmapioaddr(unsigned long virt_addr)
{
	sun4c_put_pte(virt_addr, 0);
}

static void sun4c_alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {
		/* A free hardware context is available, grab it. */
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}

	/* No free contexts: steal one from the used list, skipping the
	 * context still owned by old_mm.
	 */
	ctxp = ctx_used.next;
	if (ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
	sun4c_demap_context(&sun4c_context_ring[ctxp->ctx_number],
			    ctxp->ctx_number);
}

/* Switch the current MM context. */
static void sun4c_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
			    struct task_struct *tsk, int cpu)
{
	struct ctx_list *ctx;
	int dirty = 0;

	if (mm->context == NO_CONTEXT) {
		dirty = 1;
		sun4c_alloc_context(old_mm, mm);
	} else {
		/* Update the LRU ring of contexts. */
		ctx = ctx_list_pool + mm->context;
		remove_from_ctx_list(ctx);
		add_to_used_ctxlist(ctx);
	}

	if (dirty || old_mm != mm)
		sun4c_set_context(mm->context);
}

static void sun4c_destroy_context(struct mm_struct *mm)
{
	struct ctx_list *ctx_old;

	if (mm->context != NO_CONTEXT) {
		sun4c_demap_context(&sun4c_context_ring[mm->context], mm->context);
		ctx_old = ctx_list_pool + mm->context;
		remove_from_ctx_list(ctx_old);
		add_to_free_ctxlist(ctx_old);
		mm->context = NO_CONTEXT;
	}
}

static void sun4c_mmu_info(struct seq_file *m)
{
	int used_user_entries, i;

	used_user_entries = 0;
	for (i = 0; i < num_contexts; i++)
		used_user_entries += sun4c_context_ring[i].num_entries;

	seq_printf(m,
		   "vacsize\t\t: %d bytes\n"
		   "vachwflush\t: %s\n"
		   "vaclinesize\t: %d bytes\n"
		   "mmuctxs\t\t: %d\n"
		   "mmupsegs\t: %d\n"
		   "kernelpsegs\t: %d\n"
		   "kfreepsegs\t: %d\n"
		   "usedpsegs\t: %d\n"
		   "ufreepsegs\t: %d\n"
		   "user_taken\t: %d\n"
		   "max_taken\t: %d\n",
		   sun4c_vacinfo.num_bytes,
		   (sun4c_vacinfo.do_hwflushes ? "yes" : "no"),
		   sun4c_vacinfo.linesize,
		   num_contexts,
		   (invalid_segment + 1),
		   sun4c_kernel_ring.num_entries,
		   sun4c_kfree_ring.num_entries,
		   used_user_entries,
		   sun4c_ufree_ring.num_entries,
		   sun4c_user_taken_entries,
		   max_user_taken_entries);
}

/* Nothing below here should touch the mmu hardware nor the mmu_entry
 * data structures.
 */

/* First the functions which the mid-level code uses to directly
 * manipulate the software page tables.  Some defines since we are
 * emulating the i386 page directory layout.
 */
#define PGD_PRESENT  0x001
#define PGD_RW       0x002
#define PGD_USER     0x004
#define PGD_ACCESSED 0x020
#define PGD_DIRTY    0x040
#define PGD_TABLE    (PGD_PRESENT | PGD_RW | PGD_USER | PGD_ACCESSED | PGD_DIRTY)

static void sun4c_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static void sun4c_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
}

static void sun4c_pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	*pmdp = __pmd(PGD_TABLE | (unsigned long) ptep);
}

static int sun4c_pte_present(pte_t pte)
{
	return ((pte_val(pte) & (_SUN4C_PAGE_PRESENT | _SUN4C_PAGE_PRIV)) != 0);
}

static void sun4c_pte_clear(pte_t *ptep)	{ *ptep = __pte(0); }

static int sun4c_pmd_bad(pmd_t pmd)
{
	return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) ||
		(!VALID_PAGE(virt_to_page(pmd_val(pmd)))));
}

static int sun4c_pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & PGD_PRESENT) != 0);
}

static void sun4c_pmd_clear(pmd_t *pmdp)	{ *pmdp = __pmd(0); }

static int sun4c_pgd_none(pgd_t pgd)		{ return 0; }
static int sun4c_pgd_bad(pgd_t pgd)		{ return 0; }
static int sun4c_pgd_present(pgd_t pgd)		{ return 1; }
static void sun4c_pgd_clear(pgd_t * pgdp)	{ }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static pte_t sun4c_pte_mkwrite(pte_t pte)
{
	pte = __pte(pte_val(pte) | _SUN4C_PAGE_WRITE);
	if (pte_val(pte) & _SUN4C_PAGE_MODIFIED)
		pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_WRITE);
	return pte;
}

static pte_t sun4c_pte_mkdirty(pte_t pte)