/* sun4c.c — SPARC sun4c cache/TLB management routines (excerpt;
 * apparently from arch/sparc/mm/sun4c.c of an early Linux kernel —
 * TODO confirm the exact origin and version).
 */
/* NOTE(review): tail of a function whose opening lines fall before this
 * excerpt (its shape matches sun4c_flush_cache_mm_sw below): finish
 * releasing the context's user mmu entries, restore the previously
 * active MMU context, then re-enable interrupts.
 */
					entry = next;
				} while (entry != head);
				sun4c_set_context(savectx);
			}
			restore_flags(flags);
		}
	}
}

/* Flush the cache for the user address range [start, end) of 'mm'
 * ("_hw" flush variant; sun4c_flush_tlb_all shows the _hw routines are
 * the ones used when sun4c_vacinfo.do_hwflushes is set).  User segmap
 * entries for a context live on a ring sorted by vaddr, so we can skip
 * straight to the first overlapping entry and stop at the first entry
 * at or past 'end'.
 */
static void sun4c_flush_cache_range_hw(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		struct sun4c_mmu_entry *entry;
		unsigned long flags;

		flush_user_windows();
		save_and_cli(flags);

		/* All user segmap chains are ordered on entry->vaddr. */
		for (entry = head->next;
		     (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
		     entry = entry->next)
			;

		/* Tracing various job mixtures showed that this conditional
		 * only passes ~35% of the time for most worse case situations,
		 * therefore we avoid all of this gross overhead ~65% of the time.
		 */
		if ((entry != head) && (entry->vaddr < end)) {
			int octx = sun4c_get_context();

			/* Switch into the target context for the flushes,
			 * restoring 'octx' when done.
			 */
			sun4c_set_context(new_ctx);

			/* At this point, always, (start >= entry->vaddr) and
			 * (entry->vaddr < end), once the latter condition
			 * ceases to hold, or we hit the end of the list, we
			 * exit the loop.  The ordering of all user allocated
			 * segmaps makes this all work out so beautifully.
			 */
			do {
				struct sun4c_mmu_entry *next = entry->next;
				unsigned long realend;

				/* "realstart" is always >= entry->vaddr */
				realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE;
				if (end < realend)
					realend = end;
				/* Small overlap (<= 8 pages): flush page by
				 * page and keep the mapping.  Larger overlap:
				 * flush the whole segment and release the
				 * mmu entry.
				 */
				if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
					unsigned long page = entry->vaddr;

					while (page < realend) {
						sun4c_flush_page_hw(page);
						page += PAGE_SIZE;
					}
				} else {
					sun4c_flush_segment_hw(entry->vaddr);
					sun4c_user_unmap(entry);
					free_user_entry(new_ctx, entry);
				}
				entry = next;
			} while ((entry != head) && (entry->vaddr < end));
			sun4c_set_context(octx);
		}
		restore_flags(flags);
	}
}

/* Flush one user page from the cache ("_hw" flush variant). */
static void sun4c_flush_cache_page_hw(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	int new_ctx = mm->context;

	/* Sun4c has no separate I/D caches so cannot optimize for non
	 * text page flushes.
	 */
	if (new_ctx != NO_CONTEXT) {
		int octx = sun4c_get_context();
		unsigned long flags;

		flush_user_windows();
		save_and_cli(flags);
		/* Flush within the page's own context, then restore. */
		sun4c_set_context(new_ctx);
		sun4c_flush_page_hw(page);
		sun4c_set_context(octx);
		restore_flags(flags);
	}
}

/* Flush one page's cache lines back to RAM ("_hw" flush variant). */
static void sun4c_flush_page_to_ram_hw(unsigned long page)
{
	unsigned long flags;

	save_and_cli(flags);
	sun4c_flush_page_hw(page);
	restore_flags(flags);
}

/* Flush the whole cache footprint of 'mm' and release all of its user
 * segmap entries ("_sw" flush variant).
 */
static void sun4c_flush_cache_mm_sw(struct mm_struct *mm)
{
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		flush_user_windows();
		if (sun4c_context_ring[new_ctx].num_entries) {
			struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
			unsigned long flags;

			save_and_cli(flags);
			if (head->next != head) {
				struct sun4c_mmu_entry *entry = head->next;
				int savectx = sun4c_get_context();

				/* Flush the context from within it, then
				 * walk the ring releasing every entry.
				 */
				sun4c_set_context(new_ctx);
				sun4c_flush_context_sw();
				do {
					struct sun4c_mmu_entry *next = entry->next;

					sun4c_user_unmap(entry);
					free_user_entry(new_ctx, entry);

					entry = next;
				} while (entry != head);
				sun4c_set_context(savectx);
			}
			restore_flags(flags);
		}
	}
}

/* Flush the cache for the user range [start, end) of 'mm' ("_sw" flush
 * variant; the logic mirrors sun4c_flush_cache_range_hw).
 */
static void sun4c_flush_cache_range_sw(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		struct sun4c_mmu_entry *entry;
		unsigned long flags;

		flush_user_windows();
		save_and_cli(flags);

		/* All user segmap chains are ordered on entry->vaddr. */
		for (entry = head->next;
		     (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
		     entry = entry->next)
			;

		/* Tracing various job mixtures showed that this conditional
		 * only passes ~35% of the time for most worse case situations,
		 * therefore we avoid all of this gross overhead ~65% of the time.
		 */
		if ((entry != head) && (entry->vaddr < end)) {
			int octx = sun4c_get_context();

			sun4c_set_context(new_ctx);

			/* At this point, always, (start >= entry->vaddr) and
			 * (entry->vaddr < end), once the latter condition
			 * ceases to hold, or we hit the end of the list, we
			 * exit the loop.  The ordering of all user allocated
			 * segmaps makes this all work out so beautifully.
			 */
			do {
				struct sun4c_mmu_entry *next = entry->next;
				unsigned long realend;

				/* "realstart" is always >= entry->vaddr */
				realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE;
				if (end < realend)
					realend = end;
				/* Small overlap (<= 8 pages): page-by-page
				 * flush; larger: flush the whole segment and
				 * release the mmu entry.
				 */
				if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
					unsigned long page = entry->vaddr;

					while (page < realend) {
						sun4c_flush_page_sw(page);
						page += PAGE_SIZE;
					}
				} else {
					sun4c_flush_segment_sw(entry->vaddr);
					sun4c_user_unmap(entry);
					free_user_entry(new_ctx, entry);
				}
				entry = next;
			} while ((entry != head) && (entry->vaddr < end));
			sun4c_set_context(octx);
		}
		restore_flags(flags);
	}
}

/* Flush one user page from the cache ("_sw" flush variant). */
static void sun4c_flush_cache_page_sw(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	int new_ctx = mm->context;

	/* Sun4c has no separate I/D caches so cannot optimize for non
	 * text page flushes.
	 */
	if (new_ctx != NO_CONTEXT) {
		int octx = sun4c_get_context();
		unsigned long flags;

		flush_user_windows();
		save_and_cli(flags);
		sun4c_set_context(new_ctx);
		sun4c_flush_page_sw(page);
		sun4c_set_context(octx);
		restore_flags(flags);
	}
}

/* Flush one page's cache lines back to RAM ("_sw" flush variant). */
static void sun4c_flush_page_to_ram_sw(unsigned long page)
{
	unsigned long flags;

	save_and_cli(flags);
	sun4c_flush_page_sw(page);
	restore_flags(flags);
}

/* Sun4c cache is unified, both instructions and data live there, so
 * no need to flush the on-stack instructions for new signal handlers.
 */
static void sun4c_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

/* TLB flushing on the sun4c.  These routines count on the cache
 * flushing code to flush the user register windows so that we need
 * not do so when we get here.
 */

/* Invalidate every kernel TLB entry: flush each kernel segment from the
 * cache, clear its segmap slot in every MMU context, and return the
 * entry to the kernel free pool.  Runs with interrupts disabled.
 */
static void sun4c_flush_tlb_all(void)
{
	struct sun4c_mmu_entry *this_entry, *next_entry;
	unsigned long flags;
	int savectx, ctx;

	save_and_cli(flags);
	this_entry = sun4c_kernel_ring.ringhd.next;
	savectx = sun4c_get_context();
	flush_user_windows();
	while (sun4c_kernel_ring.num_entries) {
		next_entry = this_entry->next;
		/* Pick the flush implementation this machine supports. */
		if (sun4c_vacinfo.do_hwflushes)
			sun4c_flush_segment_hw(this_entry->vaddr);
		else
			sun4c_flush_segment_sw(this_entry->vaddr);
		/* Kernel segmaps are installed in every context, so the
		 * invalidation must visit each one.
		 */
		for (ctx = 0; ctx < num_contexts; ctx++) {
			sun4c_set_context(ctx);
			sun4c_put_segmap(this_entry->vaddr, invalid_segment);
		}
		free_kernel_entry(this_entry, &sun4c_kernel_ring);
		this_entry = next_entry;
	}
	sun4c_set_context(savectx);
	restore_flags(flags);
}

/* Drop all TLB entries belonging to 'mm' ("_hw" flush variant). */
static void sun4c_flush_tlb_mm_hw(struct mm_struct *mm)
{
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		unsigned long flags;

		save_and_cli(flags);
		if (head->next != head) {
			struct sun4c_mmu_entry *entry = head->next;
			int savectx = sun4c_get_context();

			sun4c_set_context(new_ctx);
			sun4c_flush_context_hw();
			do {
				struct sun4c_mmu_entry *next = entry->next;

				sun4c_user_unmap(entry);
				free_user_entry(new_ctx, entry);

				entry = next;
			} while (entry != head);
			sun4c_set_context(savectx);
		}
		restore_flags(flags);
	}
}

/* Drop TLB entries covering [start, end) of 'mm' ("_hw" flush variant). */
static void sun4c_flush_tlb_range_hw(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		struct sun4c_mmu_entry *entry;
		unsigned long flags;

		save_and_cli(flags);
		/* See commentary in sun4c_flush_cache_range_*().
		 */
		/* Skip entries entirely below 'start' (ring is sorted). */
		for (entry = head->next;
		     (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
		     entry = entry->next)
			;
		if ((entry != head) && (entry->vaddr < end)) {
			int octx = sun4c_get_context();

			sun4c_set_context(new_ctx);
			do {
				struct sun4c_mmu_entry *next = entry->next;

				sun4c_flush_segment_hw(entry->vaddr);
				sun4c_user_unmap(entry);
				free_user_entry(new_ctx, entry);

				entry = next;
			} while ((entry != head) && (entry->vaddr < end));
			sun4c_set_context(octx);
		}
		restore_flags(flags);
	}
}

/* Drop the TLB entry for one user page: flush its cache lines, then
 * clear its PTE ("_hw" flush variant).
 */
static void sun4c_flush_tlb_page_hw(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		int savectx = sun4c_get_context();
		unsigned long flags;

		save_and_cli(flags);
		sun4c_set_context(new_ctx);
		page &= PAGE_MASK;
		sun4c_flush_page_hw(page);
		sun4c_put_pte(page, 0);
		sun4c_set_context(savectx);
		restore_flags(flags);
	}
}

/* Drop all TLB entries belonging to 'mm' ("_sw" flush variant). */
static void sun4c_flush_tlb_mm_sw(struct mm_struct *mm)
{
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		unsigned long flags;

		save_and_cli(flags);
		if (head->next != head) {
			struct sun4c_mmu_entry *entry = head->next;
			int savectx = sun4c_get_context();

			sun4c_set_context(new_ctx);
			sun4c_flush_context_sw();
			do {
				struct sun4c_mmu_entry *next = entry->next;

				sun4c_user_unmap(entry);
				free_user_entry(new_ctx, entry);

				entry = next;
			} while (entry != head);
			sun4c_set_context(savectx);
		}
		restore_flags(flags);
	}
}

/* Drop TLB entries covering [start, end) of 'mm' ("_sw" flush variant). */
static void sun4c_flush_tlb_range_sw(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		struct sun4c_mmu_entry *entry;
		unsigned long flags;

		save_and_cli(flags);
		/* See commentary in sun4c_flush_cache_range_*().
		 */
		/* Skip entries entirely below 'start' (ring is sorted). */
		for (entry = head->next;
		     (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
		     entry = entry->next)
			;
		if ((entry != head) && (entry->vaddr < end)) {
			int octx = sun4c_get_context();

			sun4c_set_context(new_ctx);
			do {
				struct sun4c_mmu_entry *next = entry->next;

				sun4c_flush_segment_sw(entry->vaddr);
				sun4c_user_unmap(entry);
				free_user_entry(new_ctx, entry);

				entry = next;
			} while ((entry != head) && (entry->vaddr < end));
			sun4c_set_context(octx);
		}
		restore_flags(flags);
	}
}

/* Drop the TLB entry for one user page: flush its cache lines, then
 * clear its PTE ("_sw" flush variant).
 */
static void sun4c_flush_tlb_page_sw(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		int savectx = sun4c_get_context();
		unsigned long flags;

		save_and_cli(flags);
		sun4c_set_context(new_ctx);
		page &= PAGE_MASK;
		sun4c_flush_page_sw(page);
		sun4c_put_pte(page, 0);
		sun4c_set_context(savectx);
		restore_flags(flags);
	}
}

/* Store a pte into the in-memory page table; no MMU side effects here. */
static void sun4c_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

/* No-ops on sun4c: the pgd/pmd levels need no update here. */
static void sun4c_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
}

static void sun4c_pmd_set(pmd_t * pmdp, pte_t * ptep)
{
}

/* Map the physical I/O address 'physaddr' at 'virt_addr': build a
 * privileged I/O pte (with _SUN4C_PAGE_PRESENT masked out of pg_iobits),
 * optionally read-only, and install it.  'bus_type' is unused in the
 * code visible here.
 */
void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
{
	unsigned long page_entry;

	page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK);
	page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT));
	if (rdonly)
		page_entry &= ~_SUN4C_WRITEABLE;
	sun4c_put_pte(virt_addr, page_entry);
}

/* Remove an I/O mapping installed by sun4c_mapioaddr(). */
void sun4c_unmapioaddr(unsigned long virt_addr)
{
	sun4c_put_pte(virt_addr, 0);
}

/* Allocate an MMU context for 'mm': take one from the free list if
 * available, otherwise steal an in-use context (skipping the one owned
 * by 'old_mm') and mark its former owner as having NO_CONTEXT.
 */
static void sun4c_alloc_context_hw(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	/* No free contexts: steal one from the used list, but never the
	 * context old_mm currently owns.
	 */
	ctxp = ctx_used.next;
	if (ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
	/* Evict the stolen context's old translations before reuse. */
	sun4c_demap_context_hw(&sun4c_context_ring[ctxp->ctx_number], ctxp->ctx_number);
}

/* Switch the current MM context. */
static void sun4c_switch_mm_hw(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
{
	struct ctx_list *ctx;
	int dirty = 0;

	if (mm->context == NO_CONTEXT) {
		/* First activation of this mm: give it a context. */
		dirty = 1;
		sun4c_alloc_context_hw(old_mm, mm);
	} else {
	/* NOTE(review): function continues past the end of this excerpt. */
/* NOTE(review): trailing code-hosting-site UI text (keyboard-shortcut
 * help, not part of the source) removed here.  The excerpt above is
 * truncated mid-function (sun4c_switch_mm_hw).
 */