
sun4c.c (linux-2.6.15.6, C)

				free_user_entry(new_ctx, entry);
				entry = next;
			} while (entry != head);
			sun4c_set_context(savectx);
		}
		local_irq_restore(flags);
	}
}

static void sun4c_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		struct sun4c_mmu_entry *entry;
		unsigned long flags;

		flush_user_windows();
		local_irq_save(flags);

		/* All user segmap chains are ordered on entry->vaddr. */
		for (entry = head->next;
		     (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
		     entry = entry->next)
			;

		/* Tracing various job mixtures showed that this conditional
		 * only passes ~35% of the time for most worse case situations,
		 * therefore we avoid all of this gross overhead ~65% of the time.
		 */
		if ((entry != head) && (entry->vaddr < end)) {
			int octx = sun4c_get_context();
			sun4c_set_context(new_ctx);

			/* At this point, always, (start >= entry->vaddr) and
			 * (entry->vaddr < end), once the latter condition
			 * ceases to hold, or we hit the end of the list, we
			 * exit the loop.  The ordering of all user allocated
			 * segmaps makes this all work out so beautifully.
			 */
			do {
				struct sun4c_mmu_entry *next = entry->next;
				unsigned long realend;

				/* "realstart" is always >= entry->vaddr */
				realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE;
				if (end < realend)
					realend = end;
				if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
					unsigned long page = entry->vaddr;
					while (page < realend) {
						sun4c_flush_page(page);
						page += PAGE_SIZE;
					}
				} else {
					sun4c_flush_segment(entry->vaddr);
					sun4c_user_unmap(entry);
					free_user_entry(new_ctx, entry);
				}
				entry = next;
			} while ((entry != head) && (entry->vaddr < end));
			sun4c_set_context(octx);
		}
		local_irq_restore(flags);
	}
}

static void sun4c_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	int new_ctx = mm->context;

	/* Sun4c has no separate I/D caches so cannot optimize for non
	 * text page flushes.
	 */
	if (new_ctx != NO_CONTEXT) {
		int octx = sun4c_get_context();
		unsigned long flags;

		flush_user_windows();
		local_irq_save(flags);
		sun4c_set_context(new_ctx);
		sun4c_flush_page(page);
		sun4c_set_context(octx);
		local_irq_restore(flags);
	}
}

static void sun4c_flush_page_to_ram(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	sun4c_flush_page(page);
	local_irq_restore(flags);
}

/* Sun4c cache is unified, both instructions and data live there, so
 * no need to flush the on-stack instructions for new signal handlers.
 */
static void sun4c_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

/* TLB flushing on the sun4c.  These routines count on the cache
 * flushing code to flush the user register windows so that we need
 * not do so when we get here.
 */
static void sun4c_flush_tlb_all(void)
{
	struct sun4c_mmu_entry *this_entry, *next_entry;
	unsigned long flags;
	int savectx, ctx;

	local_irq_save(flags);
	this_entry = sun4c_kernel_ring.ringhd.next;
	savectx = sun4c_get_context();
	flush_user_windows();
	while (sun4c_kernel_ring.num_entries) {
		next_entry = this_entry->next;
		sun4c_flush_segment(this_entry->vaddr);
		for (ctx = 0; ctx < num_contexts; ctx++) {
			sun4c_set_context(ctx);
			sun4c_put_segmap(this_entry->vaddr, invalid_segment);
		}
		free_kernel_entry(this_entry, &sun4c_kernel_ring);
		this_entry = next_entry;
	}
	sun4c_set_context(savectx);
	local_irq_restore(flags);
}

static void sun4c_flush_tlb_mm(struct mm_struct *mm)
{
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		unsigned long flags;

		local_irq_save(flags);
		if (head->next != head) {
			struct sun4c_mmu_entry *entry = head->next;
			int savectx = sun4c_get_context();

			sun4c_set_context(new_ctx);
			sun4c_flush_context();
			do {
				struct sun4c_mmu_entry *next = entry->next;

				sun4c_user_unmap(entry);
				free_user_entry(new_ctx, entry);

				entry = next;
			} while (entry != head);
			sun4c_set_context(savectx);
		}
		local_irq_restore(flags);
	}
}

static void sun4c_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		struct sun4c_mmu_entry *entry;
		unsigned long flags;

		local_irq_save(flags);
		/* See commentary in sun4c_flush_cache_range(). */
		for (entry = head->next;
		     (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
		     entry = entry->next)
			;

		if ((entry != head) && (entry->vaddr < end)) {
			int octx = sun4c_get_context();

			sun4c_set_context(new_ctx);
			do {
				struct sun4c_mmu_entry *next = entry->next;

				sun4c_flush_segment(entry->vaddr);
				sun4c_user_unmap(entry);
				free_user_entry(new_ctx, entry);

				entry = next;
			} while ((entry != head) && (entry->vaddr < end));
			sun4c_set_context(octx);
		}
		local_irq_restore(flags);
	}
}

static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		int savectx = sun4c_get_context();
		unsigned long flags;

		local_irq_save(flags);
		sun4c_set_context(new_ctx);
		page &= PAGE_MASK;
		sun4c_flush_page(page);
		sun4c_put_pte(page, 0);
		sun4c_set_context(savectx);
		local_irq_restore(flags);
	}
}

static inline void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr)
{
	unsigned long page_entry;

	page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK);
	page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT));
	sun4c_put_pte(virt_addr, page_entry);
}

static void sun4c_mapiorange(unsigned int bus, unsigned long xpa,
    unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		sun4c_mapioaddr(xpa, xva);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
}

static void sun4c_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		sun4c_put_pte(virt_addr, 0);
		virt_addr += PAGE_SIZE;
	}
}

static void sun4c_alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if (ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
	sun4c_demap_context(&sun4c_context_ring[ctxp->ctx_number],
			       ctxp->ctx_number);
}

/* Switch the current MM context. */
static void sun4c_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
{
	struct ctx_list *ctx;
	int dirty = 0;

	if (mm->context == NO_CONTEXT) {
		dirty = 1;
		sun4c_alloc_context(old_mm, mm);
	} else {
		/* Update the LRU ring of contexts. */
		ctx = ctx_list_pool + mm->context;
		remove_from_ctx_list(ctx);
		add_to_used_ctxlist(ctx);
	}

	if (dirty || old_mm != mm)
		sun4c_set_context(mm->context);
}

static void sun4c_destroy_context(struct mm_struct *mm)
{
	struct ctx_list *ctx_old;

	if (mm->context != NO_CONTEXT) {
		sun4c_demap_context(&sun4c_context_ring[mm->context], mm->context);
		ctx_old = ctx_list_pool + mm->context;
		remove_from_ctx_list(ctx_old);
		add_to_free_ctxlist(ctx_old);
		mm->context = NO_CONTEXT;
	}
}

static void sun4c_mmu_info(struct seq_file *m)
{
	int used_user_entries, i;

	used_user_entries = 0;
	for (i = 0; i < num_contexts; i++)
		used_user_entries += sun4c_context_ring[i].num_entries;

	seq_printf(m,
		   "vacsize\t\t: %d bytes\n"
		   "vachwflush\t: %s\n"
		   "vaclinesize\t: %d bytes\n"
		   "mmuctxs\t\t: %d\n"
		   "mmupsegs\t: %d\n"
		   "kernelpsegs\t: %d\n"
		   "kfreepsegs\t: %d\n"
		   "usedpsegs\t: %d\n"
		   "ufreepsegs\t: %d\n"
		   "user_taken\t: %d\n"
		   "max_taken\t: %d\n",
		   sun4c_vacinfo.num_bytes,
		   (sun4c_vacinfo.do_hwflushes ? "yes" : "no"),
		   sun4c_vacinfo.linesize,
		   num_contexts,
		   (invalid_segment + 1),
		   sun4c_kernel_ring.num_entries,
		   sun4c_kfree_ring.num_entries,
		   used_user_entries,
		   sun4c_ufree_ring.num_entries,
		   sun4c_user_taken_entries,
		   max_user_taken_entries);
}

/* Nothing below here should touch the mmu hardware nor the mmu_entry
 * data structures.
 */

/* First the functions which the mid-level code uses to directly
 * manipulate the software page tables.  Some defines since we are
 * emulating the i386 page directory layout.
 */
#define PGD_PRESENT  0x001
#define PGD_RW       0x002
#define PGD_USER     0x004
#define PGD_ACCESSED 0x020
#define PGD_DIRTY    0x040
#define PGD_TABLE    (PGD_PRESENT | PGD_RW | PGD_USER | PGD_ACCESSED | PGD_DIRTY)

static void sun4c_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static void sun4c_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
}

static void sun4c_pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	pmdp->pmdv[0] = PGD_TABLE | (unsigned long) ptep;
}

static void sun4c_pmd_populate(pmd_t * pmdp, struct page * ptep)
{
	if (page_address(ptep) == NULL) BUG();	/* No highmem on sun4c */
	pmdp->pmdv[0] = PGD_TABLE | (unsigned long) page_address(ptep);
}

static int sun4c_pte_present(pte_t pte)
{
	return ((pte_val(pte) & (_SUN4C_PAGE_PRESENT | _SUN4C_PAGE_PRIV)) != 0);
}
static void sun4c_pte_clear(pte_t *ptep)	{ *ptep = __pte(0); }

static int sun4c_pte_read(pte_t pte)
{
	return (pte_val(pte) & _SUN4C_PAGE_READ);
}

static int sun4c_pmd_bad(pmd_t pmd)
{
	return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) ||
		(!virt_addr_valid(pmd_val(pmd))));
}

static int sun4c_pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & PGD_PRESENT) != 0);
}

#if 0 /* if PMD takes one word */
static void sun4c_pmd_clear(pmd_t *pmdp)	{ *pmdp = __pmd(0); }
#else /* if pmd_t is a longish aggregate */
static void sun4c_pmd_clear(pmd_t *pmdp) {
	memset((void *)pmdp, 0, sizeof(pmd_t));
}
#endif

static int sun4c_pgd_none(pgd_t pgd)		{ return 0; }
static int sun4c_pgd_bad(pgd_t pgd)		{ return 0; }
static int sun4c_pgd_present(pgd_t pgd)	        { return 1; }
static void sun4c_pgd_clear(pgd_t * pgdp)	{ }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static pte_t sun4c_pte_mkwrite(pte_t pte)
{
	pte = __pte(pte_val(pte) | _SUN4C_PAGE_WRITE);
	if (pte_val(pte) & _SUN4C_PAGE_MODIFIED)
		pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_WRITE);
	return pte;
}

static pte_t sun4c_pte_mkdirty(pte_t pte)
{
	pte = __pte(pte_val(pte) | _SUN4C_PAGE_MODIFIED);
	if (pte_val(pte) & _SUN4C_PAGE_WRITE)
		pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_WRITE);
	return pte;
}

static pte_t sun4c_pte_mkyoung(pte_t pte)
{
	pte = __pte(pte_val(pte) | _SUN4C_PAGE_ACCESSED);
	if (pte_val(pte) & _SUN4C_PAGE_READ)
		pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_READ);
	return pte;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static pte_t sun4c_mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte(page_to_pfn(page) | pgprot_val(pgprot));
}

static pte_t sun4c_mk_pte_phys(unsigned long phys_page, pgprot_t pgprot)
{
	return __pte((phys_page >> PAGE_SHIFT) | pgprot_val(pgprot));
}

static pte_t sun4c_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
}
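
Note on the PGD_* definitions near the end of this page: they emulate an i386-style page-directory entry in software. sun4c_pmd_set() stores the pte-table pointer with the PGD_TABLE bits ORed into its low bits, and sun4c_pmd_bad() accepts an entry only if exactly those bits are set. The standalone sketch below illustrates just that encoding; toy_pmd_make(), toy_pmd_bad(), TOY_PAGE_MASK and the fake table address are invented for the illustration and are not part of sun4c.c.

#include <stdio.h>

/* Same software-emulated i386-style directory bits as in the listing above. */
#define PGD_PRESENT  0x001
#define PGD_RW       0x002
#define PGD_USER     0x004
#define PGD_ACCESSED 0x020
#define PGD_DIRTY    0x040
#define PGD_TABLE    (PGD_PRESENT | PGD_RW | PGD_USER | PGD_ACCESSED | PGD_DIRTY)

#define TOY_PAGE_MASK (~0xfffUL)		/* assume 4 KiB pages for this toy */

/* Mirrors sun4c_pmd_set(): the pmd slot holds the (page-aligned) pte-table
 * address with the PGD_TABLE bits ORed into its low bits. */
static unsigned long toy_pmd_make(unsigned long pte_table_addr)
{
	return PGD_TABLE | pte_table_addr;
}

/* Mirrors the first half of sun4c_pmd_bad(): an entry is bad unless its
 * low bits are exactly PGD_TABLE (the virt_addr_valid() check is omitted). */
static int toy_pmd_bad(unsigned long pmd)
{
	return (pmd & ~TOY_PAGE_MASK) != PGD_TABLE;
}

int main(void)
{
	unsigned long fake_table = 0x3f000UL;	/* pretend page-aligned pte-table address */
	unsigned long pmd = toy_pmd_make(fake_table);

	printf("PGD_TABLE  = 0x%03lx\n", (unsigned long) PGD_TABLE);	/* 0x067 */
	printf("pmd entry  = 0x%05lx  bad=%d\n", pmd, toy_pmd_bad(pmd));
	printf("table addr = 0x%05lx\n", pmd & TOY_PAGE_MASK);
	return 0;
}

Built with any C compiler, this prints PGD_TABLE = 0x067, shows the combined entry 0x3f067 reported as not bad, and recovers the table address by masking the low bits. The real code additionally requires virt_addr_valid() on the stored pointer and goes through the kernel's pmd_t/pmd_val() wrappers rather than a bare unsigned long.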
