
📄 srmmu.c

📁 Linux 2.4.20 kernel source; can be patched with RTLinux 3.2 to build a real-time Linux system; for kernel compilation
💻 C
📖 Page 1 of 5
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if(ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if(ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}

static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
    struct task_struct *tsk, int cpu)
{
	if(mm->context == NO_CONTEXT) {
		spin_lock(&srmmu_context_spinlock);
		alloc_context(old_mm, mm);
		spin_unlock(&srmmu_context_spinlock);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}
	if (is_hypersparc)
		hyper_flush_whole_icache();
	srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
	ptep = srmmu_pte_offset(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/*
	 * I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	if(rdonly)
		tmp |= SRMMU_PRIV_RDONLY;
	else
		tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	srmmu_set_pte(ptep, __pte(tmp));
	flush_tlb_all();
}

void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
	ptep = srmmu_pte_offset(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	srmmu_pte_clear(ptep);
	flush_tlb_all();
}

/*
 * On the SRMMU we do not have the problems with limited tlb entries
 * for mapping kernel pages, so we just take things from the free page
 * pool.  As a side effect we are putting a little too much pressure
 * on the gfp() subsystem.  This setup also makes the logic of the
 * iommu mapping code a lot easier as we can transparently handle
 * mappings on the kernel stack without any special code as we did
 * need on the sun4c.
 */
struct task_struct *srmmu_alloc_task_struct(void)
{
	return (struct task_struct *) __get_free_pages(GFP_KERNEL, 1);
}

static void srmmu_free_task_struct(struct task_struct *tsk)
{
	free_pages((unsigned long)tsk, 1);
}

static void srmmu_get_task_struct(struct task_struct *tsk)
{
	atomic_inc(&virt_to_page(tsk)->count);
}

/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);
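/*
 * Illustrative sketch (not part of the original file): how srmmu_mapioaddr()
 * above composes an I/O PTE.  The sun4m physical address space is 36 bits;
 * the low 32 come from physaddr and the top 4 from bus_type.  The constant
 * values cited in the comments are the asm/pgtsrmmu.h definitions as I
 * recall them -- treat them as assumptions.
 */
#if 0
static unsigned long sketch_io_pte(unsigned long physaddr, int bus_type, int rdonly)
{
	unsigned long tmp;

	physaddr &= PAGE_MASK;                   /* page-align the 32 low bits */
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;    /* PPN field; ET_PTE == 0x2 */
	tmp |= ((unsigned long)bus_type << 28);  /* PA[35:32] on the I/O lines */
	tmp |= rdonly ? SRMMU_PRIV_RDONLY        /* acc=6: supervisor r/x */
		      : SRMMU_PRIV;              /* acc=7: supervisor r/w/x */
	/* e.g. physaddr 0x10002000, bus_type 0xf, rdonly 0 -> 0xf100021e */
	return tmp;
}
#endif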
/*
 * Workaround, until we find what's going on with Swift. When low on memory,
 * it sometimes loops in fault/handle_mm_fault incl. flush_tlb_page to find
 * out it is already in page tables/ fault again on the same instruction.
 * I really don't understand it, have checked it and contexts
 * are right, flush_tlb_all is done as well, and it faults again...
 * Strange. -jj
 *
 * The following code is a deadwood that may be necessary when
 * we start to make precise page flushes again. --zaitcev
 */
static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
#if 0
	static unsigned long last;
	unsigned int val;
	/* unsigned int n; */

	if (address == last) {
		val = srmmu_hwprobe(address);
		if (val != 0 && pte_val(pte) != val) {
			printk("swift_update_mmu_cache: "
			    "addr %lx put %08x probed %08x from %p\n",
			    address, pte_val(pte), val,
			    __builtin_return_address(0));
			srmmu_flush_whole_tlb();
		}
	}
	last = address;
#endif
}

/* swift.S */
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct mm_struct *mm,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

#if 0  /* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			/* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif

/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seems to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */
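/*
 * Illustrative sketch (not part of the original file): every Cypress cache
 * flush below follows the same borrow-the-context pattern, since the flush
 * ASIs operate on whatever context the MMU currently holds.
 * do_asi_flush_loop() is a hypothetical stand-in for the unrolled
 * inline-assembly loops in the real routines.
 */
#if 0
static void cypress_flush_pattern(struct mm_struct *mm)
{
	unsigned long flags;
	int octx;

	flush_user_windows();           /* spill register windows first */
	__save_and_cli(flags);          /* no interrupts while ctx is borrowed */
	octx = srmmu_get_context();     /* remember the live context */
	srmmu_set_context(mm->context); /* make the target mm current */
	do_asi_flush_loop();            /* hypothetical: the sta loops below */
	srmmu_set_context(octx);        /* restore the previous context */
	__restore_flags(flags);
}
#endif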
/* Cypress flushes. */
static void cypress_flush_cache_all(void)
{
	volatile unsigned long cypress_sucks;
	unsigned long faddr, tagval;

	flush_user_windows();
	for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
		__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
				     "=r" (tagval) :
				     "r" (faddr), "r" (0x40000),
				     "i" (ASI_M_DATAC_TAG));

		/* If modified and valid, kick it. */
		if((tagval & 0x60) == 0x60)
			cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
	}
}

static void cypress_flush_cache_mm(struct mm_struct *mm)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	__save_and_cli(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	faddr = (0x10000 - 0x100);
	goto inside;
	do {
		faddr -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (faddr), "i" (ASI_M_FLUSH_CTX),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(faddr);
	srmmu_set_context(octx);
	__restore_flags(flags);
	FLUSH_END
}

static void cypress_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	__save_and_cli(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	start &= SRMMU_PMD_MASK;
	while(start < end) {
		faddr = (start + (0x10000 - 0x100));
		goto inside;
		do {
			faddr -= 0x100;
		inside:
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
					     "sta %%g0, [%0 + %2] %1\n\t"
					     "sta %%g0, [%0 + %3] %1\n\t"
					     "sta %%g0, [%0 + %4] %1\n\t"
					     "sta %%g0, [%0 + %5] %1\n\t"
					     "sta %%g0, [%0 + %6] %1\n\t"
					     "sta %%g0, [%0 + %7] %1\n\t"
					     "sta %%g0, [%0 + %8] %1\n\t" : :
					     "r" (faddr),
					     "i" (ASI_M_FLUSH_SEG),
					     "r" (a), "r" (b), "r" (c), "r" (d),
					     "r" (e), "r" (f), "r" (g));
		} while (faddr != start);
		start += SRMMU_PMD_SIZE;
	}
	srmmu_set_context(octx);
	__restore_flags(flags);
	FLUSH_END
}

static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags, line;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	__save_and_cli(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
	srmmu_set_context(octx);
	__restore_flags(flags);
	FLUSH_END
}
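/*
 * Illustrative sketch (not part of the original file): the "goto inside"
 * loops above walk the flush area in 0x100-byte strides, and each inline-asm
 * block issues eight sta's to cover that stride in 0x20-byte (32-byte cache
 * line) steps.  Ignoring the unrolling, the page-level flush is equivalent
 * to the loop below; flush_line_asi() is a hypothetical stand-in for one
 * "sta %%g0, [line] ASI_M_FLUSH_PAGE".
 */
#if 0
static void sketch_flush_page(unsigned long page)
{
	unsigned long line;

	page &= PAGE_MASK;
	for (line = page; line < page + PAGE_SIZE; line += 0x20)
		flush_line_asi(line, ASI_M_FLUSH_PAGE);
}
#endif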
/* Cypress is copy-back, at least that is how we configure it. */
static void cypress_flush_page_to_ram(unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long line;

	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
}

/* Cypress is also IO cache coherent. */
static void cypress_flush_page_for_dma(unsigned long page)
{
}

/* Cypress has unified L2 VIPT, from which both instructions and data
 * are stored.  It does not have an onboard icache of any sort, therefore
 * no flush is necessary.
 */
static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void cypress_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void cypress_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda	[%0] %3, %%g5\n\t"
	"sta	%2, [%0] %3\n\t"
	"sta	%%g0, [%1] %4\n\t"
	"sta	%%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}

static void cypress_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	unsigned long size;

	FLUSH_BEGIN(mm)
	start &= SRMMU_PGDIR_MASK;
	size = SRMMU_PGDIR_ALIGN(end) - start;
	__asm__ __volatile__(
		"lda	[%0] %5, %%g5\n\t"
		"sta	%1, [%0] %5\n"
		"1:\n\t"
		"subcc	%3, %4, %3\n\t"
		"bne	1b\n\t"
		" sta	%%g0, [%2 + %3] %6\n\t"
		"sta	%%g5, [%0] %5\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
	  "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
	  "i" (ASI_M_FLUSH_PROBE)
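/*
 * Illustrative sketch (not part of the original file; the listing above is
 * cut off at the page boundary): the TLB flushes above use the SRMMU
 * "flush probe" ASI, where bits [11:8] of the store address select the
 * match level -- to my recollection 0 = page, 1 = segment, 2 = region,
 * 3 = context, 4 = entire.  That would explain the 0x300 store in
 * cypress_flush_tlb_mm and the "start | 0x200" region steps in
 * cypress_flush_tlb_range; treat the encoding as an assumption.
 */
#if 0
static void sketch_flush_tlb_region_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	/* one region-level (level 2) probe flush per pgdir-sized region */
	for (addr = start & SRMMU_PGDIR_MASK; addr < SRMMU_PGDIR_ALIGN(end);
	     addr += SRMMU_PGDIR_SIZE)
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				     "r" (addr | 0x200), "i" (ASI_M_FLUSH_PROBE));
}
#endif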
