
c-sb1.c

Source code of the linux-2.4.29 operating system
C
Page 1 of 2
/*
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
 * Copyright (C) 2004  Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/config.h>
#include <linux/init.h>

#include <asm/mmu_context.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/uaccess.h>

extern void sb1_dma_init(void);

/* These are probed at ld_mmu time */
static unsigned long icache_size;
static unsigned long dcache_size;

static unsigned short icache_line_size;
static unsigned short dcache_line_size;

static unsigned int icache_index_mask;
static unsigned int dcache_index_mask;

static unsigned short icache_assoc;
static unsigned short dcache_assoc;

static unsigned short icache_sets;
static unsigned short dcache_sets;

static unsigned int icache_range_cutoff;
static unsigned int dcache_range_cutoff;

/*
 * The dcache is fully coherent to the system, with one
 * big caveat:  the instruction stream.  In other words,
 * if we miss in the icache, and have dirty data in the
 * L1 dcache, then we'll go out to memory (or the L2) and
 * get the not-as-recent data.
 *
 * So the only time we have to flush the dcache is when
 * we're flushing the icache.  Since the L2 is fully
 * coherent to everything, including I/O, we never have
 * to flush it.
 */

/*
 * Writeback and invalidate the entire dcache
 */
static inline void __sb1_writeback_inv_dcache_all(void)
{
	__asm__ __volatile__ (
		".set push                  \n"
		".set noreorder             \n"
		".set noat                  \n"
		".set mips4                 \n"
		"     move   $1, $0         \n" /* Start at index 0 */
		"1:   cache  %2, 0($1)      \n" /* Invalidate this index */
		"     cache  %2, (1<<13)($1)\n" /* Invalidate this index */
		"     cache  %2, (2<<13)($1)\n" /* Invalidate this index */
		"     cache  %2, (3<<13)($1)\n" /* Invalidate this index */
		"     addiu  %1, %1, -1     \n" /* Decrement loop count */
		"     bnez   %1, 1b         \n" /* loop test */
		"      addu  $1, $1, %0     \n" /* Next address */
		".set pop                   \n"
		:
		: "r" (dcache_line_size), "r" (dcache_sets),
		  "i" (Index_Writeback_Inv_D));
}
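
/*
 * [Editor's sketch -- not part of the original file.]  The index loop
 * above walks one way of the dcache in $1 and reaches the other three
 * ways through the (n<<13) offsets: with an 8KB way, way n begins at
 * byte offset n<<13.  A plain-C model of the address sequence the
 * assembly generates; cache_op() is a hypothetical stand-in for a
 * single MIPS "cache" instruction and is not a real helper here.
 */
#if 0
static void __sb1_writeback_inv_dcache_all_model(void)
{
	unsigned long index = 0;
	unsigned int set;

	for (set = 0; set < dcache_sets; set++) {
		/* one Index_Writeback_Inv_D op per way at this index */
		cache_op(Index_Writeback_Inv_D, index + (0 << 13));
		cache_op(Index_Writeback_Inv_D, index + (1 << 13));
		cache_op(Index_Writeback_Inv_D, index + (2 << 13));
		cache_op(Index_Writeback_Inv_D, index + (3 << 13));
		index += dcache_line_size;	/* advance to the next set */
	}
}
#endif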
/*
 * Writeback and invalidate a range of the dcache.  The addresses are
 * virtual, and since we're using index ops and bit 12 is part of both
 * the virtual frame and physical index, we have to clear both sets
 * (bit 12 set and cleared).
 */
static inline void __sb1_writeback_inv_dcache_range(unsigned long start,
	unsigned long end)
{
	__asm__ __volatile__ (
	"	.set	push		\n"
	"	.set	noreorder	\n"
	"	.set	noat		\n"
	"	.set	mips4		\n"
	"	and	$1, %0, %3	\n" /* mask non-index bits */
	"1:	cache	%4, (0<<13)($1)	\n" /* Index-WB-inval this address */
	"	cache	%4, (1<<13)($1)	\n" /* Index-WB-inval this address */
	"	cache	%4, (2<<13)($1)	\n" /* Index-WB-inval this address */
	"	cache	%4, (3<<13)($1)	\n" /* Index-WB-inval this address */
	"	xori	$1, $1, 1<<12	\n" /* flip bit 12 (va/pa alias) */
	"	cache	%4, (0<<13)($1)	\n" /* Index-WB-inval this address */
	"	cache	%4, (1<<13)($1)	\n" /* Index-WB-inval this address */
	"	cache	%4, (2<<13)($1)	\n" /* Index-WB-inval this address */
	"	cache	%4, (3<<13)($1)	\n" /* Index-WB-inval this address */
	"	addu	%0, %0, %2	\n" /* next line */
	"	bne	%0, %1, 1b	\n" /* loop test */
	"	 and	$1, %0, %3	\n" /* mask non-index bits */
	"	sync			\n"
	"	.set pop		\n"
	:
	: "r" (start & ~(dcache_line_size - 1)),
	  "r" ((end + dcache_line_size - 1) & ~(dcache_line_size - 1)),
	  "r" (dcache_line_size),
	  "r" (dcache_index_mask),
	  "i" (Index_Writeback_Inv_D));
}

/*
 * Writeback and invalidate a range of the dcache.  With physical
 * addresses, we don't have to worry about possible bit 12 aliasing.
 * XXXKW is it worth turning on KX and using hit ops with xkphys?
 */
static inline void __sb1_writeback_inv_dcache_phys_range(unsigned long start,
	unsigned long end)
{
	__asm__ __volatile__ (
		"	.set	push		\n"
		"	.set	noreorder	\n"
		"	.set	noat		\n"
		"	.set	mips4		\n"
		"	and	$1, %0, %3	\n" /* mask non-index bits */
		"1:	cache	%4, (0<<13)($1)	\n" /* Index-WB-inval this address */
		"	cache	%4, (1<<13)($1)	\n" /* Index-WB-inval this address */
		"	cache	%4, (2<<13)($1)	\n" /* Index-WB-inval this address */
		"	cache	%4, (3<<13)($1)	\n" /* Index-WB-inval this address */
		"	addu	%0, %0, %2	\n" /* next line */
		"	bne	%0, %1, 1b	\n" /* loop test */
		"	 and	$1, %0, %3	\n" /* mask non-index bits */
		"	sync			\n"
		"	.set pop		\n"
		:
		: "r" (start & ~(dcache_line_size - 1)),
		  "r" ((end + dcache_line_size - 1) & ~(dcache_line_size - 1)),
		  "r" (dcache_line_size),
		  "r" (dcache_index_mask),
		  "i" (Index_Writeback_Inv_D));
}

/*
 * Invalidate the entire icache
 */
static inline void __sb1_flush_icache_all(void)
{
	__asm__ __volatile__ (
		".set push                  \n"
		".set noreorder             \n"
		".set noat                  \n"
		".set mips4                 \n"
		"     move   $1, $0         \n" /* Start at index 0 */
		"1:   cache  %2, 0($1)      \n" /* Invalidate this index */
		"     cache  %2, (1<<13)($1)\n" /* Invalidate this index */
		"     cache  %2, (2<<13)($1)\n" /* Invalidate this index */
		"     cache  %2, (3<<13)($1)\n" /* Invalidate this index */
		"     addiu  %1, %1, -1     \n" /* Decrement loop count */
		"     bnez   %1, 1b         \n" /* loop test */
		"      addu  $1, $1, %0     \n" /* Next address */
		"     bnezl  $0, 2f         \n" /* Force mispredict */
		"      nop                  \n"
		"2:   sync                  \n"
		".set pop                   \n"
		:
		: "r" (icache_line_size), "r" (icache_sets),
		  "i" (Index_Invalidate_I));
}
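
/*
 * [Editor's sketch -- not part of the original file.]  The "xori $1,
 * $1, 1<<12" step in __sb1_writeback_inv_dcache_range() above exists
 * because, with 4KB pages, a virtual address and its physical address
 * agree only in bits 0..11, while the index into an 8KB way also uses
 * bit 12.  A dirty line for the page can therefore sit at either
 * "color" of bit 12, so the loop flushes both.  Modeled in C, again
 * with cache_op() as a hypothetical single "cache" instruction:
 */
#if 0
static void wb_inv_both_colors_model(unsigned long vaddr)
{
	unsigned long idx = vaddr & dcache_index_mask;
	int way;

	for (way = 0; way < 4; way++) {
		/* flush the line at both possible values of bit 12 */
		cache_op(Index_Writeback_Inv_D, idx + (way << 13));
		cache_op(Index_Writeback_Inv_D,
			 (idx ^ (1 << 12)) + (way << 13));
	}
}
#endif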
/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	int cpu = smp_processor_id();

#ifndef CONFIG_SMP
	if (!(vma->vm_flags & VM_EXEC))
		return;
#endif

	__sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);

	/*
	 * Bumping the ASID is probably cheaper than the flush ...
	 */
	if (cpu_context(cpu, vma->vm_mm) != 0)
		drop_mmu_context(vma->vm_mm, cpu);
}

#ifdef CONFIG_SMP
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
};

static void sb1_flush_cache_page_ipi(void *info)
{
	struct flush_cache_page_args *args = info;

	local_sb1_flush_cache_page(args->vma, args->addr);
}

/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_cache_page_args args;

	if (!(vma->vm_flags & VM_EXEC))
		return;

	addr &= PAGE_MASK;
	args.vma = vma;
	args.addr = addr;
	smp_call_function(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
	local_sb1_flush_cache_page(vma, addr);
}
#else
void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr)
	__attribute__((alias("local_sb1_flush_cache_page")));
#endif

/*
 * Invalidate a range of the icache.  The addresses are virtual, and
 * the cache is virtually indexed and tagged.  However, we don't
 * necessarily have the right ASID context, so use index ops instead
 * of hit ops.
 */
static inline void __sb1_flush_icache_range(unsigned long start,
	unsigned long end)
{
	__asm__ __volatile__ (
		".set push                  \n"
		".set noreorder             \n"
		".set noat                  \n"
		".set mips4                 \n"
		"     and    $1, %0, %3     \n" /* mask non-index bits */
		"1:   cache  %4, (0<<13)($1) \n" /* Index-inval this address */
		"     cache  %4, (1<<13)($1) \n" /* Index-inval this address */
		"     cache  %4, (2<<13)($1) \n" /* Index-inval this address */
		"     cache  %4, (3<<13)($1) \n" /* Index-inval this address */
		"     addu   %0, %0, %2     \n" /* next line */
		"     bne    %0, %1, 1b     \n" /* loop test */
		"      and   $1, %0, %3     \n" /* mask non-index bits */
		"     bnezl  $0, 2f         \n" /* Force mispredict */
		"      nop                  \n"
		"2:   sync                  \n"
		".set pop                   \n"
		:
		: "r" (start & ~(icache_line_size - 1)),
		  "r" ((end + icache_line_size - 1) & ~(icache_line_size - 1)),
		  "r" (icache_line_size),
		  "r" (icache_index_mask),
		  "i" (Index_Invalidate_I));
}

/*
 * Invalidate all caches on this CPU
 */
static void local_sb1___flush_cache_all(void)
{
	__sb1_writeback_inv_dcache_all();
	__sb1_flush_icache_all();
}

#ifdef CONFIG_SMP
void sb1___flush_cache_all_ipi(void *ignored)
	__attribute__((alias("local_sb1___flush_cache_all")));

static void sb1___flush_cache_all(void)
{
	smp_call_function(sb1___flush_cache_all_ipi, 0, 1, 1);
	local_sb1___flush_cache_all();
}
#else
void sb1___flush_cache_all(void)
	__attribute__((alias("local_sb1___flush_cache_all")));
#endif

/*
 * When flushing a range in the icache, we have to first writeback
 * the dcache for the same range, so new ifetches will see any
 * data that was dirty in the dcache.
 *
 * The start/end arguments are Kseg addresses (possibly mapped Kseg).
 */
static void local_sb1_flush_icache_range(unsigned long start,
	unsigned long end)
{
	/* Just wb-inv the whole dcache if the range is big enough */
	if ((end - start) > dcache_range_cutoff)
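
/*
 * [Editor's sketch -- not part of the original file.]  The SMP
 * pattern shared by sb1_flush_cache_page() and sb1___flush_cache_all()
 * above: a dirty line may live in another CPU's L1 dcache, so the
 * initiating CPU broadcasts an IPI that makes every other core run the
 * local_* flush, then performs the same flush itself.  In skeleton
 * form, with hypothetical local_flush()/local_flush_ipi() helpers:
 */
#if 0
static void flush_on_all_cpus(void *arg)
{
	/* 2.4-era signature: smp_call_function(func, info, retry, wait) */
	smp_call_function(local_flush_ipi, arg, 1, 1);	/* other CPUs */
	local_flush(arg);				/* this CPU */
}
#endif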
