c-r4k.c

Linux kernel source code (C)
Page 1 of 3
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
                                   int retry, int wait)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, retry, wait);
#endif
	func(info);
	preempt_enable();
}

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without boardcaches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static void __init r4k_blast_dcache_page_setup(void)
{
	unsigned long  dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __init r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (* r4k_blast_dcache)(void);

static void __init r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}
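/*
 * Note the dispatch pattern used throughout this file: each flush
 * operation is a function pointer that a *_setup() routine binds once at
 * init time to the variant matching the probed cache line size (or to
 * cache_noop when there is no cache), so the hot flush paths never
 * re-test the line size at run time.
 */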
/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
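/*
 * The even/odd chunk dance in the two tx49 routines above exists because
 * they execute from the very I-cache they are invalidating: the code
 * aligns itself to a 1KB chunk (32 lines * 32 bytes, via JUMP_TO_ALIGN)
 * and only blasts chunks of the opposite parity, so the indexed
 * invalidate never hits the lines currently being executed (the
 * TX49XX_ICACHE_INDEX_INV_WAR workaround).
 */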
static void (* r4k_blast_icache_page)(unsigned long addr);

static void __init r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}

static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void __init r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (* r4k_blast_icache)(void);

static void __init r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

static void __init r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void __init r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static void __init r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}
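/*
 * Unlike the primary-cache setup routines, the three S-cache setup
 * routines above key their no-op case off scache_size rather than the
 * probed line size: a line size alone says nothing when no second-level
 * cache is present at all.
 */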
static inline void local_r4k___flush_cache_all(void *args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	r4k_blast_scache();
	return;
#endif
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}

static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}

static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
	 * only flush the primary caches but R10000 and R12000 behave sane ...
	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
	 * caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If owns no valid ASID yet, cannot possibly have gotten
	 * this page into the cache.
