
📄 traps.c

📁 linux-2.6.15.6
💻 C
📖 Page 1 of 5
}

static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}

static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}

/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		unsigned long tag = (addr >> 14);
		unsigned long line;

		__asm__ __volatile__("membar	#Sync\n\t"
				     "stxa	%0, [%1] %2\n\t"
				     "membar	#Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (addr),
				       "i" (ASI_DCACHE_UTAG));
		for (line = addr; line < addr + dcache_line_size; line += 8)
			__asm__ __volatile__("membar	#Sync\n\t"
					     "stxa	%%g0, [%0] %1\n\t"
					     "membar	#Sync"
					     : /* no outputs */
					     : "r" (line),
					       "i" (ASI_DCACHE_DATA));
	}
}

/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
#define MT0	137
#define MT1	138
#define MT2	139
#define NONE	254
#define MTC0	140
#define MTC1	141
#define MTC2	142
#define MTC3	143
#define C0	128
#define C1	129
#define C2	130
#define C3	131
#define C4	132
#define C5	133
#define C6	134
#define C7	135
#define C8	136
#define M2	144
#define M3	145
#define M4	146
#define M	147
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
static unsigned char cheetah_mtag_syntab[] = {
       NONE, MTC0,
       MTC1, NONE,
       MTC2, NONE,
       NONE, MT0,
       MTC3, NONE,
       NONE, MT1,
       NONE, MT2,
       NONE, NONE
};

/* Return the highest priority error condition mentioned. */
static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
{
	unsigned long tmp = 0;
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
			return tmp;
	}
	return tmp;
}

static const char *cheetah_get_string(unsigned long bit)
{
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((bit & cheetah_error_table[i].mask) != 0UL)
			return cheetah_error_table[i].name;
	}
	return "???";
}

extern int chmc_getunumber(int, unsigned long, char *, int);

static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->tstate);
	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU  | \
			 CHAFSR_UCC | CHAFSR_UCU  | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)

	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
	       "u[%016lx] l[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));
		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}

static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}

void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}

/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa	[%%g0] %2, %0\n\t"
			     "andn	%0, %1, %%g1\n\t"
			     "stxa	%%g1, [%%g0] %2\n\t"
			     "membar	#Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "casxa	[%2] %3, %%g0, %%g0\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
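
Editor's note: cheetah_get_hipri() and cheetah_get_string() above walk a priority-ordered cheetah_error_table (defined elsewhere in traps.c, not on this page) and return the first AFSR bit or name that matches. The following is a minimal, standalone user-space sketch of that scan pattern only; the masks and names in demo_error_table are invented placeholders, not the real Cheetah error definitions.

/* Standalone sketch of the priority-scan pattern used by
 * cheetah_get_hipri()/cheetah_get_string().  Table contents are
 * illustrative placeholders only.
 */
#include <stdio.h>

struct demo_err_entry {
	unsigned long mask;	/* AFSR bit(s) for this error class */
	const char *name;	/* human-readable label */
};

/* Ordered highest priority first; a zero mask terminates the table. */
static const struct demo_err_entry demo_error_table[] = {
	{ 0x8UL, "demo-uncorrectable" },
	{ 0x4UL, "demo-bus-error" },
	{ 0x2UL, "demo-correctable" },
	{ 0x0UL, NULL },
};

/* Return the highest-priority error bit set in afsr (0 if none). */
static unsigned long demo_get_hipri(unsigned long afsr)
{
	int i;

	for (i = 0; demo_error_table[i].mask; i++) {
		unsigned long tmp = afsr & demo_error_table[i].mask;

		if (tmp != 0UL)
			return tmp;
	}
	return 0UL;
}

static const char *demo_get_string(unsigned long bit)
{
	int i;

	for (i = 0; demo_error_table[i].mask; i++) {
		if (bit & demo_error_table[i].mask)
			return demo_error_table[i].name;
	}
	return "???";
}

int main(void)
{
	unsigned long afsr = 0x6UL;	/* bus-error and correctable both set */
	unsigned long hi = demo_get_hipri(afsr);

	/* Prints the bus-error entry: it appears earlier in the table. */
	printf("highest priority: %#lx (%s)\n", hi, demo_get_string(hi));
	return 0;
}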

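Editor's note: cheetah_fix_ce() above computes two alias addresses inside a dedicated flush region, spaced half of ecache_flush_size apart, so that touching both (plus the casxa on the original address) displaces the offending line and pushes fresh ECC back to memory. The sketch below reproduces only that address arithmetic; the base/size/physaddr values are made up for illustration (in the kernel they are set up elsewhere).

/* Sketch of the alias-address arithmetic in cheetah_fix_ce().
 * All numeric values are placeholders for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long ecache_flush_physbase = 0x40000000UL;	/* placeholder base */
	unsigned long ecache_flush_size     = 0x800000UL;	/* placeholder size */
	unsigned long physaddr              = 0x12345678UL;	/* example faulting address */
	unsigned long alias1, alias2;

	/* Align to an 8-byte boundary, as the kernel code does. */
	physaddr &= ~(8UL - 1UL);

	/* Two addresses in the flush region, half the flush size apart,
	 * intended to land on the same E-cache index as physaddr so that
	 * loading them displaces the line in question.
	 */
	alias1 = ecache_flush_physbase +
		 (physaddr & ((ecache_flush_size >> 1) - 1));
	alias2 = alias1 + (ecache_flush_size >> 1);

	printf("alias1 = %#lx\nalias2 = %#lx\n", alias1, alias2);
	return 0;
}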