⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 mca.c

📁 h内核
💻 C
📖 第 1 页 / 共 3 页
字号:
/* * File:	mca.c * Purpose:	Generic MCA handling layer * * Updated for latest kernel * Copyright (C) 2003 Hewlett-Packard Co *	David Mosberger-Tang <davidm@hpl.hp.com> * * Copyright (C) 2002 Dell Inc. * Copyright (C) Matt Domsch (Matt_Domsch@dell.com) * * Copyright (C) 2002 Intel * Copyright (C) Jenna Hall (jenna.s.hall@intel.com) * * Copyright (C) 2001 Intel * Copyright (C) Fred Lewis (frederick.v.lewis@intel.com) * * Copyright (C) 2000 Intel * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com) * * Copyright (C) 1999, 2004 Silicon Graphics, Inc. * Copyright (C) Vijay Chander(vijay@engr.sgi.com) * * 03/04/15 D. Mosberger Added INIT backtrace support. * 02/03/25 M. Domsch	GUID cleanups * * 02/01/04 J. Hall	Aligned MCA stack to 16 bytes, added platform vs. CPU *			error flag, set SAL default return values, changed *			error record structure to linked list, added init call *			to sal_get_state_info_size(). * * 01/01/03 F. Lewis    Added setup of CMCI and CPEI IRQs, logging of corrected *                      platform errors, completed code for logging of *                      corrected & uncorrected machine check errors, and *                      updated for conformance with Nov. 2000 revision of the *                      SAL 3.0 spec. * 00/03/29 C. Fleckenstein  Fixed PAL/SAL update issues, began MCA bug fixes, logging issues, *                           added min save state dump, added INIT handler. * * 2003-12-08 Keith Owens <kaos@sgi.com> *            smp_call_function() must not be called from interrupt context (can *            deadlock on tasklist_lock).  Use keventd to call smp_call_function(). * * 2004-02-01 Keith Owens <kaos@sgi.com> *            Avoid deadlock when using printk() for MCA and INIT records. *            Delete all record printing code, moved to salinfo_decode in user space. *            Mark variables and functions static where possible. *            Delete dead variables and functions. 
*            Reorder to remove the need for forward declarations and to consolidate
 *            related code.
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kallsyms.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/sal.h>
#include <asm/mca.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>

/* Debug printing is compiled out entirely unless IA64_MCA_DEBUG_INFO is defined. */
#if defined(IA64_MCA_DEBUG_INFO)
# define IA64_MCA_DEBUG(fmt...)	printk(fmt)
#else
# define IA64_MCA_DEBUG(fmt...)
#endif

/* Used by mca_asm.S */
ia64_mca_sal_to_os_state_t	ia64_sal_to_os_handoff_state;
ia64_mca_os_to_sal_state_t	ia64_os_to_sal_handoff_state;
u64				ia64_mca_serialize;

DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	    /* PTE to map PAL code */
DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */

unsigned long __per_cpu_mca[NR_CPUS];

/* In mca_asm.S */
extern void			ia64_monarch_init_handler (void);
extern void			ia64_slave_init_handler (void);

static ia64_mc_info_t		ia64_mc_info;

#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
#define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
#define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
#define CPE_HISTORY_LENGTH    5
#define CMC_HISTORY_LENGTH    5

static struct timer_list cpe_poll_timer;
static struct timer_list cmc_poll_timer;

/*
 * This variable tells whether we are currently in polling mode.
 * Start with this in the wrong state so we won't play w/ timers
 * before the system is ready.
 */
static int cmc_polling_enabled = 1;

/*
 * Clearing this variable prevents CPE polling from getting activated
 * in mca_late_init.  Use it if your system doesn't provide a CPEI,
 * but encounters problems retrieving CPE logs.  This should only be
 * necessary for debugging.
 */
static int cpe_poll_enabled = 1;

extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);

static int mca_init;

/*
 * IA64_MCA log support
 *
 * One double-buffered log per SAL record type; the pair lets a nested
 * MCA be captured while the previous record is still being consumed.
 */
#define IA64_MAX_LOGS		2	/* Double-buffering for nested MCAs */
#define IA64_MAX_LOG_TYPES      4   /* MCA, INIT, CMC, CPE */

typedef struct ia64_state_log_s
{
	spinlock_t	isl_lock;	/* guards index/count updates */
	int		isl_index;	/* which half of isl_log is "next" */
	unsigned long	isl_count;	/* number of records captured so far */
	ia64_err_rec_t  *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
} ia64_state_log_t;

static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];

/* Allocate both halves of the double buffer for one record type (boot time only). */
#define IA64_LOG_ALLOCATE(it, size) \
	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size); \
	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size);}
#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
/*
 * NOTE(review): IA64_LOG_LOCK/IA64_LOG_UNLOCK expand to
 * spin_lock_irqsave/spin_unlock_irqrestore using a flags variable
 * literally named 's' that must be declared in the caller's scope.
 */
#define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
#define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
#define IA64_LOG_CURR_INDEX(it)    1 - ia64_state_log[it].isl_index
/* Flip the double buffer and bump the record count. */
#define IA64_LOG_INDEX_INC(it) \
    {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
    ia64_state_log[it].isl_count++;}
#define IA64_LOG_INDEX_DEC(it) \
    ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
#define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
#define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
#define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count

/*
 * ia64_log_init
 *	Reset the OS ia64 log buffer
 * Inputs   :   info_type
(SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 * Outputs	:	None
 */
static void
ia64_log_init(int sal_info_type)
{
	u64	max_size = 0;

	IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
	IA64_LOG_LOCK_INIT(sal_info_type);

	// SAL will tell us the maximum size of any error record of this type
	max_size = ia64_sal_get_state_info_size(sal_info_type);
	if (!max_size)
		/* alloc_bootmem() doesn't like zero-sized allocations! */
		return;

	// set up OS data structures to hold error info
	IA64_LOG_ALLOCATE(sal_info_type, max_size);
	memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
	memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
}

/*
 * ia64_log_get
 *
 *	Get the current MCA log from SAL and copy it into the OS log buffer.
 *
 *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 *              irq_safe    whether you can use printk at this point
 *  Outputs :   size        (total record length)
 *              *buffer     (ptr to error record)
 *
 */
static u64
ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
{
	sal_log_record_header_t     *log_buffer;
	u64                         total_len = 0;
	int                         s;	/* irq flags, used implicitly by IA64_LOG_LOCK/UNLOCK */

	IA64_LOG_LOCK(sal_info_type);

	/* Get the process state information */
	log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);

	total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);

	if (total_len) {
		/* A record was captured: flip to the other buffer so the next
		 * capture does not overwrite the one we are returning. */
		IA64_LOG_INDEX_INC(sal_info_type);
		IA64_LOG_UNLOCK(sal_info_type);
		if (irq_safe) {
			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
				       "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
		}
		*buffer = (u8 *) log_buffer;
		return total_len;
	} else {
		IA64_LOG_UNLOCK(sal_info_type);
		return 0;
	}
}

/*
 *  ia64_mca_log_sal_error_record
 *
 *  This function retrieves a specified error record type from SAL
 *  and wakes up any processes waiting for error records.
*
 *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE/INIT)
 */
static void
ia64_mca_log_sal_error_record(int sal_info_type)
{
	u8 *buffer;
	sal_log_record_header_t *rh;
	u64 size;
	/* MCA and INIT records are pulled in non-printk-safe contexts;
	 * CMC and CPE arrive via ordinary interrupts and may use printk. */
	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT;
#ifdef IA64_MCA_DEBUG_INFO
	static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
#endif

	size = ia64_log_get(sal_info_type, &buffer, irq_safe);
	if (!size)
		return;

	salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);

	if (irq_safe)
		IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
			smp_processor_id(),
			sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");

	/* Clear logs from corrected errors in case there's no user-level logger */
	rh = (sal_log_record_header_t *)buffer;
	if (rh->severity == sal_log_severity_corrected)
		ia64_sal_clear_state_info(sal_info_type);
}

/*
 * platform dependent error handling
 */
#ifndef PLATFORM_MCA_HANDLERS

#ifdef CONFIG_ACPI

static int cpe_vector = -1;

/*
 * ia64_mca_cpe_int_handler
 *
 *	Interrupt handler for corrected platform errors (CPE).  Logs the
 *	CPE record from SAL.  If interrupts arrive in a burst (at least
 *	CPE_HISTORY_LENGTH within one second), the handler disables the
 *	CPE interrupt and falls back to timer-based polling so an error
 *	storm cannot monopolise the CPU.
 */
static irqreturn_t
ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
{
	static unsigned long	cpe_history[CPE_HISTORY_LENGTH];	/* jiffies of recent CPEIs */
	static int		index;
	static DEFINE_SPINLOCK(cpe_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __FUNCTION__, cpe_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	/* Get the CPE error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);

	spin_lock(&cpe_history_lock);
	if (!cpe_poll_enabled && cpe_vector >= 0) {

		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		/* Count how many of the recorded CPEIs fell within the last second. */
		for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
			if (now - cpe_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
		if (count >= CPE_HISTORY_LENGTH) {
			/* Storm detected: switch to polling mode. */
			cpe_poll_enabled = 1;
			spin_unlock(&cpe_history_lock);
			disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));

			/*
			 *
Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");

			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);

			/* lock already released, get out now */
			return IRQ_HANDLED;
		} else {
			/* Record this CPEI's timestamp in the circular history. */
			cpe_history[index++] = now;
			if (index == CPE_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cpe_history_lock);
	return IRQ_HANDLED;
}

#endif /* CONFIG_ACPI */

/*
 * show_min_state
 *
 *	Dump the PAL min-state area (the registers the processor saved at
 *	MCA/INIT time) to the console: NaT bits, predicates, branch
 *	registers, control registers, and the static/banked general
 *	registers.
 */
static void
show_min_state (pal_min_state_area_t *minstate)
{
	/* Recover the full IP by adding the slot number (psr.ri) to the bundle address. */
	u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri;
	u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri;

	printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits);
	printk("pr\t\t%016lx\n", minstate->pmsa_pr);
	printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0);
	printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc);
	printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip);
	printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr);
	printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs);
	printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip);
	printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr);
	printk("xfs\t\t%016lx\n", minstate->pmsa_xfs);
	printk("b1\t\t%016lx ", minstate->pmsa_br1);
	print_symbol("%s\n", minstate->pmsa_br1);

	printk("\nstatic registers r0-r15:\n");
	printk(" r0- 3 %016lx %016lx %016lx %016lx\n",
	       0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]);
	printk(" r4- 7 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_gr[3], minstate->pmsa_gr[4],
	       minstate->pmsa_gr[5], minstate->pmsa_gr[6]);
	printk(" r8-11 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_gr[7], minstate->pmsa_gr[8],
	       minstate->pmsa_gr[9], minstate->pmsa_gr[10]);
	printk("r12-15 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_gr[11], minstate->pmsa_gr[12],
minstate->pmsa_gr[13], minstate->pmsa_gr[14]);

	printk("\nbank 0:\n");
	printk("r16-19 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1],
	       minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]);
	printk("r20-23 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5],
	       minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]);
	printk("r24-27 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9],
	       minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]);
	printk("r28-31 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13],
	       minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]);

	printk("\nbank 1:\n");
	printk("r16-19 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1],
	       minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]);
	printk("r20-23 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5],
	       minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]);
	printk("r24-27 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9],
	       minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]);
	printk("r28-31 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13],
	       minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]);
}

/*
 * fetch_min_state
 *
 *	Copy the register values the processor saved in the PAL min-state
 *	area into the pt_regs and switch_stack structures (including the
 *	reconstructed NaT bits), so the generic unwinder can walk the
 *	interrupted context.
 */
static void
fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw)
{
	u64 *dst_banked, *src_banked, bit, shift, nat_bits;
	int i;

	/*
	 * First, update the pt-regs and switch-stack structures with the contents stored
	 * in the min-state area:
	 */
	if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) {
		/* Interruption collection was off: the x* registers hold the live state. */
		pt->cr_ipsr = ms->pmsa_xpsr;
		pt->cr_iip = ms->pmsa_xip;
		pt->cr_ifs = ms->pmsa_xfs;
	} else {
		pt->cr_ipsr = ms->pmsa_ipsr;
		pt->cr_iip = ms->pmsa_iip;
		pt->cr_ifs =
ms->pmsa_ifs;	}	pt->ar_rsc = ms->pmsa_rsc;	pt->pr = ms->pmsa_pr;	pt->r1 = ms->pmsa_gr[0];	pt->r2 = ms->pmsa_gr[1];	pt->r3 = ms->pmsa_gr[2];	sw->r4 = ms->pmsa_gr[3];	sw->r5 = ms->pmsa_gr[4];	sw->r6 = ms->pmsa_gr[5];	sw->r7 = ms->pmsa_gr[6];	pt->r8 = ms->pmsa_gr[7];	pt->r9 = ms->pmsa_gr[8];	pt->r10 = ms->pmsa_gr[9];	pt->r11 = ms->pmsa_gr[10];	pt->r12 = ms->pmsa_gr[11];	pt->r13 = ms->pmsa_gr[12];	pt->r14 = ms->pmsa_gr[13];	pt->r15 = ms->pmsa_gr[14];	dst_banked = &pt->r16;		/* r16-r31 are contiguous in struct pt_regs */	src_banked = ms->pmsa_bank1_gr;	for (i = 0; i < 16; ++i)		dst_banked[i] = src_banked[i];	pt->b0 = ms->pmsa_br0;	sw->b1 = ms->pmsa_br1;	/* construct the NaT bits for the pt-regs structure: */#	define PUT_NAT_BIT(dst, addr)					\	do {								\		bit = nat_bits & 1; nat_bits >>= 1;			\		shift = ((unsigned long) addr >> 3) & 0x3f;		\		dst = ((dst) & ~(1UL << shift)) | (bit << shift);	\	} while (0)	/* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */	shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f;	nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift));	PUT_NAT_BIT(sw->caller_unat, &pt->r1);	PUT_NAT_BIT(sw->caller_unat, &pt->r2);	PUT_NAT_BIT(sw->caller_unat, &pt->r3);	PUT_NAT_BIT(sw->ar_unat, &sw->r4);	PUT_NAT_BIT(sw->ar_unat, &sw->r5);	PUT_NAT_BIT(sw->ar_unat, &sw->r6);	PUT_NAT_BIT(sw->ar_unat, &sw->r7);	PUT_NAT_BIT(sw->caller_unat, &pt->r8);	PUT_NAT_BIT(sw->caller_unat, &pt->r9);	PUT_NAT_BIT(sw->caller_unat, &pt->r10);	PUT_NAT_BIT(sw->caller_unat, &pt->r11);	PUT_NAT_BIT(sw->caller_unat, &pt->r12);	PUT_NAT_BIT(sw->caller_unat, &pt->r13);	PUT_NAT_BIT(sw->caller_unat, &pt->r14);	PUT_NAT_BIT(sw->caller_unat, &pt->r15);	nat_bits >>= 16;	/* skip over bank0 NaT bits */	PUT_NAT_BIT(sw->caller_unat, &pt->r16);	PUT_NAT_BIT(sw->caller_unat, &pt->r17);	PUT_NAT_BIT(sw->caller_unat, &pt->r18);	PUT_NAT_BIT(sw->caller_unat, &pt->r19);	PUT_NAT_BIT(sw->caller_unat, &pt->r20);	PUT_NAT_BIT(sw->caller_unat, &pt->r21);	
PUT_NAT_BIT(sw->caller_unat, &pt->r22);	PUT_NAT_BIT(sw->caller_unat, &pt->r23);
	PUT_NAT_BIT(sw->caller_unat, &pt->r24);	PUT_NAT_BIT(sw->caller_unat, &pt->r25);
	PUT_NAT_BIT(sw->caller_unat, &pt->r26);	PUT_NAT_BIT(sw->caller_unat, &pt->r27);
	PUT_NAT_BIT(sw->caller_unat, &pt->r28);	PUT_NAT_BIT(sw->caller_unat, &pt->r29);
	PUT_NAT_BIT(sw->caller_unat, &pt->r30);	PUT_NAT_BIT(sw->caller_unat, &pt->r31);
}

/*
 * init_handler_platform
 *
 *	Platform handler for an INIT event: dumps the min-state registers
 *	and backtraces the interrupted task via the unwinder.
 */
static void
init_handler_platform (pal_min_state_area_t *ms,
		       struct pt_regs *pt, struct switch_stack *sw)
{
	struct unw_frame_info info;

	/* if a kernel debugger is available call it here else just dump the registers */

	/*
	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000, INIT can be
	 * generated via the BMC's command-line interface, but since the console is on the
	 * same serial line, the user will need some time to switch out of the BMC before
	 * the dump begins.
	 */
	printk("Delaying for 5 seconds...\n");
	udelay(5*1000000);
	show_min_state(ms);

	printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
	fetch_min_state(ms, pt, sw);
	unw_init_from_interruption(&info, current, pt, sw);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -