📄 mca.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 4
	copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat);
	copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat);
	copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat);
	copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat);
	copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat);
	copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat);
	copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat);
	copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat);
	copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat);
	copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat);
	copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat);
	copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat);
	copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat);
	copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat);

	/* Next stack a struct switch_stack.  mca_asm.S built a partial
	 * switch_stack, copy it and fill in the blanks using pt_regs and
	 * minstate.
	 *
	 * In the synthesized switch_stack, b0 points to ia64_leave_kernel,
	 * ar.pfs is set to 0.
	 *
	 * unwind.c::unw_unwind() does special processing for interrupt frames.
	 * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate
	 * is clear then unw_unwind() does _not_ adjust bsp over pt_regs.  Not
	 * that this is documented, of course.  Set PRED_NON_SYSCALL in the
	 * switch_stack on the original stack so it will unwind correctly when
	 * unwind.c reads pt_regs.
	 *
	 * thread.ksp is updated to point to the synthesized switch_stack.
	 */
	p -= sizeof(struct switch_stack);
	old_sw = (struct switch_stack *)p;
	memcpy(old_sw, sw, sizeof(*sw));
	old_sw->caller_unat = old_unat;
	old_sw->ar_fpsr = old_regs->ar_fpsr;
	copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat);
	copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat);
	copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat);
	copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat);
	old_sw->b0 = (u64)ia64_leave_kernel;
	old_sw->b1 = ms->pmsa_br1;
	old_sw->ar_pfs = 0;
	old_sw->ar_unat = old_unat;
	old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
	previous_current->thread.ksp = (u64)p - 16;

	/* Finally copy the original stack's registers back to its RBS.
	 * Registers from ar.bspstore through ar.bsp at the time of the event
	 * are in the current RBS, copy them back to the original stack.  The
	 * copy must be done register by register because the original bspstore
	 * and the current one have different alignments, so the saved RNAT
	 * data occurs at different places.
	 *
	 * mca_asm does cover, so the old_bsp already includes all registers at
	 * the time of MCA/INIT.  It also does flushrs, so all registers before
	 * this function have been written to backing store on the MCA/INIT
	 * stack.
	 */
	new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore));
	old_rnat = regs->ar_rnat;
	while (slots--) {
		if (ia64_rse_is_rnat_slot(new_bspstore)) {
			new_rnat = ia64_get_rnat(new_bspstore++);
		}
		if (ia64_rse_is_rnat_slot(old_bspstore)) {
			*old_bspstore++ = old_rnat;
			old_rnat = 0;
		}
		nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL;
		old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore));
		old_rnat |= (nat << ia64_rse_slot_num(old_bspstore));
		*old_bspstore++ = *new_bspstore++;
	}
	old_sw->ar_bspstore = (unsigned long)old_bspstore;
	old_sw->ar_rnat = old_rnat;

	sos->prev_task = previous_current;
	return previous_current;

no_mod:
	printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
			smp_processor_id(), type, msg);
	return previous_current;
}

/* The monarch/slave interaction is based on monarch_cpu and requires that all
 * slaves have entered rendezvous before the monarch leaves.  If any cpu has
 * not entered rendezvous yet then wait a bit.  The assumption is that any
 * slave that has not rendezvoused after a reasonable time is never going to do
 * so.  In this context, slave includes cpus that respond to the MCA rendezvous
 * interrupt, as well as cpus that receive the INIT slave event.
 */

static void
ia64_wait_for_slaves(int monarch, const char *type)
{
	int c, i, wait;

	/*
	 * wait 5 seconds total for slaves (arbitrary)
	 */
	for (i = 0; i < 5000; i++) {
		wait = 0;
		for_each_online_cpu(c) {
			if (c == monarch)
				continue;
			if (ia64_mc_info.imi_rendez_checkin[c]
					== IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
				udelay(1000);		/* short wait */
				wait = 1;
				break;
			}
		}
		if (!wait)
			goto all_in;
	}

	/*
	 * Maybe slave(s) dead. Print buffered messages immediately.
	 */
	ia64_mlogbuf_finish(0);
	mprintk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
	for_each_online_cpu(c) {
		if (c == monarch)
			continue;
		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
			mprintk(" %d", c);
	}
	mprintk("\n");
	return;

all_in:
	mprintk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
	return;
}
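The rendezvous protocol described above (slaves check in and then spin until the monarch releases them, while the monarch waits a bounded time for stragglers) can be illustrated outside the kernel. The following is a minimal userspace sketch and is not part of mca.c: it assumes POSIX threads and C11 atomics, and the names NR_WORKERS, checked_in, release and do_monarch_work() are invented for the example. Build with something like cc -pthread; thread 0 plays the role that ia64_wait_for_slaves() plays for the real monarch.

/* Editor's note: illustrative userspace analogue only, NOT part of mca.c. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NR_WORKERS	4

static atomic_int checked_in[NR_WORKERS];	/* per-"cpu" rendezvous flag */
static atomic_int release_slaves;		/* monarch sets this to free the slaves */

static void do_monarch_work(void)
{
	puts("monarch: all slaves rendezvoused, handling event");
}

static void *worker(void *arg)
{
	int id = (int)(long)arg;

	if (id == 0) {				/* the first thread plays monarch */
		int c, all_in, i;

		for (i = 0; i < 5000; i++) {	/* bounded wait, like ia64_wait_for_slaves() */
			all_in = 1;
			for (c = 1; c < NR_WORKERS; c++)
				if (!atomic_load(&checked_in[c]))
					all_in = 0;
			if (all_in)
				break;
			usleep(1000);		/* short wait */
		}
		do_monarch_work();
		atomic_store(&release_slaves, 1);	/* wake the slaves */
	} else {
		atomic_store(&checked_in[id], 1);	/* check in */
		while (!atomic_load(&release_slaves))
			;			/* spin until the monarch is done */
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NR_WORKERS];
	int i;

	for (i = 0; i < NR_WORKERS; i++)
		pthread_create(&t[i], NULL, worker, (void *)(long)i);
	for (i = 0; i < NR_WORKERS; i++)
		pthread_join(t[i], NULL);
	return 0;
}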
/*
 * ia64_mca_handler
 *
 *	This is uncorrectable machine check handler called from OS_MCA
 *	dispatch code which is in turn called from SAL_CHECK().
 *	This is the place where the core of OS MCA handling is done.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format. This handler code is supposed to be run only on the
 *	monarch processor. Once the monarch is done with MCA handling
 *	further MCA logging is enabled by clearing logs.
 *	Monarch also has the duty of sending wakeup-IPIs to pull the
 *	slave processors out of rendezvous spinloop.
 *
 *	If multiple processors call into OS_MCA, the first will become
 *	the monarch.  Subsequent cpus will be recorded in the mca_cpu
 *	bitmask.  After the first monarch has processed its MCA, it
 *	will wake up the next cpu in the mca_cpu bitmask and then go
 *	into the rendezvous loop.  When all processors have serviced
 *	their MCA, the last monarch frees up the rest of the processors.
 */
void
ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		 struct ia64_sal_os_state *sos)
{
	int recover, cpu = smp_processor_id();
	struct task_struct *previous_current;
	struct ia64_mca_notify_die nd =
		{ .sos = sos, .monarch_cpu = &monarch_cpu };
	static atomic_t mca_count;
	static cpumask_t mca_cpu;

	if (atomic_add_return(1, &mca_count) == 1) {
		monarch_cpu = cpu;
		sos->monarch = 1;
	} else {
		cpu_set(cpu, mca_cpu);
		sos->monarch = 0;
	}

	mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
		"monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");

	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
	if (sos->monarch) {
		ia64_wait_for_slaves(cpu, "MCA");

		/* Wakeup all the processors which are spinning in the
		 * rendezvous loop.  They will leave SAL, then spin in the OS
		 * with interrupts disabled until this monarch cpu leaves the
		 * MCA handler.  That gets control back to the OS so we can
		 * backtrace the other cpus, backtrace when spinning in SAL
		 * does not work.
		 */
		ia64_mca_wakeup_all();
		if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
				== NOTIFY_STOP)
			ia64_mca_spin(__FUNCTION__);
	} else {
		while (cpu_isset(cpu, mca_cpu))
			cpu_relax();	/* spin until monarch wakes us */
	}

	/* Get the MCA error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);

	/* MCA error recovery */
	recover = (ia64_mca_ucmc_extension
		&& ia64_mca_ucmc_extension(
			IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
			sos));

	if (recover) {
		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
		rh->severity = sal_log_severity_corrected;
		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
		sos->os_status = IA64_MCA_CORRECTED;
	} else {
		/* Dump buffered message to console */
		ia64_mlogbuf_finish(1);
#ifdef CONFIG_KEXEC
		atomic_set(&kdump_in_progress, 1);
		monarch_cpu = -1;
#endif
	}
	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

	if (atomic_dec_return(&mca_count) > 0) {
		int i;

		/* wake up the next monarch cpu,
		 * and put this cpu in the rendez loop.
		 */
		for_each_online_cpu(i) {
			if (cpu_isset(i, mca_cpu)) {
				monarch_cpu = i;
				cpu_clear(i, mca_cpu);	/* wake next cpu */
				while (monarch_cpu != -1)
					cpu_relax();	/* spin until last cpu leaves */
				set_curr_task(cpu, previous_current);
				ia64_mc_info.imi_rendez_checkin[cpu]
						= IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
				return;
			}
		}
	}
	set_curr_task(cpu, previous_current);
	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
	monarch_cpu = -1;	/* This frees the slaves and previous monarchs */
}

static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
/*
 * ia64_mca_cmc_int_handler
 *
 *  This is corrected machine check interrupt handler.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.
 *
 * Inputs
 *      interrupt number
 *      client data arg ptr
 *
 * Outputs
 *	None
 */
static irqreturn_t
ia64_mca_cmc_int_handler(int cmc_irq, void *arg)
{
	static unsigned long	cmc_history[CMC_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cmc_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __FUNCTION__, cmc_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	spin_lock(&cmc_history_lock);
	if (!cmc_polling_enabled) {
		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
			if (now - cmc_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
		if (count >= CMC_HISTORY_LENGTH) {

			cmc_polling_enabled = 1;
			spin_unlock(&cmc_history_lock);
			/* If we're being hit with CMC interrupts, we won't
			 * ever execute the schedule_work() below.  Need to
			 * disable CMC interrupts on this processor now.
			 */
			ia64_mca_cmc_vector_disable(NULL);
			schedule_work(&cmc_disable_work);

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);

			/* lock already released, get out now */
			goto out;
		} else {
			cmc_history[index++] = now;
			if (index == CMC_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cmc_history_lock);
out:
	/* Get the CMC error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);

	return IRQ_HANDLED;
}

/*
 *  ia64_mca_cmc_int_caller
 *
 * 	Triggered by sw interrupt from CMC polling routine.  Calls
 * 	real interrupt handler and either triggers a sw interrupt
 * 	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 * Outputs
 * 	handled
 */
static irqreturn_t
ia64_mca_cmc_int_caller(int cmc_irq, void *arg)
{
	static int start_count = -1;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);

	ia64_mca_cmc_int_handler(cmc_irq, arg);

	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

	if (cpuid < NR_CPUS) {
		platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/* If no log record, switch out of polling mode */
		if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {

			printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
			schedule_work(&cmc_enable_work);
			cmc_polling_enabled = 0;

		} else {

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
		}

		start_count = -1;
	}

	return IRQ_HANDLED;
}

/*
 *  ia64_mca_cmc_poll
 *
 *	Poll for Corrected Machine Checks (CMCs)
 *
 * Inputs   :   dummy(unused)
 * Outputs  :   None
 *
 */
static void
ia64_mca_cmc_poll (unsigned long dummy)
{
	/* Trigger a CMC interrupt cascade  */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
}
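The storm-detection idea in ia64_mca_cmc_int_handler() (remember when the last few corrected-error interrupts arrived and fall back to polling once that many land inside a one second window) is easy to model on its own. The sketch below is illustrative only and not part of mca.c; HISTORY_LEN stands in for CMC_HISTORY_LENGTH with an arbitrary value, and now_ms() plus the simulated burst in main() are invented for the example.

/* Editor's note: illustrative sketch only, NOT part of mca.c. */
#include <stdio.h>
#include <time.h>

#define HISTORY_LEN	15		/* plays the role of CMC_HISTORY_LENGTH; value arbitrary */

static long history[HISTORY_LEN];	/* timestamps (ms) of recent events */
static int  idx;
static int  polling;			/* 1 once we give up on interrupt mode */

static long now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Called once per "interrupt"; returns 1 when a storm is detected. */
static int record_event(void)
{
	long now = now_ms();
	int i, count = 1;		/* the current event counts too */

	if (polling)
		return 1;
	for (i = 0; i < HISTORY_LEN; i++)
		if (now - history[i] <= 1000)	/* within the last second */
			count++;
	if (count >= HISTORY_LEN) {
		polling = 1;		/* too many events: switch to polling */
		return 1;
	}
	history[idx++] = now;		/* ring buffer of timestamps */
	if (idx == HISTORY_LEN)
		idx = 0;
	return 0;
}

int main(void)
{
	int i;

	for (i = 0; i < 40; i++)	/* simulate a burst of back-to-back events */
		if (record_event()) {
			printf("storm detected after %d events\n", i + 1);
			break;
		}
	return 0;
}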
/*
 *  ia64_mca_cpe_int_caller
 *
 * 	Triggered by sw interrupt from CPE polling routine.  Calls
 * 	real interrupt handler and either triggers a sw interrupt
 * 	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 * Outputs
 * 	handled
 */
#ifdef CONFIG_ACPI

static irqreturn_t
ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
{
	static int start_count = -1;
	static int poll_time = MIN_CPE_POLL_INTERVAL;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);

	ia64_mca_cpe_int_handler(cpe_irq, arg);

	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

	if (cpuid < NR_CPUS) {
		platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/*
		 * If a log was recorded, increase our polling frequency,
		 * otherwise, backoff or return to interrupt mode.
		 */
		if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
			poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
		} else if (cpe_vector < 0) {
			poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
		} else {
			poll_time = MIN_CPE_POLL_INTERVAL;

			printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
			enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
			cpe_poll_enabled = 0;
		}

		if (cpe_poll_enabled)
			mod_timer(&cpe_poll_timer, jiffies + poll_time);
		start_count = -1;
	}

	return IRQ_HANDLED;
}

/*
 *  ia64_mca_cpe_poll
 *
 *	Poll for Corrected Platform Errors (CPEs), trigger interrupt
 *	on first cpu, from there it will trickle through all the cpus.
 *
 * Inputs   :   dummy(unused)
 * Outputs  :   None
 *
 */
static void
ia64_mca_cpe_poll (unsigned long dummy)
{
	/* Trigger a CPE interrupt cascade  */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
}

#endif /* CONFIG_ACPI */

static int
default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data)
{
	int c;
	struct task_struct *g, *t;

	if (val != DIE_INIT_MONARCH_PROCESS)
		return NOTIFY_DONE;
#ifdef CONFIG_KEXEC
	if (atomic_read(&kdump_in_progress))
		return NOTIFY_DONE;
#endif

	/*
	 * FIXME: mlogbuf will brim over with INIT stack dumps.
	 * To enable show_stack from INIT, we use oops_in_progress which should
	 * be used in real oops. This would cause something wrong after INIT.
	 */
	BREAK_LOGLEVEL(console_loglevel);
	ia64_mlogbuf_dump_from_init();

	printk(KERN_ERR "Processes interrupted by INIT -");
	for_each_online_cpu(c) {
		struct ia64_sal_os_state *s;
		t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
		s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
		g = s->prev_task;
		if (g) {
			if (g->pid)
				printk(" %d", g->pid);
			else
				printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
		}
	}
	printk("\n\n");
	if (read_trylock(&tasklist_lock)) {
		do_each_thread (g, t) {
			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
			show_stack(t, NULL);
		} while_each_thread (g, t);
		read_unlock(&tasklist_lock);
	}
	/* FIXME: This will not restore zapped printk locks. */
	RESTORE_LOGLEVEL(console_loglevel);
	return NOTIFY_DONE;
}
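For reference, the adaptive interval used by ia64_mca_cpe_int_caller() above (halve the poll period while new CPE records keep appearing, double it while things stay quiet, clamped between a minimum and a maximum) can be sketched in isolation. This is illustrative only and not part of mca.c; MIN_POLL, MAX_POLL and next_interval() are invented names, and the second values are arbitrary stand-ins for MIN_CPE_POLL_INTERVAL and MAX_CPE_POLL_INTERVAL.

/* Editor's note: illustrative sketch only, NOT part of mca.c. */
#include <stdio.h>

#define MIN_POLL	60		/* seconds; stand-in for MIN_CPE_POLL_INTERVAL */
#define MAX_POLL	900		/* seconds; stand-in for MAX_CPE_POLL_INTERVAL */

static int next_interval(int poll_time, int found_new_record)
{
	if (found_new_record) {
		/* errors are still arriving: poll more often */
		poll_time /= 2;
		if (poll_time < MIN_POLL)
			poll_time = MIN_POLL;
	} else {
		/* quiet: back off toward the maximum interval */
		poll_time *= 2;
		if (poll_time > MAX_POLL)
			poll_time = MAX_POLL;
	}
	return poll_time;
}

int main(void)
{
	int t = MIN_POLL, i;

	for (i = 0; i < 6; i++) {	/* six quiet polling rounds */
		t = next_interval(t, 0);
		printf("round %d: next poll in %d s\n", i + 1, t);
	}
	return 0;
}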
