
📄 smtc.c

📁 LINUX 2.6.17.4 source code
💻 C
📖 Page 1 of 3
			cpu++;
		}
		printk(" %d", tc);
		tc++;
	}
	if (slop) {
		if (tc != 0) {
			smtc_tc_setup(vpe,tc, cpu);
			cpu++;
		}
		printk(" %d", tc);
		tc++;
		slop--;
	}
	if (vpe != 0) {
		/*
		 * Clear any stale software interrupts from VPE's Cause
		 */
		write_vpe_c0_cause(0);

		/*
		 * Clear ERL/EXL of VPEs other than 0
		 * and set restricted interrupt enable/mask.
		 */
		write_vpe_c0_status((read_vpe_c0_status()
			& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
			| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
			| ST0_IE));
		/*
		 * set config to be the same as vpe0,
		 * particularly kseg0 coherency alg
		 */
		write_vpe_c0_config(read_c0_config());
		/* Clear any pending timer interrupt */
		write_vpe_c0_compare(0);
		/* Propagate Config7 */
		write_vpe_c0_config7(read_c0_config7());
	}
	/* enable multi-threading within VPE */
	write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
	/* enable the VPE */
	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
	}

	/*
	 * Pull any physically present but unused TCs out of circulation.
	 */
	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
		cpu_clear(tc, phys_cpu_present_map);
		cpu_clear(tc, cpu_present_map);
		tc++;
	}

	/* release config state */
	write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_VPC);

	printk("\n");

	/* Set up coprocessor affinity CPU mask(s) */
	for (tc = 0; tc < ntc; tc++) {
		if (cpu_data[tc].options & MIPS_CPU_FPU)
			cpu_set(tc, mt_fpu_cpumask);
	}

	/* set up ipi interrupts... */

	/* If we have multiple VPEs running, set up the cross-VPE interrupt */
	if (nvpe > 1)
		setup_cross_vpe_interrupts();

	/* Set up queue of free IPI "messages". */
	nipi = NR_CPUS * IPIBUF_PER_CPU;
	if (ipibuffers > 0)
		nipi = ipibuffers;
	pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
	if (pipi == NULL)
		panic("kmalloc of IPI message buffers failed\n");
	else
		printk("IPI buffer pool of %d buffers\n", nipi);
	for (i = 0; i < nipi; i++) {
		smtc_ipi_nq(&freeIPIq, pipi);
		pipi++;
	}

	/* Arm multithreading and enable other VPEs - but all TCs are Halted */
	emt(EMT_ENABLE);
	evpe(EVPE_ENABLE);
	local_irq_restore(flags);
	/* Initialize SMTC /proc statistics/diagnostics */
	init_smtc_stats();
}
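/*
 * Editor's sketch (not part of smtc.c): the freeIPIq pool filled above is
 * a FIFO of IPI descriptors; smtc_send_ipi() dequeues one, fills it in,
 * and ipi_decode() eventually returns it to the pool.  Below is a minimal
 * userspace model of that queue discipline, assuming a forward-link field;
 * the *_model names are hypothetical, and the real primitives in
 * <asm/smtc_ipi.h> additionally serialize with a spinlock and disabled
 * interrupts.
 */
#include <stddef.h>

struct smtc_ipi_model {
	struct smtc_ipi_model *flink;	/* forward link to next descriptor */
	int type;
	void *arg;
	int dest;
};

struct ipi_q_model {
	struct smtc_ipi_model *head, *tail;
	int depth;
};

static void model_nq(struct ipi_q_model *q, struct smtc_ipi_model *p)
{
	/* append the descriptor at the tail */
	p->flink = NULL;
	if (q->head == NULL)
		q->head = q->tail = p;
	else {
		q->tail->flink = p;
		q->tail = p;
	}
	q->depth++;
}

static struct smtc_ipi_model *model_dq(struct ipi_q_model *q)
{
	/* detach and return the head descriptor; NULL when the pool is empty */
	struct smtc_ipi_model *p = q->head;

	if (p != NULL) {
		q->head = p->flink;
		if (q->head == NULL)
			q->tail = NULL;
		q->depth--;
	}
	return p;
}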
/*
 * Setup the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info is the gp
 */
void smtc_boot_secondary(int cpu, struct task_struct *idle)
{
	extern u32 kernelsp[NR_CPUS];
	long flags;
	int mtflags;

	LOCK_MT_PRA();
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		dvpe();
	}
	settc(cpu_data[cpu].tc_id);

	/* pc */
	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

	/* stack pointer */
	kernelsp[cpu] = __KSTK_TOS(idle);
	write_tc_gpr_sp(__KSTK_TOS(idle));

	/* global pointer */
	write_tc_gpr_gp((unsigned long)idle->thread_info);

	smtc_status |= SMTC_MTC_ACTIVE;
	write_tc_c0_tchalt(0);
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		evpe(EVPE_ENABLE);
	}
	UNLOCK_MT_PRA();
}

void smtc_init_secondary(void)
{
	/*
	 * Start timer on secondary VPEs if necessary.
	 * mips_timer_setup should already have been invoked by init/main
	 * on "boot" TC.  Like the per_cpu_trap_init() hack, this assumes that
	 * SMTC init code assigns TCs consecutively and in ascending order
	 * across available VPEs.
	 */
	if (((read_c0_tcbind() & TCBIND_CURTC) != 0)
	    && ((read_c0_tcbind() & TCBIND_CURVPE)
		!= cpu_data[smp_processor_id() - 1].vpe_id)) {
		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
	}

	local_irq_enable();
}

void smtc_smp_finish(void)
{
	printk("TC %d going on-line as CPU %d\n",
		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}

void smtc_cpus_done(void)
{
}

/*
 * Support for SMTC-optimized driver IRQ registration
 */

/*
 * The SMTC kernel needs to manipulate low-level CPU interrupt masks
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in this table.
 */
int setup_irq_smtc(unsigned int irq, struct irqaction *new,
			unsigned long hwmask)
{
	irq_hwmask[irq] = hwmask;

	return setup_irq(irq, new);
}

/*
 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
 * Within a VPE one TC can interrupt another by different approaches.
 * The easiest to get right would probably be to make all TCs except
 * the target IXMT and set a software interrupt, but an IXMT-based
 * scheme requires that a handler must run before a new IPI could
 * be sent, which would break the "broadcast" loops in MIPS MT.
 * A more gonzo approach within a VPE is to halt the TC, extract
 * its Restart, Status, and a couple of GPRs, and program the Restart
 * address to emulate an interrupt.
 *
 * Within a VPE, one can be confident that the target TC isn't in
 * a critical EXL state when halted, since the write to the Halt
 * register could not have issued on the writing thread if the
 * halting thread had EXL set. So k0 and k1 of the target TC
 * can be used by the injection code.  Across VPEs, one can't
 * be certain that the target TC isn't in a critical exception
 * state. So we try a two-step process of sending a software
 * interrupt to the target VPE, which either handles the event
 * itself (if it was the target) or injects the event within
 * the VPE.
 */

void smtc_ipi_qdump(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
			i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
			IPIQ[i].depth);
	}
}

/*
 * The standard atomic.h primitives don't quite do what we want
 * here: We need an atomic add-and-return-previous-value (which
 * could be done with atomic_add_return and a decrement) and an
 * atomic set/zero-and-return-previous-value (which can't really
 * be done with the atomic.h primitives). And since this is
 * MIPS MT, we can assume that we have LL/SC.
 */
static __inline__ int atomic_postincrement(unsigned int *pv)
{
	unsigned long result;
	unsigned long temp;

	__asm__ __volatile__(
	"1:	ll	%0, %2					\n"
	"	addu	%1, %0, 1				\n"
	"	sc	%1, %2					\n"
	"	beqz	%1, 1b					\n"
	"	sync						\n"
	: "=&r" (result), "=&r" (temp), "=m" (*pv)
	: "m" (*pv)
	: "memory");

	return result;
}

/* No longer used in IPI dispatch, but retained for future recycling */
static __inline__ int atomic_postclear(unsigned int *pv)
{
	unsigned long result;
	unsigned long temp;

	__asm__ __volatile__(
	"1:	ll	%0, %2					\n"
	"	or	%1, $0, $0				\n"
	"	sc	%1, %2					\n"
	"	beqz	%1, 1b					\n"
	"	sync						\n"
	: "=&r" (result), "=&r" (temp), "=m" (*pv)
	: "m" (*pv)
	: "memory");

	return result;
}
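/*
 * Editor's sketch (not part of smtc.c): what the two LL/SC routines above
 * compute, restated with C11 <stdatomic.h> for readability.  The *_model
 * names are hypothetical; the kernel code predates C11 and also orders
 * memory explicitly with "sync".
 */
#include <stdatomic.h>

static inline int postincrement_model(atomic_uint *pv)
{
	/* atomically add 1 and return the previous value */
	return atomic_fetch_add(pv, 1);
}

static inline int postclear_model(atomic_uint *pv)
{
	/* atomically store 0 and return the previous value */
	return atomic_exchange(pv, 0);
}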
void smtc_send_ipi(int cpu, int type, unsigned int action)
{
	int tcstatus;
	struct smtc_ipi *pipi;
	long flags;
	int mtflags;

	if (cpu == smp_processor_id()) {
		printk("Cannot Send IPI to self!\n");
		return;
	}
	/* Set up a descriptor, to be delivered either promptly or queued */
	pipi = smtc_ipi_dq(&freeIPIq);
	if (pipi == NULL) {
		bust_spinlocks(1);
		mips_mt_regdump(dvpe());
		panic("IPI Msg. Buffers Depleted\n");
	}
	pipi->type = type;
	pipi->arg = (void *)action;
	pipi->dest = cpu;
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		/* If not on same VPE, enqueue and send cross-VPE interrupt */
		smtc_ipi_nq(&IPIQ[cpu], pipi);
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
		UNLOCK_CORE_PRA();
	} else {
		/*
		 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
		 * since ASID shootdown on the other VPE may
		 * collide with this operation.
		 */
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		/* Halt the targeted TC */
		write_tc_c0_tchalt(TCHALT_H);
		mips_ihb();

		/*
		 * Inspect TCStatus - if IXMT is set, we have to queue
		 * a message. Otherwise, we set up the "interrupt"
		 * of the other TC.
		 */
		tcstatus = read_tc_c0_tcstatus();

		if ((tcstatus & TCSTATUS_IXMT) != 0) {
			/*
			 * Spin-waiting here can deadlock,
			 * so we queue the message for the target TC.
			 */
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
			/* Try to reduce redundant timer interrupt messages */
			if (type == SMTC_CLOCK_TICK) {
				if (atomic_postincrement(&ipi_timer_latch[cpu]) != 0) {
					smtc_ipi_nq(&freeIPIq, pipi);
					return;
				}
			}
			smtc_ipi_nq(&IPIQ[cpu], pipi);
		} else {
			post_direct_ipi(cpu, pipi);
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
		}
	}
}

/*
 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
 */
void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
	struct pt_regs *kstack;
	unsigned long tcstatus;
	unsigned long tcrestart;
	extern u32 kernelsp[NR_CPUS];
	extern void __smtc_ipi_vector(void);

	/* Extract Status, EPC from halted TC */
	tcstatus = read_tc_c0_tcstatus();
	tcrestart = read_tc_c0_tcrestart();
	/* If TCRestart indicates a WAIT instruction, advance the PC */
	if ((tcrestart & 0x80000000)
	    && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
		tcrestart += 4;
	}
	/*
	 * Save on TC's future kernel stack
	 *
	 * CU bit of Status is indicator that TC was
	 * already running on a kernel stack...
	 */
	if (tcstatus & ST0_CU0) {
		/* Note that this "- 1" is pointer arithmetic */
		kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
	} else {
		kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
	}

	kstack->cp0_epc = (long)tcrestart;
	/* Save TCStatus */
	kstack->cp0_tcstatus = tcstatus;
	/* Pass token of operation to be performed in kernel stack pad area */
	kstack->pad0[4] = (unsigned long)pipi;
	/* Pass address of function to be called likewise */
	kstack->pad0[5] = (unsigned long)&ipi_decode;
	/* Set interrupt exempt and kernel mode */
	tcstatus |= TCSTATUS_IXMT;
	tcstatus &= ~TCSTATUS_TKSU;
	write_tc_c0_tcstatus(tcstatus);
	ehb();
	/* Set TC Restart address to be SMTC IPI vector */
	write_tc_c0_tcrestart(__smtc_ipi_vector);
}
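/*
 * Editor's sketch (not part of smtc.c): the "- 1" in post_direct_ipi()
 * is plain pointer arithmetic.  Casting the stack pointer to
 * struct pt_regs * and subtracting 1 reserves sizeof(struct pt_regs)
 * bytes below it for the synthesized exception frame.  frame_model and
 * push_frame_model are hypothetical stand-ins for illustration only.
 */
struct frame_model {
	unsigned long regs[32];	/* stands in for the pt_regs layout */
};

static struct frame_model *push_frame_model(void *sp)
{
	/* equivalent to: (char *)sp - sizeof(struct frame_model) */
	return (struct frame_model *)sp - 1;
}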
void ipi_resched_interrupt(struct pt_regs *regs)
{
	/* Return from interrupt should be enough to cause scheduler check */
}

void ipi_call_interrupt(struct pt_regs *regs)
{
	/* Invoke generic function invocation code in smp.c */
	smp_call_function_interrupt();
}

void ipi_decode(struct pt_regs *regs, struct smtc_ipi *pipi)
{
	void *arg_copy = pipi->arg;
	int type_copy = pipi->type;
	int dest_copy = pipi->dest;

	smtc_ipi_nq(&freeIPIq, pipi);
	switch (type_copy) {
	case SMTC_CLOCK_TICK:
		/* Invoke Clock "Interrupt" */
		ipi_timer_latch[dest_copy] = 0;
#ifdef SMTC_IDLE_HOOK_DEBUG
		clock_hang_reported[dest_copy] = 0;
#endif /* SMTC_IDLE_HOOK_DEBUG */
		local_timer_interrupt(0, NULL, regs);
		break;
	case LINUX_SMP_IPI:
		switch ((int)arg_copy) {
		case SMP_RESCHEDULE_YOURSELF:
			ipi_resched_interrupt(regs);
			break;
		case SMP_CALL_FUNCTION:
			ipi_call_interrupt(regs);
			break;
		default:
			printk("Impossible SMTC IPI Argument 0x%x\n",
				(int)arg_copy);
			break;
		}
		break;
	default:
		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
		break;
	}
}

void deferred_smtc_ipi(struct pt_regs *regs)
{
	struct smtc_ipi *pipi;
	unsigned long flags;
/* DEBUG */
	int q = smp_processor_id();

	/*
	 * Test is not atomic, but much faster than a dequeue,
	 * and the vast majority of invocations will have a null queue.
	 */
	if (IPIQ[q].head != NULL) {
