
📄 smtc.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 3
			/*
			 * Clear ERL/EXL of VPEs other than 0
			 * and set restricted interrupt enable/mask.
			 */
			write_vpe_c0_status((read_vpe_c0_status()
				& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
				| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
				| ST0_IE));
			/*
			 * Set config to be the same as vpe0,
			 * particularly the kseg0 coherency alg.
			 */
			write_vpe_c0_config(read_c0_config());
			/* Clear any pending timer interrupt */
			write_vpe_c0_compare(0);
			/* Propagate Config7 */
			write_vpe_c0_config7(read_c0_config7());
			write_vpe_c0_count(read_c0_count());
		}
		/* Enable multi-threading within the VPE */
		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
		/* Enable the VPE */
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
	}

	/*
	 * Pull any physically present but unused TCs out of circulation.
	 */
	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
		cpu_clear(tc, phys_cpu_present_map);
		cpu_clear(tc, cpu_present_map);
		tc++;
	}

	/* Release config state */
	write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_VPC);

	printk("\n");

	/* Set up coprocessor affinity CPU mask(s) */
#ifdef CONFIG_MIPS_MT_FPAFF
	for (tc = 0; tc < ntc; tc++) {
		if (cpu_data[tc].options & MIPS_CPU_FPU)
			cpu_set(tc, mt_fpu_cpumask);
	}
#endif

	/* Set up IPI interrupts... */

	/* If we have multiple VPEs running, set up the cross-VPE interrupt */
	setup_cross_vpe_interrupts(nvpe);

	/* Set up queue of free IPI "messages". */
	nipi = NR_CPUS * IPIBUF_PER_CPU;
	if (ipibuffers > 0)
		nipi = ipibuffers;

	pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
	if (pipi == NULL)
		panic("kmalloc of IPI message buffers failed\n");
	else
		printk("IPI buffer pool of %d buffers\n", nipi);
	for (i = 0; i < nipi; i++) {
		smtc_ipi_nq(&freeIPIq, pipi);
		pipi++;
	}

	/* Arm multithreading and enable other VPEs - but all TCs are Halted */
	emt(EMT_ENABLE);
	evpe(EVPE_ENABLE);
	local_irq_restore(flags);
	/* Initialize SMTC /proc statistics/diagnostics */
	init_smtc_stats();
}

/*
 * Set up the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from,
 * __KSTK_TOS(idle) is apparently the stack pointer,
 * and (unsigned long)idle->thread_info is the gp.
 */
void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
{
	extern u32 kernelsp[NR_CPUS];
	long flags;
	int mtflags;

	LOCK_MT_PRA();
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		dvpe();
	}
	settc(cpu_data[cpu].tc_id);

	/* pc */
	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

	/* stack pointer */
	kernelsp[cpu] = __KSTK_TOS(idle);
	write_tc_gpr_sp(__KSTK_TOS(idle));

	/* global pointer */
	write_tc_gpr_gp((unsigned long)task_thread_info(idle));

	smtc_status |= SMTC_MTC_ACTIVE;
	write_tc_c0_tchalt(0);
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		evpe(EVPE_ENABLE);
	}
	UNLOCK_MT_PRA();
}

void smtc_init_secondary(void)
{
	/*
	 * Start timer on secondary VPEs if necessary.
	 * plat_timer_setup has already been invoked by init/main
	 * on the "boot" TC.  Like the per_cpu_trap_init() hack, this
	 * assumes that SMTC init code assigns TCs consecutively and
	 * in ascending order across available VPEs.
	 */
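	/*
	 * A TC with a nonzero index whose VPE differs from that of the
	 * previously numbered CPU must be the first TC bound to a
	 * secondary VPE, and hence the one that should program Compare
	 * to start that VPE's timer.
	 */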
	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
	    ((read_c0_tcbind() & TCBIND_CURVPE) !=
	     cpu_data[smp_processor_id() - 1].vpe_id)) {
		write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
	}

	local_irq_enable();
}

void smtc_smp_finish(void)
{
	printk("TC %d going on-line as CPU %d\n",
		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}

void smtc_cpus_done(void)
{
}

/*
 * Support for SMTC-optimized driver IRQ registration.
 */

/*
 * The SMTC kernel needs to manipulate low-level CPU interrupt masks
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in this table.
 */
int setup_irq_smtc(unsigned int irq, struct irqaction *new,
			unsigned long hwmask)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	unsigned int vpe = current_cpu_data.vpe_id;

	vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
#endif
	irq_hwmask[irq] = hwmask;

	return setup_irq(irq, new);
}

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * Support for IRQ affinity to TCs.
 */

void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
	/*
	 * If a "fast path" cache of quickly decodable affinity state
	 * is maintained, this is where it gets done, on a call up
	 * from the platform affinity code.
	 */
}

void smtc_forward_irq(unsigned int irq)
{
	int target;

	/*
	 * OK wise guy, now figure out how to get the IRQ
	 * to be serviced on an authorized "CPU".
	 *
	 * Ideally, to handle the situation where an IRQ has multiple
	 * eligible CPUs, we would maintain state per IRQ that would
	 * allow a fair distribution of service requests.  Since the
	 * expected use model is any-or-only-one, for simplicity
	 * and efficiency, we just pick the easiest one to find.
	 */
	target = first_cpu(irq_desc[irq].affinity);

	/*
	 * We depend on the platform code to have correctly processed
	 * IRQ affinity change requests to ensure that the IRQ affinity
	 * mask has been purged of bits corresponding to nonexistent and
	 * offline "CPUs", and to TCs bound to VPEs other than the VPE
	 * connected to the physical interrupt input for the interrupt
	 * in question.  Otherwise we have a nasty problem with interrupt
	 * mask management.  This is best handled in non-performance-critical
	 * platform IRQ affinity setting code, to minimize interrupt-time
	 * checks.
	 */

	/* If no one is eligible, service locally */
	if (target >= NR_CPUS) {
		do_IRQ_no_affinity(irq);
		return;
	}

	smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
}

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

/*
 * The IPI model for SMTC is tricky, because interrupts aren't TC-specific.
 * Within a VPE, one TC can interrupt another in several ways.
 * The easiest to get right would probably be to make all TCs except
 * the target IXMT and set a software interrupt, but an IXMT-based
 * scheme requires that a handler must run before a new IPI could
 * be sent, which would break the "broadcast" loops in MIPS MT.
 * A more gonzo approach within a VPE is to halt the TC, extract
 * its Restart, Status, and a couple of GPRs, and program the Restart
 * address to emulate an interrupt.
 *
 * Within a VPE, one can be confident that the target TC isn't in
 * a critical EXL state when halted, since the write to the Halt
 * register could not have issued on the writing thread if the
 * halting thread had EXL set. So k0 and k1 of the target TC
 * can be used by the injection code.  Across VPEs, one can't
 * be certain that the target TC isn't in a critical exception
 * state. So we use a two-step process: send a software
 * interrupt to the target VPE, which either handles the event
 * itself (if it was the target) or injects the event within
 * the VPE.
 */
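/* Debug helper: dump the head, tail, and depth of each CPU's IPI queue. */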
static void smtc_ipi_qdump(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
			i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
			IPIQ[i].depth);
	}
}

/*
 * The standard atomic.h primitives don't quite do what we want
 * here: we need an atomic add-and-return-previous-value (which
 * could be done with atomic_add_return and a decrement) and an
 * atomic set/zero-and-return-previous-value (which can't really
 * be done with the atomic.h primitives). And since this is
 * MIPS MT, we can assume that we have LL/SC.
 */
static inline int atomic_postincrement(atomic_t *v)
{
	unsigned long result;
	unsigned long temp;

	__asm__ __volatile__(
	"1:	ll	%0, %2					\n"
	"	addu	%1, %0, 1				\n"
	"	sc	%1, %2					\n"
	"	beqz	%1, 1b					\n"
	__WEAK_LLSC_MB
	: "=&r" (result), "=&r" (temp), "=m" (v->counter)
	: "m" (v->counter)
	: "memory");

	return result;
}

void smtc_send_ipi(int cpu, int type, unsigned int action)
{
	int tcstatus;
	struct smtc_ipi *pipi;
	long flags;
	int mtflags;

	if (cpu == smp_processor_id()) {
		printk("Cannot Send IPI to self!\n");
		return;
	}
	/* Set up a descriptor, to be delivered either promptly or queued */
	pipi = smtc_ipi_dq(&freeIPIq);
	if (pipi == NULL) {
		bust_spinlocks(1);
		mips_mt_regdump(dvpe());
		panic("IPI Msg. Buffers Depleted\n");
	}
	pipi->type = type;
	pipi->arg = (void *)action;
	pipi->dest = cpu;
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		if (type == SMTC_CLOCK_TICK)
			atomic_inc(&ipi_timer_latch[cpu]);
		/* If not on same VPE, enqueue and send cross-VPE interrupt */
		smtc_ipi_nq(&IPIQ[cpu], pipi);
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
		UNLOCK_CORE_PRA();
	} else {
		/*
		 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
		 * since ASID shootdown on the other VPE may
		 * collide with this operation.
		 */
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		/* Halt the targeted TC */
		write_tc_c0_tchalt(TCHALT_H);
		mips_ihb();

		/*
		 * Inspect TCStatus - if IXMT is set, we have to queue
		 * a message. Otherwise, we set up the "interrupt"
		 * of the other TC.
		 */
		tcstatus = read_tc_c0_tcstatus();

		if ((tcstatus & TCSTATUS_IXMT) != 0) {
			/*
			 * Spin-waiting here can deadlock,
			 * so we queue the message for the target TC.
			 */
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
			/* Try to reduce redundant timer interrupt messages */
			if (type == SMTC_CLOCK_TICK) {
				if (atomic_postincrement(&ipi_timer_latch[cpu]) != 0) {
					smtc_ipi_nq(&freeIPIq, pipi);
					return;
				}
			}
			smtc_ipi_nq(&IPIQ[cpu], pipi);
		} else {
			if (type == SMTC_CLOCK_TICK)
				atomic_inc(&ipi_timer_latch[cpu]);
			post_direct_ipi(cpu, pipi);
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
		}
	}
}

/*
 * Send an IPI message to a halted TC, TargTC/TargVPE already having been set.
 */
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
	struct pt_regs *kstack;
	unsigned long tcstatus;
	unsigned long tcrestart;
	extern u32 kernelsp[NR_CPUS];
	extern void __smtc_ipi_vector(void);
//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);
	/* Extract Status, EPC from halted TC */
	tcstatus = read_tc_c0_tcstatus();
	tcrestart = read_tc_c0_tcrestart();
	/* If TCRestart indicates a WAIT instruction, advance the PC */
	if ((tcrestart & 0x80000000)
	    && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
		tcrestart += 4;
	}
	/*
	 * Save on the TC's future kernel stack.
	 *
	 * The CU bit of Status is the indicator that the TC was
	 * already running on a kernel stack...
	 */
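	/*
	 * With CU0 set, the halted TC was executing in kernel mode, so
	 * its current SP already points into a kernel stack; otherwise
	 * the kernel stack pointer pre-loaded into kernelsp[] for the
	 * target CPU is used.
	 */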
	if (tcstatus & ST0_CU0) {
		/* Note that this "- 1" is pointer arithmetic */
		kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
	} else {
		kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
	}

	kstack->cp0_epc = (long)tcrestart;
	/* Save TCStatus */
	kstack->cp0_tcstatus = tcstatus;
	/* Pass token of operation to be performed in kernel stack pad area */
	kstack->pad0[4] = (unsigned long)pipi;
	/* Pass address of function to be called likewise */
	kstack->pad0[5] = (unsigned long)&ipi_decode;
	/* Set interrupt exempt and kernel mode */
	tcstatus |= TCSTATUS_IXMT;
	tcstatus &= ~TCSTATUS_TKSU;
	write_tc_c0_tcstatus(tcstatus);
	ehb();
	/* Set TC Restart address to be SMTC IPI vector */
	write_tc_c0_tcrestart(__smtc_ipi_vector);
}

static void ipi_resched_interrupt(void)
{
	/* Return from interrupt should be enough to cause scheduler check */
}

static void ipi_call_interrupt(void)
{
	/* Invoke generic function invocation code in smp.c */
	smp_call_function_interrupt();
}

DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);

void ipi_decode(struct smtc_ipi *pipi)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	void *arg_copy = pipi->arg;
	int type_copy = pipi->type;
	int ticks;

	smtc_ipi_nq(&freeIPIq, pipi);
	switch (type_copy) {
	case SMTC_CLOCK_TICK:
		irq_enter();
		kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
		cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
		ticks = atomic_read(&ipi_timer_latch[cpu]);
		atomic_sub(ticks, &ipi_timer_latch[cpu]);
		while (ticks) {
			cd->event_handler(cd);
			ticks--;
		}
		irq_exit();
		break;

	case LINUX_SMP_IPI:
		switch ((int)arg_copy) {
		case SMP_RESCHEDULE_YOURSELF:
			ipi_resched_interrupt();
			break;
		case SMP_CALL_FUNCTION:
			ipi_call_interrupt();
			break;
		default:
			printk("Impossible SMTC IPI Argument 0x%x\n",
				(int)arg_copy);
			break;
		}
		break;
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
	case IRQ_AFFINITY_IPI:
		/*
		 * Accept a "forwarded" interrupt that was initially
		 * taken by a TC that doesn't have affinity for the IRQ.
		 */
		do_IRQ_no_affinity((int)arg_copy);
		break;
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
	default:
		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
		break;
	}
}

void deferred_smtc_ipi(void)
{
	struct smtc_ipi *pipi;
	unsigned long flags;
