📄 smp.c

📁 linux-2.6.15.6
💻 C
	if (!cpu_isset(cpu, flush_cpumask))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * its staying as a return
		 *
		 * BUG();
		 */

	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpu_clear(cpu, flush_cpumask);
	smp_mb__after_clear_bit();
out:
	put_cpu_no_resched();
}

static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
						unsigned long va)
{
	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));
	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpus_and(cpumask, cpumask, cpu_online_map);
	if (cpus_empty(cpumask))
		return;

	/*
	 * i'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
	atomic_set_mask(cpumask, &flush_cpumask);
#else
	{
		int k;
		unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
		unsigned long *cpu_mask = (unsigned long *)&cpumask;
		for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
			atomic_set_mask(cpu_mask[k], &flush_mask[k]);
	}
#endif
	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

	while (!cpus_empty(flush_cpumask))
		/* nothing. lockup detection does not belong here */
		mb();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
	preempt_enable();
}

void flush_tlb_mm (struct mm_struct * mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}

static struct call_data_struct * call_data;

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or are or have executed.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
{
	struct call_data_struct data;
	int cpus;

	/* Holding any lock stops cpus from going down. */
	spin_lock(&call_lock);
	cpus = num_online_cpus() - 1;
	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}
EXPORT_SYMBOL(smp_call_function);

static void stop_this_cpu (void * dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	disable_local_APIC();
	if (cpu_data[smp_processor_id()].hlt_works_ok)
		for(;;) halt();
	for (;;);
}

/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);

	local_irq_disable();
	disable_local_APIC();
	local_irq_enable();
}

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
}

fastcall void smp_call_function_interrupt(struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}
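
To make the calling convention concrete, here is a minimal, hypothetical usage sketch of the smp_call_function() interface defined above. print_cpu_id() and greet_all_cpus() are illustrative names only and are not part of smp.c; the signature (func, info, nonatomic, wait) and the restrictions (fast, non-blocking handler; caller must have interrupts enabled and must not be in interrupt context) are taken from the comments in this file.

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/preempt.h>

/* Runs in interrupt context on each remote CPU: keep it fast and non-blocking. */
static void print_cpu_id(void *info)
{
	printk(KERN_INFO "%s: hello from CPU %d\n",
	       (char *)info, smp_processor_id());
}

/* Hypothetical caller: interrupts must be enabled here, per the comment above. */
static void greet_all_cpus(void)
{
	static char msg[] = "greet_all_cpus";

	/* nonatomic is unused; wait=1 spins until every other CPU has run func. */
	smp_call_function(print_cpu_id, msg, 0, 1);

	/* The IPI goes to all *other* CPUs only, so run the function here as well. */
	preempt_disable();
	print_cpu_id(msg);
	preempt_enable();
}

When the local CPU should be covered too, in-tree code of this era typically uses the on_each_cpu() wrapper instead, as flush_tlb_all() does above.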
