
smp_32.c

Linux kernel source code
Language: C
Page 1 of 2
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));
	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
	BUG_ON(!mm);

#ifdef CONFIG_HOTPLUG_CPU
	/* If a CPU which we ran on has gone down, OK. */
	cpus_and(cpumask, cpumask, cpu_online_map);
	if (unlikely(cpus_empty(cpumask)))
		return;
#endif

	/*
	 * i'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * AK: x86-64 has a faster method that could be ported.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
	cpus_or(flush_cpumask, cpumask, flush_cpumask);
	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

	while (!cpus_empty(flush_cpumask))
		/* nothing. lockup detection does not belong here */
		cpu_relax();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
	preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
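/*
 * Illustrative sketch -- not part of smp_32.c.  A user-space model of the
 * TLB-shootdown handshake above: the initiator publishes a mask of target
 * CPUs (flush_cpumask), "sends" the IPI, and spins until every responder
 * has cleared its own bit, just as flush_tlb_others() does.  The responder
 * side corresponds to the INVALIDATE_TLB_VECTOR handler on the part of the
 * file not shown on this page.  Threads stand in for CPUs, an atomic word
 * stands in for cpumask_t, and every name below is invented for the example.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_ulong flush_mask;          /* models flush_cpumask          */
static atomic_int   ipi_pending[NCPUS];  /* models the per-CPU "IPI" flag */

static void *responder(void *arg)
{
	long id = (long)arg;
	unsigned long me = 1UL << id;

	/* Wait for the "IPI", do the local "flush", then acknowledge
	 * by clearing our own bit in the shared mask. */
	while (!atomic_load(&ipi_pending[id]))
		;
	atomic_fetch_and(&flush_mask, ~me);
	return NULL;
}

int main(void)
{
	pthread_t tid[NCPUS];
	long i;

	/* Initiator: publish the mask first, then "send" the IPIs,
	 * then spin until all targets have acknowledged. */
	atomic_store(&flush_mask, (1UL << NCPUS) - 2);	/* target CPUs 1..3 */
	for (i = 1; i < NCPUS; i++) {
		pthread_create(&tid[i], NULL, responder, (void *)i);
		atomic_store(&ipi_pending[i], 1);
	}
	while (atomic_load(&flush_mask))
		;					/* cpu_relax() */

	printf("all responders have flushed\n");
	for (i = 1; i < NCPUS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}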
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}

static struct call_data_struct *call_data;

static void __smp_call_function(void (*func) (void *info), void *info,
				int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (!cpus)
		return;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
}

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int
native_smp_call_function_mask(cpumask_t mask,
			      void (*func)(void *), void *info,
			      int wait)
{
	struct call_data_struct data;
	cpumask_t allbutself;
	int cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* Holding any lock stops cpus from going down. */
	spin_lock(&call_lock);

	allbutself = cpu_online_map;
	cpu_clear(smp_processor_id(), allbutself);

	cpus_and(mask, mask, allbutself);
	cpus = cpus_weight(mask);

	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();

	/* Send a message to other CPUs */
	if (cpus_equal(mask, allbutself))
		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}

static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	disable_local_APIC();
	if (cpu_data(smp_processor_id()).hlt_works_ok)
		for (;;) halt();
	for (;;);
}

/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
static void native_smp_send_stop(void)
{
	/* Don't deadlock on the call lock in panic */
	int nolock = !spin_trylock(&call_lock);
	unsigned long flags;

	local_irq_save(flags);
	__smp_call_function(stop_this_cpu, NULL, 0, 0);
	if (!nolock)
		spin_unlock(&call_lock);
	disable_local_APIC();
	local_irq_restore(flags);
}
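/*
 * Illustrative sketch -- not part of smp_32.c.  A user-space model of the
 * started/finished handshake used by __smp_call_function() and
 * native_smp_call_function_mask() above, with the responder side mirroring
 * smp_call_function_interrupt() further down this listing.  The call data
 * lives on the initiator's stack: responders bump "started" as soon as they
 * have copied func/info (after which the initiator may reuse that stack
 * frame if wait == 0), and bump "finished" only when the initiator asked to
 * wait.  All names below are invented for the example.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct call_data {			/* models call_data_struct */
	void (*func)(void *);
	void *info;
	atomic_int started;
	atomic_int finished;
	int wait;
};

static struct call_data *_Atomic pending_call;	/* models the call_data pointer */

static void say_hello(void *info)
{
	printf("%s\n", (char *)info);
}

static void *responder(void *arg)
{
	struct call_data *d;

	(void)arg;
	while (!(d = atomic_load(&pending_call)))	/* wait for the "IPI" */
		;
	void (*func)(void *) = d->func;		/* grab the data ...        */
	void *info = d->info;
	int wait = d->wait;

	atomic_fetch_add(&d->started, 1);	/* ... then tell the initiator */
	func(info);				/* if !wait, d may be stale now */
	if (wait)
		atomic_fetch_add(&d->finished, 1);
	return NULL;
}

int main(void)
{
	enum { NRESP = 3 };
	pthread_t tid[NRESP];
	struct call_data data = {
		.func = say_hello,
		.info = "hello from the cross-call model",
		.wait = 1,
	};
	long i;

	for (i = 0; i < NRESP; i++)
		pthread_create(&tid[i], NULL, responder, NULL);

	atomic_store(&pending_call, &data);	/* models send_IPI_allbutself() */

	/* data is on this stack frame, so at minimum we must wait for
	 * "started" before leaving -- exactly what the code above does. */
	while (atomic_load(&data.started) != NRESP)
		;
	if (data.wait)
		while (atomic_load(&data.finished) != NRESP)
			;

	for (i = 0; i < NRESP; i++)
		pthread_join(tid[i], NULL);
	return 0;
}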
/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	__get_cpu_var(irq_stat).irq_resched_count++;
}

fastcall void smp_call_function_interrupt(struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	irq_enter();
	(*func)(info);
	__get_cpu_var(irq_stat).irq_call_count++;
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}

static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}

struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
	.smp_prepare_cpus = native_smp_prepare_cpus,
	.cpu_up = native_cpu_up,
	.smp_cpus_done = native_smp_cpus_done,

	.smp_send_stop = native_smp_send_stop,
	.smp_send_reschedule = native_smp_send_reschedule,
	.smp_call_function_mask = native_smp_call_function_mask,
};
EXPORT_SYMBOL_GPL(smp_ops);
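/*
 * Illustrative sketch -- not part of smp_32.c.  How a caller elsewhere in a
 * kernel of this vintage might use the cross-call machinery implemented
 * above.  on_each_cpu() is called with four arguments here, matching the
 * call in flush_tlb_all() earlier in this listing; later kernels changed
 * the signature, so treat the exact prototype as an assumption tied to this
 * era.  The module and function names are invented for the example.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>

/* Runs on every online CPU, in interrupt context on the remote ones:
 * it must be fast and non-blocking, per the rules documented above. */
static void report_cpu(void *info)
{
	printk(KERN_INFO "ipi-demo: running on CPU %d\n", smp_processor_id());
}

static int __init ipi_demo_init(void)
{
	/* func, info, retry, wait -- wait=1 returns only after every
	 * CPU has finished running report_cpu(). */
	on_each_cpu(report_cpu, NULL, 0, 1);
	return 0;
}

static void __exit ipi_demo_exit(void)
{
}

module_init(ipi_demo_init);
module_exit(ipi_demo_exit);
MODULE_LICENSE("GPL");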
