
📄 smp.c

📁 优龙2410 Linux 2.6.8 kernel source code
💻 C
📖 Page 1 of 2
		/* spare */
		break;
#endif
#ifdef CONFIG_DEBUGGER
	case PPC_MSG_DEBUGGER_BREAK:
		debugger_ipi(regs);
		break;
#endif
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}

void smp_send_reschedule(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 * Stolen from the i386 version.
 */
static spinlock_t call_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

static struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
} *call_data;

/* delay of at least 8 seconds on 1GHz cpu */
#define SMP_CALL_TIMEOUT (1UL << (30 + 3))

/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or are or have executed.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
		       int wait)
{
	struct call_data_struct data;
	int ret = -1, cpus;
	unsigned long timeout;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	/* Must grab online cpu count with preempt disabled, otherwise
	 * it can change. */
	cpus = num_online_cpus() - 1;
	if (!cpus) {
		ret = 0;
		goto out;
	}

	call_data = &data;
	wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);

	/* Wait for response */
	timeout = SMP_CALL_TIMEOUT;
	while (atomic_read(&data.started) != cpus) {
		HMT_low();
		if (--timeout == 0) {
			printk("smp_call_function on cpu %d: other cpus not "
			       "responding (%d)\n", smp_processor_id(),
			       atomic_read(&data.started));
			debugger(NULL);
			goto out;
		}
	}

	if (wait) {
		timeout = SMP_CALL_TIMEOUT;
		while (atomic_read(&data.finished) != cpus) {
			HMT_low();
			if (--timeout == 0) {
				printk("smp_call_function on cpu %d: other "
				       "cpus not finishing (%d/%d)\n",
				       smp_processor_id(),
				       atomic_read(&data.finished),
				       atomic_read(&data.started));
				debugger(NULL);
				goto out;
			}
		}
	}

	ret = 0;

out:
	call_data = NULL;
	HMT_medium();
	spin_unlock(&call_lock);
	return ret;
}

void smp_call_function_interrupt(void)
{
	void (*func) (void *info);
	void *info;
	int wait;

	/* call_data will be NULL if the sender timed out while
	 * waiting on us to receive the call.
	 */
	if (!call_data)
		return;

	func = call_data->func;
	info = call_data->info;
	wait = call_data->wait;

	if (!wait)
		smp_mb__before_atomic_inc();

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	(*func)(info);
	if (wait) {
		smp_mb__before_atomic_inc();
		atomic_inc(&call_data->finished);
	}
}

extern unsigned long decr_overclock;
extern struct gettimeofday_struct do_gtod;

struct thread_info *current_set[NR_CPUS];

DECLARE_PER_CPU(unsigned int, pvr);

static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(pvr, id) = _get_PVR();
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct pt_regs regs;
	struct task_struct *p;

	/* create a process for the processor */
	/* only regs.msr is actually used, and 0 is OK for it */
	memset(&regs, 0, sizeof(struct pt_regs));
	p = copy_process(CLONE_VM | CLONE_IDLETASK,
			 0, &regs, 0, NULL, NULL);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));

	wake_up_forked_process(p);
	init_idle(p, cpu);
	unhash_process(p);

	paca[cpu].__current = p;
	current_set[cpu] = p->thread_info;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	/*
	 * setup_cpu may need to be called on the boot cpu. We havent
	 * spun any cpus up but lets be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	paca[boot_cpuid].prof_counter = 1;
	paca[boot_cpuid].prof_multiplier = 1;

#ifndef CONFIG_PPC_ISERIES
	paca[boot_cpuid].next_jiffy_update_tb = tb_last_stamp = get_tb();

	/*
	 * Should update do_gtod.stamp_xsec.
	 * For now we leave it which means the time can be some
	 * number of msecs off until someone does a settimeofday()
	 */
	do_gtod.tb_orig_stamp = tb_last_stamp;

	look_for_more_cpus();
#endif

	max_cpus = smp_ops->probe();

	/* Backup CPU 0 state if necessary */
	__save_cpu_setup();

	smp_space_timers(max_cpus);

	for_each_cpu(cpu)
		if (cpu != boot_cpuid)
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);

	/* cpu_possible is set up in prom.c */
	cpu_set(boot_cpuid, cpu_online_map);

	paca[boot_cpuid].__current = current;
	current_set[boot_cpuid] = current->thread_info;
}

int __devinit __cpu_up(unsigned int cpu)
{
	int c;

	/* At boot, don't bother with non-present cpus -JSCHOPP */
	if (system_state == SYSTEM_BOOTING && !cpu_present_at_boot(cpu))
		return -ENOENT;

	paca[cpu].prof_counter = 1;
	paca[cpu].prof_multiplier = 1;
	paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;

	if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
		void *tmp;

		/* maximum of 48 CPUs on machines with a segment table */
		if (cpu >= 48)
			BUG();

		tmp = &stab_array[PAGE_SIZE * cpu];
		memset(tmp, 0, PAGE_SIZE);
		paca[cpu].stab_addr = (unsigned long)tmp;
		paca[cpu].stab_real = virt_to_abs(tmp);
	}

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	mb();

	/* wake up cpus */
	smp_ops->kick_cpu(cpu);

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state == SYSTEM_BOOTING)
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 25; c && !cpu_callin_map[cpu]; c--) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(HZ/5);
		}
#endif

	if (!cpu_callin_map[cpu]) {
		printk("Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	printk("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}

extern unsigned int default_distrib_server;

/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(paca[cpu].default_decr);
	cpu_callin_map[cpu] = 1;

	smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

#ifdef CONFIG_PPC_PSERIES
	if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
		vpa_init(cpu);
	}

#ifdef CONFIG_IRQ_ALL_CPUS
	/* Put the calling processor into the GIQ.  This is really only
	 * necessary from a secondary thread as the OF start-cpu interface
	 * performs this function for us on primary threads.
	 */
	/* TODO: 9005 is #defined in rtas-proc.c -- move to a header */
	rtas_set_indicator(9005, default_distrib_server, 1);
#endif
#endif

	spin_lock(&call_lock);
	cpu_set(cpu, cpu_online_map);
	spin_unlock(&call_lock);

	local_irq_enable();

	return cpu_idle(NULL);
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime
	 * se we pin us down to CPU 0 for a short while
	 */
	old_mask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

	smp_ops->setup_cpu(boot_cpuid);

	/* XXX fix this, xics currently relies on it - Anton */
	smp_threads_ready = 1;

	set_cpus_allowed(current, old_mask);
}

#ifdef CONFIG_SCHED_SMT
#ifdef CONFIG_NUMA
static struct sched_group sched_group_cpus[NR_CPUS];
static struct sched_group sched_group_phys[NR_CPUS];
static struct sched_group sched_group_nodes[MAX_NUMNODES];
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
static DEFINE_PER_CPU(struct sched_domain, node_domains);

__init void arch_init_sched_domains(void)
{
	int i;
	struct sched_group *first = NULL, *last = NULL;

	/* Set up domains */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
		struct sched_domain *node_domain = &per_cpu(node_domains, i);
		int node = cpu_to_node(i);
		cpumask_t nodemask = node_to_cpumask(node);
		cpumask_t my_cpumask = cpumask_of_cpu(i);
		cpumask_t sibling_cpumask = cpumask_of_cpu(i ^ 0x1);

		*cpu_domain = SD_SIBLING_INIT;
		if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
			cpus_or(cpu_domain->span, my_cpumask, sibling_cpumask);
		else
			cpu_domain->span = my_cpumask;
		cpu_domain->parent = phys_domain;
		cpu_domain->groups = &sched_group_cpus[i];

		*phys_domain = SD_CPU_INIT;
		phys_domain->span = nodemask;
		phys_domain->parent = node_domain;
		phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];

		*node_domain = SD_NODE_INIT;
		node_domain->span = cpu_possible_map;
		node_domain->groups = &sched_group_nodes[node];
	}

	/* Set up CPU (sibling) groups */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		int j;
		first = last = NULL;

		if (i != first_cpu(cpu_domain->span))
			continue;

		for_each_cpu_mask(j, cpu_domain->span) {
			struct sched_group *cpu = &sched_group_cpus[j];

			cpus_clear(cpu->cpumask);
			cpu_set(j, cpu->cpumask);
			cpu->cpu_power = SCHED_LOAD_SCALE;

			if (!first)
				first = cpu;
			if (last)
				last->next = cpu;
			last = cpu;
		}
		last->next = first;
	}

	for (i = 0; i < MAX_NUMNODES; i++) {
		int j;
		cpumask_t nodemask;
		struct sched_group *node = &sched_group_nodes[i];
		cpumask_t node_cpumask = node_to_cpumask(i);
		cpus_and(nodemask, node_cpumask, cpu_possible_map);

		if (cpus_empty(nodemask))
			continue;

		first = last = NULL;
		/* Set up physical groups */
		for_each_cpu_mask(j, nodemask) {
			struct sched_domain *cpu_domain = &per_cpu(cpu_domains, j);
			struct sched_group *cpu = &sched_group_phys[j];

			if (j != first_cpu(cpu_domain->span))
				continue;

			cpu->cpumask = cpu_domain->span;
			/*
			 * Make each extra sibling increase power by 10% of
			 * the basic CPU. This is very arbitrary.
			 */
			cpu->cpu_power = SCHED_LOAD_SCALE +
				SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;
			node->cpu_power += cpu->cpu_power;

			if (!first)
				first = cpu;
			if (last)
				last->next = cpu;
			last = cpu;
		}
		last->next = first;
	}

	/* Set up nodes */
	first = last = NULL;
	for (i = 0; i < MAX_NUMNODES; i++) {
		struct sched_group *cpu = &sched_group_nodes[i];
		cpumask_t nodemask;
		cpumask_t node_cpumask = node_to_cpumask(i);
		cpus_and(nodemask, node_cpumask, cpu_possible_map);

		if (cpus_empty(nodemask))
			continue;

		cpu->cpumask = nodemask;
		/* ->cpu_power already setup */

		if (!first)
			first = cpu;
		if (last)
			last->next = cpu;
		last = cpu;
	}
	last->next = first;

	mb();
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		cpu_attach_domain(cpu_domain, i);
	}
}
#else /* !CONFIG_NUMA */
static struct sched_group sched_group_cpus[NR_CPUS];
static struct sched_group sched_group_phys[NR_CPUS];
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_domain, phys_domains);

__init void arch_init_sched_domains(void)
{
	int i;
	struct sched_group *first = NULL, *last = NULL;

	/* Set up domains */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
		cpumask_t my_cpumask = cpumask_of_cpu(i);
		cpumask_t sibling_cpumask = cpumask_of_cpu(i ^ 0x1);

		*cpu_domain = SD_SIBLING_INIT;
		if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
			cpus_or(cpu_domain->span, my_cpumask, sibling_cpumask);
		else
			cpu_domain->span = my_cpumask;
		cpu_domain->parent = phys_domain;
		cpu_domain->groups = &sched_group_cpus[i];

		*phys_domain = SD_CPU_INIT;
		phys_domain->span = cpu_possible_map;
		phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];
	}

	/* Set up CPU (sibling) groups */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		int j;
		first = last = NULL;

		if (i != first_cpu(cpu_domain->span))
			continue;

		for_each_cpu_mask(j, cpu_domain->span) {
			struct sched_group *cpu = &sched_group_cpus[j];

			cpus_clear(cpu->cpumask);
			cpu_set(j, cpu->cpumask);
			cpu->cpu_power = SCHED_LOAD_SCALE;

			if (!first)
				first = cpu;
			if (last)
				last->next = cpu;
			last = cpu;
		}
		last->next = first;
	}

	first = last = NULL;
	/* Set up physical groups */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		struct sched_group *cpu = &sched_group_phys[i];

		if (i != first_cpu(cpu_domain->span))
			continue;

		cpu->cpumask = cpu_domain->span;
		/* See SMT+NUMA setup for comment */
		cpu->cpu_power = SCHED_LOAD_SCALE +
			SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;

		if (!first)
			first = cpu;
		if (last)
			last->next = cpu;
		last = cpu;
	}
	last->next = first;

	mb();
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		cpu_attach_domain(cpu_domain, i);
	}
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_SCHED_SMT */
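For reference, below is a minimal caller sketch (not part of the kernel source above) showing how the smp_call_function() interface documented in this file is typically used: the callback must be fast and non-blocking, wait=1 blocks until all other CPUs have run it, and the IPI is only sent to other CPUs, so the caller runs the function locally itself. The flush_my_state()/flush_everywhere() names are hypothetical.

#include <linux/kernel.h>
#include <linux/smp.h>

/* Hypothetical per-CPU callback: must be fast and non-blocking. */
static void flush_my_state(void *info)
{
	/* ... per-CPU work ... */
}

/* Hypothetical helper: run flush_my_state() on every online CPU. */
static void flush_everywhere(void)
{
	/* nonatomic is unused; wait=1 blocks until all other CPUs finish */
	if (smp_call_function(flush_my_state, NULL, 0, 1) != 0)
		printk("flush_everywhere: some CPUs did not respond\n");

	/* the IPI goes to all *other* CPUs only, so run it here as well */
	flush_my_state(NULL);
}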
