
📄 smp.c

📁 优龙2410 Linux 2.6.8 kernel source code
💻 C
📖 Page 1 of 2
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/hardirq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/naca.h>
#include <asm/paca.h>
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvCall.h>
#include <asm/iSeries/HvCallCfg.h>
#include <asm/time.h>
#include <asm/ppcdebug.h>
#include "open_pic.h"
#include <asm/machdep.h>
#include <asm/xics.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/rtas.h>

int smp_threads_ready;
unsigned long cache_decay_ticks;

cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_available_map = CPU_MASK_NONE;
cpumask_t cpu_present_at_boot = CPU_MASK_NONE;

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);

struct smp_ops_t *smp_ops;

static volatile unsigned int cpu_callin_map[NR_CPUS];

extern unsigned char stab_array[];

extern int cpu_idle(void *unused);
void smp_call_function_interrupt(void);
extern long register_vpa(unsigned long flags, unsigned long proc,
			 unsigned long vpa);

/* Low level assembly function used to backup CPU 0 state */
extern void __save_cpu_setup(void);

#ifdef CONFIG_PPC_ISERIES
static unsigned long iSeries_smp_message[NR_CPUS];

void iSeries_smp_message_recv(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int msg;

	if (num_online_cpus() < 2)
		return;

	for (msg = 0; msg < 4; ++msg)
		if (test_and_clear_bit(msg, &iSeries_smp_message[cpu]))
			smp_message_recv(msg, regs);
}

static inline void smp_iSeries_do_message(int cpu, int msg)
{
	set_bit(msg, &iSeries_smp_message[cpu]);
	HvCall_sendIPI(&(paca[cpu]));
}

static void smp_iSeries_message_pass(int target, int msg)
{
	int i;

	if (target < NR_CPUS)
		smp_iSeries_do_message(target, msg);
	else {
		for_each_online_cpu(i) {
			if (target == MSG_ALL_BUT_SELF
			    && i == smp_processor_id())
				continue;
			smp_iSeries_do_message(i, msg);
		}
	}
}

static int smp_iSeries_numProcs(void)
{
	unsigned np, i;

	np = 0;
	for (i = 0; i < NR_CPUS; ++i) {
		if (paca[i].lppaca.xDynProcStatus < 2) {
			cpu_set(i, cpu_available_map);
			cpu_set(i, cpu_possible_map);
			cpu_set(i, cpu_present_at_boot);
			++np;
		}
	}
	return np;
}

static int smp_iSeries_probe(void)
{
	unsigned i;
	unsigned np = 0;

	for (i = 0; i < NR_CPUS; ++i) {
		if (paca[i].lppaca.xDynProcStatus < 2) {
			/*paca[i].active = 1;*/
			++np;
		}
	}

	return np;
}
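/*
 * Note on the iSeries IPI scheme above: message_pass is a simple
 * mailbox protocol.  The sender sets bit `msg` in the target's
 * iSeries_smp_message[] word and fires a hypervisor IPI through
 * HvCall_sendIPI() on the target's paca; the receiving cpu then
 * drains all four message bits in iSeries_smp_message_recv().
 * For example, smp_iSeries_message_pass(1, PPC_MSG_RESCHEDULE)
 * reduces to set_bit(PPC_MSG_RESCHEDULE, &iSeries_smp_message[1])
 * followed by HvCall_sendIPI(&paca[1]).
 */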
static void smp_iSeries_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/* Verify that our partition has a processor nr */
	if (paca[nr].lppaca.xDynProcStatus >= 2)
		return;

	/* The processor is currently spinning, waiting
	 * for the cpu_start field to become non-zero.
	 * After we set cpu_start, the processor will
	 * continue on to secondary_start in iSeries_head.S
	 */
	paca[nr].cpu_start = 1;
}

static void __devinit smp_iSeries_setup_cpu(int nr)
{
}

static struct smp_ops_t iSeries_smp_ops = {
	.message_pass = smp_iSeries_message_pass,
	.probe        = smp_iSeries_probe,
	.kick_cpu     = smp_iSeries_kick_cpu,
	.setup_cpu    = smp_iSeries_setup_cpu,
};

/* This is called very early. */
void __init smp_init_iSeries(void)
{
	smp_ops = &iSeries_smp_ops;
	systemcfg->processorCount = smp_iSeries_numProcs();
}
#endif

#ifdef CONFIG_PPC_PSERIES
void smp_openpic_message_pass(int target, int msg)
{
	/* make sure we're sending something that translates to an IPI */
	if (msg > 0x3) {
		printk("SMP %d: smp_message_pass: unknown msg %d\n",
		       smp_processor_id(), msg);
		return;
	}
	switch (target) {
	case MSG_ALL:
		openpic_cause_IPI(msg, 0xffffffff);
		break;
	case MSG_ALL_BUT_SELF:
		openpic_cause_IPI(msg,
				  0xffffffff & ~(1 << smp_processor_id()));
		break;
	default:
		openpic_cause_IPI(msg, 1 << target);
		break;
	}
}

static int __init smp_openpic_probe(void)
{
	int nr_cpus;

	nr_cpus = cpus_weight(cpu_possible_map);

	if (nr_cpus > 1)
		openpic_request_IPIs();

	return nr_cpus;
}

static void __devinit smp_openpic_setup_cpu(int cpu)
{
	do_openpic_setup_cpu();
}

#ifdef CONFIG_HOTPLUG_CPU
/* Get state of physical CPU.
 * Return codes:
 *	0	- The processor is in the RTAS stopped state
 *	1	- stop-self is in progress
 *	2	- The processor is not in the RTAS stopped state
 *	-1	- Hardware Error
 *	-2	- Hardware Busy, Try again later.
 */
static int query_cpu_stopped(unsigned int pcpu)
{
	int cpu_status;
	int status, qcss_tok;

	qcss_tok = rtas_token("query-cpu-stopped-state");
	BUG_ON(qcss_tok == RTAS_UNKNOWN_SERVICE);
	status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
	if (status != 0) {
		printk(KERN_ERR
		       "RTAS query-cpu-stopped-state failed: %i\n", status);
		return status;
	}

	return cpu_status;
}

int __cpu_disable(void)
{
	/* FIXME: go put this in a header somewhere */
	extern void xics_migrate_irqs_away(void);

	systemcfg->processorCount--;

	/* fix boot_cpuid here */
	if (smp_processor_id() == boot_cpuid)
		boot_cpuid = any_online_cpu(cpu_online_map);

	/* FIXME: abstract this to not be platform specific later on */
	xics_migrate_irqs_away();
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int tries;
	int cpu_status;
	unsigned int pcpu = get_hard_smp_processor_id(cpu);

	for (tries = 0; tries < 5; tries++) {
		cpu_status = query_cpu_stopped(pcpu);

		if (cpu_status == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	if (cpu_status != 0) {
		printk("Querying DEAD? cpu %i (%i) shows %i\n",
		       cpu, pcpu, cpu_status);
	}

	/* Isolation and deallocation are definitely done by
	 * drslot_chrp_cpu.  If they were not, they would be
	 * done here.  Change isolate state to Isolate and
	 * change allocation-state to Unusable.
	 */
	paca[cpu].cpu_start = 0;

	/* So we can recognize if it fails to come up next time. */
	cpu_callin_map[cpu] = 0;
}

/* Kill this cpu */
void cpu_die(void)
{
	local_irq_disable();
	/* Some hardware requires clearing the CPPR, while other hardware
	 * does not; it is safe either way.
	 */
	pSeriesLP_cppr_info(0, 0);
	rtas_stop_self();
	/* Should never get here... */
	BUG();
	for (;;);
}
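/*
 * The hotplug offline path above, in brief: __cpu_disable() runs on
 * the departing cpu and migrates its interrupts away; cpu_die() then
 * stops that cpu via RTAS stop-self, while __cpu_die(), running on a
 * surviving cpu, polls query_cpu_stopped() up to five times at HZ
 * (one second) intervals to confirm the processor reached the RTAS
 * stopped state before clearing its cpu_start and cpu_callin_map
 * entries.
 */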
/* Search all cpu device nodes for an offline logical cpu.  If a
 * device node has a "ibm,my-drc-index" property (meaning this is an
 * LPAR), paranoid-check whether we own the cpu.  For each "thread"
 * of a cpu, if it is offline and has the same hw index as before,
 * grab that in preference.
 */
static unsigned int find_physical_cpu_to_start(unsigned int old_hwindex)
{
	struct device_node *np = NULL;
	unsigned int best = -1U;

	while ((np = of_find_node_by_type(np, "cpu"))) {
		int nr_threads, len;
		u32 *index = (u32 *)get_property(np, "ibm,my-drc-index", NULL);
		u32 *tid = (u32 *)
			get_property(np, "ibm,ppc-interrupt-server#s", &len);

		if (!tid)
			tid = (u32 *)get_property(np, "reg", &len);

		if (!tid)
			continue;

		/* If there is a drc-index, make sure that we own
		 * the cpu.
		 */
		if (index) {
			int state;
			int rc = rtas_get_sensor(9003, *index, &state);
			if (rc != 0 || state != 1)
				continue;
		}

		nr_threads = len / sizeof(u32);

		while (nr_threads--) {
			if (0 == query_cpu_stopped(tid[nr_threads])) {
				best = tid[nr_threads];
				if (best == old_hwindex)
					goto out;
			}
		}
	}
out:
	of_node_put(np);
	return best;
}

/**
 * smp_startup_cpu() - start the given cpu
 *
 * At boot time, there is nothing to do.  At run-time, call RTAS with
 * the appropriate start location, if the cpu is in the RTAS stopped
 * state.
 *
 * Returns:
 *	0	- failure
 *	1	- success
 */
static inline int __devinit smp_startup_cpu(unsigned int lcpu)
{
	int status;
	extern void (*pseries_secondary_smp_init)(unsigned int cpu);
	unsigned long start_here = __pa(pseries_secondary_smp_init);
	unsigned int pcpu;

	/* At boot time the cpus are already spinning in hold
	 * loops, so nothing to do. */
	if (system_state == SYSTEM_BOOTING)
		return 1;

	pcpu = find_physical_cpu_to_start(get_hard_smp_processor_id(lcpu));
	if (pcpu == -1U) {
		printk(KERN_INFO "No more cpus available, failing\n");
		return 0;
	}

	/* Fixup atomic count: it exited inside IRQ handler. */
	paca[lcpu].__current->thread_info->preempt_count = 0;

	/* At boot this is done in prom.c. */
	paca[lcpu].hw_cpu_id = pcpu;

	status = rtas_call(rtas_token("start-cpu"), 3, 1, NULL,
			   pcpu, start_here, lcpu);
	if (status != 0) {
		printk(KERN_ERR "start-cpu failed: %i\n", status);
		return 0;
	}
	return 1;
}

static inline void look_for_more_cpus(void)
{
	int num_addr_cell, num_size_cell, len, i, maxcpus;
	struct device_node *np;
	unsigned int *ireg;

	/* Find the property which will tell us about how many CPUs
	 * we're allowed to have. */
	if ((np = find_path_device("/rtas")) == NULL) {
		printk(KERN_ERR "Could not find /rtas in device tree!");
		return;
	}
	num_addr_cell = prom_n_addr_cells(np);
	num_size_cell = prom_n_size_cells(np);

	ireg = (unsigned int *)get_property(np, "ibm,lrdr-capacity", &len);
	if (ireg == NULL) {
		/* FIXME: make sure not marked as lrdr_capable() */
		return;
	}

	maxcpus = ireg[num_addr_cell + num_size_cell];

	/* Double maxcpus for processors which have SMT capability */
	if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
		maxcpus *= 2;

	if (maxcpus > NR_CPUS) {
		printk(KERN_WARNING
		       "Partition configured for %d cpus, "
		       "operating system maximum is %d.\n", maxcpus, NR_CPUS);
		maxcpus = NR_CPUS;
	} else
		printk(KERN_INFO "Partition configured for %d cpus.\n",
		       maxcpus);

	/* Make those cpus (which might appear later) possible too. */
	for (i = 0; i < maxcpus; i++)
		cpu_set(i, cpu_possible_map);
}
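/*
 * The hotplug online path above, in brief: smp_startup_cpu() picks a
 * stopped physical thread via find_physical_cpu_to_start() (preferring
 * the thread's previous hardware index), records it in the logical
 * cpu's paca, and calls the RTAS "start-cpu" method with
 * pseries_secondary_smp_init as the entry point.  look_for_more_cpus()
 * sizes cpu_possible_map from the ibm,lrdr-capacity property, doubled
 * on SMT-capable processors, so cpus that appear later can still be
 * brought up.  At boot none of this is needed: the cpus are already
 * spinning in their hold loops.
 */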
#else /* ... CONFIG_HOTPLUG_CPU */
static inline int __devinit smp_startup_cpu(unsigned int lcpu)
{
	return 1;
}
static inline void look_for_more_cpus(void)
{
}
#endif /* CONFIG_HOTPLUG_CPU */

static void smp_pSeries_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	if (!smp_startup_cpu(nr))
		return;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.  After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	paca[nr].cpu_start = 1;
}
#endif /* CONFIG_PPC_PSERIES */

static void __init smp_space_timers(unsigned int max_cpus)
{
	int i;
	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
	unsigned long previous_tb = paca[boot_cpuid].next_jiffy_update_tb;

	for_each_cpu(i) {
		if (i != boot_cpuid) {
			paca[i].next_jiffy_update_tb =
				previous_tb + offset;
			previous_tb = paca[i].next_jiffy_update_tb;
		}
	}
}

#ifdef CONFIG_PPC_PSERIES
void vpa_init(int cpu)
{
	unsigned long flags;

	/* Register the Virtual Processor Area (VPA) */
	flags = 1UL << (63 - 18);
	register_vpa(flags, cpu, __pa((unsigned long)&(paca[cpu].lppaca)));
}

static inline void smp_xics_do_message(int cpu, int msg)
{
	set_bit(msg, &xics_ipi_message[cpu].value);
	mb();
	xics_cause_IPI(cpu);
}

static void smp_xics_message_pass(int target, int msg)
{
	unsigned int i;

	if (target < NR_CPUS) {
		smp_xics_do_message(target, msg);
	} else {
		for_each_online_cpu(i) {
			if (target == MSG_ALL_BUT_SELF
			    && i == smp_processor_id())
				continue;
			smp_xics_do_message(i, msg);
		}
	}
}

extern void xics_request_IPIs(void);

static int __init smp_xics_probe(void)
{
#ifdef CONFIG_SMP
	xics_request_IPIs();
#endif

	return cpus_weight(cpu_possible_map);
}

static void __devinit smp_xics_setup_cpu(int cpu)
{
	if (cpu != boot_cpuid)
		xics_setup_cpu();
}

static spinlock_t timebase_lock = SPIN_LOCK_UNLOCKED;
static unsigned long timebase = 0;

static void __devinit pSeries_give_timebase(void)
{
	spin_lock(&timebase_lock);
	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
	timebase = get_tb();
	spin_unlock(&timebase_lock);

	while (timebase)
		barrier();
	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
}

static void __devinit pSeries_take_timebase(void)
{
	while (!timebase)
		barrier();
	spin_lock(&timebase_lock);
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	spin_unlock(&timebase_lock);
}

static struct smp_ops_t pSeries_openpic_smp_ops = {
	.message_pass	= smp_openpic_message_pass,
	.probe		= smp_openpic_probe,
	.kick_cpu	= smp_pSeries_kick_cpu,
	.setup_cpu	= smp_openpic_setup_cpu,
};

static struct smp_ops_t pSeries_xics_smp_ops = {
	.message_pass	= smp_xics_message_pass,
	.probe		= smp_xics_probe,
	.kick_cpu	= smp_pSeries_kick_cpu,
	.setup_cpu	= smp_xics_setup_cpu,
};

/* This is called very early */
void __init smp_init_pSeries(void)
{
	if (naca->interrupt_controller == IC_OPEN_PIC)
		smp_ops = &pSeries_openpic_smp_ops;
	else
		smp_ops = &pSeries_xics_smp_ops;

	/* Non-lpar has additional take/give timebase */
	if (systemcfg->platform == PLATFORM_PSERIES) {
		smp_ops->give_timebase = pSeries_give_timebase;
		smp_ops->take_timebase = pSeries_take_timebase;
	}
}
#endif

void smp_local_timer_interrupt(struct pt_regs * regs)
{
	if (!--(get_paca()->prof_counter)) {
		update_process_times(user_mode(regs));
		(get_paca()->prof_counter) = get_paca()->prof_multiplier;
	}
}

void smp_message_recv(int msg, struct pt_regs *regs)
{
	switch (msg) {
	case PPC_MSG_CALL_FUNCTION:
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* XXX Do we have to do this? */
		set_need_resched();
		break;
#if 0
	case PPC_MSG_MIGRATE_TASK:
