
📄 smp.c

📁 linux-2.6.15.6
💻 C
/*
 * parameter area for the set/clear control bit callbacks
 */
typedef struct
{
	__u16 start_ctl;
	__u16 end_ctl;
	unsigned long orvals[16];
	unsigned long andvals[16];
} ec_creg_mask_parms;

/*
 * callback for setting/clearing control bits
 */
void smp_ctl_bit_callback(void *info)
{
	ec_creg_mask_parms *pp;
	unsigned long cregs[16];
	int i;

	pp = (ec_creg_mask_parms *) info;
	__ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
	for (i = pp->start_ctl; i <= pp->end_ctl; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 1 << bit;
	parms.andvals[cr] = -1L;
	preempt_disable();
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_set_bit(cr, bit);
	preempt_enable();
}

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 0;
	parms.andvals[cr] = ~(1L << bit);
	preempt_disable();
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_clear_bit(cr, bit);
	preempt_enable();
}

/*
 * Lets check how many CPUs we have.
 */
void __init smp_check_cpus(unsigned int max_cpus)
{
	int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535 && num_cpus < max_cpus; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[num_cpus] = (__u16) cpu;
		if (signal_processor(num_cpus, sigp_sense) ==
		    sigp_not_operational)
			continue;
		cpu_set(num_cpus, cpu_present_map);
		num_cpus++;
	}

	for (cpu = 1; cpu < max_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	printk("Detected %d CPU's\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);
}

/*
 *	Activate a secondary processor.
 */
extern void init_cpu_timer(void);
extern void init_cpu_vtimer(void);
extern int pfault_init(void);
extern void pfault_fini(void);

int __devinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* init per CPU timer */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	init_cpu_vtimer();
#endif
#ifdef CONFIG_PFAULT
	/* Enable pfault pseudo page faults on this cpu. */
	if (MACHINE_IS_VM)
		pfault_init();
#endif
	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 *  don't care about the psw and regs settings since we'll never
	 *  reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

/* Reserving and releasing of CPUs */

static DEFINE_SPINLOCK(smp_reserve_lock);
static int smp_cpu_reserved[NR_CPUS];

int
smp_get_cpu(cpumask_t cpu_mask)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	/* Try to find an already reserved cpu. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (smp_cpu_reserved[cpu] != 0) {
			smp_cpu_reserved[cpu]++;
			/* Found one. */
			goto out;
		}
	}
	/* Reserve a new cpu from cpu_mask. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (cpu_online(cpu)) {
			smp_cpu_reserved[cpu]++;
			goto out;
		}
	}
	cpu = -ENODEV;
out:
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return cpu;
}

void
smp_put_cpu(int cpu)
{
	unsigned long flags;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	smp_cpu_reserved[cpu]--;
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
}

static inline int
cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}

/* Upping and downing of CPUs */

int
__cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore    *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode          ccode;
	int                 curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		idle->thread_info + (THREAD_SIZE);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	__asm__ __volatile__("stam  0,15,0(%0)"
			     : : "a" (&cpu_lowcore->access_regs_save_area)
			     : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();
	signal_processor(cpu, sigp_restart);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

int
__cpu_disable(void)
{
	unsigned long flags;
	ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&smp_reserve_lock, flags);
	if (smp_cpu_reserved[cpu] != 0) {
		spin_unlock_irqrestore(&smp_reserve_lock, flags);
		return -EBUSY;
	}
	cpu_clear(cpu, cpu_online_map);

#ifdef CONFIG_PFAULT
	/* Disable pfault pseudo page faults on this cpu. */
	if (MACHINE_IS_VM)
		pfault_fini();
#endif

	/* disable all external interrupts */
	cr_parms.start_ctl = 0;
	cr_parms.end_ctl = 0;
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
	smp_ctl_bit_callback(&cr_parms);

	/* disable all I/O interrupts */
	cr_parms.start_ctl = 6;
	cr_parms.end_ctl = 6;
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
				1<<27 | 1<<26 | 1<<25 | 1<<24);
	smp_ctl_bit_callback(&cr_parms);

	/* disable most machine checks */
	cr_parms.start_ctl = 14;
	cr_parms.end_ctl = 14;
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
	smp_ctl_bit_callback(&cr_parms);

	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return 0;
}

void
__cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void
cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

/*
 *	Cycle through the processors and setup structures.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	smp_check_cpus(max_cpus);
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));

	/*
	 *  Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))
			continue;
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL|GFP_DMA,
					sizeof(void*) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (lowcore_ptr[i] == NULL || stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
#ifndef __s390x__
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (lowcore_ptr[i]->extended_save_area_addr == 0)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef __s390x__
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_possible_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}

void smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu;
	int ret;

	for_each_cpu(cpu) {
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
		if (ret)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, ret);
	}
	return 0;
}

subsys_initcall(topology_init);

EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu);
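
Usage sketch: smp_ctl_set_bit() and smp_ctl_clear_bit() above broadcast a control-register update to every other CPU via smp_call_function(smp_ctl_bit_callback, ...) and then apply the same mask locally, so the change takes effect machine-wide. A minimal sketch of how a caller might use the pair follows; the function name, control register, and bit number are illustrative placeholders, not real s390 facility assignments from this file.

/*
 * Sketch only: toggle a control-register bit on every cpu.
 * example_toggle_ctl_bit, CR 0 and bit 10 are hypothetical.
 */
static void example_toggle_ctl_bit(void)
{
	/* sets the bit in the local CR 0 and, through the callback
	 * broadcast, in CR 0 of every other online cpu */
	smp_ctl_set_bit(0, 10);

	/* ... work that relies on the facility being enabled ... */

	/* clears the bit everywhere again */
	smp_ctl_clear_bit(0, 10);
}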

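The exported smp_get_cpu()/smp_put_cpu() pair implements a simple reference-counted CPU reservation under smp_reserve_lock: smp_get_cpu() returns an already-reserved or newly reserved online CPU from the caller's mask (or -ENODEV), and smp_put_cpu() drops one reference. A hedged sketch of the intended calling pattern; the caller function is hypothetical.

/*
 * Sketch only: reserve one online cpu, use it, release it.
 * example_run_on_reserved_cpu is a hypothetical caller.
 */
static int example_run_on_reserved_cpu(void)
{
	int cpu;

	cpu = smp_get_cpu(cpu_online_map);	/* any online cpu will do */
	if (cpu < 0)
		return cpu;	/* -ENODEV: no online cpu in the mask */

	/* ... direct cpu-bound work at 'cpu' here ... */

	smp_put_cpu(cpu);	/* drop the reservation again */
	return 0;
}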