
📄 smp.c

📁 linux kernel source code
💻 C
static unsigned int __init smp_count_cpus(void)
{
	unsigned int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) == sigp_not_operational)
			continue;
		smp_get_save_area(num_cpus, cpu);
		num_cpus++;
	}
	printk("Detected %d CPU's\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);
	return num_cpus;
}

/*
 *	Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
#endif
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();
	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 *  don't care about the psw and regs settings since we'll never
	 *  reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
	spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
}

static int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}

/* Upping and downing of CPUs */

int __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	int curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;
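/*
 * cpu_possible_map is sized from the number of CPUs found by
 * smp_count_cpus() plus the "additional_cpus=" hot-plug reserve,
 * capped at NR_CPUS; a "possible_cpus=" early parameter, when given,
 * overrides the computed value. Both parameters are parsed further
 * down, under CONFIG_HOTPLUG_CPU.
 */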
void __init smp_setup_cpu_possible_map(void)
{
	unsigned int phy_cpus, pos_cpus, cpu;

	phy_cpus = smp_count_cpus();
	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);

	if (possible_cpus)
		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);

	for (cpu = 0; cpu < pos_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	phy_cpus = min(phy_cpus, pos_cpus);

	for (cpu = 0; cpu < phy_cpus; cpu++)
		cpu_set(cpu, cpu_present_map);
}

#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
	additional_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
	possible_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);

int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 <<  6 | 1 <<  4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */

/*
 *	Cycle through the processors and setup structures.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	/*
	 *  Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for_each_possible_cpu(i) {
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void*) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (!lowcore_ptr[i] || !stack)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (!stack)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (!lowcore_ptr[i]->extended_save_area_addr)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
	spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}
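/*
 * The remainder of the file exposes three read-only per-cpu sysfs
 * attributes: "capability", "idle_count" and "idle_time_us". Idle time
 * is accumulated in TOD clock units and shifted right by 12 bits on
 * output, which converts it to microseconds.
 */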
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static ssize_t show_capability(struct sys_device *dev, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);

static ssize_t show_idle_count(struct sys_device *dev, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long idle_count;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock_irq(&idle->lock);
	idle_count = idle->idle_count;
	spin_unlock_irq(&idle->lock);
	return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct sys_device *dev, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long new_time;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock_irq(&idle->lock);
	if (idle->in_idle) {
		new_time = get_clock();
		idle->idle_time += new_time - idle->idle_enter;
		idle->idle_enter = new_time;
	}
	new_time = idle->idle_time;
	spin_unlock_irq(&idle->lock);
	return sprintf(buf, "%llu\n", new_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);

static struct attribute *cpu_attrs[] = {
	&attr_capability.attr,
	&attr_idle_count.attr,
	&attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_attr_group = {
	.attrs = cpu_attrs,
};

static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	struct s390_idle_data *idle;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		spin_lock_irq(&idle->lock);
		idle->idle_enter = 0;
		idle->idle_time = 0;
		idle->idle_count = 0;
		spin_unlock_irq(&idle->lock);
		if (sysfs_create_group(&s->kobj, &cpu_attr_group))
			return NOTIFY_BAD;
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_attr_group);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};

static int __init topology_init(void)
{
	int cpu;
	int rc;

	register_cpu_notifier(&smp_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);
		struct sys_device *s = &c->sysdev;

		c->hotpluggable = 1;
		register_cpu(c, cpu);
		if (!cpu_online(cpu))
			continue;
		s = &c->sysdev;
		rc = sysfs_create_group(&s->kobj, &cpu_attr_group);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(topology_init);
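
The per-cpu attributes registered above appear in sysfs once a CPU is online. A minimal userspace sketch for reading them, assuming the usual /sys mount point, an online cpu0, and an s390 kernel with these attributes compiled in (paths are illustrative):

/* read_cpu_attrs.c - print the s390 per-cpu sysfs attributes for cpu0 */
#include <stdio.h>

static void show(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);	/* attribute missing or CPU offline */
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	fclose(f);
}

int main(void)
{
	show("/sys/devices/system/cpu/cpu0/capability");
	show("/sys/devices/system/cpu/cpu0/idle_count");
	show("/sys/devices/system/cpu/cpu0/idle_time_us");
	return 0;
}

Each file holds a single decimal value followed by a newline, exactly as formatted by the sprintf() calls in the show_* handlers above.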
