
📄 smp.c

📁 linux-2.6.15.6
💻 C
📖 Page 1 of 2
/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>

/*
 * bitmask of present and online CPUs.
 * The present bitmask indicates that the CPU is physically present.
 * The online bitmask indicates that the CPU is up and running.
 */
cpumask_t cpu_possible_map;
cpumask_t cpu_online_map;

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
	spinlock_t lock;
	unsigned long ipi_count;
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
	.lock	= SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t pending;
	cpumask_t unfinished;
};

static struct smp_call_struct * volatile smp_call_function_data;
static DEFINE_SPINLOCK(smp_call_function_lock);

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	pmd_t *pmd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	}

	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set
	 * of our "standard" page tables, with the addition of
	 * a 1:1 mapping for the physical address of the kernel.
	 */
	pgd = pgd_alloc(&init_mm);
	pmd = pmd_offset(pgd, PHYS_OFFSET);
	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = (void *)idle->thread_info + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);
	wmb();

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu))
			ret = -EIO;
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	*pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
	pgd_free(pgd);

	if (ret) {
		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

		/*
		 * FIXME: We need to clean up the new idle thread. --rmk
		 */
	}

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpuexit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mach_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	cpu_clear(cpu, cpu_online_map);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpu_clear(cpu, p->mm->cpu_vm_mask);
	}
	read_unlock(&tasklist_lock);

	return 0;
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpuexit __cpu_die(unsigned int cpu)
{
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __cpuexit cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	b	secondary_start_kernel"
		:
		: "r" ((void *)current->thread_info + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpu_set(cpu, mm->cpu_vm_mask);
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	cpu_init();
	preempt_disable();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	/*
	 * Enable local interrupts.
	 */
	local_irq_enable();
	local_fiq_enable();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	cpu_set(cpu, cpu_online_map);

	/*
	 * Setup local timer for this CPU.
	 */
	local_timer_setup(cpu);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;

	cpu_set(cpu, cpu_possible_map);
	cpu_set(cpu, cpu_present_map);
	cpu_set(cpu, cpu_online_map);
}

static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);

	for_each_cpu_mask(cpu, callmap) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);
		ipi->bits |= 1 << msg;
		spin_unlock(&ipi->lock);
	}

	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(callmap);

	local_irq_restore(flags);
}

/*
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, nor from a bottom half handler.
 */
static int smp_call_function_on_cpu(void (*func)(void *info), void *info,
				    int retry, int wait, cpumask_t callmap)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int ret = 0;
