📄 smp.c
字号:
/*
 * SMP Support
 *
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Lots of stuff stolen from arch/alpha/kernel/smp.c
 *
 * 00/09/11 David Mosberger <davidm@hpl.hp.com> Do loops_per_jiffy calibration on each CPU.
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> fixed logical processor id
 * 00/03/31 Rohit Seth <rohit.seth@intel.com> Fixes for Bootstrap Processor & cpu_online_map
 *	now gets done here (instead of setup.c)
 * 99/10/05 davidm Update to bring it in sync with new command-line processing scheme.
 * 10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and
 *	smp_call_function_single to resend IPI on timeouts
 */
#define __KERNEL_SYSCALLS__

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/delay.h>

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/efi.h>
#include <asm/machvec.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/unistd.h>

extern void __init calibrate_delay(void);
extern int cpu_idle(void * unused);
extern void machine_halt(void);
extern void start_ap(void);

extern int cpu_now_booting;			/* Used by head.S to find idle task */
extern volatile unsigned long cpu_online_map;	/* Bitmap of available CPUs */
extern struct cpuinfo_ia64 cpu_data[NR_CPUS];	/* Per-CPU state, indexed by logical CPU id */

/* Boot-time description of the CPUs to start; discarded after init. */
struct smp_boot_data smp_boot_data __initdata;

/* The "big kernel lock" for this architecture. */
spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;

/* Set by the "nointroute" command-line option (see nointroute() below). */
char __initdata no_int_routing;

unsigned char smp_int_redirect;			/* are INT and IPI redirectable by the chipset? */

volatile int __cpu_physical_id[NR_CPUS] = { -1, };	/* Logical ID -> SAPIC ID */
int smp_num_cpus = 1;
volatile int smp_threads_ready;			/* Set when the idlers are all forked */
cycles_t cacheflush_time;
unsigned long ap_wakeup_vector = -1;		/* External Int to use to wakeup APs */

/* Bitmap of CPUs that have checked in during boot (written by the APs). */
static volatile unsigned long cpu_callin_map;
static volatile int smp_commenced;
static int max_cpus = -1;			/* Command line */

/*
 * Per-CPU bitmask of pending IPI operations (bit numbers are the
 * IPI_* codes below); producers set bits, handle_IPI() consumes them.
 */
static unsigned long ipi_op[NR_CPUS];

/*
 * Argument block for smp_call_function()/smp_call_function_single():
 * lives on the caller's stack, published through smp_call_function_data.
 */
struct smp_call_struct {
	void (*func) (void *info);	/* function to call on the target CPU(s) */
	void *info;			/* opaque argument passed to func */
	long wait;			/* non-zero: caller waits for completion */
	atomic_t unstarted_count;	/* CPUs that have not yet picked up the call */
	atomic_t unfinished_count;	/* CPUs that have not yet finished (if wait) */
};
static volatile struct smp_call_struct *smp_call_function_data;

/* IPI operation codes (bit positions in ipi_op[]). */
#define IPI_RESCHEDULE 0
#define IPI_CALL_FUNC 1
#define IPI_CPU_STOP 2
#ifndef CONFIG_ITANIUM_PTCG
# define IPI_FLUSH_TLB 3
#endif /*!CONFIG_ITANIUM_PTCG */

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */
static int __init
nosmp(char *str)
{
	max_cpus = 0;
	return 1;
}

__setup("nosmp", nosmp);

static int __init
maxcpus(char *str)
{
	get_option(&str, &max_cpus);
	return 1;
}

__setup("maxcpus=", maxcpus);

/* "nointroute": forbid redirecting interrupts/IPIs via the chipset. */
static int __init
nointroute(char *str)
{
	no_int_routing = 1;
	return 1;
}

__setup("nointroute", nointroute);

/*
 * Yoink this CPU from the runnable list...
 */
void
halt_processor(void)
{
	/* Take ourselves out of the online map, then spin forever with
	   interrupts masked; the CPU never returns from this function. */
	clear_bit(smp_processor_id(), &cpu_online_map);
	max_xtp();	/* raise external task priority to block further IPIs */
	__cli();
	for (;;)
		;
}

/*
 * Acquire a "pointer lock": atomically install <data> into *<lock> when it
 * is NULL.  Returns 0 on success, -EBUSY if the slot is taken and <retry>
 * is false; otherwise spins until the slot frees up and tries again.
 */
static inline int
pointer_lock(void *lock, void *data, int retry)
{
	volatile long *ptr = lock;
 again:
	if (cmpxchg_acq((void **) lock, 0, data) == 0)
		return 0;

	if (!retry)
		return -EBUSY;

	while (*ptr)
		;
	goto again;
}

/*
 * Interrupt handler for inter-processor interrupts: drain this CPU's
 * pending-operation bitmask (ipi_op[]) and dispatch each operation.
 */
void
handle_IPI(int irq, void *dev_id, struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_op[this_cpu];
	unsigned long ops;

	/* Count this now; we may make a call that never returns. */
	cpu_data[this_cpu].ipi_count++;

	mb();	/* Order interrupt and bit testing. */
	/* Atomically claim all currently-pending operation bits. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			/* Lowest set bit = next operation to service. */
			which = ffz(~ops);
			ops &= ~(1 << which);

			switch (which) {
			case IPI_RESCHEDULE:
				/*
				 * Reschedule callback.  Everything to be done is done by the
				 * interrupt return path.
				 */
				break;

			case IPI_CALL_FUNC:
				{
					struct smp_call_struct *data;
					void (*func)(void *info);
					void *info;
					int wait;

					/* release the 'pointer lock' */
					data = (struct smp_call_struct *) smp_call_function_data;
					func = data->func;
					info = data->info;
					wait = data->wait;

					mb();
					atomic_dec(&data->unstarted_count);

					/* At this point the structure may be gone unless wait is true.  */
					(*func)(info);

					/* Notify the sending CPU that the task is done.  */
					mb();
					if (wait)
						atomic_dec(&data->unfinished_count);
				}
				break;

			case IPI_CPU_STOP:
				halt_processor();
				break;

#ifndef CONFIG_ITANIUM_PTCG
			case IPI_FLUSH_TLB:
			{
				extern unsigned long flush_start, flush_end, flush_nbits, flush_rid;
				extern atomic_t flush_cpu_count;
				unsigned long saved_rid = ia64_get_rr(flush_start);
				unsigned long end = flush_end;
				unsigned long start = flush_start;
				unsigned long nbits = flush_nbits;

				/*
				 * Current CPU may be running with different
				 * RID so we need to reload the RID of flushed
				 * address.  Purging the translation also
				 * needs ALAT invalidation; we do not need
				 * "invala" here since it is done in
				 * ia64_leave_kernel.
				 */
				ia64_srlz_d();
				if (saved_rid != flush_rid) {
					ia64_set_rr(flush_start, flush_rid);
					ia64_srlz_d();
				}

				do {
					/*
					 * Purge local TLB entries.
					 */
					__asm__ __volatile__ ("ptc.l %0,%1" ::
							      "r"(start), "r"(nbits<<2) : "memory");
					start += (1UL << nbits);
				} while (start < end);

				ia64_insn_group_barrier();
				ia64_srlz_i();	/* srlz.i implies srlz.d */

				/* Restore the region register we clobbered above. */
				if (saved_rid != flush_rid) {
					ia64_set_rr(flush_start, saved_rid);
					ia64_srlz_d();
				}

				/* Tell the initiating CPU this CPU's flush is done. */
				atomic_dec(&flush_cpu_count);
				break;
			}
#endif	/* !CONFIG_ITANIUM_PTCG */

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
				break;
			} /* Switch */
		} while (ops);

		mb();	/* Order data access and bit testing. */
	}
}

/* Post operation <op> for <dest_cpu> and kick it with a hardware IPI.
   A dest_cpu of -1 (CPU not present) is silently ignored. */
static inline void
send_IPI_single (int dest_cpu, int op)
{

	if (dest_cpu == -1)
		return;

	set_bit(op, &ipi_op[dest_cpu]);
	platform_send_ipi(dest_cpu, IPI_IRQ, IA64_IPI_DM_INT, 0);
}

/* Send operation <op> to every online CPU except the current one. */
static inline void
send_IPI_allbutself(int op)
{
	int i;

	for (i = 0; i < smp_num_cpus; i++) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

/* Send operation <op> to every online CPU, including the current one. */
static inline void
send_IPI_all(int op)
{
	int i;

	for (i = 0; i < smp_num_cpus; i++)
		send_IPI_single(i, op);
}

/* Send operation <op> to the current CPU only. */
static inline void
send_IPI_self(int op)
{
	send_IPI_single(smp_processor_id(), op);
}

/* Ask <cpu> to reschedule; all work happens on its interrupt return path. */
void
smp_send_reschedule(int cpu)
{
	send_IPI_single(cpu, IPI_RESCHEDULE);
}

/* Halt every CPU except the current one (see halt_processor()). */
void
smp_send_stop(void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
}

#ifndef CONFIG_ITANIUM_PTCG
/* Ask all other CPUs to purge the TLB range published in flush_* globals. */
void
smp_send_flush_tlb(void)
{
	send_IPI_allbutself(IPI_FLUSH_TLB);
}
#endif /* !CONFIG_ITANIUM_PTCG */

/*
 * Run a function on another CPU
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>
 * or is or has executed.
*/intsmp_call_function_single (int cpuid, void (*func) (void *info), void *info, int retry, int wait){ struct smp_call_struct data; unsigned long timeout; int cpus = 1; if (cpuid == smp_processor_id()) { printk(__FUNCTION__" trying to call self\n"); return -EBUSY; } data.func = func; data.info = info; data.wait = wait; atomic_set(&data.unstarted_count, cpus); atomic_set(&data.unfinished_count, cpus); if (pointer_lock(&smp_call_function_data, &data, retry)) return -EBUSY;resend: /* Send a message to all other CPUs and wait for them to respond */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -