
📄 smp.c

📁 Linux kernel source code, packaged as a compressed archive; this is the source code accompanying the book 《Linux内核》 (The Linux Kernel).
💻 C
📖 Page 1 of 2
/*
 *  arch/s390/kernel/smp.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>

#include "cpcmd.h"

/* prototypes */
extern void update_one_process( struct task_struct *p,
                                unsigned long ticks, unsigned long user,
                                unsigned long system, int cpu);
extern int cpu_idle(void * unused);

extern __u16 boot_cpu_addr;

/*
 * An array with a pointer to the lowcore of every CPU.
 */
static int       max_cpus = NR_CPUS;      /* Setup configured maximum number of CPUs to activate */
int              smp_num_cpus;
struct _lowcore *lowcore_ptr[NR_CPUS];
unsigned int     prof_multiplier[NR_CPUS];
unsigned int     prof_old_multiplier[NR_CPUS];
unsigned int     prof_counter[NR_CPUS];
volatile int     __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
cycles_t         cacheflush_time = 0;
int              smp_threads_ready = 0;   /* Set when the idlers are all forked. */
unsigned long    ipi_count = 0;           /* Number of IPIs delivered. */
static atomic_t  smp_commenced = ATOMIC_INIT(0);

spinlock_t       kernel_flag = SPIN_LOCK_UNLOCKED;

/*
 *      Setup routine for controlling SMP activation
 *
 *      Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 *      activation entirely (the MPS table probe still happens, though).
 *
 *      Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 *      greater than 0, limits the maximum number of CPUs activated in
 *      SMP mode to <NUM>.
 */
static int __init nosmp(char *str)
{
        max_cpus = 0;
        return 1;
}

__setup("nosmp", nosmp);

static int __init maxcpus(char *str)
{
        get_option(&str, &max_cpus);
        return 1;
}

__setup("maxcpus=", maxcpus);
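The two __setup() hooks above register handlers that run while the kernel parses its boot command line: "nosmp" simply zeroes max_cpus, and "maxcpus=<NUM>" reads one integer via get_option(). Below is a minimal userspace sketch of what the maxcpus handler amounts to; strtol stands in for get_option, and parse_maxcpus is a hypothetical name, not a kernel symbol.

#include <stdio.h>
#include <stdlib.h>

static int max_cpus = 64;                   /* stand-in for NR_CPUS */

static int parse_maxcpus(const char *str)   /* hypothetical name */
{
        max_cpus = (int) strtol(str, NULL, 0);  /* get_option() reads one int */
        return 1;                           /* 1 = option consumed, as above */
}

int main(void)
{
        parse_maxcpus("2");                 /* boot command line: maxcpus=2 */
        printf("max_cpus = %d\n", max_cpus);
        return 0;
}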
/*
 * Reboot, halt and power_off routines for SMP.
 */
extern char vmhalt_cmd[];
extern char vmpoff_cmd[];

extern void reipl(unsigned long devno);

void do_machine_restart(void)
{
        smp_send_stop();
        reipl(S390_lowcore.ipl_device);
}

void machine_restart(char * __unused)
{
        if (smp_processor_id() != 0) {
                smp_ext_call_async(0, ec_restart);
                for (;;);
        } else
                do_machine_restart();
}

void do_machine_halt(void)
{
        smp_send_stop();
        if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
                cpcmd(vmhalt_cmd, NULL, 0);
        disabled_wait(0);
}

void machine_halt(void)
{
        if (smp_processor_id() != 0) {
                smp_ext_call_async(0, ec_halt);
                for (;;);
        } else
                do_machine_halt();
}

void do_machine_power_off(void)
{
        smp_send_stop();
        if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
                cpcmd(vmpoff_cmd, NULL, 0);
        disabled_wait(0);
}

void machine_power_off(void)
{
        if (smp_processor_id() != 0) {
                smp_ext_call_async(0, ec_power_off);
                for (;;);
        } else
                do_machine_power_off();
}
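All three machine_* wrappers above share one pattern: only CPU 0 performs the actual operation, and any other CPU forwards the request with an asynchronous external call, then spins in for (;;) until the machine goes down. A hedged userspace sketch of that delegate-to-the-primary shape, using POSIX threads and C11 atomics (worker and shutdown_requested are illustrative names, not kernel symbols):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int shutdown_requested;       /* stands in for the ec_restart bit */

static void *worker(void *arg)              /* a non-primary CPU */
{
        (void) arg;
        atomic_store(&shutdown_requested, 1);   /* like smp_ext_call_async(0, ...) */
        return NULL;                        /* the kernel spins in for (;;) instead */
}

int main(void)                              /* plays the role of CPU 0 */
{
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        while (!atomic_load(&shutdown_requested))
                ;                           /* the kernel takes an interrupt; we poll */
        puts("primary: performing the shutdown work");
        pthread_join(t, NULL);
        return 0;
}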
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
void do_ext_call_interrupt(__u16 source_cpu_addr)
{
        ec_ext_call *ec, *next;
        int bits;

        /*
         * handle bit signal external calls
         *
         * For the ec_schedule signal we have to do nothing. All the work
         * is done automatically when we return from the interrupt.
         * For the ec_restart, ec_halt and ec_power_off we call the
         * appropriate routine.
         */
        do {
                bits = atomic_read(&S390_lowcore.ext_call_fast);
        } while (atomic_compare_and_swap(bits, 0, &S390_lowcore.ext_call_fast));

        if (test_bit(ec_restart, &bits))
                do_machine_restart();
        if (test_bit(ec_halt, &bits))
                do_machine_halt();
        if (test_bit(ec_power_off, &bits))
                do_machine_power_off();

        /*
         * Handle external call commands with a parameter area
         */
        do {
                ec = (ec_ext_call *) atomic_read(&S390_lowcore.ext_call_queue);
        } while (atomic_compare_and_swap((int) ec, 0,
                                         &S390_lowcore.ext_call_queue));
        if (ec == NULL)
                return;   /* no command signals */

        /* Make a fifo out of the lifo */
        next = ec;
        ec->next = NULL;
        while (next != NULL) {
                ec_ext_call *tmp = next->next;
                next->next = ec;
                ec = next;
                next = tmp;
        }

        /* Execute every sigp command on the queue */
        while (ec != NULL) {
                switch (ec->cmd) {
                case ec_get_ctl: {
                        ec_creg_parms *pp;
                        pp = (ec_creg_parms *) ec->parms;
                        atomic_set(&ec->status, ec_executing);
                        asm volatile (
                                "   bras  1,0f\n"
                                "   stctl 0,0,0(%0)\n"
                                "0: ex    %1,0(1)\n"
                                : : "a" (pp->cregs+pp->start_ctl),
                                    "a" ((pp->start_ctl<<4) + pp->end_ctl)
                                : "memory", "1" );
                        atomic_set(&ec->status, ec_done);
                        return;
                }
                case ec_set_ctl: {
                        ec_creg_parms *pp;
                        pp = (ec_creg_parms *) ec->parms;
                        atomic_set(&ec->status, ec_executing);
                        asm volatile (
                                "   bras  1,0f\n"
                                "   lctl 0,0,0(%0)\n"
                                "0: ex    %1,0(1)\n"
                                : : "a" (pp->cregs+pp->start_ctl),
                                    "a" ((pp->start_ctl<<4) + pp->end_ctl)
                                : "memory", "1" );
                        atomic_set(&ec->status, ec_done);
                        return;
                }
                case ec_set_ctl_masked: {
                        ec_creg_mask_parms *pp;
                        u32 cregs[16];
                        int i;

                        pp = (ec_creg_mask_parms *) ec->parms;
                        atomic_set(&ec->status, ec_executing);
                        asm volatile (
                                "   bras  1,0f\n"
                                "   stctl 0,0,0(%0)\n"
                                "0: ex    %1,0(1)\n"
                                : : "a" (cregs+pp->start_ctl),
                                    "a" ((pp->start_ctl<<4) + pp->end_ctl)
                                : "memory", "1" );
                        for (i = pp->start_ctl; i <= pp->end_ctl; i++)
                                cregs[i] = (cregs[i] & pp->andvals[i])
                                                     | pp->orvals[i];
                        asm volatile (
                                "   bras  1,0f\n"
                                "   lctl 0,0,0(%0)\n"
                                "0: ex    %1,0(1)\n"
                                : : "a" (cregs+pp->start_ctl),
                                    "a" ((pp->start_ctl<<4) + pp->end_ctl)
                                : "memory", "1" );
                        atomic_set(&ec->status, ec_done);
                        return;
                }
                default:
                        break;
                }
                ec = ec->next;
        }
}

/*
 * Send an external call sigp to another cpu and wait for its completion.
 */
sigp_ccode smp_ext_call_sync(int cpu, ec_cmd_sig cmd, void *parms)
{
        struct _lowcore *lowcore = &get_cpu_lowcore(cpu);
        sigp_ccode ccode;
        ec_ext_call ec;

        ec.cmd = cmd;
        atomic_set(&ec.status, ec_pending);
        ec.parms = parms;
        do {
                ec.next = (ec_ext_call *) atomic_read(&lowcore->ext_call_queue);
        } while (atomic_compare_and_swap((int) ec.next, (int)(&ec),
                                         &lowcore->ext_call_queue));
        /*
         * We try once to deliver the signal. There are four possible
         * return codes:
         * 0) Order code accepted - can't show up on an external call
         * 1) Status stored - fine, wait for completion.
         * 2) Busy - there is another signal pending. That's fine too, because
         *    do_ext_call from the pending signal will execute all signals on
         *    the queue. We wait for completion.
         * 3) Not operational - something very bad has happened to the cpu.
         *    do not wait for completion.
         */
        ccode = signal_processor(cpu, sigp_external_call);
        if (ccode != sigp_not_operational)
                /* wait for completion, FIXME: possible seed of a deadlock */
                while (atomic_read(&ec.status) != ec_done);
        return ccode;
}
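Two techniques above are worth isolating: senders push requests onto the per-CPU ext_call_queue with a compare-and-swap loop (a lock-free LIFO), and do_ext_call_interrupt empties the whole list in one atomic swap, then reverses it in place so commands run in arrival order ("make a fifo out of the lifo"). A self-contained C11 sketch of that queue discipline; struct req, push and drain_fifo are illustrative names, not kernel symbols:

#include <stdatomic.h>
#include <stddef.h>

struct req {                                /* illustrative, like ec_ext_call */
        struct req *next;
        int cmd;
};

static _Atomic(struct req *) queue;         /* one ext_call_queue head */

static void push(struct req *r)             /* sender side: CAS onto the front */
{
        struct req *old = atomic_load(&queue);
        do {
                r->next = old;              /* link ahead of the current head */
        } while (!atomic_compare_exchange_weak(&queue, &old, r));
}

static struct req *drain_fifo(void)         /* receiver side */
{
        struct req *lifo = atomic_exchange(&queue, NULL);  /* grab all at once */
        struct req *fifo = NULL;

        while (lifo != NULL) {              /* reverse: newest-first -> oldest-first */
                struct req *tmp = lifo->next;
                lifo->next = fifo;
                fifo = lifo;
                lifo = tmp;
        }
        return fifo;
}

Because producers only ever push and the consumer takes the entire list in one exchange, the classic ABA hazard of lock-free stacks (which bites on single-node pops) cannot arise in this design.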
/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion. Currently we do not support parameters with
 * asynchronous sigps.
 */
sigp_ccode smp_ext_call_async(int cpu, ec_bit_sig sig)
{
        struct _lowcore *lowcore = &get_cpu_lowcore(cpu);
        sigp_ccode ccode;

        /*
         * Set signaling bit in lowcore of target cpu and kick it
         */
        atomic_set_mask(1<<sig, &lowcore->ext_call_fast);
        ccode = signal_processor(cpu, sigp_external_call);
        return ccode;
}

/*
 * Send an external call sigp to every other cpu in the system and
 * wait for the completion of the sigps.
 */
void smp_ext_call_sync_others(ec_cmd_sig cmd, void *parms)
{
        struct _lowcore *lowcore;
        ec_ext_call ec[NR_CPUS];
        sigp_ccode ccode;
        int i;

        for (i = 0; i < smp_num_cpus; i++) {
                if (smp_processor_id() == i)
                        continue;
                lowcore = &get_cpu_lowcore(i);
                ec[i].cmd = cmd;
                atomic_set(&ec[i].status, ec_pending);
                ec[i].parms = parms;
                do {
                        ec[i].next = (ec_ext_call *)
                                        atomic_read(&lowcore->ext_call_queue);
                } while (atomic_compare_and_swap((int) ec[i].next, (int)(ec+i),
                                                 &lowcore->ext_call_queue));
                ccode = signal_processor(i, sigp_external_call);
        }
        /* wait for completion, FIXME: possible seed of a deadlock */
        for (i = 0; i < smp_num_cpus; i++) {
                if (smp_processor_id() == i)
                        continue;
                while (atomic_read(&ec[i].status) != ec_done);
        }
}

/*
 * Send an external call sigp to every other cpu in the system and
 * return without waiting for the completion of the sigps. Currently
 * we do not support parameters with asynchronous sigps.
 */
void smp_ext_call_async_others(ec_bit_sig sig)
{
        struct _lowcore *lowcore;
        sigp_ccode ccode;
        int i;

        for (i = 0; i < smp_num_cpus; i++) {
                if (smp_processor_id() == i)
                        continue;
                lowcore = &get_cpu_lowcore(i);
                /*
                 * Set signaling bit in lowcore of target cpu and kick it
                 */
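The asynchronous path (cut off above at the page break) needs no queue node at all: smp_ext_call_async just ORs a signal bit into the target's ext_call_fast word and kicks the cpu, and the handler at the top of do_ext_call_interrupt claims every pending bit with one atomic swap before testing them. A short C11 sketch of the same bit-signal mailbox; post, handle and the SIG_* enum are made-up names for illustration:

#include <stdatomic.h>
#include <stdio.h>

enum { SIG_SCHEDULE, SIG_RESTART, SIG_HALT };   /* illustrative, like ec_bit_sig */

static atomic_uint pending;                 /* plays the role of ext_call_fast */

static void post(int sig)                   /* sender: like smp_ext_call_async */
{
        atomic_fetch_or(&pending, 1u << sig);
        /* ...then deliver the external-call interrupt (sigp) to the target */
}

static void handle(void)                    /* receiver: top of do_ext_call_interrupt */
{
        unsigned int bits = atomic_exchange(&pending, 0u);  /* claim all bits */

        if (bits & (1u << SIG_RESTART))
                puts("would call do_machine_restart()");
        if (bits & (1u << SIG_HALT))
                puts("would call do_machine_halt()");
}

int main(void)
{
        post(SIG_HALT);
        handle();
        return 0;
}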
