
📄 smp.c

📁 Embedded Systems Design and Experiments, Textbook II source code: Linux kernel porting and compilation
💻 C
📖 Page 1 of 2
                                ccode = signal_processor_ps(
                                        &dummy,
                                        0,
                                        i,
                                        sigp_stop);
                        } while (ccode == sigp_busy);
                }
        }

        /* store status of all processors in their lowcores (real 0) */
        for (i = 0; i < smp_num_cpus; i++) {
                if (smp_processor_id() != i) {
                        int ccode;
                        low_core_addr = (unsigned long) &get_cpu_lowcore(i);
                        do {
                                ccode = signal_processor_ps(
                                        &dummy,
                                        low_core_addr,
                                        i,
                                        sigp_store_status_at_address);
                        } while (ccode == sigp_busy);
                }
        }
}

/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
        local_flush_tlb();
}

void smp_ptlb_all(void)
{
        smp_call_function(smp_ptlb_callback, NULL, 0, 1);
        local_flush_tlb();
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
typedef struct {
        __u16 start_ctl;
        __u16 end_ctl;
        __u32 orvals[16];
        __u32 andvals[16];
} ec_creg_mask_parms;

/*
 * callback for setting/clearing control bits
 */
void smp_ctl_bit_callback(void *info)
{
        ec_creg_mask_parms *pp;
        u32 cregs[16];
        int i;

        pp = (ec_creg_mask_parms *) info;
        asm volatile ("   bras  1,0f\n"
                      "   stctl 0,0,0(%0)\n"
                      "0: ex    %1,0(1)\n"
                      : : "a" (cregs+pp->start_ctl),
                          "a" ((pp->start_ctl<<4) + pp->end_ctl)
                      : "memory", "1" );
        for (i = pp->start_ctl; i <= pp->end_ctl; i++)
                cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
        asm volatile ("   bras  1,0f\n"
                      "   lctl 0,0,0(%0)\n"
                      "0: ex    %1,0(1)\n"
                      : : "a" (cregs+pp->start_ctl),
                          "a" ((pp->start_ctl<<4) + pp->end_ctl)
                      : "memory", "1" );
        return;
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
        ec_creg_mask_parms parms;

        if (atomic_read(&smp_commenced) != 0) {
                parms.start_ctl = cr;
                parms.end_ctl = cr;
                parms.orvals[cr] = 1 << bit;
                parms.andvals[cr] = 0xFFFFFFFF;
                smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
        }
        __ctl_set_bit(cr, bit);
}

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
        ec_creg_mask_parms parms;

        if (atomic_read(&smp_commenced) != 0) {
                parms.start_ctl = cr;
                parms.end_ctl = cr;
                parms.orvals[cr] = 0x00000000;
                parms.andvals[cr] = ~(1 << bit);
                smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
        }
        __ctl_clear_bit(cr, bit);
}

/*
 * Lets check how many CPUs we have.
 */
void smp_count_cpus(void)
{
        int curr_cpu;

        current->processor = 0;
        smp_num_cpus = 1;
        cpu_online_map = 1;
        for (curr_cpu = 0;
             curr_cpu <= 65535 && smp_num_cpus < max_cpus; curr_cpu++) {
                if ((__u16) curr_cpu == boot_cpu_addr)
                        continue;
                __cpu_logical_map[smp_num_cpus] = (__u16) curr_cpu;
                if (signal_processor(smp_num_cpus, sigp_sense) ==
                    sigp_not_operational)
                        continue;
                smp_num_cpus++;
        }
        printk("Detected %d CPU's\n", (int) smp_num_cpus);
        printk("Boot cpu address %2X\n", boot_cpu_addr);
}

/*
 *      Activate a secondary processor.
 */
extern void init_cpu_timer(void);
extern int pfault_init(void);
extern int pfault_token(void);

int __init start_secondary(void *cpuvoid)
{
        /* Setup the cpu */
        cpu_init();
        /* Print info about this processor */
        print_cpu_info(&safe_get_cpu_lowcore(smp_processor_id()).cpu_data);
        /* Wait for completion of smp startup */
        while (!atomic_read(&smp_commenced))
                /* nothing */ ;
        /* init per CPU timer */
        init_cpu_timer();
#ifdef CONFIG_PFAULT
        /* Enable pfault pseudo page faults on this cpu. */
        pfault_init();
#endif
        /* cpu_idle will call schedule for us */
        return cpu_idle(NULL);
}

/*
 * The restart interrupt handler jumps to start_secondary directly
 * without the detour over initialize_secondary. We defined it here
 * so that the linker doesn't complain.
 */
void __init initialize_secondary(void)
{
}

static int __init fork_by_hand(void)
{
        struct pt_regs regs;
        /* don't care about the psw and regs settings since we'll never
           reschedule the forked task. */
        memset(&regs, 0, sizeof(struct pt_regs));
        return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
}

static void __init do_boot_cpu(int cpu)
{
        struct task_struct *idle;
        struct _lowcore    *cpu_lowcore;

        /* We can't use kernel_thread since we must _avoid_ to reschedule
           the child. */
        if (fork_by_hand() < 0)
                panic("failed fork for CPU %d", cpu);

        /*
         * We remove it from the pidhash and the runqueue
         * once we got the process:
         */
        idle = init_task.prev_task;
        if (!idle)
                panic("No idle process for CPU %d", cpu);
        idle->processor = cpu;
        idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */

        del_from_runqueue(idle);
        unhash_process(idle);
        init_tasks[cpu] = idle;

        cpu_lowcore = &get_cpu_lowcore(cpu);
        cpu_lowcore->save_area[15] = idle->thread.ksp;
        cpu_lowcore->kernel_stack = (__u32) idle + 8192;
        __asm__ __volatile__("la    1,%0\n\t"
                             "stctl 0,15,0(1)\n\t"
                             "la    1,%1\n\t"
                             "stam  0,15,0(1)"
                             : "=m" (cpu_lowcore->cregs_save_area[0]),
                               "=m" (cpu_lowcore->access_regs_save_area[0])
                             : : "1", "memory");

        eieio();
        signal_processor(cpu, sigp_restart);
        /* Mark this cpu as online */
        set_bit(cpu, &cpu_online_map);
}

/*
 *      Architecture specific routine called by the kernel just before init is
 *      fired off. This allows the BP to have everything in order [we hope].
 *      At the end of this all the APs will hit the system scheduling and off
 *      we go. Each AP will load the system gdt's and jump through the kernel
 *      init into idle(). At this point the scheduler will one day take over
 *      and give them jobs to do. smp_callin is a standard routine
 *      we use to track CPUs as they power up.
 */
void __init smp_commence(void)
{
        /*
         *      Lets the callins below out of their loop.
         */
        atomic_set(&smp_commenced, 1);
}

/*
 *      Cycle through the processors sending sigp_restart to boot each.
 */
void __init smp_boot_cpus(void)
{
        unsigned long async_stack;
        sigp_ccode   ccode;
        int i;

        /* request the 0x1202 external interrupt */
        if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
                panic("Couldn't request external interrupt 0x1202");
        smp_count_cpus();
        memset(lowcore_ptr, 0, sizeof(lowcore_ptr));

        /*
         *      Initialize the logical to physical CPU number mapping
         */
        print_cpu_info(&safe_get_cpu_lowcore(0).cpu_data);

        for (i = 0; i < smp_num_cpus; i++) {
                lowcore_ptr[i] = (struct _lowcore *)
                        __get_free_page(GFP_KERNEL|GFP_DMA);
                if (lowcore_ptr[i] == NULL)
                        panic("smp_boot_cpus failed to "
                              "allocate prefix memory\n");
                async_stack = __get_free_pages(GFP_KERNEL, 1);
                if (async_stack == 0)
                        panic("smp_boot_cpus failed to allocate "
                              "asyncronous interrupt stack\n");
                memcpy(lowcore_ptr[i], &S390_lowcore, sizeof(struct _lowcore));
                lowcore_ptr[i]->async_stack = async_stack + (2 * PAGE_SIZE);
                /*
                 * Most of the parameters are set up when the cpu is
                 * started up.
                 */
                if (smp_processor_id() == i)
                        set_prefix((u32) lowcore_ptr[i]);
                else {
                        ccode = signal_processor_p((u32)(lowcore_ptr[i]),
                                                   i, sigp_set_prefix);
                        if (ccode)
                                /* if this gets troublesome I'll have to do
                                 * something about it. */
                                printk("ccode %d for cpu %d  returned when "
                                       "setting prefix in smp_boot_cpus not good.\n",
                                       (int) ccode, (int) i);
                        else
                                do_boot_cpu(i);
                }
        }
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(kernel_flag);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_num_cpus);
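
The smp_ctl_set_bit and smp_ctl_clear_bit helpers above never touch the remote control registers directly: they fill in an ec_creg_mask_parms and let smp_ctl_bit_callback apply new = (old & andval) | orval on every CPU, so one callback covers both the set and the clear case. Below is a minimal, self-contained user-space C sketch of just that masking rule; the identifiers such as apply_mask are illustrative and are not part of smp.c.

/* Illustrative sketch only (not from smp.c): the orvals/andvals update rule. */
#include <stdio.h>
#include <stdint.h>

/* Same update smp_ctl_bit_callback performs on each control register image. */
static uint32_t apply_mask(uint32_t old, uint32_t andval, uint32_t orval)
{
        return (old & andval) | orval;
}

int main(void)
{
        uint32_t creg = 0x00000010;     /* stand-in for one control register */

        /* "set bit 3": andval keeps everything, orval contributes the bit,
         * which is the pair smp_ctl_set_bit builds. */
        creg = apply_mask(creg, 0xFFFFFFFF, 1u << 3);
        printf("after set:   %08X\n", (unsigned int) creg);  /* 00000018 */

        /* "clear bit 4": orval contributes nothing, andval drops the bit,
         * which is the pair smp_ctl_clear_bit builds. */
        creg = apply_mask(creg, ~(1u << 4), 0x00000000);
        printf("after clear: %08X\n", (unsigned int) creg);  /* 00000008 */

        return 0;
}

In the kernel code above, the same mask pair is broadcast to the other CPUs through smp_call_function, while __ctl_set_bit and __ctl_clear_bit handle the calling CPU itself.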
