📄 smp.c
                atomic_set_mask(1<<sig, &lowcore->ext_call_fast);
                ccode = signal_processor(i, sigp_external_call);
        }
}

/*
 * cycles through all the cpus,
 * returns early if info is not NULL & the processor has something
 * of interest to report in the info structure.
 * it returns the next cpu to check if it returns early.
 * i.e. it should be used as follows if you wish to receive info.
 * next_cpu=0;
 * do
 * {
 *         info->cpu=next_cpu;
 *         next_cpu=smp_signal_others(order_code,parameter,1,info);
 *         ... check info here
 * } while(next_cpu<smp_num_cpus)
 *
 * if you are lazy just use it like
 * smp_signal_others(order_code,parameter,1,NULL);
 */
int smp_signal_others(sigp_order_code order_code, u32 parameter,
                      int spin, sigp_info *info)
{
        sigp_ccode ccode;
        u32 dummy;
        u16 i;

        if (info)
                info->intresting = 0;
        for (i = (info ? info->cpu : 0); i < smp_num_cpus; i++) {
                if (smp_processor_id() != i) {
                        do {
                                ccode = signal_processor_ps(
                                        (info ? &info->status : &dummy),
                                        parameter, i, order_code);
                        } while (spin && ccode == sigp_busy);
                        if (info && ccode != sigp_order_code_accepted) {
                                info->intresting = 1;
                                info->cpu = i;
                                info->ccode = ccode;
                                i++;
                                break;
                        }
                }
        }
        return i;
}

/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
        smp_signal_others(sigp_stop, 0, 1, NULL);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        smp_ext_call_async(cpu, ec_schedule);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
        ec_creg_mask_parms parms;

        if (atomic_read(&smp_commenced) != 0) {
                parms.start_ctl = cr;
                parms.end_ctl = cr;
                parms.orvals[cr] = 1 << bit;
                parms.andvals[cr] = 0xFFFFFFFF;
                smp_ext_call_sync_others(ec_set_ctl_masked, &parms);
        }
        __ctl_set_bit(cr, bit);
}

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
        ec_creg_mask_parms parms;

        if (atomic_read(&smp_commenced) != 0) {
                parms.start_ctl = cr;
                parms.end_ctl = cr;
                parms.orvals[cr] = 0x00000000;
                parms.andvals[cr] = ~(1 << bit);
                smp_ext_call_sync_others(ec_set_ctl_masked, &parms);
        }
        __ctl_clear_bit(cr, bit);
}

/*
 * Let's check how many CPUs we have.
 */
void smp_count_cpus(void)
{
        int curr_cpu;

        __cpu_logical_map[0] = boot_cpu_addr;
        current->processor = 0;
        smp_num_cpus = 1;
        for (curr_cpu = 0;
             curr_cpu <= 65535 && smp_num_cpus < max_cpus;
             curr_cpu++) {
                if ((__u16) curr_cpu == boot_cpu_addr)
                        continue;
                __cpu_logical_map[smp_num_cpus] = (__u16) curr_cpu;
                if (signal_processor(smp_num_cpus, sigp_sense) ==
                    sigp_not_operational)
                        continue;
                smp_num_cpus++;
        }
        printk("Detected %d CPUs\n", (int) smp_num_cpus);
        printk("Boot cpu address %2X\n", boot_cpu_addr);
}

/*
 * Activate a secondary processor.
 */
extern void init_100hz_timer(void);

int __init start_secondary(void *cpuvoid)
{
        /* Setup the cpu */
        cpu_init();
        /* Print info about this processor */
        print_cpu_info(&safe_get_cpu_lowcore(smp_processor_id()).cpu_data);
        /* Wait for completion of smp startup */
        while (!atomic_read(&smp_commenced))
                /* nothing */ ;
        /* init per CPU 100 hz timer */
        init_100hz_timer();
        /* cpu_idle will call schedule for us */
        return cpu_idle(NULL);
}

/*
 * The restart interrupt handler jumps to start_secondary directly
 * without the detour over initialize_secondary. We define it here
 * so that the linker doesn't complain.
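/*
 * Illustrative sketch, not part of the original file: the polling
 * pattern described in the smp_signal_others() comment above, written
 * out as a caller that senses every other cpu and reports anything
 * interesting.  The example_sense_others() name is hypothetical and
 * the block is wrapped in #if 0 so it serves as documentation only.
 */
#if 0
static void example_sense_others(void)        /* hypothetical helper */
{
        sigp_info info;
        u16 next_cpu = 0;

        do {
                info.cpu = next_cpu;
                /* spin=1: retry busy cpus until the order is accepted */
                next_cpu = smp_signal_others(sigp_sense, 0, 1, &info);
                if (info.intresting)
                        printk("cpu %d: ccode %d, status %08x\n",
                               (int) info.cpu, (int) info.ccode,
                               info.status);
        } while (next_cpu < smp_num_cpus);
}
#endif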
 */
void __init initialize_secondary(void)
{
}

static int __init fork_by_hand(void)
{
        struct pt_regs regs;
        /* don't care about the psw and regs settings since we'll never
           reschedule the forked task. */
        memset(&regs, 0, sizeof(regs));
        return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
}

static void __init do_boot_cpu(int cpu)
{
        struct task_struct *idle;
        struct _lowcore *cpu_lowcore;

        /* We can't use kernel_thread since we must _avoid_ to
           reschedule the child. */
        if (fork_by_hand() < 0)
                panic("failed fork for CPU %d", cpu);

        /*
         * We remove it from the pidhash and the runqueue
         * once we got the process:
         */
        idle = init_task.prev_task;
        if (!idle)
                panic("No idle process for CPU %d", cpu);
        idle->processor = cpu;
        idle->has_cpu = 1;        /* we schedule the first task manually */

        del_from_runqueue(idle);
        unhash_process(idle);
        init_tasks[cpu] = idle;

        cpu_lowcore = &get_cpu_lowcore(cpu);
        cpu_lowcore->kernel_stack = idle->thread.ksp;
        __asm__ __volatile__("stctl 0,15,%0\n\t"
                             "stam  0,15,%1"
                             : "=m" (cpu_lowcore->cregs_save_area[0]),
                               "=m" (cpu_lowcore->access_regs_save_area[0])
                             : : "memory");

        eieio();
        signal_processor(cpu, sigp_restart);
}

/*
 * Architecture specific routine called by the kernel just before init is
 * fired off. This allows the BP to have everything in order [we hope].
 * At the end of this all the APs will hit the system scheduling and off
 * we go. Each AP will load the system gdt's and jump through the kernel
 * init into idle(). At this point the scheduler will one day take over
 * and give them jobs to do. smp_callin is a standard routine
 * we use to track CPUs as they power up.
 */
void __init smp_commence(void)
{
        /*
         * Lets the callins below out of their loop.
         */
        atomic_set(&smp_commenced, 1);
}

/*
 * Cycle through the processors, sending sigp restart orders to boot each.
 */
void __init smp_boot_cpus(void)
{
        struct _lowcore *curr_lowcore;
        sigp_ccode ccode;
        int i;

        smp_count_cpus();
        memset(lowcore_ptr, 0, sizeof(lowcore_ptr));

        /*
         * Initialize the logical to physical CPU number mapping
         * and the per-CPU profiling counter/multiplier
         */
        for (i = 0; i < NR_CPUS; i++) {
                prof_counter[i] = 1;
                prof_old_multiplier[i] = 1;
                prof_multiplier[i] = 1;
        }

        print_cpu_info(&safe_get_cpu_lowcore(0).cpu_data);

        for (i = 0; i < smp_num_cpus; i++) {
                curr_lowcore = (struct _lowcore *)
                        __get_free_page(GFP_KERNEL|GFP_DMA);
                if (curr_lowcore == NULL) {
                        printk("smp_boot_cpus failed to allocate prefix memory\n");
                        break;
                }
                lowcore_ptr[i] = curr_lowcore;
                memcpy(curr_lowcore, &S390_lowcore, sizeof(struct _lowcore));
                /*
                 * Most of the parameters are set up when the cpu is
                 * started up.
                 */
                if (smp_processor_id() == i)
                        set_prefix((u32) curr_lowcore);
                else {
                        ccode = signal_processor_p((u32)(curr_lowcore),
                                                   i, sigp_set_prefix);
                        if (ccode) {
                                /* if this gets troublesome I'll have to do
                                 * something about it. */
                                printk("ccode %d for cpu %d returned when "
                                       "setting prefix in smp_boot_cpus not good.\n",
                                       (int) ccode, (int) i);
                        } else
                                do_boot_cpu(i);
                }
        }
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

/*
 * Local timer interrupt handler. It does both profiling and
 * process statistics/rescheduling.
 *
 * We do profiling in every local tick, statistics/rescheduling
 * happen only every 'profiling multiplier' ticks. The default
 * multiplier is 1 and it can be changed by writing the new multiplier
 * value into /proc/profile.
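/*
 * Illustrative sketch, not part of the original file: the bring-up
 * ordering implemented above.  In the kernel proper these calls come
 * from the generic smp_init() path; the order matters because every
 * secondary spins in start_secondary() until smp_commence() sets
 * smp_commenced.  The example_bring_up_order() name is hypothetical
 * and the block is wrapped in #if 0, documentation only.
 */
#if 0
static void __init example_bring_up_order(void)  /* hypothetical helper */
{
        smp_boot_cpus();  /* count cpus, set prefixes, fork idlers, sigp_restart APs */
        /* ... each AP now loops on atomic_read(&smp_commenced) ... */
        smp_commence();   /* release the APs into init_100hz_timer()/cpu_idle() */
}
#endif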
 */
void smp_local_timer_interrupt(struct pt_regs *regs)
{
        int user = (user_mode(regs) != 0);
        int cpu = smp_processor_id();

        /*
         * The profiling function is SMP safe. (nothing can mess
         * around with "current", and the profiling counters are
         * updated with atomic operations). This is especially
         * useful with a profiling multiplier != 1
         */
        if (!user_mode(regs))
                s390_do_profile(regs->psw.addr);

        if (!--prof_counter[cpu]) {
                int system = 1 - user;
                struct task_struct *p = current;

                /*
                 * The multiplier may have changed since the last time we got
                 * to this point as a result of the user writing to
                 * /proc/profile. In this case we need to adjust the APIC
                 * timer accordingly.
                 *
                 * Interrupts are already masked off at this point.
                 */
                prof_counter[cpu] = prof_multiplier[cpu];
                if (prof_counter[cpu] != prof_old_multiplier[cpu]) {
                        /* FIXME setup_APIC_timer(calibration_result/prof_counter[cpu]); */
                        prof_old_multiplier[cpu] = prof_counter[cpu];
                }

                /*
                 * After doing the above, we need to make like
                 * a normal interrupt - otherwise timer interrupts
                 * ignore the global interrupt lock, which is the
                 * WrongThing (tm) to do.
                 */
                irq_enter(cpu, 0);
                update_one_process(p, 1, user, system, cpu);
                if (p->pid) {
                        p->counter -= 1;
                        if (p->counter <= 0) {
                                p->counter = 0;
                                p->need_resched = 1;
                        }
                        if (p->nice > 0) {
                                kstat.cpu_nice += user;
                                kstat.per_cpu_nice[cpu] += user;
                        } else {
                                kstat.cpu_user += user;
                                kstat.per_cpu_user[cpu] += user;
                        }
                        kstat.cpu_system += system;
                        kstat.per_cpu_system[cpu] += system;
                }
                irq_exit(cpu, 0);
        }
}
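/*
 * Illustrative sketch, not part of the original file: the
 * decrement-and-reload idiom used by smp_local_timer_interrupt()
 * above.  With prof_multiplier[cpu] == N, the !--prof_counter[cpu]
 * test fires on every Nth local tick, so accounting runs N times
 * less often than profiling.  The example_every_nth_tick() name is
 * hypothetical and the block is wrapped in #if 0, documentation only.
 */
#if 0
static int example_every_nth_tick(unsigned int *counter, unsigned int n)
{
        if (--*counter)
                return 0;       /* not the Nth tick yet */
        *counter = n;           /* reload for the next period */
        return 1;               /* run the per-period work now */
}
#endif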