📄 smp.c
字号:
*/
/*
 * TLB flush callback; runs on each remote CPU via smp_call_function().
 * The info argument is unused.
 */
void smp_ptlb_callback(void *info)
{
        local_flush_tlb();
}

/*
 * Flush the TLB on all CPUs: remote ones through the callback above,
 * then the local one directly.
 */
void smp_ptlb_all(void)
{
        smp_call_function(smp_ptlb_callback, NULL, 0, 1);
        local_flush_tlb();
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
typedef struct
{
        __u16 start_ctl;        /* first control register to change */
        __u16 end_ctl;          /* last control register to change */
        __u32 orvals[16];       /* bits to set, indexed by cr number */
        __u32 andvals[16];      /* mask to and with, indexed by cr number */
} ec_creg_mask_parms;

/*
 * callback for setting/clearing control bits
 */
void smp_ctl_bit_callback(void *info) {
        ec_creg_mask_parms *pp;
        u32 cregs[16];
        int i;

        pp = (ec_creg_mask_parms *) info;
        /*
         * Store control registers start_ctl..end_ctl into cregs[]:
         * the "ex" instruction executes the stctl template with its
         * register fields replaced by (start_ctl<<4)+end_ctl at run
         * time, so exactly the requested range is stored.
         */
        asm volatile (" bras 1,0f\n"
                      " stctl 0,0,0(%0)\n"
                      "0: ex %1,0(1)\n"
                      : : "a" (cregs+pp->start_ctl), "a" ((pp->start_ctl<<4) + pp->end_ctl)
                      : "memory", "1" );
        /* apply the per-register and/or masks to the stored values */
        for (i = pp->start_ctl; i <= pp->end_ctl; i++)
                cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
        /* reload the modified range, again via an "ex"-patched lctl */
        asm volatile (" bras 1,0f\n"
                      " lctl 0,0,0(%0)\n"
                      "0: ex %1,0(1)\n"
                      : : "a" (cregs+pp->start_ctl), "a" ((pp->start_ctl<<4) + pp->end_ctl)
                      : "memory", "1" );
        return;
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit) {
        ec_creg_mask_parms parms;

        /* broadcast to the other cpus only once SMP is up */
        if (atomic_read(&smp_commenced) != 0) {
                parms.start_ctl = cr;
                parms.end_ctl = cr;
                parms.orvals[cr] = 1 << bit;
                parms.andvals[cr] = 0xFFFFFFFF;
                smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
        }
        /* and set it locally in any case */
        __ctl_set_bit(cr, bit);
}

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit) {
        ec_creg_mask_parms parms;

        /* broadcast to the other cpus only once SMP is up */
        if (atomic_read(&smp_commenced) != 0) {
                parms.start_ctl = cr;
                parms.end_ctl = cr;
                parms.orvals[cr] = 0x00000000;
                parms.andvals[cr] = ~(1 << bit);
                smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
        }
        /* and clear it locally in any case */
        __ctl_clear_bit(cr, bit);
}

/*
 * Lets check how many CPUs we have.
*/void smp_count_cpus(void){ int curr_cpu; current->processor = 0; smp_num_cpus = 1; cpu_online_map = 1; for (curr_cpu = 0; curr_cpu <= 65535 && smp_num_cpus < max_cpus; curr_cpu++) { if ((__u16) curr_cpu == boot_cpu_addr) continue; __cpu_logical_map[smp_num_cpus] = (__u16) curr_cpu; if (signal_processor(smp_num_cpus, sigp_sense) == sigp_not_operational) continue; smp_num_cpus++; } printk("Detected %d CPU's\n",(int) smp_num_cpus); printk("Boot cpu address %2X\n", boot_cpu_addr);}/* * Activate a secondary processor. */extern void init_100hz_timer(void);extern int pfault_init(void);extern int pfault_token(void);int __init start_secondary(void *cpuvoid){ /* Setup the cpu */ cpu_init(); /* Print info about this processor */ print_cpu_info(&safe_get_cpu_lowcore(smp_processor_id()).cpu_data); /* Wait for completion of smp startup */ while (!atomic_read(&smp_commenced)) /* nothing */ ; /* init per CPU 100 hz timer */ init_100hz_timer();#ifdef CONFIG_PFAULT /* Enable pfault pseudo page faults on this cpu. */ pfault_init();#endif /* cpu_idle will call schedule for us */ return cpu_idle(NULL);}/* * The restart interrupt handler jumps to start_secondary directly * without the detour over initialize_secondary. We defined it here * so that the linker doesn't complain. */void __init initialize_secondary(void){}static int __init fork_by_hand(void){ struct pt_regs regs; /* don't care about the psw and regs settings since we'll never reschedule the forked task. */ memset(®s,0,sizeof(struct pt_regs)); return do_fork(CLONE_VM|CLONE_PID, 0, ®s, 0);}static void __init do_boot_cpu(int cpu){ struct task_struct *idle; struct _lowcore *cpu_lowcore; /* We can't use kernel_thread since we must _avoid_ to reschedule the child. 
*/ if (fork_by_hand() < 0) panic("failed fork for CPU %d", cpu); /* * We remove it from the pidhash and the runqueue * once we got the process: */ idle = prev_task(&init_task); if (!idle) panic("No idle process for CPU %d",cpu); idle->processor = cpu; idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */ del_from_runqueue(idle); unhash_process(idle); init_tasks[cpu] = idle; cpu_lowcore=&get_cpu_lowcore(cpu); cpu_lowcore->save_area[15] = idle->thread.ksp; cpu_lowcore->kernel_stack = (idle->thread.ksp | 8191) + 1; __asm__ __volatile__("la 1,%0\n\t" "stctl 0,15,0(1)\n\t" "la 1,%1\n\t" "stam 0,15,0(1)" : "=m" (cpu_lowcore->cregs_save_area[0]), "=m" (cpu_lowcore->access_regs_save_area[0]) : : "1", "memory"); eieio(); signal_processor(cpu,sigp_restart); /* Mark this cpu as online */ set_bit(cpu, &cpu_online_map);}/* * Architecture specific routine called by the kernel just before init is * fired off. This allows the BP to have everything in order [we hope]. * At the end of this all the APs will hit the system scheduling and off * we go. Each AP will load the system gdt's and jump through the kernel * init into idle(). At this point the scheduler will one day take over * and give them jobs to do. smp_callin is a standard routine * we use to track CPUs as they power up. */void __init smp_commence(void){ /* * Lets the callins below out of their loop. */ atomic_set(&smp_commenced,1);}/* * Cycle through the processors sending sigp_restart to boot each. 
*/
/*
 * Boot all secondary cpus: register the external-call interrupt,
 * count the cpus, allocate a private lowcore (prefix page) and async
 * interrupt stack for each one, then boot each remote cpu.
 */
void __init smp_boot_cpus(void)
{
        unsigned long async_stack;
        sigp_ccode ccode;
        int i;

        /* request the 0x1202 external interrupt */
        if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
                panic("Couldn't request external interrupt 0x1202");
        smp_count_cpus();
        memset(lowcore_ptr, 0, sizeof(lowcore_ptr));

        /*
         * Initialize the logical to physical CPU number mapping
         * and the per-CPU profiling counter/multiplier
         */
        for (i = 0; i < NR_CPUS; i++) {
                prof_counter[i] = 1;
                prof_old_multiplier[i] = 1;
                prof_multiplier[i] = 1;
        }

        print_cpu_info(&safe_get_cpu_lowcore(0).cpu_data);

        for (i = 0; i < smp_num_cpus; i++) {
                /* lowcore must be below 2GB, hence GFP_DMA */
                lowcore_ptr[i] = (struct _lowcore *)
                        __get_free_page(GFP_KERNEL|GFP_DMA);
                if (lowcore_ptr[i] == NULL)
                        panic("smp_boot_cpus failed to "
                              "allocate prefix memory\n");
                /* order-1 allocation: two pages of async stack */
                async_stack = __get_free_pages(GFP_KERNEL,1);
                if (async_stack == 0)
                        panic("smp_boot_cpus failed to allocate "
                              "asyncronous interrupt stack\n");
                /* clone the current lowcore as a starting point */
                memcpy(lowcore_ptr[i], &S390_lowcore, sizeof(struct _lowcore));
                /* stack pointer starts at the top of the two pages */
                lowcore_ptr[i]->async_stack = async_stack + (2 * PAGE_SIZE);
                /*
                 * Most of the parameters are set up when the cpu is
                 * started up.
                 */
                if (smp_processor_id() == i)
                        set_prefix((u32) lowcore_ptr[i]);
                else {
                        /* install the prefix remotely via sigp */
                        ccode = signal_processor_p((u32)(lowcore_ptr[i]),
                                                   i, sigp_set_prefix);
                        if (ccode)
                                /* if this gets troublesome I'll have to do
                                 * something about it. */
                                printk("ccode %d for cpu %d returned when "
                                       "setting prefix in smp_boot_cpus not good.\n",
                                       (int) ccode, (int) i);
                        else
                                do_boot_cpu(i);
                }
        }
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
        /* the multiplier is ignored here; always reports success */
        return 0;
}

/*
 * Local timer interrupt handler. It does both profiling and
 * process statistics/rescheduling.
 *
 * We do profiling in every local tick, statistics/rescheduling
 * happen only every 'profiling multiplier' ticks.
 * The default
 * multiplier is 1 and it can be changed by writing the new multiplier
 * value into /proc/profile.
 */
void smp_local_timer_interrupt(struct pt_regs * regs)
{
        /* remember whether the tick hit user or kernel mode */
        int user = (user_mode(regs) != 0);
        int cpu = smp_processor_id();

        /*
         * The profiling function is SMP safe. (nothing can mess
         * around with "current", and the profiling counters are
         * updated with atomic operations). This is especially
         * useful with a profiling multiplier != 1
         */
        if (!user_mode(regs))
                s390_do_profile(regs->psw.addr);

        /* statistics/rescheduling only every prof_multiplier ticks */
        if (!--prof_counter[cpu]) {
                /*
                 * The multiplier may have changed since the last time we got
                 * to this point as a result of the user writing to
                 * /proc/profile. In this case we need to adjust the APIC
                 * timer accordingly.
                 *
                 * Interrupts are already masked off at this point.
                 */
                prof_counter[cpu] = prof_multiplier[cpu];
                if (prof_counter[cpu] != prof_old_multiplier[cpu]) {
                        /* FIXME setup_APIC_timer(calibration_result/prof_counter[cpu] ); */
                        prof_old_multiplier[cpu] = prof_counter[cpu];
                }

                /*
                 * After doing the above, we need to make like
                 * a normal interrupt - otherwise timer interrupts
                 * ignore the global interrupt lock, which is the
                 * WrongThing (tm) to do.
                 */
                update_process_times(user);
        }
}

/* symbols exported for use by modules */
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(kernel_flag);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_num_cpus);
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -