rcupreempt.c
	struct rcu_head **tail = &list;

	/*
	 * Remove all callbacks from the newly dead CPU, retaining order.
	 * Otherwise rcu_barrier() will fail.
	 */
	spin_lock_irqsave(&rdp->lock, flags);
	rcu_offline_cpu_enqueue(rdp->donelist, rdp->donetail, list, tail);
	for (i = GP_STAGES - 1; i >= 0; i--)
		rcu_offline_cpu_enqueue(rdp->waitlist[i], rdp->waittail[i],
					list, tail);
	rcu_offline_cpu_enqueue(rdp->nextlist, rdp->nexttail, list, tail);
	rcu_offline_cpu_enqueue(rdp->waitschedlist, rdp->waitschedtail,
				schedlist, schedtail);
	rcu_offline_cpu_enqueue(rdp->nextschedlist, rdp->nextschedtail,
				schedlist, schedtail);
	rdp->rcu_sched_sleeping = 0;
	spin_unlock_irqrestore(&rdp->lock, flags);
	rdp->waitlistcount = 0;

	/* Disengage the newly dead CPU from the grace-period computation. */

	spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
	rcu_check_mb(cpu);
	if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
		smp_mb();  /* Subsequent counter accesses must see new value */
		per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
		smp_mb();  /* Subsequent RCU read-side critical sections */
			   /*  seen -after- acknowledgement. */
	}

	RCU_DATA_ME()->rcu_flipctr[0] += RCU_DATA_CPU(cpu)->rcu_flipctr[0];
	RCU_DATA_ME()->rcu_flipctr[1] += RCU_DATA_CPU(cpu)->rcu_flipctr[1];
	RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0;
	RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0;

	cpu_clear(cpu, rcu_cpu_online_map);

	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);

	/*
	 * Place the removed callbacks on the current CPU's queue.
	 * Make them all start a new grace period: simple approach,
	 * in theory could starve a given set of callbacks, but
	 * you would need to be doing some serious CPU hotplugging
	 * to make this happen.  If this becomes a problem, adding
	 * a synchronize_rcu() to the hotplug path would be a simple
	 * fix.
	 */
	local_irq_save(flags);	/* disable preempt till we know what lock. */
	rdp = RCU_DATA_ME();
	spin_lock(&rdp->lock);
	*rdp->nexttail = list;
	if (list)
		rdp->nexttail = tail;
	*rdp->nextschedtail = schedlist;
	if (schedlist)
		rdp->nextschedtail = schedtail;
	spin_unlock_irqrestore(&rdp->lock, flags);
}

#else /* #ifdef CONFIG_HOTPLUG_CPU */

void rcu_offline_cpu(int cpu)
{
}

#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
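/*
 * For reference: the rcu_offline_cpu_enqueue() helper used above is
 * defined earlier in this file, outside this excerpt.  The following is
 * a minimal sketch of its likely shape, inferred from its uses here
 * rather than copied from the definition: splice the source list onto
 * the destination tail, preserving callback order, and leave the source
 * list empty.
 *
 *	#define rcu_offline_cpu_enqueue(srclist, srctail, dstlist, dsttail) \
 *	do { \
 *		*dsttail = srclist; \
 *		if (srclist != NULL) { \
 *			dsttail = srctail; \
 *			srclist = NULL; \
 *			srctail = &srclist; \
 *		} \
 *	} while (0)
 */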
void __cpuinit rcu_online_cpu(int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp;

	spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
	cpu_set(cpu, rcu_cpu_online_map);
	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);

	/*
	 * The rcu_sched grace-period processing might have bypassed
	 * this CPU, given that it was not in the rcu_cpu_online_map
	 * when the grace-period scan started.  This means that the
	 * grace-period task might sleep.  So make sure that if this
	 * should happen, the first callback posted to this CPU will
	 * wake up the grace-period task if need be.
	 */
	rdp = RCU_DATA_CPU(cpu);
	spin_lock_irqsave(&rdp->lock, flags);
	rdp->rcu_sched_sleeping = 1;
	spin_unlock_irqrestore(&rdp->lock, flags);
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
	unsigned long flags;
	struct rcu_head *next, *list;
	struct rcu_data *rdp;

	local_irq_save(flags);
	rdp = RCU_DATA_ME();
	spin_lock(&rdp->lock);
	list = rdp->donelist;
	if (list == NULL) {
		spin_unlock_irqrestore(&rdp->lock, flags);
		return;
	}
	rdp->donelist = NULL;
	rdp->donetail = &rdp->donelist;
	RCU_TRACE_RDP(rcupreempt_trace_done_remove, rdp);
	spin_unlock_irqrestore(&rdp->lock, flags);
	while (list) {
		next = list->next;
		list->func(list);
		list = next;
		RCU_TRACE_ME(rcupreempt_trace_invoke);
	}
}

void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = RCU_DATA_ME();
	spin_lock(&rdp->lock);
	__rcu_advance_callbacks(rdp);
	*rdp->nexttail = head;
	rdp->nexttail = &head->next;
	RCU_TRACE_RDP(rcupreempt_trace_next_add, rdp);
	spin_unlock_irqrestore(&rdp->lock, flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
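/*
 * Illustrative usage sketch, not part of the original file (struct foo,
 * foo_release(), and foo_del() are hypothetical names): the canonical
 * call_rcu() pattern.  An updater unlinks an element while readers may
 * still be traversing it, then lets call_rcu() run the reclaim callback
 * only after a grace period has elapsed.
 */
struct foo {
	struct list_head list;
	int data;
	struct rcu_head rcu;
};

static void foo_release(struct rcu_head *rcu)
{
	struct foo *fp = container_of(rcu, struct foo, rcu);

	kfree(fp);
}

/* Caller holds the update-side lock protecting the list. */
static void foo_del(struct foo *fp)
{
	list_del_rcu(&fp->list);	 /* new readers can no longer find fp, */
	call_rcu(&fp->rcu, foo_release); /* free it once old readers are done. */
}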
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;
	int wake_gp = 0;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = RCU_DATA_ME();
	spin_lock(&rdp->lock);
	*rdp->nextschedtail = head;
	rdp->nextschedtail = &head->next;
	if (rdp->rcu_sched_sleeping) {

		/* Grace-period processing might be sleeping... */

		rdp->rcu_sched_sleeping = 0;
		wake_gp = 1;
	}
	spin_unlock_irqrestore(&rdp->lock, flags);
	if (wake_gp) {

		/* Wake up grace-period processing, unless someone beat us. */

		spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
		if (rcu_ctrlblk.sched_sleep != rcu_sched_sleeping)
			wake_gp = 0;
		rcu_ctrlblk.sched_sleep = rcu_sched_not_sleeping;
		spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
		if (wake_gp)
			wake_up_interruptible(&rcu_ctrlblk.sched_wq);
	}
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Wait until all currently running preempt_disable() code segments
 * (including hardware-irq-disable segments) complete.  Note that
 * in -rt this does -not- necessarily result in all currently executing
 * interrupt -handlers- having completed.
 */
synchronize_rcu_xxx(__synchronize_sched, call_rcu_sched)
EXPORT_SYMBOL_GPL(__synchronize_sched);

/*
 * kthread function that manages call_rcu_sched grace periods.
 */
static int rcu_sched_grace_period(void *arg)
{
	int couldsleep;		/* might sleep after current pass. */
	int couldsleepnext = 0;	/* might sleep after next pass. */
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp;
	int ret;

	/*
	 * Each pass through the following loop handles one
	 * rcu_sched grace period cycle.
	 */
	do {
		/* Save each CPU's current state. */

		for_each_online_cpu(cpu) {
			dyntick_save_progress_counter_sched(cpu);
			save_qsctr_sched(cpu);
		}

		/*
		 * Sleep for about an RCU grace-period's worth to
		 * allow better batching and to consume less CPU.
		 */
		schedule_timeout_interruptible(RCU_SCHED_BATCH_TIME);

		/*
		 * If there was nothing to do last time, prepare to
		 * sleep at the end of the current grace period cycle.
		 */
		couldsleep = couldsleepnext;
		couldsleepnext = 1;
		if (couldsleep) {
			spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
			rcu_ctrlblk.sched_sleep = rcu_sched_sleep_prep;
			spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
		}

		/*
		 * Wait on each CPU in turn to have either visited
		 * a quiescent state or been in dynticks-idle mode.
		 */
		for_each_online_cpu(cpu) {
			while (rcu_qsctr_inc_needed(cpu) &&
			       rcu_qsctr_inc_needed_dyntick(cpu)) {
				/* resched_cpu(cpu); @@@ */
				schedule_timeout_interruptible(1);
			}
		}

		/* Advance callbacks for each CPU. */

		for_each_online_cpu(cpu) {
			rdp = RCU_DATA_CPU(cpu);
			spin_lock_irqsave(&rdp->lock, flags);

			/*
			 * We are running on this CPU irq-disabled, so no
			 * CPU can go offline until we re-enable irqs.
			 * The current CPU might have already gone
			 * offline (between the for_each_online_cpu and
			 * the spin_lock_irqsave), but in that case all its
			 * callback lists will be empty, so no harm done.
			 *
			 * Advance the callbacks!  We share normal RCU's
			 * donelist, since callbacks are invoked the
			 * same way in either case.
			 */
			if (rdp->waitschedlist != NULL) {
				*rdp->donetail = rdp->waitschedlist;
				rdp->donetail = rdp->waitschedtail;

				/*
				 * Next rcu_check_callbacks() will
				 * do the required raise_softirq().
				 */
			}
			if (rdp->nextschedlist != NULL) {
				rdp->waitschedlist = rdp->nextschedlist;
				rdp->waitschedtail = rdp->nextschedtail;
				couldsleep = 0;
				couldsleepnext = 0;
			} else {
				rdp->waitschedlist = NULL;
				rdp->waitschedtail = &rdp->waitschedlist;
			}
			rdp->nextschedlist = NULL;
			rdp->nextschedtail = &rdp->nextschedlist;

			/* Mark sleep intention. */

			rdp->rcu_sched_sleeping = couldsleep;

			spin_unlock_irqrestore(&rdp->lock, flags);
		}

		/* If we saw callbacks on the last scan, go deal with them. */

		if (!couldsleep)
			continue;

		/* Attempt to block... */

		spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
		if (rcu_ctrlblk.sched_sleep != rcu_sched_sleep_prep) {

			/*
			 * Someone posted a callback after we scanned.
			 * Go take care of it.
			 */
			spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
			couldsleepnext = 0;
			continue;
		}

		/* Block until the next person posts a callback. */

		rcu_ctrlblk.sched_sleep = rcu_sched_sleeping;
		spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
		ret = 0;
		__wait_event_interruptible(rcu_ctrlblk.sched_wq,
			rcu_ctrlblk.sched_sleep != rcu_sched_sleeping,
			ret);

		/*
		 * Signals would prevent us from sleeping, and we cannot
		 * do much with them in any case.  So flush them.
		 */
		if (ret)
			flush_signals(current);
		couldsleepnext = 0;

	} while (!kthread_should_stop());

	return (0);
}

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  Assumes that notifiers would take care of handling any
 * outstanding requests from the RCU core.
 *
 * This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	return (rdp->donelist != NULL ||
		!!rdp->waitlistcount ||
		rdp->nextlist != NULL ||
		rdp->nextschedlist != NULL ||
		rdp->waitschedlist != NULL);
}

int rcu_pending(int cpu)
{
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	/* The CPU has at least one callback queued somewhere. */

	if (rdp->donelist != NULL ||
	    !!rdp->waitlistcount ||
	    rdp->nextlist != NULL ||
	    rdp->nextschedlist != NULL ||
	    rdp->waitschedlist != NULL)
		return 1;

	/* The RCU core needs an acknowledgement from this CPU. */

	if ((per_cpu(rcu_flip_flag, cpu) == rcu_flipped) ||
	    (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed))
		return 1;

	/* This CPU has fallen behind the global grace-period number. */

	if (rdp->completed != rcu_ctrlblk.completed)
		return 1;

	/* Nothing needed from this CPU. */

	return 0;
}
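/*
 * Illustrative sketch; example_rcu_tick() is a hypothetical helper, and
 * the real consumer is rcu_check_callbacks(), earlier in this file and
 * outside this excerpt.  rcu_pending() is intended to be cheap enough
 * to run on every timer tick, where a nonzero return is used to raise
 * RCU_SOFTIRQ and thereby run rcu_process_callbacks() above.
 */
static void example_rcu_tick(int cpu)
{
	if (rcu_pending(cpu))
		raise_softirq(RCU_SOFTIRQ);
}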
static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		rcu_online_cpu(cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		rcu_offline_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata rcu_nb = {
	.notifier_call = rcu_cpu_notify,
};

void __init __rcu_init(void)
{
	int cpu;
	int i;
	struct rcu_data *rdp;

	printk(KERN_NOTICE "Preemptible RCU implementation.\n");
	for_each_possible_cpu(cpu) {
		rdp = RCU_DATA_CPU(cpu);
		spin_lock_init(&rdp->lock);
		rdp->completed = 0;
		rdp->waitlistcount = 0;
		rdp->nextlist = NULL;
		rdp->nexttail = &rdp->nextlist;
		for (i = 0; i < GP_STAGES; i++) {
			rdp->waitlist[i] = NULL;
			rdp->waittail[i] = &rdp->waitlist[i];
		}
		rdp->donelist = NULL;
		rdp->donetail = &rdp->donelist;
		rdp->rcu_flipctr[0] = 0;
		rdp->rcu_flipctr[1] = 0;
		rdp->nextschedlist = NULL;
		rdp->nextschedtail = &rdp->nextschedlist;
		rdp->waitschedlist = NULL;
		rdp->waitschedtail = &rdp->waitschedlist;
		rdp->rcu_sched_sleeping = 0;
	}
	register_cpu_notifier(&rcu_nb);

	/*
	 * We don't need protection against CPU hotplug here since
	 *
	 * a) If a CPU comes online while we are iterating over the
	 *    cpu_online_map below, we would only end up making a
	 *    duplicate call to rcu_online_cpu(), which sets the
	 *    corresponding CPU's bit in the rcu_cpu_online_map.
	 *
	 * b) A CPU cannot go offline at this point in time since the
	 *    user does not have access to the sysfs interface, nor do
	 *    we suspend the system.
	 */
	for_each_online_cpu(cpu)
		rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)cpu);

	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}

/*
 * Late-boot-time RCU initialization that must wait until after the
 * scheduler has been initialized.
 */
void __init rcu_init_sched(void)
{
	rcu_sched_grace_period_task = kthread_run(rcu_sched_grace_period,
						  NULL,
						  "rcu_sched_grace_period");
	WARN_ON(IS_ERR(rcu_sched_grace_period_task));
}

#ifdef CONFIG_RCU_TRACE
long *rcupreempt_flipctr(int cpu)
{
	return &RCU_DATA_CPU(cpu)->rcu_flipctr[0];
}
EXPORT_SYMBOL_GPL(rcupreempt_flipctr);

int rcupreempt_flip_flag(int cpu)
{
	return per_cpu(rcu_flip_flag, cpu);
}
EXPORT_SYMBOL_GPL(rcupreempt_flip_flag);

int rcupreempt_mb_flag(int cpu)
{
	return per_cpu(rcu_mb_flag, cpu);
}
EXPORT_SYMBOL_GPL(rcupreempt_mb_flag);

char *rcupreempt_try_flip_state_name(void)
{
	return rcu_try_flip_state_names[rcu_ctrlblk.rcu_try_flip_state];
}
EXPORT_SYMBOL_GPL(rcupreempt_try_flip_state_name);

struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu)
{
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	return &rdp->trace;
}
EXPORT_SYMBOL_GPL(rcupreempt_trace_cpu);

#endif /* #ifdef CONFIG_RCU_TRACE */
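/*
 * Appended illustrative sketch, not part of the original file (struct
 * bar_table, bar_table, bar_table_lookup(), and bar_table_replace() are
 * hypothetical names): how the sched variant above is used.  Readers
 * rely on preempt_disable() as the read-side marker, so the
 * __synchronize_sched() defined earlier suffices to wait them out
 * before the old structure is freed.
 */
struct bar_table {
	int nentries;
	int entries[0];
};

static struct bar_table *bar_table;

static int bar_table_lookup(int i)
{
	struct bar_table *t;
	int ret = -1;

	preempt_disable();		/* read-side critical section begins */
	t = rcu_dereference(bar_table);
	if (t != NULL && i < t->nentries)
		ret = t->entries[i];
	preempt_enable();		/* read-side critical section ends */
	return ret;
}

static void bar_table_replace(struct bar_table *newtab)
{
	struct bar_table *oldtab = bar_table;

	rcu_assign_pointer(bar_table, newtab);	/* publish the new table */
	__synchronize_sched();			/* wait out preempt-off readers */
	kfree(oldtab);		/* no reader can still hold a reference */
}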