📄 rcupdate.c
}

/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so, and if it hasn't already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
{
	if (rdp->quiescbatch != rcp->cur) {
		/* start new grace period: */
		rdp->qs_pending = 1;
		rdp->passed_quiesc = 0;
		rdp->quiescbatch = rcp->cur;
		return;
	}

	/* Grace period already completed for this cpu?
	 * qs_pending is checked instead of the actual bitmap to avoid
	 * cacheline thrashing.
	 */
	if (!rdp->qs_pending)
		return;

	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period? If no, then exit and wait for the next call.
	 */
	if (!rdp->passed_quiesc)
		return;
	rdp->qs_pending = 0;

	spin_lock(&rcp->lock);
	/*
	 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
	 * during cpu startup. Ignore the quiescent state.
	 */
	if (likely(rdp->quiescbatch == rcp->cur))
		cpu_quiet(rdp->cpu, rcp);

	spin_unlock(&rcp->lock);
}


#ifdef CONFIG_HOTPLUG_CPU

/* warning! helper for rcu_offline_cpu. do not use elsewhere without
 * reviewing the locking requirements; the list it's pulling from has to
 * belong to a cpu which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
				struct rcu_head **tail)
{
	local_irq_disable();
	*this_rdp->nxttail = list;
	if (list)
		this_rdp->nxttail = tail;
	local_irq_enable();
}

static void __rcu_offline_cpu(struct rcu_data *this_rdp,
				struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
	/* if the cpu going offline owns the grace period
	 * we can block indefinitely waiting for it, so flush
	 * it here
	 */
	spin_lock_bh(&rcp->lock);
	if (rcp->cur != rcp->completed)
		cpu_quiet(rdp->cpu, rcp);
	spin_unlock_bh(&rcp->lock);
	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
}

static void rcu_offline_cpu(int cpu)
{
	struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
	struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

	__rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
					&per_cpu(rcu_data, cpu));
	__rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
					&per_cpu(rcu_bh_data, cpu));
	put_cpu_var(rcu_data);
	put_cpu_var(rcu_bh_data);
	tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu);
}

#else

static void rcu_offline_cpu(int cpu)
{
}

#endif
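/*
 * Illustration (not from the original file): rcu_move_batch() above uses
 * the pointer-to-tail-pointer idiom, so appending a donor list needs no
 * empty-list special case.  The demo_* names are hypothetical; this is a
 * minimal userspace sketch of the same technique, kept out of the build
 * with #if 0.
 */
#if 0
#include <stddef.h>

struct demo_node {
	struct demo_node *next;
};

struct demo_list {
	struct demo_node *head;
	struct demo_node **tail;	/* &head while empty, else &last->next */
};

static void demo_list_init(struct demo_list *l)
{
	l->head = NULL;
	l->tail = &l->head;
}

/* Append the donor list [list, *tail) in O(1), mirroring rcu_move_batch(). */
static void demo_splice(struct demo_list *l, struct demo_node *list,
			struct demo_node **tail)
{
	*l->tail = list;	/* hook donor onto current tail (or head) */
	if (list)
		l->tail = tail;	/* adopt the donor's tail pointer */
}
#endif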
/*
 * This does the RCU processing work from tasklet context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
{
	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
		*rdp->donetail = rdp->curlist;
		rdp->donetail = rdp->curtail;
		rdp->curlist = NULL;
		rdp->curtail = &rdp->curlist;
	}

	if (rdp->nxtlist && !rdp->curlist) {
		local_irq_disable();
		rdp->curlist = rdp->nxtlist;
		rdp->curtail = rdp->nxttail;
		rdp->nxtlist = NULL;
		rdp->nxttail = &rdp->nxtlist;
		local_irq_enable();

		/*
		 * start the next batch of callbacks
		 */

		/* determine batch number */
		rdp->batch = rcp->cur + 1;
		/* see the comment and corresponding wmb() in
		 * rcu_start_batch()
		 */
		smp_rmb();

		if (!rcp->next_pending) {
			/* and start it/schedule start if it's a new batch */
			spin_lock(&rcp->lock);
			rcp->next_pending = 1;
			rcu_start_batch(rcp);
			spin_unlock(&rcp->lock);
		}
	}

	rcu_check_quiescent_state(rcp, rdp);
	if (rdp->donelist)
		rcu_do_batch(rdp);
}

static void rcu_process_callbacks(unsigned long unused)
{
	__rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
	__rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
}

static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
	/* This cpu has pending rcu entries and the grace period
	 * for them has completed.
	 */
	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
		return 1;

	/* This cpu has no pending entries, but there are new entries */
	if (!rdp->curlist && rdp->nxtlist)
		return 1;

	/* This cpu has finished callbacks to invoke */
	if (rdp->donelist)
		return 1;

	/* The rcu core waits for a quiescent state from the cpu */
	if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
		return 1;

	/* nothing to do */
	return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
int rcu_pending(int cpu)
{
	return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}
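/*
 * Illustration (not from the original file): the donelist invoked by
 * rcu_do_batch() above is filled by call_rcu() callers.  The usual
 * pattern embeds a struct rcu_head in the protected object and frees
 * the object from the callback, one grace period after it was
 * unpublished.  The demo_* names are hypothetical; this sketch is kept
 * out of the build with #if 0.
 */
#if 0
struct demo_obj {
	int data;
	struct rcu_head rcu;
};

static void demo_free_rcu(struct rcu_head *head)
{
	struct demo_obj *obj = container_of(head, struct demo_obj, rcu);

	kfree(obj);	/* no pre-existing reader can still see obj */
}

static void demo_retire(struct demo_obj *obj)
{
	/* caller has already removed obj from all RCU-visible structures
	 * (e.g. via list_del_rcu()); defer the free past a grace period:
	 */
	call_rcu(&obj->rcu, demo_free_rcu);
}
#endif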
/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);

	return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
}

void rcu_check_callbacks(int cpu, int user)
{
	if (user ||
	    (idle_cpu(cpu) && !in_softirq() &&
				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
		rcu_qsctr_inc(cpu);
		rcu_bh_qsctr_inc(cpu);
	} else if (!in_softirq())
		rcu_bh_qsctr_inc(cpu);
	tasklet_schedule(&per_cpu(rcu_tasklet, cpu));
}

static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
						struct rcu_data *rdp)
{
	memset(rdp, 0, sizeof(*rdp));
	rdp->curtail = &rdp->curlist;
	rdp->nxttail = &rdp->nxtlist;
	rdp->donetail = &rdp->donelist;
	rdp->quiescbatch = rcp->completed;
	rdp->qs_pending = 0;
	rdp->cpu = cpu;
	rdp->blimit = blimit;
}

static void __devinit rcu_online_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

	rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
	rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
	tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
}

static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		rcu_online_cpu(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		rcu_offline_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata rcu_nb = {
	.notifier_call	= rcu_cpu_notify,
};

/*
 * Initializes rcu mechanism.  Assumed to be called early.
 * That is before local timer (SMP) or jiffy timer (uniproc) is setup.
 * Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init rcu_init(void)
{
	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	/* Register notifier for non-boot CPUs */
	register_cpu_notifier(&rcu_nb);
}

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/* Because of FASTCALL declaration of complete, we use this wrapper */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 *
 * If your read-side code is not protected by rcu_read_lock(), do -not-
 * use synchronize_rcu().
 */
void synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished */
	call_rcu(&rcu.head, wakeme_after_rcu);

	/* Wait for it */
	wait_for_completion(&rcu.completion);
}

module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);
EXPORT_SYMBOL_GPL(rcu_batches_completed);
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
EXPORT_SYMBOL_GPL(call_rcu);
EXPORT_SYMBOL_GPL(call_rcu_bh);
EXPORT_SYMBOL_GPL(synchronize_rcu);
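/*
 * Illustration (not from the original file): the classic update-side use
 * of synchronize_rcu() described in the comment above.  Readers run under
 * rcu_read_lock(); the updater publishes a new object, waits for a grace
 * period, then frees the old one.  demo_ptr, struct demo_obj and the
 * demo_* functions are hypothetical, and a single updater (or external
 * update-side locking) is assumed.  Kept out of the build with #if 0.
 */
#if 0
struct demo_obj {
	int data;
};

static struct demo_obj *demo_ptr;

/* reader side: no locks, just a read-side critical section */
static int demo_read(void)
{
	struct demo_obj *p;
	int val = -1;

	rcu_read_lock();
	p = rcu_dereference(demo_ptr);	/* safe snapshot of the pointer */
	if (p)
		val = p->data;
	rcu_read_unlock();
	return val;
}

/* update side: replace, wait for readers, then free */
static void demo_replace(struct demo_obj *newp)
{
	struct demo_obj *old = demo_ptr;	/* updater-side access */

	rcu_assign_pointer(demo_ptr, newp);	/* publish the new object */
	synchronize_rcu();	/* wait out all pre-existing readers */
	kfree(old);		/* nobody can still reference old */
}
#endif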