rcuclassic.c
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              Documentation/RCU
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
        .cur = -300,
        .completed = -300,
        .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
        .cpumask = CPU_MASK_NONE,
};
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
        .cur = -300,
        .completed = -300,
        .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
        .cpumask = CPU_MASK_NONE,
};

DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };

static int blimit = 10;
static int qhimark = 10000;
static int qlowmark = 100;

#ifdef CONFIG_SMP
static void force_quiescent_state(struct rcu_data *rdp,
                        struct rcu_ctrlblk *rcp)
{
        int cpu;
        cpumask_t cpumask;

        set_need_resched();
        if (unlikely(!rcp->signaled)) {
                rcp->signaled = 1;
                /*
                 * Don't send IPI to itself. With irqs disabled,
                 * rdp->cpu is the current cpu.
                 *
                 * cpu_online_map is updated by the _cpu_down()
                 * using __stop_machine(). Since we're in irqs disabled
                 * section, __stop_machine() is not executing, hence
                 * the cpu_online_map is stable.
                 *
                 * However, a cpu might have been offlined _just_ before
                 * we disabled irqs while entering here.
                 * And rcu subsystem might not yet have handled the CPU_DEAD
                 * notification, leading to the offlined cpu's bit
                 * being set in the rcp->cpumask.
                 *
                 * Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent
                 * sending smp_send_reschedule() to an offlined CPU.
                 */
                cpus_and(cpumask, rcp->cpumask, cpu_online_map);
                cpu_clear(rdp->cpu, cpumask);
                for_each_cpu_mask_nr(cpu, cpumask)
                        smp_send_reschedule(cpu);
        }
}
#else
static inline void force_quiescent_state(struct rcu_data *rdp,
                        struct rcu_ctrlblk *rcp)
{
        set_need_resched();
}
#endif

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void call_rcu(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;
        struct rcu_data *rdp;

        head->func = func;
        head->next = NULL;
        local_irq_save(flags);
        rdp = &__get_cpu_var(rcu_data);
        *rdp->nxttail = head;
        rdp->nxttail = &head->next;
        if (unlikely(++rdp->qlen > qhimark)) {
                rdp->blimit = INT_MAX;
                force_quiescent_state(rdp, &rcu_ctrlblk);
        }
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
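/*
 * Illustrative sketch (not part of the original file): the canonical
 * update-side pattern for call_rcu() described in the kernel-doc above,
 * along the lines of Documentation/RCU/whatisRCU.txt. "struct foo",
 * "gbl_foo", "foo_reclaim" and "foo_update_a" are hypothetical names;
 * update-side serialization and error handling are omitted for brevity,
 * and kmalloc()/kfree() would additionally need <linux/slab.h>.
 */
#if 0
struct foo {
        int a;
        struct rcu_head rcu;
};

static struct foo *gbl_foo;

/* Runs after a grace period: no reader can still reference the old copy. */
static void foo_reclaim(struct rcu_head *rcu)
{
        struct foo *fp = container_of(rcu, struct foo, rcu);

        kfree(fp);
}

/* Copy, modify, publish, then defer freeing of the old version. */
static void foo_update_a(int new_a)
{
        struct foo *new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
        struct foo *old_fp = gbl_foo;

        *new_fp = *old_fp;
        new_fp->a = new_a;
        rcu_assign_pointer(gbl_foo, new_fp);
        call_rcu(&old_fp->rcu, foo_reclaim);
}
#endif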
/**
 * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context,
 *  OR
 *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 * These may be nested.
 */
void call_rcu_bh(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;
        struct rcu_data *rdp;

        head->func = func;
        head->next = NULL;
        local_irq_save(flags);
        rdp = &__get_cpu_var(rcu_bh_data);
        *rdp->nxttail = head;
        rdp->nxttail = &head->next;

        if (unlikely(++rdp->qlen > qhimark)) {
                rdp->blimit = INT_MAX;
                force_quiescent_state(rdp, &rcu_bh_ctrlblk);
        }

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
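/*
 * Illustrative sketch (not part of the original file): a process-context
 * reader matching call_rcu_bh(), per the kernel-doc above. Reuses the
 * hypothetical "gbl_foo" from the call_rcu() sketch.
 */
#if 0
static int foo_get_a(void)
{
        int ret;

        /* Disables softirqs, so a _bh grace period cannot complete here. */
        rcu_read_lock_bh();
        ret = rcu_dereference(gbl_foo)->a;
        rcu_read_unlock_bh();
        return ret;
}
#endif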
/*
 * Return the number of RCU batches processed thus far. Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
        return rcu_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Return the number of RCU bh batches processed thus far. Useful
 * for debug and statistics.
 */
long rcu_batches_completed_bh(void)
{
        return rcu_bh_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/* Raises the softirq for processing rcu_callbacks. */
static inline void raise_rcu_softirq(void)
{
        raise_softirq(RCU_SOFTIRQ);
        /*
         * The smp_mb() here is required to ensure that this cpu's
         * __rcu_process_callbacks() reads the most recently updated
         * value of rcu->cur.
         */
        smp_mb();
}

/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
        struct rcu_head *next, *list;
        int count = 0;

        list = rdp->donelist;
        while (list) {
                next = list->next;
                prefetch(next);
                list->func(list);
                list = next;
                if (++count >= rdp->blimit)
                        break;
        }
        rdp->donelist = list;

        local_irq_disable();
        rdp->qlen -= count;
        local_irq_enable();

        if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
                rdp->blimit = blimit;

        if (!rdp->donelist)
                rdp->donetail = &rdp->donelist;
        else
                raise_rcu_softirq();
}

/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch. The start is not broadcasted to
 *   all cpus, they must pick this up by comparing rcp->cur with
 *   rdp->quiescbatch. All cpus are recorded in the
 *   rcu_ctrlblk.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcasted, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running. The
 *   following calls check if there was a quiescent state since the
 *   beginning of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
 *   the bitmap is empty, then the grace period is completed.
 *   rcu_check_quiescent_state calls rcu_start_batch() to start the next grace
 *   period (if necessary).
 */

/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_ctrlblk.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
        if (rcp->next_pending &&
                        rcp->completed == rcp->cur) {
                rcp->next_pending = 0;
                /*
                 * next_pending == 0 must be visible in
                 * __rcu_process_callbacks() before it can see new value of cur.
                 */
                smp_wmb();
                rcp->cur++;

                /*
                 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
                 * barrier. Otherwise it can cause tickless idle CPUs to be
                 * included in rcp->cpumask, which will extend grace periods
                 * unnecessarily.
                 */
                smp_mb();
                cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);

                rcp->signaled = 0;
        }
}

/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu. Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
        cpu_clear(cpu, rcp->cpumask);
        if (cpus_empty(rcp->cpumask)) {
                /* batch completed ! */
                rcp->completed = rcp->cur;
                rcu_start_batch(rcp);
        }
}
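/*
 * Illustrative sketch (not part of the original file): how a cpu picks up
 * the non-broadcast start of a grace period, per the "Grace period
 * handling" comment above. A simplified rendering of the
 * rcu_check_quiescent_state() logic that comment describes, with the
 * rcp->lock handling around cpu_quiet() elided.
 */
#if 0
static void check_quiescent_state_sketch(struct rcu_ctrlblk *rcp,
                                         struct rcu_data *rdp)
{
        if (rdp->quiescbatch != rcp->cur) {
                /* First call: notice that a new grace period is running. */
                rdp->qs_pending = 1;
                rdp->passed_quiesc = 0;
                rdp->quiescbatch = rcp->cur;
                return;
        }

        /* Later calls: report a quiescent state seen since the start. */
        if (!rdp->qs_pending || !rdp->passed_quiesc)
                return;
        rdp->qs_pending = 0;
        cpu_quiet(rdp->cpu, rcp);       /* real code takes rcp->lock first */
}
#endif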