
📄 rcuclassic.c

📁 Classic RCU implementation from the Linux kernel
💻 C
		rcp->completed = rcp->cur;
		rcu_start_batch(rcp);
	}
}

/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so, and if it hasn't already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
{
	if (rdp->quiescbatch != rcp->cur) {
		/* start new grace period: */
		rdp->qs_pending = 1;
		rdp->passed_quiesc = 0;
		rdp->quiescbatch = rcp->cur;
		return;
	}

	/* Grace period already completed for this cpu?
	 * qs_pending is checked instead of the actual bitmap to avoid
	 * cacheline thrashing.
	 */
	if (!rdp->qs_pending)
		return;

	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period? If no, then exit and wait for the next call.
	 */
	if (!rdp->passed_quiesc)
		return;
	rdp->qs_pending = 0;

	spin_lock(&rcp->lock);
	/*
	 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
	 * during cpu startup. Ignore the quiescent state.
	 */
	if (likely(rdp->quiescbatch == rcp->cur))
		cpu_quiet(rdp->cpu, rcp);
	spin_unlock(&rcp->lock);
}

#ifdef CONFIG_HOTPLUG_CPU

/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing
 * locking requirements, the list it's pulling from has to belong to a cpu
 * which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
				struct rcu_head **tail)
{
	local_irq_disable();
	*this_rdp->nxttail = list;
	if (list)
		this_rdp->nxttail = tail;
	local_irq_enable();
}

static void __rcu_offline_cpu(struct rcu_data *this_rdp,
				struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
	/* if the cpu going offline owns the grace period
	 * we can block indefinitely waiting for it, so flush
	 * it here
	 */
	spin_lock_bh(&rcp->lock);
	if (rcp->cur != rcp->completed)
		cpu_quiet(rdp->cpu, rcp);
	spin_unlock_bh(&rcp->lock);

	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);

	local_irq_disable();
	this_rdp->qlen += rdp->qlen;
	local_irq_enable();
}

static void rcu_offline_cpu(int cpu)
{
	struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
	struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

	__rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
					&per_cpu(rcu_data, cpu));
	__rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
					&per_cpu(rcu_bh_data, cpu));
	put_cpu_var(rcu_data);
	put_cpu_var(rcu_bh_data);
}

#else

static void rcu_offline_cpu(int cpu)
{
}

#endif

/*
 * This does the RCU processing work from softirq context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
{
	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
		*rdp->donetail = rdp->curlist;
		rdp->donetail = rdp->curtail;
		rdp->curlist = NULL;
		rdp->curtail = &rdp->curlist;
	}

	if (rdp->nxtlist && !rdp->curlist) {
		local_irq_disable();
		rdp->curlist = rdp->nxtlist;
		rdp->curtail = rdp->nxttail;
		rdp->nxtlist = NULL;
		rdp->nxttail = &rdp->nxtlist;
		local_irq_enable();

		/*
		 * start the next batch of callbacks
		 */

		/* determine batch number */
		rdp->batch = rcp->cur + 1;
		/* see the comment and corresponding wmb() in
		 * the rcu_start_batch()
		 */
		smp_rmb();

		if (!rcp->next_pending) {
			/* and start it/schedule start if it's a new batch */
			spin_lock(&rcp->lock);
			rcp->next_pending = 1;
			rcu_start_batch(rcp);
			spin_unlock(&rcp->lock);
		}
	}

	rcu_check_quiescent_state(rcp, rdp);
	if (rdp->donelist)
		rcu_do_batch(rdp);
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
	__rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
}

static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
	/* This cpu has pending rcu entries and the grace period
	 * for them has completed.
	 */
	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
		return 1;

	/* This cpu has no pending entries, but there are new entries */
	if (!rdp->curlist && rdp->nxtlist)
		return 1;

	/* This cpu has finished callbacks to invoke */
	if (rdp->donelist)
		return 1;

	/* The rcu core waits for a quiescent state from the cpu */
	if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
		return 1;

	/* nothing to do */
	return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
int rcu_pending(int cpu)
{
	return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);

	return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
}

void rcu_check_callbacks(int cpu, int user)
{
	if (user ||
	    (idle_cpu(cpu) && !in_softirq() &&
				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {

		/*
		 * Get here if this CPU took its interrupt from user
		 * mode or from the idle loop, and if this is not a
		 * nested interrupt.  In this case, the CPU is in
		 * a quiescent state, so count it.
		 *
		 * Also do a memory barrier.  This is needed to handle
		 * the case where writes from a preempt-disable section
		 * of code get reordered into schedule() by this CPU's
		 * write buffer.  The memory barrier makes sure that
		 * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
		 * by other CPUs to happen after any such write.
		 */

		smp_mb();  /* See above block comment. */
		rcu_qsctr_inc(cpu);
		rcu_bh_qsctr_inc(cpu);

	} else if (!in_softirq()) {

		/*
		 * Get here if this CPU did not take its interrupt from
		 * softirq, in other words, if it is not interrupting
		 * a rcu_bh read-side critical section.  This is an _bh
		 * critical section, so count it.  The memory barrier
		 * is needed for the same reason as is the above one.
		 */

		smp_mb();  /* See above block comment. */
		rcu_bh_qsctr_inc(cpu);
	}
	raise_rcu_softirq();
}

static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
						struct rcu_data *rdp)
{
	memset(rdp, 0, sizeof(*rdp));
	rdp->curtail = &rdp->curlist;
	rdp->nxttail = &rdp->nxtlist;
	rdp->donetail = &rdp->donelist;
	rdp->quiescbatch = rcp->completed;
	rdp->qs_pending = 0;
	rdp->cpu = cpu;
	rdp->blimit = blimit;
}

static void __cpuinit rcu_online_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

	rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
	rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}

static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		rcu_online_cpu(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		rcu_offline_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata rcu_nb = {
	.notifier_call	= rcu_cpu_notify,
};

/*
 * Initializes rcu mechanism.  Assumed to be called early.
 * That is before local timer(SMP) or jiffie timer (uniproc) is setup.
 * Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init __rcu_init(void)
{
	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	/* Register notifier for non-boot CPUs */
	register_cpu_notifier(&rcu_nb);
}

module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);
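
For context, below is a minimal, hypothetical sketch of how kernel code uses the classic RCU API that this file implements. Only the rcu_* calls (rcu_read_lock(), rcu_read_unlock(), rcu_dereference(), rcu_assign_pointer(), call_rcu()) are the real kernel API; struct my_data, global_ptr, my_lock, and the helper functions are invented for illustration. Readers run inside rcu_read_lock()/rcu_read_unlock() sections, and updaters hand the old object to call_rcu(), which queues it on the per-CPU nxtlist and has it invoked from rcu_do_batch() once the grace period tracked by rcp->cur/rcp->completed above has elapsed.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical RCU-protected structure, used only for illustration. */
struct my_data {
	int value;
	struct rcu_head rcu;
};

static struct my_data *global_ptr;
static DEFINE_SPINLOCK(my_lock);	/* serializes updaters */

/* Reader side: classic RCU's rcu_read_lock() disables preemption, so this
 * CPU cannot pass through a quiescent state (context switch, user mode,
 * idle) while the pointer is in use, and the object cannot be freed
 * under the reader. */
static int read_value(void)
{
	struct my_data *p;
	int val = 0;

	rcu_read_lock();
	p = rcu_dereference(global_ptr);
	if (p)
		val = p->value;
	rcu_read_unlock();
	return val;
}

/* call_rcu() callback: invoked from rcu_do_batch() once the batch it was
 * queued in (rdp->batch) is no longer after rcp->completed. */
static void free_my_data(struct rcu_head *head)
{
	kfree(container_of(head, struct my_data, rcu));
}

/* Updater side: publish the new version, then let a grace period pass
 * before the old version is reclaimed. */
static int update_value(int new_value)
{
	struct my_data *new_p, *old_p;

	new_p = kmalloc(sizeof(*new_p), GFP_KERNEL);
	if (!new_p)
		return -ENOMEM;
	new_p->value = new_value;

	spin_lock(&my_lock);
	old_p = global_ptr;
	rcu_assign_pointer(global_ptr, new_p);
	spin_unlock(&my_lock);

	if (old_p)
		call_rcu(&old_p->rcu, free_my_data);
	return 0;
}

The design point this illustrates: updaters never block readers; the cost of deferring kfree() until a grace period expires is paid by the per-CPU callback lists and softirq processing shown in the file above.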
