⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 rcutorture.c

📁 Kernel code of linux kernel
💻 C
📖 第 1 页 / 共 3 页
字号:
	/*
	 * (Continued from the previous chunk -- presumably the tail of
	 * rcu_bh_torture_synchronize(): post a call_rcu_bh() callback and
	 * block until it runs, i.e. until an rcu_bh grace period elapses.
	 * TODO confirm against the preceding page.)
	 */
	call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
	wait_for_completion(&rcu.completion);
}

/*
 * Torture-ops vector for the rcu_bh flavor: asynchronous deferred frees
 * via rcu_bh_torture_deferred_free(), callbacks flushed with
 * rcu_barrier_bh().  Readers may also run from irq context
 * (.irqcapable = 1 enables the timer-based reader).
 */
static struct rcu_torture_ops rcu_bh_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.readdelay = rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferredfree = rcu_bh_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.cb_barrier = rcu_barrier_bh,
	.stats = NULL,
	.irqcapable = 1,
	.name = "rcu_bh"
};

/*
 * Like rcu_bh_ops, but exercises only the synchronous grace-period path:
 * old elements go through rcu_sync_torture_deferred_free(), so no
 * callback barrier is required (.cb_barrier = NULL).
 */
static struct rcu_torture_ops rcu_bh_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.readdelay = rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.cb_barrier = NULL,
	.stats = NULL,
	.irqcapable = 1,
	.name = "rcu_bh_sync"
};

/*
 * Definitions for srcu torture testing.
 */

/* Single SRCU domain shared by all srcu torture readers and updaters. */
static struct srcu_struct srcu_ctl;

/* Set up the SRCU domain, then perform the common sync-test init. */
static void srcu_torture_init(void)
{
	init_srcu_struct(&srcu_ctl);
	rcu_sync_torture_init();
}

/* Wait for in-flight readers to drain, then release the SRCU domain. */
static void srcu_torture_cleanup(void)
{
	synchronize_srcu(&srcu_ctl);
	cleanup_srcu_struct(&srcu_ctl);
}

/*
 * Enter an SRCU read-side critical section.  The returned index must be
 * handed back to srcu_torture_read_unlock().
 */
static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

/*
 * Reader-side delay for SRCU: usually returns immediately, but sleeps
 * for longdelay ticks when the random draw modulo
 * (nrealreaders * 2 * longdelay * uspertick) lands on zero, so that an
 * occasional reader spans entire grace periods.
 */
static void srcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time.
 */
	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
}

/* Exit the SRCU read-side critical section opened with index @idx. */
static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

/* Snapshot of the SRCU grace-period ("batch") counter. */
static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

/* Synchronous grace-period wait for the srcu flavor. */
static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

/*
 * Format SRCU statistics into @page: the current flip index, then for
 * every possible CPU that CPU's reader counts for the inactive (!idx)
 * and active (idx) sides.  Returns the number of characters written.
 */
static int srcu_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}

/*
 * Torture-ops vector for SRCU.  Synchronous-only deferred free, with
 * per-domain stats.  .irqcapable is left zero, so the timer-based irq
 * reader is never armed for this flavor.
 */
static struct rcu_torture_ops srcu_ops = {
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.readdelay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.completed = srcu_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.cb_barrier = NULL,
	.stats = srcu_torture_stats,
	.name = "srcu"
};

/*
 * Definitions for sched torture testing.
 */

/* sched-RCU "read-side critical section" is simply preemption disabled. */
static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

/* No grace-period counter is exposed for sched-RCU in this file. */
static int sched_torture_completed(void)
{
	return 0;
}

/* Queue @p to be freed after a sched-RCU grace period. */
static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static void sched_torture_synchronize(void)
{
	synchronize_sched();
}

/*
 * Torture-ops vector for sched-RCU: asynchronous deferred frees through
 * call_rcu_sched(), callbacks flushed via rcu_barrier_sched(), and the
 * irq-context reader enabled.
 */
static struct rcu_torture_ops sched_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = sched_torture_read_lock,
	.readdelay = rcu_read_delay,  /* just reuse rcu's version.
 */
	.readunlock = sched_torture_read_unlock,
	.completed = sched_torture_completed,
	.deferredfree = rcu_sched_torture_deferred_free,
	.sync = sched_torture_synchronize,
	.cb_barrier = rcu_barrier_sched,
	.stats = NULL,
	.irqcapable = 1,
	.name = "sched"
};

/*
 * Sched-RCU variant that exercises only the synchronous grace-period
 * path (rcu_sync_torture_deferred_free), so no callback barrier is
 * needed.
 */
static struct rcu_torture_ops sched_ops_sync = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = sched_torture_read_lock,
	.readdelay = rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock = sched_torture_read_unlock,
	.completed = sched_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = sched_torture_synchronize,
	.cb_barrier = NULL,
	.stats = NULL,
	.name = "sched_sync"
};

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 *
 * @arg is unused.  Returns 0 once the kthread is stopped.
 */
static int
rcu_torture_writer(void *arg)
{
	int i;
	long oldbatch = rcu_batches_completed();	/* NOTE(review): assigned but never read -- candidate for removal. */
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);	/* lowest priority; don't starve real work */

	do {
		schedule_timeout_uninterruptible(1);
		if ((rp = rcu_torture_alloc()) == NULL)
			continue;	/* allocation failed: retry next tick */
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);	/* random 0..1023us delay */
		old_rp = rcu_torture_current;
		rp->rtort_mbtest = 1;
		/* Publish the new element to all readers. */
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb();	/* NOTE(review): rcu_assign_pointer() should already order this publication -- confirm whether this barrier is needed. */
		if (old_rp) {
			/*
			 * Advance the displaced element one pipeline stage,
			 * clamping the histogram index to the last bucket,
			 * and hand it to the flavor's deferred-free path.
			 */
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			cur_ops->deferredfree(old_rp);
		}
		rcu_torture_current_version++;
		oldbatch = cur_ops->completed();
		rcu_stutter_wait();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	/* Park here until kthread_stop() reaps us. */
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);	/* lowest priority */

	do {
		/* Sleep 1-10 ticks, spin up to ~1ms, then force a
		 * synchronous grace period via the flavor's sync op. */
		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
		udelay(rcu_random(&rand) & 0x3ff);
		cur_ops->sync();
		rcu_stutter_wait();
	} while (!kthread_should_stop() && !fullstop);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
	/* Park here until kthread_stop() reaps us. */
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(unsigned long unused)
{
	int idx;
	int completed;
	static DEFINE_RCU_RANDOM(rand);
	static DEFINE_SPINLOCK(rand_lock);	/* serializes rand state and the timer counter */
	struct rcu_torture *p;
	int pipe_count;

	idx = cur_ops->readlock();
	completed = cur_ops->completed();
	p = rcu_dereference(rcu_torture_current);
	if (p == NULL) {
		/* Leave because rcu_torture_writer is not yet underway */
		cur_ops->readunlock(idx);
		return;
	}
	/* rtort_mbtest == 0 means the writer's publication was not seen
	 * in order: count it as a memory-barrier error. */
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	spin_lock(&rand_lock);
	cur_ops->readdelay(&rand);
	n_rcu_torture_timers++;
	spin_unlock(&rand_lock);
	preempt_disable();
	/* Record the observed pipeline stage, clamped to the overflow
	 * bucket. */
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	++__get_cpu_var(rcu_torture_count)[pipe_count];
	/* Histogram of grace periods that elapsed while we were inside
	 * the read-side critical section. */
	completed = cur_ops->completed() - completed;
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	++__get_cpu_var(rcu_torture_batch)[completed];
	preempt_enable();
	cur_ops->readunlock(idx);
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.
The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 *
 * @arg is unused.  Returns 0 once the kthread is stopped.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;
	struct timer_list t;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);	/* lowest priority */
	if (irqreader && cur_ops->irqcapable)
		setup_timer_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irqcapable) {
			/* Keep the irq-context reader (rcu_torture_timer)
			 * armed to fire on the next tick. */
			if (!timer_pending(&t))
				mod_timer(&t, 1);
		}
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		p = rcu_dereference(rcu_torture_current);
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		/* rtort_mbtest == 0 indicates a publication-ordering
		 * failure: count it as a memory-barrier error. */
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->readdelay(&rand);
		preempt_disable();
		/* Record the observed pipeline stage, clamped to the
		 * overflow bucket. */
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_count)[pipe_count];
		/* Histogram of grace periods elapsed while inside the
		 * read-side critical section. */
		completed = cur_ops->completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_batch)[completed];
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
		rcu_stutter_wait();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	if (irqreader && cur_ops->irqcapable)
		del_timer_sync(&t);
	/* Park here until kthread_stop() reaps us. */
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * Create an RCU-torture statistics message in the specified buffer.
*/static intrcu_torture_printk(char *page){	int cnt = 0;	int cpu;	int i;	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };	for_each_possible_cpu(cpu) {		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];		}	}	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {		if (pipesummary[i] != 0)			break;	}	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);	cnt += sprintf(&page[cnt],		       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "		       "rtmbe: %d nt: %ld",		       rcu_torture_current,		       rcu_torture_current_version,		       list_empty(&rcu_torture_freelist),		       atomic_read(&n_rcu_torture_alloc),		       atomic_read(&n_rcu_torture_alloc_fail),

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -