
📄 sched.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 2
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			mutex_lock(&ctx->gang->aff_mutex);
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
			mutex_unlock(&ctx->gang->aff_mutex);

			return NULL;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}

	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	pr_debug("Got SPU %d %d\n", spu->number, spu->node);
	spu_init_channels(spu);
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			mutex_unlock(&victim->state_mutex);
			/*
			 * We need to break out of the wait loop in spu_run
			 * manually to ensure this context gets put on the
			 * runqueue again ASAP.
			 */
			wake_up(&victim->stop_wq);
			return spu;
		}
	}

	return NULL;
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	do {
		struct spu *spu;

		/*
		 * If there are multiple threads waiting for a single context
		 * only one actually binds the context while the others will
		 * only be able to acquire the state_mutex once the context
		 * already is in runnable state.
		 */
		if (ctx->spu)
			return 0;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime thread we try to get it running by
		 * preempting a lower priority thread.
		 */
		if (!spu && rt_prio(ctx->prio))
			spu = find_victim(ctx);
		if (spu) {
			int node = spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu_bind_context(spu, ctx);
			cbe_spu_info[node].nr_active++;
			mutex_unlock(&cbe_spu_info[node].list_mutex);
			return 0;
		}

		spu_prio_wait(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}

/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			int node = spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu_unbind_context(spu, ctx);
			spu->alloc_state = SPU_FREE;
			cbe_spu_info[node].nr_active--;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			ctx->stats.vol_ctx_switch++;
			spu->stats.vol_ctx_switch++;

			if (new)
				wake_up(&new->stop_wq);
		}
	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield -	yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

static noinline void spusched_tick(struct spu_context *ctx)
{
	if (ctx->flags & SPU_CREATE_NOSCHED)
		return;
	if (ctx->policy == SCHED_FIFO)
		return;

	if (--ctx->time_slice)
		return;

	/*
	 * Unfortunately list_mutex ranks outside of state_mutex, so
	 * we have to trylock here.  If we fail give the context another
	 * tick and try again.
	 */
	if (mutex_trylock(&ctx->state_mutex)) {
		struct spu *spu = ctx->spu;
		struct spu_context *new;

		new = grab_runnable_context(ctx->prio + 1, spu->node);
		if (new) {
			spu_unbind_context(spu, ctx);
			ctx->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			spu->alloc_state = SPU_FREE;
			cbe_spu_info[spu->node].nr_active--;
			wake_up(&new->stop_wq);
			/*
			 * We need to break out of the wait loop in
			 * spu_run manually to ensure this context
			 * gets put on the runqueue again ASAP.
			 */
			wake_up(&ctx->stop_wq);
		}
		spu_set_timeslice(ctx);
		mutex_unlock(&ctx->state_mutex);
	} else {
		ctx->time_slice++;
	}
}

/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - given tick count, update the avenrun load estimates.
 * @ticks:	tick count
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (unlikely(count < 0)) {
		active_tasks = count_active_contexts() * FIXED_1;
		do {
			CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}

static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
	spu_calc_load(SPUSCHED_TICK);
}

static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			mutex_lock(&cbe_spu_info[node].list_mutex);
			list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
				if (spu->ctx)
					spusched_tick(spu->ctx);
			mutex_unlock(&cbe_spu_info[node].list_mutex);
		}
	}

	return 0;
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side..),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	entry = create_proc_entry("spu_loadavg", 0, NULL);
	if (!entry)
		goto out_stop_kthread;
	entry->proc_fops = &spu_loadavg_fops;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}
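
grab_runnable_context() above relies on a classic O(1)-style runqueue layout: one list head per priority level plus a bitmap with a bit set for every non-empty list, so the best runnable context is found with a single find_first_bit() scan instead of walking every queued context. The stand-alone sketch below illustrates that indexing scheme; it is an editor's illustration rather than spufs code, and the names (enqueue, grab_best, runq_len) and the MAX_PRIO value are assumptions made only for the example.

/*
 * Minimal user-space sketch of the priority-bitmap runqueue idea used
 * by grab_runnable_context().  Names, sizes and the linear bit scan
 * are illustrative stand-ins, not the kernel data structures.
 */
#include <stdio.h>

#define MAX_PRIO	140	/* assumed to match the kernel's prio range */
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITMAP_LONGS	((MAX_PRIO + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long bitmap[BITMAP_LONGS];
static int runq_len[MAX_PRIO];	/* stand-in for the per-priority lists */

static void set_bit(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static void clear_bit(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
}

/* Linear stand-in for the kernel's optimized find_first_bit(). */
static int find_first_bit(const unsigned long *map, int size)
{
	int i;

	for (i = 0; i < size; i++)
		if (map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			return i;
	return size;
}

/* Queue a context at priority prio (the enqueue side, not shown on this page). */
static void enqueue(int prio)
{
	if (runq_len[prio]++ == 0)
		set_bit(prio, bitmap);
}

/* Dequeue the best context whose priority is numerically lower than prio. */
static int grab_best(int prio)
{
	int best = find_first_bit(bitmap, prio);

	if (best >= prio)
		return -1;		/* nothing runnable that qualifies */
	if (--runq_len[best] == 0)
		clear_bit(best, bitmap);
	return best;
}

int main(void)
{
	enqueue(120);
	enqueue(100);
	printf("picked prio %d\n", grab_best(MAX_PRIO));	/* -> 100 */
	printf("picked prio %d\n", grab_best(110));	/* -> -1, 120 does not qualify */
	return 0;
}

Note how the real callers choose the scan limit: spusched_tick() passes ctx->prio + 1 so that a context of equal or better priority can take over when the time slice expires, while spu_deactivate() passes MAX_PRIO to accept any runnable context.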
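
The load-average path (spu_calc_load(), CALC_LOAD and LOAD_INT/LOAD_FRAC in show_spu_loadavg()) is the same fixed-point arithmetic as the CPU loadavg code: averages are kept scaled by FIXED_1 = 1 << FSHIFT and decayed with precomputed exponential factors once per LOAD_FREQ ticks. The sketch below reproduces that math in user space; the constant values (FSHIFT = 11, EXP_1 = 1884, EXP_5 = 2014, EXP_15 = 2037) are assumed to match the <linux/sched.h> definitions of this kernel generation and are restated here only for illustration.

/*
 * Stand-alone sketch of the fixed-point load-average math behind
 * spu_calc_load() and show_spu_loadavg().  Constant values are
 * assumed, copied from the era's <linux/sched.h>.
 */
#include <stdio.h>

#define FSHIFT	11			/* bits of fractional precision */
#define FIXED_1	(1 << FSHIFT)		/* 1.0 in fixed point */
#define EXP_1	1884			/* decay factor for the 1-minute average */
#define EXP_5	2014			/* 5-minute average */
#define EXP_15	2037			/* 15-minute average */

/* Exponentially decaying average: load = load*exp + n*(1-exp), all fixed point. */
#define CALC_LOAD(load, exp, n) \
	load *= exp; \
	load += (n) * (FIXED_1 - (exp)); \
	load >>= FSHIFT;

/* Same presentation helpers as show_spu_loadavg(). */
#define LOAD_INT(x)  ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

int main(void)
{
	unsigned long avenrun[3] = { 0, 0, 0 };
	unsigned long active = 3 * FIXED_1;	/* pretend 3 contexts are runnable */
	int i;

	/* Feed the same sample for ten consecutive LOAD_FREQ windows. */
	for (i = 0; i < 10; i++) {
		CALC_LOAD(avenrun[0], EXP_1, active);
		CALC_LOAD(avenrun[1], EXP_5, active);
		CALC_LOAD(avenrun[2], EXP_15, active);
	}

	/* The FIXED_1/200 offset mirrors the rounding in show_spu_loadavg(). */
	for (i = 0; i < 3; i++)
		avenrun[i] += FIXED_1 / 200;

	printf("%lu.%02lu %lu.%02lu %lu.%02lu\n",
	       LOAD_INT(avenrun[0]), LOAD_FRAC(avenrun[0]),
	       LOAD_INT(avenrun[1]), LOAD_FRAC(avenrun[1]),
	       LOAD_INT(avenrun[2]), LOAD_FRAC(avenrun[2]));
	return 0;
}

The FIXED_1/200 added before printing is the same +0.005 rounding step show_spu_loadavg() applies, so the two printed decimal places round rather than truncate.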
