📄 sched.c

📁 Scheduler code from the Linux kernel
💻 C
📖 Page 1 of 5
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	/* For active balancing */
	int active_balance;
	int push_cpu;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	unsigned long avg_load_per_task;

	struct task_struct *migration_thread;
	struct list_head migration_queue;
#endif

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;

	/* sys_sched_yield() stats */
	unsigned int yld_exp_empty;
	unsigned int yld_act_empty;
	unsigned int yld_both_empty;
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_switch;
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;

	/* BKL stats */
	unsigned int bkl_count;
#endif
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
{
	rq->curr->sched_class->check_preempt_curr(rq, p);
}

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
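/*
 * Illustrative sketch, not part of the original sched.c: one way the
 * accessors above are commonly combined. count_domain_levels() is a
 * hypothetical helper; it only assumes the for_each_domain() macro and
 * the preempt-disabled rule stated in the comment above it.
 */
static int count_domain_levels(int cpu)
{
	struct sched_domain *sd;
	int depth = 0;

	/* The domain tree may only be walked with preemption disabled. */
	preempt_disable();
	for_each_domain(cpu, sd)
		depth++;
	preempt_enable();

	return depth;
}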
static inline void update_rq_clock(struct rq *rq)
{
	rq->clock = sched_clock_cpu(cpu_of(rq));
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif

/**
 * runqueue_is_locked
 *
 * Returns true if the current cpu runqueue is locked.
 * This interface allows printk to be called with the runqueue lock
 * held and know whether or not it is OK to wake up the klogd.
 */
int runqueue_is_locked(void)
{
	int cpu = get_cpu();
	struct rq *rq = cpu_rq(cpu);
	int ret;

	ret = spin_is_locked(&rq->lock);
	put_cpu();
	return ret;
}

/*
 * Debugging: various feature bits
 */
#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "sched_features.h"
};

#undef SCHED_FEAT

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
	0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)	\
	#name ,

static __read_mostly char *sched_feat_names[] = {
#include "sched_features.h"
	NULL
};

#undef SCHED_FEAT

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t
sched_feat_read(struct file *filp, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char *buf;
	int r = 0;
	int len = 0;
	int i;

	for (i = 0; sched_feat_names[i]; i++) {
		len += strlen(sched_feat_names[i]);
		len += 4;
	}

	buf = kmalloc(len + 2, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; sched_feat_names[i]; i++) {
		if (sysctl_sched_features & (1UL << i))
			r += sprintf(buf + r, "%s ", sched_feat_names[i]);
		else
			r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]);
	}

	r += sprintf(buf + r, "\n");
	WARN_ON(r >= len + 2);

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	kfree(buf);

	return r;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp = buf;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strncmp(buf, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; sched_feat_names[i]; i++) {
		int len = strlen(sched_feat_names[i]);

		if (strncmp(cmp, sched_feat_names[i], len) == 0) {
			if (neg)
				sysctl_sched_features &= ~(1UL << i);
			else
				sysctl_sched_features |= (1UL << i);
			break;
		}
	}

	if (!sched_feat_names[i])
		return -EINVAL;

	filp->f_pos += cnt;

	return cnt;
}

static struct file_operations sched_feat_fops = {
	.open	= sched_feat_open,
	.read	= sched_feat_read,
	.write	= sched_feat_write,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);

#endif

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
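/*
 * Illustrative sketch, not part of the original sched.c: the debugfs file
 * registered above accepts a feature name, optionally prefixed with "NO_"
 * to clear it, exactly as parsed by sched_feat_write(). A minimal userspace
 * toggle (a separate program, assuming debugfs is mounted at
 * /sys/kernel/debug and a feature name such as "HRTICK") might look like:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		const char *name = argc > 1 ? argv[1] : "NO_HRTICK";
 *		int fd = open("/sys/kernel/debug/sched_features", O_WRONLY);
 *
 *		if (fd < 0 || write(fd, name, strlen(name)) < 0)
 *			perror("sched_features");
 *		if (fd >= 0)
 *			close(fd);
 *		return 0;
 *	}
 */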
/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * ratelimit for updating the group shares.
 * default: 0.25ms
 */
unsigned int sysctl_sched_shares_ratelimit = 250000;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

static __read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline int task_running(struct rq *rq, struct task_struct *p)
{
	return task_current(rq, p);
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->oncpu;
#else
	return task_current(rq, p);
#endif
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->oncpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	spin_unlock_irq(&rq->lock);
#else
	spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->oncpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->oncpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

/*
 * __task_rq_lock - lock the runqueue a given task resides on.
 * Must be called with interrupts disabled.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	for (;;) {
		struct rq *rq = task_rq(p);
		spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		spin_unlock(&rq->lock);
	}
}

/*
 * task_rq_lock - lock the runqueue a given task resides on and disable
 * interrupts. Note the ordering: we can safely lookup the task_rq without
 * explicitly disabling preemption.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		local_irq_save(*flags);
		rq = task_rq(p);
		spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		spin_unlock_irqrestore(&rq->lock, *flags);
	}
}

static void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	spin_unlock(&rq->lock);
}

static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
	__releases(rq->lock)
{
	spin_unlock_irqrestore(&rq->lock, *flags);
}

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	spin_lock(&rq->lock);

	return rq;
}
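/*
 * Illustrative sketch, not part of the original sched.c: the usual pairing
 * of task_rq_lock()/task_rq_unlock() defined above. task_runs_here() is a
 * hypothetical helper; the pattern is lock, inspect, unlock.
 */
static int task_runs_here(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	int ret;

	/* Returns with p's runqueue locked and interrupts disabled. */
	rq = task_rq_lock(p, &flags);
	ret = task_current(rq, p) && rq == this_rq();
	task_rq_unlock(rq, &flags);

	return ret;
}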
#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 *
 * It's all a bit involved since we cannot program an hrt while holding the
 * rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
 * reschedule event.
 *
 * When we get rescheduled we reprogram the hrtick_timer outside of the
 * rq->lock.
 */

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	spin_lock(&rq->lock);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP
/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;

	spin_lock(&rq->lock);
	hrtimer_restart(&rq->hrtick_timer);
	rq->hrtick_csd_pending = 0;
	spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
static void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

	timer->expires = time;

	if (rq == this_rq()) {
		hrtimer_restart(timer);
	} else if (!rq->hrtick_csd_pending) {
		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd);
		rq->hrtick_csd_pending = 1;
	}
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
