sched.h

来自「linux 内核源代码」· C头文件 代码 · 共 1,993 行 · 第 1/4 页

H
1,993
字号
	 * tasks I'm ptracing.	 */	struct list_head children;	/* list of my children */	struct list_head sibling;	/* linkage in my parent's children list */	struct task_struct *group_leader;	/* threadgroup leader */	/* PID/PID hash table linkage. */	struct pid_link pids[PIDTYPE_MAX];	struct list_head thread_group;	struct completion *vfork_done;		/* for vfork() */	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */	unsigned int rt_priority;	cputime_t utime, stime, utimescaled, stimescaled;	cputime_t gtime;	cputime_t prev_utime, prev_stime;	unsigned long nvcsw, nivcsw; /* context switch counts */	struct timespec start_time; 		/* monotonic time */	struct timespec real_start_time;	/* boot based time *//* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */	unsigned long min_flt, maj_flt;  	cputime_t it_prof_expires, it_virt_expires;	unsigned long long it_sched_expires;	struct list_head cpu_timers[3];/* process credentials */	uid_t uid,euid,suid,fsuid;	gid_t gid,egid,sgid,fsgid;	struct group_info *group_info;	kernel_cap_t   cap_effective, cap_inheritable, cap_permitted;	unsigned keep_capabilities:1;	struct user_struct *user;#ifdef CONFIG_KEYS	struct key *request_key_auth;	/* assumed request_key authority */	struct key *thread_keyring;	/* keyring private to this thread */	unsigned char jit_keyring;	/* default keyring to attach requested keys to */#endif	char comm[TASK_COMM_LEN]; /* executable name excluding path				     - access with [gs]et_task_comm (which lock				       it with task_lock())				     - initialized normally by flush_old_exec *//* file system info */	int link_count, total_link_count;#ifdef CONFIG_SYSVIPC/* ipc stuff */	struct sysv_sem sysvsem;#endif/* CPU-specific state of this task */	struct thread_struct thread;/* filesystem information */	struct fs_struct *fs;/* open file information */	struct files_struct *files;/* namespaces */	struct nsproxy *nsproxy;/* signal 
handlers */	struct signal_struct *signal;	struct sighand_struct *sighand;	sigset_t blocked, real_blocked;	sigset_t saved_sigmask;		/* To be restored with TIF_RESTORE_SIGMASK */	struct sigpending pending;	unsigned long sas_ss_sp;	size_t sas_ss_size;	int (*notifier)(void *priv);	void *notifier_data;	sigset_t *notifier_mask;#ifdef CONFIG_SECURITY	void *security;#endif	struct audit_context *audit_context;	seccomp_t seccomp;/* Thread group tracking */   	u32 parent_exec_id;   	u32 self_exec_id;/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */	spinlock_t alloc_lock;	/* Protection of the PI data structures: */	spinlock_t pi_lock;#ifdef CONFIG_RT_MUTEXES	/* PI waiters blocked on a rt_mutex held by this task */	struct plist_head pi_waiters;	/* Deadlock detection and priority inheritance handling */	struct rt_mutex_waiter *pi_blocked_on;#endif#ifdef CONFIG_DEBUG_MUTEXES	/* mutex deadlock detection */	struct mutex_waiter *blocked_on;#endif#ifdef CONFIG_TRACE_IRQFLAGS	unsigned int irq_events;	int hardirqs_enabled;	unsigned long hardirq_enable_ip;	unsigned int hardirq_enable_event;	unsigned long hardirq_disable_ip;	unsigned int hardirq_disable_event;	int softirqs_enabled;	unsigned long softirq_disable_ip;	unsigned int softirq_disable_event;	unsigned long softirq_enable_ip;	unsigned int softirq_enable_event;	int hardirq_context;	int softirq_context;#endif#ifdef CONFIG_LOCKDEP# define MAX_LOCK_DEPTH 30UL	u64 curr_chain_key;	int lockdep_depth;	struct held_lock held_locks[MAX_LOCK_DEPTH];	unsigned int lockdep_recursion;#endif/* journalling filesystem info */	void *journal_info;/* stacked block device info */	struct bio *bio_list, **bio_tail;/* VM state */	struct reclaim_state *reclaim_state;	struct backing_dev_info *backing_dev_info;	struct io_context *io_context;	unsigned long ptrace_message;	siginfo_t *last_siginfo; /* For ptrace use.  
*/#ifdef CONFIG_TASK_XACCT/* i/o counters(bytes read/written, #syscalls */	u64 rchar, wchar, syscr, syscw;#endif	struct task_io_accounting ioac;#if defined(CONFIG_TASK_XACCT)	u64 acct_rss_mem1;	/* accumulated rss usage */	u64 acct_vm_mem1;	/* accumulated virtual memory usage */	cputime_t acct_stimexpd;/* stime since last update */#endif#ifdef CONFIG_NUMA  	struct mempolicy *mempolicy;	short il_next;#endif#ifdef CONFIG_CPUSETS	nodemask_t mems_allowed;	int cpuset_mems_generation;	int cpuset_mem_spread_rotor;#endif#ifdef CONFIG_CGROUPS	/* Control Group info protected by css_set_lock */	struct css_set *cgroups;	/* cg_list protected by css_set_lock and tsk->alloc_lock */	struct list_head cg_list;#endif#ifdef CONFIG_FUTEX	struct robust_list_head __user *robust_list;#ifdef CONFIG_COMPAT	struct compat_robust_list_head __user *compat_robust_list;#endif	struct list_head pi_state_list;	struct futex_pi_state *pi_state_cache;#endif	atomic_t fs_excl;	/* holding fs exclusive resources */	struct rcu_head rcu;	/*	 * cache last used pipe for splice	 */	struct pipe_inode_info *splice_pipe;#ifdef	CONFIG_TASK_DELAY_ACCT	struct task_delay_info *delays;#endif#ifdef CONFIG_FAULT_INJECTION	int make_it_fail;#endif	struct prop_local_single dirties;};/* * Priority of a process goes from 0..MAX_PRIO-1, valid RT * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority * values are inverted: lower p->prio value means higher priority. * * The MAX_USER_RT_PRIO value allows the actual maximum * RT priority to be separate from the value exported to * user-space.  This allows kernel threads to set their * priority to a value higher than any user task. Note: * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. 
*/#define MAX_USER_RT_PRIO	100#define MAX_RT_PRIO		MAX_USER_RT_PRIO#define MAX_PRIO		(MAX_RT_PRIO + 40)#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)static inline int rt_prio(int prio){	if (unlikely(prio < MAX_RT_PRIO))		return 1;	return 0;}static inline int rt_task(struct task_struct *p){	return rt_prio(p->prio);}static inline void set_task_session(struct task_struct *tsk, pid_t session){	tsk->signal->__session = session;}static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp){	tsk->signal->__pgrp = pgrp;}static inline struct pid *task_pid(struct task_struct *task){	return task->pids[PIDTYPE_PID].pid;}static inline struct pid *task_tgid(struct task_struct *task){	return task->group_leader->pids[PIDTYPE_PID].pid;}static inline struct pid *task_pgrp(struct task_struct *task){	return task->group_leader->pids[PIDTYPE_PGID].pid;}static inline struct pid *task_session(struct task_struct *task){	return task->group_leader->pids[PIDTYPE_SID].pid;}struct pid_namespace;/* * the helpers to get the task's different pids as they are seen * from various namespaces * * task_xid_nr()     : global id, i.e. the id seen from the init namespace; * task_xid_vnr()    : virtual id, i.e. the id seen from the namespace the task *                     belongs to. 
 *                     this only makes sense when called in the
 *                     context of the task that belongs to the same
 *                     namespace;
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */

/* Global (init-namespace) pid of @tsk. */
static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

/* Pid of @tsk as seen from @tsk's own pid namespace. */
static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_pid(tsk));
}

/* Global (init-namespace) thread-group id of @tsk. */
static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

/* Thread-group id of @tsk as seen from @tsk's own pid namespace. */
static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}

/* Global process-group id, read from the value cached in signal_struct. */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return tsk->signal->__pgrp;
}

pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

/* Process-group id of @tsk as seen from @tsk's own pid namespace. */
static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_pgrp(tsk));
}

/* Global session id, read from the value cached in signal_struct. */
static inline pid_t task_session_nr(struct task_struct *tsk)
{
	return tsk->signal->__session;
}

pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

/* Session id of @tsk as seen from @tsk's own pid namespace. */
static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_session(tsk));
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state).
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 */
static inline int pid_alive(struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}

/*
 * is_container_init:
 * check whether the task is init in its own pid namespace.
 */
extern int is_container_init(struct task_struct *tsk);

/* NOTE(review): presumably the pid notified on Ctrl-Alt-Del — confirm
 * against the definition in kernel/sys.c. */
extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);

/* Take a reference on @tsk; must be balanced by put_task_struct(). */
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

/* Drop a reference on @t; the task is freed when the last one goes. */
static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}

/*
 * Per process flags
 */
#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
					/* Not implemented yet, only for 486*/
#define PF_STARTING	0x00000002	/* being created */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_SWAPOFF	0x00080000	/* I am in swapoff */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_BORROWED_MM	0x00200000	/* I am a kthread doing use_mm */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex
tester */#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable *//* * Only the _current_ task can read/write to tsk->flags, but other * tasks can access tsk->flags in readonly mode for example * with tsk_used_math (like during threaded core dumping). * There is however an exception to this rule during ptrace * or during fork: the ptracer task is allowed to write to the * child->flags of its traced child (same goes for fork, the parent * can write to the child->flags), because we're guaranteed the * child is not running and in turn not changing child->flags * at the same time the parent does it. */#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)#define clear_used_math() clear_stopped_child_used_math(current)#define set_used_math() set_stopped_child_used_math(current)#define conditional_stopped_child_used_math(condition, child) \	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? 
PF_USED_MATH : 0; } while (0)#define conditional_used_math(condition) \	conditional_stopped_child_used_math(condition, current)#define copy_to_stopped_child_used_math(child) \	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)#define used_math() tsk_used_math(current)#ifdef CONFIG_SMPextern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);#elsestatic inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask){	if (!cpu_isset(0, new_mask))		return -EINVAL;	return 0;}#endifextern unsigned long long sched_clock(void);/* * For kernel-internal use: high-speed (but slightly incorrect) per-cpu * clock constructed from sched_clock(): */extern unsigned long long cpu_clock(int cpu);extern unsigned long longtask_sched_runtime(struct task_struct *task);/* sched_exec is called by processes performing an exec */#ifdef CONFIG_SMPextern void sched_exec(void);#else#define sched_exec()   {}#endifextern void sched_clock_idle_sleep_event(void);extern void sched_clock_idle_wakeup_event(u64 delta_ns);#ifdef CONFIG_HOTPLUG_CPUextern void idle_task_exit(void);#elsestatic inline void idle_task_exit(void) {}#endifextern void sched_idle_next(void);#ifdef CONFIG_SCHED_DEBUGextern unsigned int sysctl_sched_latency;extern unsigned int sysctl_sched_min_granularity;extern unsigned int sysctl_sched_wakeup_granularity;extern unsigned int sysctl_sched_batch_wakeup_granularity;extern unsigned int sysctl_sched_child_runs_first;extern unsigned int sysctl_sched_features;extern unsigned int sysctl_sched_migration_cost;extern unsigned int sysctl_sched_nr_migrate;int sched_nr_latency_handler(struct ctl_table *table, int write,		struct file *file, void __user *buffer, size_t *length,		loff_t *ppos);#endifextern unsigned int sysctl_sched_compat_yield;#ifdef CONFIG_RT_MUTEXESextern int rt_mutex_getprio(struct 
task_struct *p);extern void rt_mutex_setprio(struct task_struct *p, int prio);extern void rt_mutex_adjust_pi(struct task_struct *p);#elsestatic inline int rt_mutex_getprio(struct task_struct *p){	return p->normal_prio;}# define rt_mutex_adjust_pi(p)		do { } while (0)#endifextern void set_user_nice(struct task_struct *p, long nice);extern int task_prio(const struct task_struct *p);extern int task_nice(const struct task_struct *p);extern int can_nice(const struct task_struct *p, const int nice);extern int task_curr(const struct task_struct *p);extern int idle_cpu(int cpu);extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);extern struct task_struct *idle_task(int cpu);extern struct task_struct *curr_task(int cpu);extern void set_curr_task(int cpu, struct task_struct *p);void yield(void);/* * The default (Linux) execution domain. */extern struct exec_domain	default_exec_domain;

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?