sched.h

From the "linux kernel source code" collection · C header file · 1,993 lines total · page 1 of 4

#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * cloning flags:
 */
#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD	0x00010000	/* Same thread group? */
#define CLONE_NEWNS	0x00020000	/* New namespace group? */
#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
#define CLONE_STOPPED		0x02000000	/* Start in stopped state */
#define CLONE_NEWUTS		0x04000000	/* New utsname group? */
#define CLONE_NEWIPC		0x08000000	/* New ipcs */
#define CLONE_NEWUSER		0x10000000	/* New user namespace */
#define CLONE_NEWPID		0x20000000	/* New pid namespace */
#define CLONE_NEWNET		0x40000000	/* New network namespace */
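
/*
 * Illustrative note (not part of the original kernel header): these
 * flags are OR-ed together into the flags argument of clone().  As a
 * sketch of typical combinations: a plain fork() passes only SIGCHLD
 * in the CSIGNAL byte, while an NPTL-style thread library creates
 * threads with roughly
 *
 *	CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 *	CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
 *	CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 *
 * so that the new task shares the address space, filesystem context,
 * file table and signal handlers of its creator.
 */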
/*
 * Scheduling policies
 */
#define SCHED_NORMAL		0
#define SCHED_FIFO		1
#define SCHED_RR		2
#define SCHED_BATCH		3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE		5

#ifdef __KERNEL__

struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>

#include <asm/system.h>
#include <asm/semaphore.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/securebits.h>
#include <linux/fs_struct.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/futex.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/kobject.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct bio;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
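
/*
 * Illustrative note (not part of the original kernel header): with
 * FSHIFT = 11, FIXED_1 = 2048 represents the value 1.0, and CALC_LOAD
 * computes an exponentially-decaying average in that fixed-point
 * format.  For example, one 5-second tick of the 1-minute average,
 * starting from a load of exactly 1.0 with no runnable tasks (n = 0):
 *
 *	unsigned long load = 2048;	// 1.0 in fixed point
 *	CALC_LOAD(load, EXP_1, 0);	// (2048*1884 + 0) >> 11 = 1884
 *					// 1884/2048 ~= 0.92
 *
 * Dividing by FIXED_1 recovers the decimal values shown in
 * /proc/loadavg.
 */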
extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);
extern unsigned long weighted_cpuload(const int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define TASK_STOPPED		4
#define TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_DEAD		64

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)			\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

extern void sched_init(void);
extern void sched_init_smp(void);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern cpumask_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
#else
static inline int select_nohz_load_balancer(int cpu)
{
	return 0;
}
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void account_process_tick(struct task_struct *task, int user);
extern void update_process_times(int user);
extern void scheduler_tick(void);

#ifdef CONFIG_DETECT_SOFTLOCKUP
extern void softlockup_tick(void);
extern void spawn_softlockup_task(void);
extern void touch_softlockup_watchdog(void);
extern void touch_all_softlockup_watchdogs(void);
extern int softlockup_thresh;
#else
static inline void softlockup_tick(void)
{
}
static inline void spawn_softlockup_task(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
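
/*
 * Illustrative note (not part of the original kernel header):
 * schedule_timeout() expects the caller to have set the task state
 * first, so the canonical bounded sleep looks like:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);	// sleep up to one second
 *
 * It returns the jiffies left on the timer if the task was woken
 * early (e.g. by a signal), or 0 if the full timeout elapsed.  The
 * schedule_timeout_interruptible()/_uninterruptible() variants set
 * the state themselves.
 */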
struct nsproxy;
struct user_namespace;

/* Maximum number of active map areas.. This is a random (large) number */
#define DEFAULT_MAX_MAP_COUNT	65536

extern int sysctl_max_map_count;

#include <linux/aio.h>

extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
/*
 * The mm counters are not protected by its page_table_lock,
 * so must be incremented atomically.
 */
#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)

#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
/*
 * The mm counters are protected by its page_table_lock,
 * so can be incremented directly.
 */
#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
#define get_mm_counter(mm, member) ((mm)->_##member)
#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
#define inc_mm_counter(mm, member) (mm)->_##member++
#define dec_mm_counter(mm, member) (mm)->_##member--

#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */

#define get_mm_rss(mm)					\
	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
#define update_hiwater_rss(mm)	do {			\
	unsigned long _rss = get_mm_rss(mm);		\
	if ((mm)->hiwater_rss < _rss)			\
		(mm)->hiwater_rss = _rss;		\
} while (0)
#define update_hiwater_vm(mm)	do {			\
	if ((mm)->hiwater_vm < (mm)->total_vm)		\
		(mm)->hiwater_vm = (mm)->total_vm;	\
} while (0)
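
/*
 * Illustrative note (not part of the original kernel header): because
 * of the ##-pasting, callers name the counter bare and the macro
 * reaches the corresponding _file_rss/_anon_rss field.  For example,
 * the page-fault path accounts a freshly mapped anonymous page with
 *
 *	inc_mm_counter(mm, anon_rss);	// touches mm->_anon_rss
 *
 * and get_mm_rss() above reports the resident set size, in pages, as
 * the sum of the two counters.
 */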
extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE      0  /* core dump is permitted */
#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
#define MMF_DUMPABLE_BITS 2

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	5
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED))

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		count;
	atomic_t		live;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	struct task_struct	*group_exit_task;
	int			notify_count;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct task_struct *tsk;
	ktime_t it_real_incr;

	/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
	cputime_t it_prof_expires, it_virt_expires;
	cputime_t it_prof_incr, it_virt_incr;

	/* job control IDs */

	/*
	 * pgrp and session fields are deprecated.
	 * use the task_session_Xnr and task_pgrp_Xnr routines below
	 */

	union {
		pid_t pgrp __deprecated;
		pid_t __pgrp;
	};

	struct pid *tty_old_pgrp;

	union {
		pid_t session __deprecated;
		pid_t __session;
	};

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

	struct list_head cpu_timers[3];

	/* keep the process-shared keyrings here so that they do the right
	 * thing in threads created with CLONE_THREAD */
#ifdef CONFIG_KEYS
	struct key *session_keyring;	/* keyring inherited over fork */
	struct key *process_keyring;	/* keyring private to this process */
#endif
