/*
 * sched.h
 * From the Linux kernel source — C header, 1,993 lines total
 * (this file is page 1 of 4 of a code-viewer export).
 */
#ifdef CONFIG_BSD_PROCESS_ACCT struct pacct_struct pacct; /* per-process accounting information */#endif#ifdef CONFIG_TASKSTATS struct taskstats *stats;#endif#ifdef CONFIG_AUDIT unsigned audit_tty; struct tty_audit_buf *tty_audit_buf;#endif};/* Context switch must be unlocked if interrupts are to be enabled */#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW# define __ARCH_WANT_UNLOCKED_CTXSW#endif/* * Bits in flags field of signal_struct. */#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */#define SIGNAL_STOP_DEQUEUED 0x00000002 /* stop signal dequeued */#define SIGNAL_STOP_CONTINUED 0x00000004 /* SIGCONT since WCONTINUED reap */#define SIGNAL_GROUP_EXIT 0x00000008 /* group exit in progress *//* * Some day this will be a full-fledged user tracking system.. */struct user_struct { atomic_t __count; /* reference count */ atomic_t processes; /* How many processes does this user have? */ atomic_t files; /* How many open files does this user have? */ atomic_t sigpending; /* How many pending signals does this user have? */#ifdef CONFIG_INOTIFY_USER atomic_t inotify_watches; /* How many inotify watches does this user have? */ atomic_t inotify_devs; /* How many inotify devs does this user have opened? */#endif#ifdef CONFIG_POSIX_MQUEUE /* protected by mq_lock */ unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */#endif unsigned long locked_shm; /* How many pages of mlocked shm ? 
*/#ifdef CONFIG_KEYS struct key *uid_keyring; /* UID specific keyring */ struct key *session_keyring; /* UID's default session keyring */#endif /* Hash table maintenance information */ struct hlist_node uidhash_node; uid_t uid;#ifdef CONFIG_FAIR_USER_SCHED struct task_group *tg;#ifdef CONFIG_SYSFS struct kset kset; struct subsys_attribute user_attr; struct work_struct work;#endif#endif};#ifdef CONFIG_FAIR_USER_SCHEDextern int uids_kobject_init(void);#elsestatic inline int uids_kobject_init(void) { return 0; }#endifextern struct user_struct *find_user(uid_t);extern struct user_struct root_user;#define INIT_USER (&root_user)struct backing_dev_info;struct reclaim_state;#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)struct sched_info { /* cumulative counters */ unsigned long pcount; /* # of times run on this cpu */ unsigned long long cpu_time, /* time spent on the cpu */ run_delay; /* time spent waiting on a runqueue */ /* timestamps */ unsigned long long last_arrival,/* when we last ran on a cpu */ last_queued; /* when we were last queued to run */#ifdef CONFIG_SCHEDSTATS /* BKL stats */ unsigned int bkl_count;#endif};#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */#ifdef CONFIG_SCHEDSTATSextern const struct file_operations proc_schedstat_operations;#endif /* CONFIG_SCHEDSTATS */#ifdef CONFIG_TASK_DELAY_ACCTstruct task_delay_info { spinlock_t lock; unsigned int flags; /* Private per-task flags */ /* For each stat XXX, add following, aligned appropriately * * struct timespec XXX_start, XXX_end; * u64 XXX_delay; * u32 XXX_count; * * Atomicity of updates to XXX_delay, XXX_count protected by * single lock above (split into XXX_lock if contention is an issue). */ /* * XXX_count is incremented on every XXX operation, the delay * associated with the operation is added to XXX_delay. * XXX_delay contains the accumulated delay time in nanoseconds. 
*/ struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */ u64 blkio_delay; /* wait for sync block io completion */ u64 swapin_delay; /* wait for swapin block io completion */ u32 blkio_count; /* total count of the number of sync block */ /* io operations performed */ u32 swapin_count; /* total count of the number of swapin block */ /* io operations performed */};#endif /* CONFIG_TASK_DELAY_ACCT */static inline int sched_info_on(void){#ifdef CONFIG_SCHEDSTATS return 1;#elif defined(CONFIG_TASK_DELAY_ACCT) extern int delayacct_on; return delayacct_on;#else return 0;#endif}enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE, CPU_MAX_IDLE_TYPES};/* * sched-domains (multiprocessor balancing) declarations: *//* * Increase resolution of nice-level calculations: */#define SCHED_LOAD_SHIFT 10#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)#define SCHED_LOAD_SCALE_FUZZ SCHED_LOAD_SCALE#ifdef CONFIG_SMP#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */#define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */#define SD_BALANCE_EXEC 4 /* Balance on exec */#define SD_BALANCE_FORK 8 /* Balance on fork, clone */#define SD_WAKE_IDLE 16 /* Wake to idle CPU on task wakeup */#define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */#define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */#define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */#define SD_SERIALIZE 1024 /* Only a single load balancing instance */#define BALANCE_FOR_MC_POWER \ (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)#define BALANCE_FOR_PKG_POWER \ ((sched_mc_power_savings || sched_smt_power_savings) ? \ SD_POWERSAVINGS_BALANCE : 0)#define test_sd_parent(sd, flag) ((sd->parent && \ (sd->parent->flags & flag)) ? 
1 : 0)struct sched_group { struct sched_group *next; /* Must be a circular list */ cpumask_t cpumask; /* * CPU power of this group, SCHED_LOAD_SCALE being max power for a * single CPU. This is read only (except for setup, hotplug CPU). * Note : Never change cpu_power without recompute its reciprocal */ unsigned int __cpu_power; /* * reciprocal value of cpu_power to avoid expensive divides * (see include/linux/reciprocal_div.h) */ u32 reciprocal_cpu_power;};struct sched_domain { /* These fields must be setup */ struct sched_domain *parent; /* top domain must be null terminated */ struct sched_domain *child; /* bottom domain must be null terminated */ struct sched_group *groups; /* the balancing groups of the domain */ cpumask_t span; /* span of all CPUs in this domain */ unsigned long min_interval; /* Minimum balance interval ms */ unsigned long max_interval; /* Maximum balance interval ms */ unsigned int busy_factor; /* less balancing by factor if busy */ unsigned int imbalance_pct; /* No balance until over watermark */ unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ unsigned int busy_idx; unsigned int idle_idx; unsigned int newidle_idx; unsigned int wake_idx; unsigned int forkexec_idx; int flags; /* See SD_* */ /* Runtime fields. */ unsigned long last_balance; /* init to jiffies. units in jiffies */ unsigned int balance_interval; /* initialise to 1. units in ms. 
*/ unsigned int nr_balance_failed; /* initialise to 0 */#ifdef CONFIG_SCHEDSTATS /* load_balance() stats */ unsigned int lb_count[CPU_MAX_IDLE_TYPES]; unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; /* Active load balancing */ unsigned int alb_count; unsigned int alb_failed; unsigned int alb_pushed; /* SD_BALANCE_EXEC stats */ unsigned int sbe_count; unsigned int sbe_balanced; unsigned int sbe_pushed; /* SD_BALANCE_FORK stats */ unsigned int sbf_count; unsigned int sbf_balanced; unsigned int sbf_pushed; /* try_to_wake_up() stats */ unsigned int ttwu_wake_remote; unsigned int ttwu_move_affine; unsigned int ttwu_move_balance;#endif};extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);#endif /* CONFIG_SMP *//* * A runqueue laden with a single nice 0 task scores a weighted_cpuload of * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a * task of nice 0 or enough lower priority tasks to bring up the * weighted_cpuload */static inline int above_background_load(void){ unsigned long cpu; for_each_online_cpu(cpu) { if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE) return 1; } return 0;}struct io_context; /* See blkdev.h */#define NGROUPS_SMALL 32#define NGROUPS_PER_BLOCK ((int)(PAGE_SIZE / sizeof(gid_t)))struct group_info { int ngroups; atomic_t usage; gid_t small_block[NGROUPS_SMALL]; int nblocks; gid_t *blocks[0];};/* * get_group_info() must be called with the owning task locked (via task_lock()) * when task != current. The reason being that the vast majority of callers are * looking at current->group_info, which can not be changed except by the * current task. Changing current->group_info requires the task lock, too. 
*/#define get_group_info(group_info) do { \ atomic_inc(&(group_info)->usage); \} while (0)#define put_group_info(group_info) do { \ if (atomic_dec_and_test(&(group_info)->usage)) \ groups_free(group_info); \} while (0)extern struct group_info *groups_alloc(int gidsetsize);extern void groups_free(struct group_info *group_info);extern int set_current_groups(struct group_info *group_info);extern int groups_search(struct group_info *group_info, gid_t grp);/* access the groups "array" with this macro */#define GROUP_AT(gi, i) \ ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])#ifdef ARCH_HAS_PREFETCH_SWITCH_STACKextern void prefetch_stack(struct task_struct *t);#elsestatic inline void prefetch_stack(struct task_struct *t) { }#endifstruct audit_context; /* See audit.c */struct mempolicy;struct pipe_inode_info;struct uts_namespace;struct rq;struct sched_domain;struct sched_class { const struct sched_class *next; void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup); void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep); void (*yield_task) (struct rq *rq); void (*check_preempt_curr) (struct rq *rq, struct task_struct *p); struct task_struct * (*pick_next_task) (struct rq *rq); void (*put_prev_task) (struct rq *rq, struct task_struct *p);#ifdef CONFIG_SMP unsigned long (*load_balance) (struct rq *this_rq, int this_cpu, struct rq *busiest, unsigned long max_load_move, struct sched_domain *sd, enum cpu_idle_type idle, int *all_pinned, int *this_best_prio); int (*move_one_task) (struct rq *this_rq, int this_cpu, struct rq *busiest, struct sched_domain *sd, enum cpu_idle_type idle);#endif void (*set_curr_task) (struct rq *rq); void (*task_tick) (struct rq *rq, struct task_struct *p); void (*task_new) (struct rq *rq, struct task_struct *p);};struct load_weight { unsigned long weight, inv_weight;};/* * CFS stats for a schedulable entity (task, task-group etc) * * Current field usage histogram: * * 4 se->block_start * 4 se->run_node * 
4 se->sleep_start * 6 se->load.weight */struct sched_entity { struct load_weight load; /* for load-balancing */ struct rb_node run_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime;#ifdef CONFIG_SCHEDSTATS u64 wait_start; u64 wait_max; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_forced2_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle;#endif#ifdef CONFIG_FAIR_GROUP_SCHED struct sched_entity *parent; /* rq on which this entity is (to be) queued: */ struct cfs_rq *cfs_rq; /* rq "owned" by this entity/group: */ struct cfs_rq *my_q;#endif};struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ void *stack; atomic_t usage; unsigned int flags; /* per process flags, defined below */ unsigned int ptrace; int lock_depth; /* BKL lock depth */#ifdef CONFIG_SMP#ifdef __ARCH_WANT_UNLOCKED_CTXSW int oncpu;#endif#endif int prio, static_prio, normal_prio; struct list_head run_list; const struct sched_class *sched_class; struct sched_entity se;#ifdef CONFIG_PREEMPT_NOTIFIERS /* list of struct preempt_notifier: */ struct hlist_head preempt_notifiers;#endif unsigned short ioprio; /* * fpu_counter contains the number of consecutive context switches * that the FPU is used. If this is over a threshold, the lazy fpu * saving becomes unlazy to save the trap. 
This is an unsigned char * so that after 256 times the counter wraps and the behavior turns * lazy again; this to deal with bursty apps that only use FPU for * a short time */ unsigned char fpu_counter; s8 oomkilladj; /* OOM kill score adjustment (bit shift). */#ifdef CONFIG_BLK_DEV_IO_TRACE unsigned int btrace_seq;#endif unsigned int policy; cpumask_t cpus_allowed; unsigned int time_slice;#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) struct sched_info sched_info;#endif struct list_head tasks; /* * ptrace_list/ptrace_children forms the list of my children * that were stolen by a ptracer. */ struct list_head ptrace_children; struct list_head ptrace_list; struct mm_struct *mm, *active_mm;/* task state */ struct linux_binfmt *binfmt; int exit_state; int exit_code, exit_signal; int pdeath_signal; /* The signal sent when the parent dies */ /* ??? */ unsigned int personality; unsigned did_exec:1; pid_t pid; pid_t tgid;#ifdef CONFIG_CC_STACKPROTECTOR /* Canary value for the -fstack-protector gcc feature */ unsigned long stack_canary;#endif /* * pointers to (original) parent process, youngest child, younger sibling, * older sibling, respectively. (p->father can be replaced with * p->parent->pid) */ struct task_struct *real_parent; /* real parent process (when being debugged) */ struct task_struct *parent; /* parent process */ /* * children/sibling forms the list of my children plus the
/*
 * (Code-viewer UI residue, not part of the kernel source)
 * Keyboard shortcuts: copy code Ctrl+C; search code Ctrl+F; full-screen F11;
 * increase font size Ctrl+=; decrease font size Ctrl+-; show shortcuts ?.
 */