
📄 sched.h

📁 Axis 221 camera embedded programming interface
💻 C header (.h)
📖 Page 1 of 4
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	struct task_struct	*group_exit_task;
	int			notify_count;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct task_struct *tsk;
	ktime_t it_real_incr;

	/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
	cputime_t it_prof_expires, it_virt_expires;
	cputime_t it_prof_incr, it_virt_incr;

	/* job control IDs */
	pid_t pgrp;
	pid_t tty_old_pgrp;

	union {
		pid_t session __deprecated;
		pid_t __session;
	};

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sched_time;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

	struct list_head cpu_timers[3];

	/* keep the process-shared keyrings here so that they do the right
	 * thing in threads created with CLONE_THREAD */
#ifdef CONFIG_KEYS
	struct key *session_keyring;	/* keyring inherited over fork */
	struct key *process_keyring;	/* keyring private to this process */
#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
};

/* Context switch must be unlocked if interrupts are to be enabled */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
# define __ARCH_WANT_UNLOCKED_CTXSW
#endif

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */

/*
 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
 * values are inverted: lower p->prio value means higher priority.
 *
 * The MAX_USER_RT_PRIO value allows the actual maximum
 * RT priority to be separate from the value exported to
 * user-space.  This allows kernel threads to set their
 * priority to a value higher than any user task. Note:
 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
 */
#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO
#define MAX_PRIO		(MAX_RT_PRIO + 40)

#define rt_prio(prio)		unlikely((prio) < MAX_RT_PRIO)
#define rt_task(p)		rt_prio((p)->prio)
#define batch_task(p)		(unlikely((p)->policy == SCHED_BATCH))
#define is_rt_policy(p)		((p) != SCHED_NORMAL && (p) != SCHED_BATCH)
#define has_rt_policy(p)	unlikely(is_rt_policy((p)->policy))
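/*
 * Illustrative sketch, not part of the original header: the macros above
 * place realtime priorities in 0..MAX_RT_PRIO-1 (0..99) and
 * SCHED_NORMAL/SCHED_BATCH priorities in MAX_RT_PRIO..MAX_PRIO-1 (100..139),
 * with a lower value meaning a higher priority.  The nice mapping below
 * mirrors the NICE_TO_PRIO convention used elsewhere in the kernel
 * (kernel/sched.c); it is shown as an assumption, not something this header
 * defines.
 */
static inline int example_nice_to_static_prio(int nice)
{
	/* nice -20..19 maps onto the SCHED_NORMAL range 100..139 */
	return MAX_RT_PRIO + nice + 20;
}

static inline int example_prio_is_realtime(int prio)
{
	/* the same comparison rt_prio() makes, minus the unlikely() hint */
	return prio < MAX_RT_PRIO;
}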
/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct list_head uidhash_list;
	uid_t uid;
};

extern struct user_struct *find_user(uid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)

struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long	cpu_time,	/* time spent on the cpu */
			run_delay,	/* time spent waiting on a runqueue */
			pcnt;		/* # of timeslices run on this cpu */

	/* timestamps */
	unsigned long	last_arrival,	/* when we last ran on a cpu */
			last_queued;	/* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_SCHEDSTATS
extern const struct file_operations proc_schedstat_operations;
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum idle_type
{
	SCHED_IDLE,
	NOT_IDLE,
	NEWLY_IDLE,
	MAX_IDLE_TYPES
};

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */

#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		4	/* Balance on exec */
#define SD_BALANCE_FORK		8	/* Balance on fork, clone */
#define SD_WAKE_IDLE		16	/* Wake to idle CPU on task wakeup */
#define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
#define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
#define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
#define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES	512	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		1024	/* Only a single load balancing instance */

#define BALANCE_FOR_MC_POWER	\
	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)

#define BALANCE_FOR_PKG_POWER	\
	((sched_mc_power_savings || sched_smt_power_savings) ?	\
	 SD_POWERSAVINGS_BALANCE : 0)

#define test_sd_parent(sd, flag)	((sd->parent &&		\
					 (sd->parent->flags & flag)) ? 1 : 0)
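/*
 * Illustrative sketch, not part of the original header: the SD_* bits above
 * are OR-ed together into sched_domain.flags.  EXAMPLE_SMT_DOMAIN_FLAGS is a
 * hypothetical mask of the kind a low-level (e.g. SMT sibling) domain might
 * use; the real per-level masks live in the architecture topology headers,
 * not in this file.
 */
#define EXAMPLE_SMT_DOMAIN_FLAGS	(SD_LOAD_BALANCE	\
					| SD_BALANCE_NEWIDLE	\
					| SD_BALANCE_EXEC	\
					| SD_WAKE_AFFINE	\
					| SD_SHARE_CPUPOWER)

/* testing an individual bit in a flags word built from the SD_* values */
static inline int example_flags_balance_on_exec(int flags)
{
	return (flags & SD_BALANCE_EXEC) != 0;
}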
struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	cpumask_t cpumask;

	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU. This is read only (except for setup, hotplug CPU).
	 */
	unsigned long cpu_power;
};

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	cpumask_t span;			/* span of all CPUs in this domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned long long cache_hot_time; /* Task considered cache hot (ns) */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int per_cpu_gain;	/* CPU % gained by adding domain cpus */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	int flags;			/* See SD_* */

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned long lb_cnt[MAX_IDLE_TYPES];
	unsigned long lb_failed[MAX_IDLE_TYPES];
	unsigned long lb_balanced[MAX_IDLE_TYPES];
	unsigned long lb_imbalance[MAX_IDLE_TYPES];
	unsigned long lb_gained[MAX_IDLE_TYPES];
	unsigned long lb_hot_gained[MAX_IDLE_TYPES];
	unsigned long lb_nobusyg[MAX_IDLE_TYPES];
	unsigned long lb_nobusyq[MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned long alb_cnt;
	unsigned long alb_failed;
	unsigned long alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned long sbe_cnt;
	unsigned long sbe_balanced;
	unsigned long sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned long sbf_cnt;
	unsigned long sbf_balanced;
	unsigned long sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned long ttwu_wake_remote;
	unsigned long ttwu_move_affine;
	unsigned long ttwu_move_balance;
#endif
};

extern int partition_sched_domains(cpumask_t *partition1,
				    cpumask_t *partition2);

/*
 * Maximum cache size the migration-costs auto-tuning code will
 * search from:
 */
extern unsigned int max_cache_size;

#endif	/* CONFIG_SMP */


struct io_context;			/* See blkdev.h */
struct cpuset;

#define NGROUPS_SMALL		32
#define NGROUPS_PER_BLOCK	((int)(PAGE_SIZE / sizeof(gid_t)))
struct group_info {
	int ngroups;
	atomic_t usage;
	gid_t small_block[NGROUPS_SMALL];
	int nblocks;
	gid_t *blocks[0];
};

/*
 * get_group_info() must be called with the owning task locked (via task_lock())
 * when task != current.  The reason being that the vast majority of callers are
 * looking at current->group_info, which can not be changed except by the
 * current task.  Changing current->group_info requires the task lock, too.
 */
#define get_group_info(group_info) do { \
	atomic_inc(&(group_info)->usage); \
} while (0)

#define put_group_info(group_info) do { \
	if (atomic_dec_and_test(&(group_info)->usage)) \
		groups_free(group_info); \
} while (0)

extern struct group_info *groups_alloc(int gidsetsize);
extern void groups_free(struct group_info *group_info);
extern int set_current_groups(struct group_info *group_info);
extern int groups_search(struct group_info *group_info, gid_t grp);

/* access the groups "array" with this macro */
#define GROUP_AT(gi, i) \
    ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
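/*
 * Illustrative sketch, not part of the original header: group_info keeps
 * supplementary gids in fixed-size blocks, and GROUP_AT() hides the
 * two-level (block, offset) indexing.  example_groups_contain() below is a
 * hypothetical linear scan over those blocks; kernel code would normally
 * call the groups_search() declared above instead.
 */
static inline int example_groups_contain(struct group_info *gi, gid_t grp)
{
	int i;

	for (i = 0; i < gi->ngroups; i++)
		if (GROUP_AT(gi, i) == grp)
			return 1;
	return 0;
}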
#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

enum sleep_type {
	SLEEP_NORMAL,
	SLEEP_NONINTERACTIVE,
	SLEEP_INTERACTIVE,
	SLEEP_INTERRUPTED,
};

struct prio_array;

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	struct thread_info *thread_info;
	atomic_t usage;
	unsigned long flags;	/* per process flags, defined below */
	unsigned long ptrace;

	int lock_depth;		/* BKL lock depth */

#ifdef CONFIG_SMP
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	int oncpu;
#endif
#endif
	int load_weight;	/* for niceness load balancing purposes */
	int prio, static_prio, normal_prio;
	struct list_head run_list;
	struct prio_array *array;

	unsigned short ioprio;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif
	unsigned long sleep_avg;
	unsigned long long timestamp, last_ran;
	unsigned long long sched_time; /* sched_clock time spent running */
	enum sleep_type sleep_type;
