
sched.h

Axis 221 camera embedded programming interface
	unsigned long policy;
	cpumask_t cpus_allowed;
	unsigned int time_slice, first_time_slice;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
#endif

	struct list_head tasks;
	/*
	 * ptrace_list/ptrace_children forms the list of my children
	 * that were stolen by a ptracer.
	 */
	struct list_head ptrace_children;
	struct list_head ptrace_list;

	struct mm_struct *mm, *active_mm;

/* task state */
	struct linux_binfmt *binfmt;
	long exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;  /*  The signal sent when the parent dies  */
	/* ??? */
	unsigned long personality;
	unsigned did_exec:1;
	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif

	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->parent->pid)
	 */
	struct task_struct *real_parent; /* real parent process (when being debugged) */
	struct task_struct *parent;	/* parent process */
	/*
	 * children/sibling forms the list of my children plus the
	 * tasks I'm ptracing.
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	unsigned long rt_priority;
	cputime_t utime, stime;
	unsigned long nvcsw, nivcsw; /* context switch counts */
	struct timespec start_time;
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	cputime_t it_prof_expires, it_virt_expires;
	unsigned long long it_sched_expires;
	struct list_head cpu_timers[3];

/* process credentials */
	uid_t uid, euid, suid, fsuid;
	gid_t gid, egid, sgid, fsgid;
	struct group_info *group_info;
	kernel_cap_t cap_effective, cap_inheritable, cap_permitted;
	unsigned keep_capabilities:1;
	struct user_struct *user;
#ifdef CONFIG_KEYS
	struct key *request_key_auth;	/* assumed request_key authority */
	struct key *thread_keyring;	/* keyring private to this thread */
	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
#endif
	/*
	 * fpu_counter contains the number of consecutive context switches
	 * that the FPU is used. If this is over a threshold, the lazy fpu
	 * saving becomes unlazy to save the trap. This is an unsigned char
	 * so that after 256 times the counter wraps and the behavior turns
	 * lazy again; this is to deal with bursty apps that only use the FPU
	 * for a short time.
	 */
	unsigned char fpu_counter;
	int oomkilladj;		/* OOM kill score adjustment (bit shift). */
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by flush_old_exec */
/* file system info */
	int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;		/* To be restored with TIF_RESTORE_SIGMASK */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;

	void *security;
	struct audit_context *audit_context;
	seccomp_t seccomp;

/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct plist_head pi_waiters;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	int hardirqs_enabled;
	unsigned long hardirq_enable_ip;
	unsigned int hardirq_enable_event;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_disable_event;
	int softirqs_enabled;
	unsigned long softirq_disable_ip;
	unsigned int softirq_disable_event;
	unsigned long softirq_enable_ip;
	unsigned int softirq_enable_event;
	int hardirq_context;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 30UL
	u64 curr_chain_key;
	int lockdep_depth;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	unsigned int lockdep_recursion;
#endif

/* journalling filesystem info */
	void *journal_info;

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
/*
 * current io wait handle: wait queue entry to use for io waits
 * If this thread is processing aio, this points at the waitqueue
 * inside the currently handled kiocb. It may be NULL (i.e. default
 * to a stack based synchronous wait) if it's doing sync IO.
 */
	wait_queue_t *io_wait;
/* i/o counters (bytes read/written, #syscalls) */
	u64 rchar, wchar, syscr, syscw;
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	cputime_t acct_stimexpd;	/* stime since last update */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;
	short il_next;
#endif
#ifdef CONFIG_CPUSETS
	struct cpuset *cpuset;
	nodemask_t mems_allowed;
	int cpuset_mems_generation;
	int cpuset_mem_spread_rotor;
#endif
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;

	atomic_t fs_excl;	/* holding fs exclusive resources */
	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;
#ifdef	CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
};
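/*
 * [Editorial sketch, not part of the original header] Inside the kernel
 * the fields above are normally reached through the `current` macro,
 * which points at the running task's task_struct.  Assuming process
 * context, a minimal use might look like:
 *
 *	printk(KERN_DEBUG "task %s: pid %d, tgid %d\n",
 *	       current->comm, current->pid, current->tgid);
 */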
static inline pid_t process_group(struct task_struct *tsk)
{
	return tsk->signal->pgrp;
}

static inline pid_t signal_session(struct signal_struct *sig)
{
	return sig->__session;
}

static inline pid_t process_session(struct task_struct *tsk)
{
	return signal_session(tsk->signal);
}

static inline void set_signal_session(struct signal_struct *sig, pid_t session)
{
	sig->__session = session;
}

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 */
static inline int pid_alive(struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 */
static inline int is_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}
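/*
 * [Editorial sketch, not from the original source] These accessors are
 * used on a task reference the caller already holds, e.g. under
 * rcu_read_lock() or tasklist_lock.  Assuming `p` is such a pointer:
 *
 *	if (pid_alive(p) && !is_init(p))
 *		printk(KERN_DEBUG "pid %d, pgrp %d, session %d\n",
 *		       p->pid, process_group(p), process_session(p));
 */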
extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);

#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}

/*
 * Per process flags
 */
#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
					/* Not implemented yet, only for 486*/
#define PF_STARTING	0x00000002	/* being created */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_SWAPOFF	0x00080000	/* I am in swapoff */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_BORROWED_MM	0x00200000	/* I am a kthread doing use_mm */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

#ifdef CONFIG_SMP
extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
#else
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
	if (!cpu_isset(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif
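/*
 * [Editorial sketch, not from the original source] get_task_struct()/
 * put_task_struct() bracket any use of a task outside the locks that
 * found it, and set_cpus_allowed() takes the new mask by value.
 * Assuming `p` is a valid task_struct pointer, pinning it to CPU 0
 * might look like:
 *
 *	get_task_struct(p);
 *	if (set_cpus_allowed(p, cpumask_of_cpu(0)) < 0)
 *		printk(KERN_WARNING "could not pin %s\n", p->comm);
 *	put_task_struct(p);
 */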
extern unsigned long long sched_clock(void);
extern unsigned long long
current_sched_time(const struct task_struct *current_task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

extern void sched_idle_next(void);

#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
#else
static inline int rt_mutex_getprio(struct task_struct *p)
{
	return p->normal_prio;
}
# define rt_mutex_adjust_pi(p)		do { } while (0)
#endif

extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
extern int task_nice(const struct task_struct *p);
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
extern struct task_struct *idle_task(int cpu);
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);

void yield(void);

/*
 * The default (Linux) execution domain.
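/*
 * [Editorial sketch, not from the original source] sched_setscheduler()
 * above is the in-kernel counterpart of the sched_setscheduler(2)
 * syscall.  Assuming `p` is a valid task and the caller may set RT
 * priorities, moving it to the FIFO class at priority 50 would look like:
 *
 *	struct sched_param param = { .sched_priority = 50 };
 *
 *	if (sched_setscheduler(p, SCHED_FIFO, &param) < 0)
 *		printk(KERN_WARNING "failed to make %s SCHED_FIFO\n", p->comm);
 */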
