
sched.h

Axis 221 camera embedded programming interface
Page 1 of 4
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <linux/auxvec.h>	/* For AT_VECTOR_SIZE */

/*
 * cloning flags:
 */
#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD	0x00010000	/* Same thread group? */
#define CLONE_NEWNS	0x00020000	/* New namespace group? */
#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
#define CLONE_STOPPED		0x02000000	/* Start in stopped state */
#define CLONE_NEWUTS		0x04000000	/* New utsname group? */
#define CLONE_NEWIPC		0x08000000	/* New ipcs */

/*
 * Scheduling policies
 */
#define SCHED_NORMAL		0
#define SCHED_FIFO		1
#define SCHED_RR		2
#define SCHED_BATCH		3

#ifdef __KERNEL__

struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>

#include <asm/system.h>
#include <asm/semaphore.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/securebits.h>
#include <linux/fs_struct.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/futex.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constant used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ)		/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);
extern unsigned long weighted_cpuload(const int cpu);

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define TASK_STOPPED		4
#define TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_NONINTERACTIVE	64
#define TASK_DEAD		128

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)			\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

extern void sched_init(void);
extern void sched_init_smp(void);
extern void init_idle(struct task_struct *idle, int cpu);

extern cpumask_t nohz_cpu_mask;

/*
 * Only dump TASK_* tasks. (-1 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(-1);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

#ifdef CONFIG_DETECT_SOFTLOCKUP
extern void softlockup_tick(void);
extern void spawn_softlockup_task(void);
extern void touch_softlockup_watchdog(void);
#else
static inline void softlockup_tick(void)
{
}
static inline void spawn_softlockup_task(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))
/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);

struct nsproxy;

/* Maximum number of active map areas.. This is a random (large) number */
#define DEFAULT_MAX_MAP_COUNT	65536

extern int sysctl_max_map_count;

#include <linux/aio.h>

extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
/*
 * The mm counters are not protected by its page_table_lock,
 * so must be incremented atomically.
 */
#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
typedef atomic_long_t mm_counter_t;

#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
/*
 * The mm counters are protected by its page_table_lock,
 * so can be incremented directly.
 */
#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
#define get_mm_counter(mm, member) ((mm)->_##member)
#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
#define inc_mm_counter(mm, member) (mm)->_##member++
#define dec_mm_counter(mm, member) (mm)->_##member--
typedef unsigned long mm_counter_t;

#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */

#define get_mm_rss(mm)					\
	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
#define update_hiwater_rss(mm)	do {			\
	unsigned long _rss = get_mm_rss(mm);		\
	if ((mm)->hiwater_rss < _rss)			\
		(mm)->hiwater_rss = _rss;		\
} while (0)
#define update_hiwater_vm(mm)	do {			\
	if ((mm)->hiwater_vm < (mm)->total_vm)		\
		(mm)->hiwater_vm = (mm)->total_vm;	\
} while (0)

struct mm_struct {
	struct vm_area_struct * mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	struct vm_area_struct * mmap_cache;	/* last find_vma result */
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long task_size;		/* size of task vm space */
	unsigned long cached_hole_size;         /* if non-zero, the largest hole below free_area_cache */
	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */
	pgd_t * pgd;
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	int map_count;				/* number of VMAs */
	struct rw_semaphore mmap_sem;
	spinlock_t page_table_lock;		/* Protects page tables and some counters */

	struct list_head mmlist;		/* List of maybe swapped mm's.  These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */

	/* Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	mm_counter_t _file_rss;
	mm_counter_t _anon_rss;

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm, locked_vm, shared_vm, exec_vm;
	unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	cpumask_t cpu_vm_mask;

	/* Architecture-specific MM context */
	mm_context_t context;

	/* Swap token stuff */
	/*
	 * Last value of global fault stamp as seen by this process.
	 * In other words, this value gives an indication of how long
	 * it has been since this task got the token.
	 * Look at mm/thrash.c
	 */
	unsigned int faultstamp;
	unsigned int token_priority;
	unsigned int last_interval;

	unsigned char dumpable:2;	/* coredumping support */
	int core_waiters;
	struct completion *core_startup_done, core_done;

	/* aio bits */
	rwlock_t		ioctx_list_lock;
	struct kioctx		*ioctx_list;
};

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

/*
 * NOTE! "signal_struct" does not have it's own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		count;
	atomic_t		live;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
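
The CLONE_* bits defined near the top of this header are the same flag values userspace passes to the clone() system call, so their sharing semantics can be observed from an ordinary program. The following standalone sketch (not part of sched.h; the child_fn name and the 1 MiB stack size are arbitrary choices for illustration) uses CLONE_VM so the child writes directly into the parent's address space:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

static int shared_value;

static int child_fn(void *arg)
{
	shared_value = 42;	/* visible to the parent because of CLONE_VM */
	return 0;
}

int main(void)
{
	const size_t stack_size = 1024 * 1024;
	char *stack = malloc(stack_size);
	if (!stack)
		return 1;

	/* Stacks grow downward on common architectures: pass the top of the buffer. */
	pid_t pid = clone(child_fn, stack + stack_size,
			  CLONE_VM | CLONE_FS | CLONE_FILES | SIGCHLD, NULL);
	if (pid == -1)
		return 1;

	waitpid(pid, NULL, 0);
	printf("shared_value = %d\n", shared_value);	/* prints 42 */
	free(stack);
	return 0;
}

Without CLONE_VM the child would get a copy-on-write copy of the address space (as with fork()) and the parent would still see 0.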
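The load-average comment in the listing describes 11-bit fixed-point exponential decay, and CALC_LOAD implements one LOAD_FREQ (5-second) update step. The standalone program below is an illustrative sketch, not kernel code: it copies the FSHIFT/FIXED_1/EXP_1 definitions from the header and applies CALC_LOAD to a synthetic run-queue length to show how an avenrun-style 1-minute average converges.

#include <stdio.h>

/* Fixed-point load-average constants, copied from the header above. */
#define FSHIFT		11			/* nr of bits of precision */
#define FIXED_1		(1 << FSHIFT)		/* 1.0 as fixed-point */
#define EXP_1		1884			/* 1/exp(5sec/1min) as fixed-point */

#define CALC_LOAD(load, exp, n) \
	load *= exp; \
	load += n * (FIXED_1 - exp); \
	load >>= FSHIFT;

int main(void)
{
	unsigned long avenrun_1 = 0;		/* 1-minute average, fixed-point */
	unsigned long active = 3 * FIXED_1;	/* pretend 3 tasks are runnable */

	/* Simulate one minute of 5-second sampling ticks. */
	for (int tick = 1; tick <= 12; tick++) {
		CALC_LOAD(avenrun_1, EXP_1, active);
		printf("after %2d ticks: load = %lu.%02lu\n", tick,
		       avenrun_1 >> FSHIFT,
		       ((avenrun_1 & (FIXED_1 - 1)) * 100) >> FSHIFT);
	}
	return 0;
}

With a constant run-queue length of 3, the printed value climbs from 0.24 toward 3, reaching roughly 1.9 after a minute, which matches the 1/exp(5sec/1min) decay constant the comment describes.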
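The comment above set_current_state() gives the canonical sleep pattern: publish the new task state (with the barrier implied by set_mb()), then re-check the wakeup condition before calling schedule(), so a wakeup cannot slip in between the test and the sleep. A kernel-style sketch of that pattern is shown below; my_event_ready() is a hypothetical condition, not an API from this header, and real code would usually also register on a wait queue before sleeping.

#include <linux/sched.h>

/* Hypothetical condition checked by the sleeper; not part of sched.h. */
extern int my_event_ready(void);

static void wait_for_my_event(void)
{
	for (;;) {
		/*
		 * set_current_state() orders the state write before the
		 * condition test that follows, so a concurrent waker that
		 * makes the condition true cannot be missed.
		 */
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (my_event_ready())
			break;
		schedule();		/* sleep until somebody wakes us */
	}
	__set_current_state(TASK_RUNNING);	/* no barrier needed here */
}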
