sched.h

来自「Linux Kernel 2.6.9 for OMAP1710」· C头文件 代码 · 共 1,036 行 · 第 1/3 页

H
1,036
字号
/* NOTE(review): this chunk is part 1/3 of include/linux/sched.h (2.6.9 era);
 * the opening include-guard and struct definitions live outside this view. */

extern void switch_uid(struct user_struct *);

#include <asm/current.h>

extern unsigned long itimer_ticks;
extern unsigned long itimer_next;

extern void do_timer(struct pt_regs *);

/* Scheduler wake-up entry points (FASTCALL: register-based calling convention). */
extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
extern int FASTCALL(wake_up_process(struct task_struct * tsk));
extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
						unsigned long clone_flags));
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 /* On UP there is no other CPU to kick, so this is a no-op. */
 static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void FASTCALL(sched_fork(task_t * p));
extern void FASTCALL(sched_exit(task_t * p));

extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);

extern void proc_caches_init(void);

/* Signal handling. */
extern void flush_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

/*
 * Convenience wrapper: dequeue a signal for @tsk while holding
 * tsk->sighand->siglock (taken irq-safe around the call).
 */
static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	ret = dequeue_signal(tsk, mask, info);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

	return ret;
}

extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int send_group_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
extern int kill_pg_info(int, struct siginfo *, pid_t);
extern int kill_sl_info(int, struct siginfo *, pid_t);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
extern int kill_pg(pid_t, int, int);
extern int kill_sl(pid_t, int, int);
extern int kill_proc(pid_t, int, int);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(int, struct sigqueue *,  struct task_struct *);
extern int send_group_sigqueue(int, struct sigqueue *,  struct task_struct *);
extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

/* True if we are on the alternate signal stack.  */
/* NOTE(review): relies on unsigned wrap-around — if sp is below sas_ss_sp the
 * subtraction underflows to a huge value and the comparison is false. */
static inline int on_sig_stack(unsigned long sp)
{
	return (sp - current->sas_ss_sp < current->sas_ss_size);
}

/* sigaltstack() ss_flags for the current task: SS_DISABLE when no alternate
 * stack is configured, SS_ONSTACK while running on it, otherwise 0. */
static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}

#ifdef CONFIG_SECURITY
/* code is in security.c */
extern int capable(int cap);
#else
/*
 * Non-CONFIG_SECURITY fallback: a task is capable if the bit is raised in
 * its effective capability set; using a capability marks the task with
 * PF_SUPERPRIV (it "used super-user privileges").
 */
static inline int capable(int cap)
{
	if (cap_raised(current->cap_effective, cap)) {
		current->flags |= PF_SUPERPRIV;
		return 1;
	}
	return 0;
}
#endif

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void FASTCALL(__mmdrop(struct mm_struct *));
/* Drop a reference on mm->mm_count; frees the mm when it hits zero. */
static inline void mmdrop(struct mm_struct * mm)
{
	if (atomic_dec_and_test(&mm->mm_count))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

extern int  copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_mm(struct task_struct *);
extern void exit_files(struct task_struct *);
extern void exit_signal(struct task_struct *);
extern void __exit_signal(struct task_struct *);
extern void exit_sighand(struct task_struct *);
extern void __exit_sighand(struct task_struct *);
extern void exit_itimers(struct signal_struct *);

extern NORET_TYPE void do_group_exit(int);

extern void reparent_to_init(void);
extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);

extern task_t *child_reaper;

extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
task_t *fork_idle(int);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern void get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void wait_task_inactive(task_t * p);
#else
/* UP: a task that is not current cannot be running, so nothing to wait for. */
#define wait_task_inactive(p)	do { } while (0)
#endif

/* Task-list / thread-group linkage macros.
 * NOTE(review): callers are presumably expected to hold the tasklist lock;
 * confirm against the .c users — not visible from this header chunk. */
#define remove_parent(p)	list_del_init(&(p)->sibling)
#define add_parent(p, parent)	list_add_tail(&(p)->sibling,&(parent)->children)

#define REMOVE_LINKS(p) do {					\
	if (thread_group_leader(p))				\
		list_del_init(&(p)->tasks);			\
	remove_parent(p);					\
	} while (0)

#define SET_LINKS(p) do {					\
	if (thread_group_leader(p))				\
		list_add_tail(&(p)->tasks,&init_task.tasks);	\
	add_parent(p, (p)->parent);				\
	} while (0)

#define next_task(p)	list_entry((p)->tasks.next, struct task_struct, tasks)
#define prev_task(p)	list_entry((p)->tasks.prev, struct task_struct, tasks)

/* Iterate over every thread-group leader, starting after init_task. */
#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

extern task_t * FASTCALL(next_thread(const task_t *p));

/* The group leader is the thread whose pid equals the thread-group id. */
#define thread_group_leader(p)	(p->pid == p->tgid)

/* True when @p has no other threads in its thread group. */
static inline int thread_group_empty(task_t *p)
{
	return list_empty(&p->pids[PIDTYPE_TGID].pid_list);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

extern void unhash_process(struct task_struct *p);

/*
 * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm and
 * synchronises with wait4().
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(tsk->thread_info,flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(tsk->thread_info,flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(tsk->thread_info,flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(tsk->thread_info,flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(tsk->thread_info,flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

/* True if @p has a signal awaiting delivery (TIF_SIGPENDING set). */
static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

/* True if the current task should yield the CPU (TIF_NEED_RESCHED set). */
static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

extern void __cond_resched(void);

/* Voluntary preemption point: schedule only if a reschedule is pending. */
static inline void cond_resched(void)
{
	if (need_resched())
		__cond_resched();
}

/*
 * cond_resched_lock() - if a reschedule is pending, drop the given lock,
 * call schedule, and on return reacquire the lock.
 *
 * This works OK both with and without CONFIG_PREEMPT.  We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 */
static inline void cond_resched_lock(spinlock_t * lock)
{
	if (need_resched()) {
		/* _raw_spin_unlock + preempt_enable_no_resched avoid the
		 * preemption check a plain spin_unlock() would perform. */
		_raw_spin_unlock(lock);
		preempt_enable_no_resched();
		__cond_resched();
		spin_lock(lock);
	}
}

/* Reevaluate whether the task has signals pending delivery.
   This is required every time the blocked sigset_t changes.
   callers must hold sighand->siglock.  */
extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t));
extern void recalc_sigpending(void);

extern void signal_wake_up(struct task_struct *t, int resume_stopped);

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return p->thread_info->cpu;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	p->thread_info->cpu = cpu;
}

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
extern void arch_pick_mmap_layout(struct mm_struct *mm);
#else
/* Default mmap layout when the architecture supplies no override. */
static inline void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
#endif

extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);

#endif /* __KERNEL__ */

#endif

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?