📄 sched.h
 */
extern struct exec_domain	default_exec_domain;

union thread_union {
	struct thread_info thread_info;
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct mm_struct init_mm;

#define find_task_by_pid(nr)	find_task_by_pid_type(PIDTYPE_PID, nr)
extern struct task_struct *find_task_by_pid_type(int type, int pid);
extern void __set_special_pids(pid_t session, pid_t pgrp);

/* per-UID process charging. */
extern struct user_struct * alloc_uid(uid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_inc(&u->__count);
	return u;
}
extern void free_uid(struct user_struct *);
extern void switch_uid(struct user_struct *);

#include <asm/current.h>

extern void do_timer(unsigned long ticks);

extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
extern int FASTCALL(wake_up_process(struct task_struct * tsk));
extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
						unsigned long clone_flags));
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
extern void FASTCALL(sched_exit(struct task_struct * p));

extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	ret = dequeue_signal(tsk, mask, info);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

	return ret;
}

extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int send_group_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
extern int kill_pg_info(int, struct siginfo *, pid_t);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
extern int kill_pg(pid_t, int, int);
extern int kill_proc(pid_t, int, int);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
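/*
 * Illustrative sketch, not part of the original header: how a caller
 * might use the per-UID charging helpers above.  get_uid() takes a
 * counted reference on a user_struct, and every successful get must be
 * balanced by a free_uid() once the reference is no longer needed.
 * pin_task_uid() is a hypothetical helper as it might appear in a .c
 * file that includes this header.
 */
static inline struct user_struct *pin_task_uid(struct task_struct *tsk)
{
	/* bump ->__count so the user_struct outlives tsk if need be */
	return get_uid(tsk->user);
}
/* ... and later the caller releases the reference with free_uid(user); */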
extern int send_sigqueue(int, struct sigqueue *, struct task_struct *);
extern int send_group_sigqueue(int, struct sigqueue *, struct task_struct *);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

/* True if we are on the alternate signal stack.  */
static inline int on_sig_stack(unsigned long sp)
{
	return (sp - current->sas_ss_sp < current->sas_ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void FASTCALL(__mmdrop(struct mm_struct *));
static inline void mmdrop(struct mm_struct * mm)
{
	if (atomic_dec_and_test(&mm->mm_count))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

extern int  copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_signal(struct signal_struct *);
extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);

extern NORET_TYPE void do_group_exit(int);

extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);

extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern void get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void wait_task_inactive(struct task_struct * p);
#else
#define wait_task_inactive(p)	do { } while (0)
#endif

#define remove_parent(p)	list_del_init(&(p)->sibling)
#define add_parent(p)		list_add_tail(&(p)->sibling,&(p)->parent->children)

#define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)
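/*
 * Illustrative sketch, not part of the original header: because
 * do_each_thread/while_each_thread expand to a double loop, 'break'
 * only leaves the inner loop - exit with goto, as the comment above
 * warns.  find_task_by_comm() is a hypothetical helper for a .c file
 * including this header; the caller would hold tasklist_lock (or an
 * RCU read-side section) around the walk.
 */
static inline struct task_struct *find_task_by_comm(const char *comm)
{
	struct task_struct *g, *t;

	do_each_thread(g, t) {
		if (!strcmp(t->comm, comm))
			goto found;	/* 'break' would only end the inner loop */
	} while_each_thread(g, t);
	t = NULL;
found:
	return t;
}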
/* de_thread depends on thread_group_leader not being a pid based check */
#define thread_group_leader(p)	(p == p->group_leader)

/* Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */
static inline int has_group_leader_pid(struct task_struct *p)
{
	return p->pid == p->tgid;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry(rcu_dereference(p->thread_group.next),
			  struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task) (task)->thread_info
#define task_stack_page(task) ((void*)((task)->thread_info))

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

static inline unsigned long *end_of_stack(struct task_struct *p)
{
	return (unsigned long *)(p->thread_info + 1);
}

#endif

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}
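/*
 * Illustrative sketch, not part of the original header:
 * lock_task_sighand() returns the task's sighand_struct with ->siglock
 * held and interrupts disabled, or NULL if the task is already dead, so
 * the result must be checked before the paired unlock_task_sighand().
 * task_signal_pending_locked() is a hypothetical helper for a .c file
 * including this header.
 */
static inline int task_signal_pending_locked(struct task_struct *tsk)
{
	struct sighand_struct *sighand;
	unsigned long flags;
	int pending = 0;

	sighand = lock_task_sighand(tsk, &flags);
	if (sighand) {
		/* signal state is stable while we hold siglock */
		pending = signal_pending(tsk);
		unlock_task_sighand(tsk, &flags);
	}
	return pending;
}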
/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe.  The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
extern int cond_resched(void);
extern int cond_resched_lock(spinlock_t * lock);
extern int cond_resched_softirq(void);

/*
 * Does a critical section need to be broken due to another
 * task waiting?:
 */
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
# define need_lockbreak(lock) ((lock)->break_lock)
#else
# define need_lockbreak(lock) 0
#endif

/*
 * Does a critical section need to be broken due to another
 * task waiting or preemption being signalled:
 */
static inline int lock_need_resched(spinlock_t *lock)
{
	if (need_lockbreak(lock) || need_resched())
		return 1;
	return 0;
}

/* Reevaluate whether the task has signals pending delivery.
   This is required every time the blocked sigset_t changes.
   Callers must hold sighand->siglock.  */

extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t));
extern void recalc_sigpending(void);

extern void signal_wake_up(struct task_struct *t, int resume_stopped);

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	task_thread_info(p)->cpu = cpu;
}

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
extern void arch_pick_mmap_layout(struct mm_struct *mm);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
#endif

extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);

#include <linux/sysdev.h>
extern int sched_mc_power_savings, sched_smt_power_savings;
extern struct sysdev_attribute attr_sched_mc_power_savings, attr_sched_smt_power_savings;
extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);

extern void normalize_rt_tasks(void);

#endif /* __KERNEL__ */

#endif
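/*
 * Illustrative sketch, not part of the original header: breaking up a
 * long critical section with cond_resched_lock(), as it might appear in
 * a .c file that includes this header.  When lock_need_resched() says
 * another task is spinning on the lock or a reschedule is due,
 * cond_resched_lock() drops the spinlock, schedules, and retakes it -
 * the walk then restarts, since the list may have changed meanwhile.
 * process_all(), the item list, and the restart policy are hypothetical.
 */
static inline void process_all(spinlock_t *lock, struct list_head *items)
{
	struct list_head *pos;

	spin_lock(lock);
restart:
	list_for_each(pos, items) {
		/* ... process one entry ... */
		if (lock_need_resched(lock)) {
			cond_resched_lock(lock); /* may drop and retake lock */
			goto restart;		 /* list may have changed */
		}
	}
	spin_unlock(lock);
}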