
📄 sched.h

📁 xen virtual machine source code package
📖 Page 1 of 2
    /* Initialised by loader: Private. */
    unsigned long elf_paddr_offset;
    unsigned int  load_symtab;
    unsigned long symtab_addr;
    unsigned long symtab_len;
};

extern struct vcpu *idle_vcpu[NR_CPUS];
#define IDLE_DOMAIN_ID   (0x7FFFU)
#define is_idle_domain(d) ((d)->domain_id == IDLE_DOMAIN_ID)
#define is_idle_vcpu(v)   (is_idle_domain((v)->domain))

#define DOMAIN_DESTROYED (1<<31) /* assumes atomic_t is >= 32 bits */
#define put_domain(_d) \
  if ( atomic_dec_and_test(&(_d)->refcnt) ) domain_destroy(_d)

/*
 * Use this when you don't have an existing reference to @d. It returns
 * FALSE if @d is being destroyed.
 */
static always_inline int get_domain(struct domain *d)
{
    atomic_t old, new, seen = d->refcnt;
    do
    {
        old = seen;
        if ( unlikely(_atomic_read(old) & DOMAIN_DESTROYED) )
            return 0;
        _atomic_set(new, _atomic_read(old) + 1);
        seen = atomic_compareandswap(old, new, &d->refcnt);
    }
    while ( unlikely(_atomic_read(seen) != _atomic_read(old)) );
    return 1;
}

/*
 * Use this when you already have, or are borrowing, a reference to @d.
 * In this case we know that @d cannot be destroyed under our feet.
 */
static inline void get_knownalive_domain(struct domain *d)
{
    atomic_inc(&d->refcnt);
    ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTROYED));
}

/* Obtain a reference to the currently-running domain. */
static inline struct domain *get_current_domain(void)
{
    struct domain *d = current->domain;
    get_knownalive_domain(d);
    return d;
}

struct domain *domain_create(
    domid_t domid, unsigned int domcr_flags, ssidref_t ssidref);
 /* DOMCRF_hvm: Create an HVM domain, as opposed to a PV domain. */
#define _DOMCRF_hvm   0
#define DOMCRF_hvm    (1U<<_DOMCRF_hvm)
 /* DOMCRF_hap: Create a domain with hardware-assisted paging. */
#define _DOMCRF_hap   1
#define DOMCRF_hap    (1U<<_DOMCRF_hap)
 /* DOMCRF_dummy: Create a dummy domain (not scheduled; not on domain list) */
#define _DOMCRF_dummy 2
#define DOMCRF_dummy  (1U<<_DOMCRF_dummy)

int construct_dom0(
    struct domain *d,
    unsigned long image_start, unsigned long image_len,
    unsigned long initrd_start, unsigned long initrd_len,
    char *cmdline);

/*
 * rcu_lock_domain_by_id() is more efficient than get_domain_by_id().
 * This is the preferred function if the returned domain reference
 * is short lived, but it cannot be used if the domain reference needs
 * to be kept beyond the current scope (e.g., across a softirq).
 * The returned domain reference must be discarded using rcu_unlock_domain().
 */
struct domain *rcu_lock_domain_by_id(domid_t dom);

/* Finish a RCU critical region started by rcu_lock_domain_by_id(). */
static inline void rcu_unlock_domain(struct domain *d)
{
    rcu_read_unlock(&domlist_read_lock);
}

static inline struct domain *rcu_lock_domain(struct domain *d)
{
    rcu_read_lock(d);
    return d;
}

static inline struct domain *rcu_lock_current_domain(void)
{
    return rcu_lock_domain(current->domain);
}

struct domain *get_domain_by_id(domid_t dom);
void domain_destroy(struct domain *d);
int domain_kill(struct domain *d);
void domain_shutdown(struct domain *d, u8 reason);
void domain_resume(struct domain *d);
void domain_pause_for_debugger(void);
int vcpu_start_shutdown_deferral(struct vcpu *v);
void vcpu_end_shutdown_deferral(struct vcpu *v);
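/*
 * Illustrative sketch (not part of the original header): the two domain
 * lookup patterns contrasted above. example_inspect_domain() is a
 * hypothetical helper added purely for illustration; the lookup and
 * release calls are the ones declared in this header.
 */
static inline int example_inspect_domain(domid_t dom)
{
    struct domain *d;

    /* Short-lived reference, used only within this scope: prefer RCU. */
    d = rcu_lock_domain_by_id(dom);
    if ( d == NULL )
        return -1;
    /* ... read fields of @d here; valid only until the unlock ... */
    rcu_unlock_domain(d);

    /*
     * Reference that must outlive the current scope (e.g., across a
     * softirq): take a proper refcount instead.
     */
    d = get_domain_by_id(dom);
    if ( d == NULL )
        return -1;
    /* ... @d remains valid until the matching put_domain() ... */
    put_domain(d);

    return 0;
}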
/*
 * Mark specified domain as crashed. This function always returns, even if the
 * caller is the specified domain. The domain is not synchronously descheduled
 * from any processor.
 */
void __domain_crash(struct domain *d);
#define domain_crash(d) do {                                              \
    printk("domain_crash called from %s:%d\n", __FILE__, __LINE__);       \
    __domain_crash(d);                                                    \
} while (0)

/*
 * Mark current domain as crashed and synchronously deschedule from the local
 * processor. This function never returns.
 */
void __domain_crash_synchronous(void) __attribute__((noreturn));
#define domain_crash_synchronous() do {                                   \
    printk("domain_crash_sync called from %s:%d\n", __FILE__, __LINE__);  \
    __domain_crash_synchronous();                                         \
} while (0)

#define set_current_state(_s) do { current->state = (_s); } while (0)
void scheduler_init(void);
int  sched_init_vcpu(struct vcpu *v, unsigned int processor);
void sched_destroy_vcpu(struct vcpu *v);
int  sched_init_domain(struct domain *d);
void sched_destroy_domain(struct domain *d);
long sched_adjust(struct domain *, struct xen_domctl_scheduler_op *);
int  sched_id(void);
void vcpu_wake(struct vcpu *d);
void vcpu_sleep_nosync(struct vcpu *d);
void vcpu_sleep_sync(struct vcpu *d);

/*
 * Force synchronisation of given VCPU's state. If it is currently descheduled,
 * this call will ensure that all its state is committed to memory and that
 * no CPU is using critical state (e.g., page tables) belonging to the VCPU.
 */
void sync_vcpu_execstate(struct vcpu *v);

/*
 * Called by the scheduler to switch to another VCPU. This function must
 * call context_saved(@prev) when the local CPU is no longer running in
 * @prev's context, and that context is saved to memory. Alternatively, if
 * implementing lazy context switching, it suffices to ensure that invoking
 * sync_vcpu_execstate() will switch and commit @prev's state.
 */
void context_switch(
    struct vcpu *prev,
    struct vcpu *next);

/*
 * As described above, context_switch() must call this function when the
 * local CPU is no longer running in @prev's context, and @prev's context is
 * saved to memory. Alternatively, if implementing lazy context switching,
 * ensure that invoking sync_vcpu_execstate() will switch and commit @prev.
 */
void context_saved(struct vcpu *prev);

/* Called by the scheduler to continue running the current VCPU. */
void continue_running(
    struct vcpu *same);

void startup_cpu_idle_loop(void);

/*
 * Creates a continuation to resume the current hypercall. The caller should
 * return immediately, propagating the value returned from this invocation.
 * The format string specifies the types and number of hypercall arguments.
 * It contains one character per argument as follows:
 *  'i' [unsigned] {char, int}
 *  'l' [unsigned] long
 *  'h' guest handle (XEN_GUEST_HANDLE(foo))
 */
unsigned long hypercall_create_continuation(
    unsigned int op, const char *format, ...);

#define hypercall_preempt_check() (unlikely(    \
        softirq_pending(smp_processor_id()) |   \
        local_events_need_delivery()            \
    ))
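/*
 * Illustrative sketch (not part of the original header): the continuation
 * pattern described above. do_example_op() and __HYPERVISOR_example_op are
 * hypothetical names used purely for illustration; the format string "il"
 * encodes the two resumed arguments per the table above ('i' for the
 * unsigned int, 'l' for the unsigned long).
 */
static long do_example_op(unsigned int cmd, unsigned long nr_items)
{
    unsigned long i;

    for ( i = 0; i < nr_items; i++ )
    {
        /* ... perform one bounded unit of work for item @i ... */

        /*
         * If softirqs are pending or the guest needs event delivery,
         * arrange to re-enter this hypercall with the remaining work and
         * return immediately, propagating the returned value as the
         * comment above prescribes.
         */
        if ( hypercall_preempt_check() && ((i + 1) < nr_items) )
            return hypercall_create_continuation(
                __HYPERVISOR_example_op, "il", cmd, nr_items - i - 1);
    }

    return 0;
}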
/* Protect updates/reads (resp.) of domain_list and domain_hash. */
extern spinlock_t domlist_update_lock;
extern rcu_read_lock_t domlist_read_lock;
extern struct domain *domain_list;

/* Caller must hold the domlist_read_lock or domlist_update_lock. */
#define for_each_domain(_d)                     \
 for ( (_d) = rcu_dereference(domain_list);     \
       (_d) != NULL;                            \
       (_d) = rcu_dereference((_d)->next_in_list) )

#define for_each_vcpu(_d,_v)                    \
 for ( (_v) = (_d)->vcpu[0];                    \
       (_v) != NULL;                            \
       (_v) = (_v)->next_in_list )

/*
 * Per-VCPU pause flags.
 */
 /* Domain is blocked waiting for an event. */
#define _VPF_blocked         0
#define VPF_blocked          (1UL<<_VPF_blocked)
 /* VCPU is offline. */
#define _VPF_down            1
#define VPF_down             (1UL<<_VPF_down)
 /* VCPU is blocked awaiting an event to be consumed by Xen. */
#define _VPF_blocked_in_xen  2
#define VPF_blocked_in_xen   (1UL<<_VPF_blocked_in_xen)
 /* VCPU affinity has changed: migrating to a new CPU. */
#define _VPF_migrating       3
#define VPF_migrating        (1UL<<_VPF_migrating)

static inline int vcpu_runnable(struct vcpu *v)
{
    return !(v->pause_flags |
             atomic_read(&v->pause_count) |
             atomic_read(&v->domain->pause_count));
}

void vcpu_pause(struct vcpu *v);
void vcpu_pause_nosync(struct vcpu *v);
void domain_pause(struct domain *d);
void vcpu_unpause(struct vcpu *v);
void domain_unpause(struct domain *d);
void domain_pause_by_systemcontroller(struct domain *d);
void domain_unpause_by_systemcontroller(struct domain *d);
void cpu_init(void);

void vcpu_force_reschedule(struct vcpu *v);
int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);
int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity);
void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity);

void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);

static inline void vcpu_unblock(struct vcpu *v)
{
    if ( test_and_clear_bit(_VPF_blocked, &v->pause_flags) )
        vcpu_wake(v);
}

#define IS_PRIV(_d) ((_d)->is_privileged)
#define IS_PRIV_FOR(_d, _t) (IS_PRIV(_d) || ((_d)->target && (_d)->target == (_t)))

#ifndef IS_COMPAT
#define IS_COMPAT(d) 0
#endif

#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))

#define is_hvm_domain(d) ((d)->is_hvm)
#define is_hvm_vcpu(v)   (is_hvm_domain(v->domain))
#define need_iommu(d)    ((d)->need_iommu && !(d)->is_hvm)

extern enum cpufreq_controller {
    FREQCTL_none, FREQCTL_dom0_kernel, FREQCTL_xen
} cpufreq_controller;

#endif /* __SCHED_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
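/*
 * Illustrative sketch (not part of the original header): walking the domain
 * list with the iterators above. count_online_vcpus() is a hypothetical
 * helper added for illustration; note the RCU read lock taken to satisfy
 * the locking requirement stated for for_each_domain().
 */
static unsigned int count_online_vcpus(void)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int n = 0;

    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
        for_each_vcpu ( d, v )
            if ( !test_bit(_VPF_down, &v->pause_flags) )
                n++;
    rcu_read_unlock(&domlist_read_lock);

    return n;
}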
