
sched.h

From: xen virtual machine source code package
Page 1 of 2
#ifndef __SCHED_H__
#define __SCHED_H__

#include <xen/config.h>
#include <xen/types.h>
#include <xen/spinlock.h>
#include <xen/smp.h>
#include <xen/shared.h>
#include <public/xen.h>
#include <public/domctl.h>
#include <public/vcpu.h>
#include <public/xsm/acm.h>
#include <xen/time.h>
#include <xen/timer.h>
#include <xen/grant_table.h>
#include <xen/rangeset.h>
#include <asm/domain.h>
#include <xen/xenoprof.h>
#include <xen/rcupdate.h>
#include <xen/irq.h>

#ifdef CONFIG_COMPAT
#include <compat/vcpu.h>
DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_compat_t);
#endif

/* A global pointer to the initial domain (DOM0). */
extern struct domain *dom0;

#ifndef CONFIG_COMPAT
#define MAX_EVTCHNS(d)     NR_EVENT_CHANNELS
#else
#define MAX_EVTCHNS(d)     (!IS_COMPAT(d) ? \
                            NR_EVENT_CHANNELS : \
                            sizeof(unsigned int) * sizeof(unsigned int) * 64)
#endif
#define EVTCHNS_PER_BUCKET 128
#define NR_EVTCHN_BUCKETS  (NR_EVENT_CHANNELS / EVTCHNS_PER_BUCKET)

struct evtchn
{
#define ECS_FREE         0 /* Channel is available for use.                  */
#define ECS_RESERVED     1 /* Channel is reserved.                           */
#define ECS_UNBOUND      2 /* Channel is waiting to bind to a remote domain. */
#define ECS_INTERDOMAIN  3 /* Channel is bound to another domain.            */
#define ECS_PIRQ         4 /* Channel is bound to a physical IRQ line.       */
#define ECS_VIRQ         5 /* Channel is bound to a virtual IRQ line.        */
#define ECS_IPI          6 /* Channel is bound to a virtual IPI line.        */
    u8  state;             /* ECS_* */
    u8  consumer_is_xen;   /* Consumed by Xen or by guest? */
    u16 notify_vcpu_id;    /* VCPU for local delivery notification */
    union {
        struct {
            domid_t remote_domid;
        } unbound;     /* state == ECS_UNBOUND */
        struct {
            u16            remote_port;
            struct domain *remote_dom;
        } interdomain; /* state == ECS_INTERDOMAIN */
        u16 pirq;      /* state == ECS_PIRQ */
        u16 virq;      /* state == ECS_VIRQ */
    } u;
#ifdef FLASK_ENABLE
    void *ssid;
#endif
};

int  evtchn_init(struct domain *d);
void evtchn_destroy(struct domain *d);

struct vcpu
{
    int              vcpu_id;

    int              processor;

    vcpu_info_t     *vcpu_info;

    struct domain   *domain;

    struct vcpu     *next_in_list;

    uint64_t         periodic_period;
    uint64_t         periodic_last_event;
    struct timer     periodic_timer;
    struct timer     singleshot_timer;

    struct timer     poll_timer;    /* timeout for SCHEDOP_poll */

    void            *sched_priv;    /* scheduler-specific data */

    struct vcpu_runstate_info runstate;
#ifndef CONFIG_COMPAT
# define runstate_guest(v) ((v)->runstate_guest)
    XEN_GUEST_HANDLE(vcpu_runstate_info_t) runstate_guest; /* guest address */
#else
# define runstate_guest(v) ((v)->runstate_guest.native)
    union {
        XEN_GUEST_HANDLE(vcpu_runstate_info_t) native;
        XEN_GUEST_HANDLE(vcpu_runstate_info_compat_t) compat;
    } runstate_guest; /* guest address */
#endif

    /* Has the FPU been initialised? */
    bool_t           fpu_initialised;
    /* Has the FPU been used since it was last saved? */
    bool_t           fpu_dirtied;
    /* Is this VCPU polling any event channels (SCHEDOP_poll)? */
    bool_t           is_polling;
    /* Initialization completed for this VCPU? */
    bool_t           is_initialised;
    /* Currently running on a CPU? */
    bool_t           is_running;
    /* MCE callback pending for this VCPU? */
    bool_t           mce_pending;
    /* NMI callback pending for this VCPU? */
    bool_t           nmi_pending;

    /* Higher-priority traps may interrupt lower-priority traps;
     * lower-priority traps wait until higher-priority traps have finished.
     * Note: This concept is known as "system priority level" (spl)
     * in the UNIX world. */
    uint16_t         old_trap_priority;
    uint16_t         trap_priority;
#define VCPU_TRAP_NONE    0
#define VCPU_TRAP_NMI     1
#define VCPU_TRAP_MCE     2

    /* Require shutdown to be deferred for some asynchronous operation? */
    bool_t           defer_shutdown;
    /* VCPU is paused following shutdown request (d->is_shutting_down)? */
    bool_t           paused_for_shutdown;
    /* VCPU affinity is temporarily locked from controller changes? */
    bool_t           affinity_locked;

    unsigned long    pause_flags;
    atomic_t         pause_count;

    /* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
    u16              virq_to_evtchn[NR_VIRQS];
    spinlock_t       virq_lock;

    /* Bitmask of CPUs on which this VCPU may run. */
    cpumask_t        cpu_affinity;
    /* Used to change affinity temporarily. */
    cpumask_t        cpu_affinity_tmp;

    /* Bitmask of CPUs which are holding onto this VCPU's state. */
    cpumask_t        vcpu_dirty_cpumask;

    struct arch_vcpu arch;
};

/* Per-domain lock can be recursively acquired in fault handlers. */
#define domain_lock(d) spin_lock_recursive(&(d)->domain_lock)
#define domain_unlock(d) spin_unlock_recursive(&(d)->domain_lock)
#define domain_is_locked(d) spin_is_locked(&(d)->domain_lock)

struct domain
{
    domid_t          domain_id;

    shared_info_t   *shared_info;     /* shared data area */

    spinlock_t       domain_lock;

    spinlock_t       page_alloc_lock; /* protects all the following fields  */
    struct list_head page_list;       /* linked list, of size tot_pages     */
    struct list_head xenpage_list;    /* linked list, of size xenheap_pages */
    unsigned int     tot_pages;       /* number of pages currently possessed */
    unsigned int     max_pages;       /* maximum value for tot_pages        */
    unsigned int     xenheap_pages;   /* # pages allocated from Xen heap    */

    /* Scheduling. */
    void            *sched_priv;    /* scheduler-specific data */

    struct domain   *next_in_list;
    struct domain   *next_in_hashbucket;

    struct list_head rangesets;
    spinlock_t       rangesets_lock;

    /* Event channel information. */
    struct evtchn   *evtchn[NR_EVTCHN_BUCKETS];
    spinlock_t       evtchn_lock;

    struct grant_table *grant_table;

    /*
     * Interrupt to event-channel mappings. Updates should be protected by the
     * domain's event-channel spinlock. Read accesses can also synchronise on
     * the lock, but races don't usually matter.
     */
    u16              pirq_to_evtchn[NR_IRQS];
    DECLARE_BITMAP(pirq_mask, NR_IRQS);

    /* I/O capabilities (access to IRQs and memory-mapped I/O). */
    struct rangeset *iomem_caps;
    struct rangeset *irq_caps;

    /* Is this an HVM guest? */
    bool_t           is_hvm;
    /* Does this guest need iommu mappings? */
    bool_t           need_iommu;
    /* Is this guest fully privileged (aka dom0)? */
    bool_t           is_privileged;
    /* Which guest this guest has privileges on */
    struct domain   *target;
    /* Is this guest being debugged by dom0? */
    bool_t           debugger_attached;
    /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
    bool_t           is_polling;
    /* Is this guest dying (i.e., a zombie)? */
    enum { DOMDYING_alive, DOMDYING_dying, DOMDYING_dead } is_dying;
    /* Domain is paused by controller software? */
    bool_t           is_paused_by_controller;
    /* Domain's VCPUs are pinned 1:1 to physical CPUs? */
    bool_t           is_pinned;

    /* Guest has shut down (inc. reason code)? */
    spinlock_t       shutdown_lock;
    bool_t           is_shutting_down; /* in process of shutting down? */
    bool_t           is_shut_down;     /* fully shut down? */
    int              shutdown_code;

    /* If this is not 0, send suspend notification here instead of
     * raising DOM_EXC */
    int              suspend_evtchn;

    atomic_t         pause_count;

    unsigned long    vm_assist;

    atomic_t         refcnt;

    struct vcpu *vcpu[MAX_VIRT_CPUS];

    /* Bitmask of CPUs which are holding onto this domain's state. */
    cpumask_t        domain_dirty_cpumask;

    struct arch_domain arch;

    void *ssid; /* sHype security subject identifier */

    /* Control-plane tools handle for this domain. */
    xen_domain_handle_t handle;

    /* OProfile support. */
    struct xenoprof *xenoprof;
    int32_t time_offset_seconds;

    struct rcu_head rcu;

    /*
     * Hypercall deadlock avoidance lock. Used if a hypercall might
     * cause a deadlock. Acquirers don't spin waiting; they preempt.
     */
    spinlock_t hypercall_deadlock_mutex;

    /* VRAM dirty support. */
    struct sh_dirty_vram *dirty_vram;
};

struct domain_setup_info
{
    /* Initialised by caller. */
    unsigned long image_addr;
    unsigned long image_len;
    /* Initialised by loader: Public. */
    unsigned long v_start;
    unsigned long v_end;
    unsigned long v_kernstart;
    unsigned long v_kernend;
    unsigned long v_kernentry;
#define PAEKERN_no           0
#define PAEKERN_yes          1
#define PAEKERN_extended_cr3 2
#define PAEKERN_bimodal      3
    unsigned int  pae_kernel;
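A note on the declarations above. The evtchn array in struct domain is the root of a two-level event-channel table: up to NR_EVTCHN_BUCKETS bucket pointers, each bucket holding EVTCHNS_PER_BUCKET struct evtchn entries, so buckets can be allocated lazily as ports are bound instead of reserving the whole table up front. Below is a minimal sketch of the port-to-entry lookup this layout implies; the helper name evtchn_lookup is illustrative only, not Xen's own accessor (Xen defines equivalent macros elsewhere in the tree).

/* Illustrative sketch only: map an event-channel port number to its
 * struct evtchn entry, following the bucket layout declared above. */
static inline struct evtchn *evtchn_lookup(struct domain *d, int port)
{
    /* Each bucket covers EVTCHNS_PER_BUCKET consecutive port numbers. */
    struct evtchn *bucket = d->evtchn[port / EVTCHNS_PER_BUCKET];

    if ( bucket == NULL )   /* bucket not allocated yet: port unused */
        return NULL;

    /* EVTCHNS_PER_BUCKET (128) is a power of two, so the mask below
     * computes port % EVTCHNS_PER_BUCKET without a division. */
    return &bucket[port & (EVTCHNS_PER_BUCKET - 1)];
}

The arithmetic in MAX_EVTCHNS is worth spelling out: for a compat (32-bit) guest the expression sizeof(unsigned int) * sizeof(unsigned int) * 64 evaluates to 4 * 4 * 64 = 1024 ports, reproducing on 64-bit Xen the value a 32-bit guest would compute for NR_EVENT_CHANNELS from its own sizeof(unsigned long), whereas a native 64-bit guest gets the full 8 * 8 * 64 = 4096.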
