kvm.h

来自「linux 内核源代码」· C头文件 代码 · 共 797 行 · 第 1/2 页

H
797
字号
#ifndef __KVM_H
#define __KVM_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

/*
 * Core KVM definitions for the x86 port: per-vcpu and shadow-MMU state,
 * guest memory slots, and the in-kernel emulated I/O device interface.
 */

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

/* Guest CR3 bits that must be zero, per paging mode. */
#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)

/*
 * CR0/CR4 bits trapped from the guest (the *_MASK macros) and bits the
 * host keeps forced on while the guest runs (the *_ALWAYS_ON macros).
 */
#define KVM_GUEST_CR0_MASK \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
	 | X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
	 | X86_CR0_MP)
#define KVM_GUEST_CR4_MASK \
	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

/* All-ones is never a valid host page / guest address — used as sentinels. */
#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

/* Static sizing limits for a VM. */
#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 8
#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40

/* x86 exception vector numbers (e.g. GP = general protection, PF = page fault). */
#define DE_VECTOR 0
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14

/* Segment selector fields: table-indicator bit and requested privilege level. */
#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

/* Bit position of the IOPL field within EFLAGS. */
#define IOPL_SHIFT 12

/* NOTE(review): presumably the mmap page index of the pio data area — confirm against users. */
#define KVM_PIO_PAGE_OFFSET 1

/*
 * vcpu->requests bit members
 */
#define KVM_TLB_FLUSH 0

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long  gva_t;
typedef u64            gpa_t;
typedef unsigned long  gfn_t;

typedef unsigned long  hva_t;
typedef u64            hpa_t;
typedef unsigned long  hfn_t;

/*
 * A shadow page may be pointed at by more than one parent pte; extra
 * parents are kept in a list of fixed-size chain nodes (see the
 * multimapped union in struct kvm_mmu_page below).
 */
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 *   bits 17:19 - "access" - the user, writable, and nx bits of a huge page pde
 */
union kvm_mmu_page_role {
	unsigned word;		/* whole role as one integer, for hashing/compare */
	struct {
		unsigned glevels : 4;
		unsigned level : 4;
		unsigned quadrant : 2;
		unsigned pad_for_nice_hex_output : 6;
		unsigned metaphysical : 1;
		unsigned hugepage_access : 3;
	};
};

/* Per shadow-page bookkeeping for the shadow MMU. */
struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;			/* the shadow page table itself */
	unsigned long slot_bitmap;	/* One bit set per slot which has memory
					 * in this shadow page.
					 */
	int multimapped;		/* More than one parent_pte? */
	int root_count;			/* Currently serving as active root */
	union {
		u64 *parent_pte;		/* !multimapped */
		struct hlist_head parent_ptes;	/* multimapped, kvm_pte_chain */
	};
};

struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;	/* slab cache for vcpu allocation */

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	hpa_t root_hpa;			/* host physical address of the shadow root */
	int root_level;			/* guest paging level, per the names above */
	int shadow_root_level;		/* shadow paging level */
	u64 *pae_root;
};

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
#define KVM_NR_MEM_OBJS 20

struct kvm_mmu_memory_cache {
	int nobjs;			/* number of valid entries in objects[] */
	void *objects[KVM_NR_MEM_OBJS];
};

/* Guest-debugging state set by userspace. */
struct kvm_guest_debug {
	int enabled;
	unsigned long bp[4];	/* presumably hw breakpoint addresses (4 matches DR0-DR3) — confirm */
	int singlestep;
};

/* Indices into kvm_vcpu.regs[] for the general-purpose registers. */
enum {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	NR_VCPU_REGS
};

/* Segment register identifiers. */
enum {
	VCPU_SREG_CS,
	VCPU_SREG_DS,
	VCPU_SREG_ES,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_SS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

/* State of an in-flight port I/O operation handed to userspace. */
struct kvm_pio_request {
	unsigned long count;
	int cur_count;
	struct page *guest_pages[2];
	unsigned guest_page_offset;
	int in;			/* direction: nonzero for IN */
	int port;
	int size;
	int string;		/* string instruction (INS/OUTS)? */
	int down;		/* string direction (DF flag) — presumably; confirm */
	int rep;		/* REP-prefixed? */
};

/* Per-vcpu event counters, exported as statistics. */
struct kvm_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 halt_exits;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 light_exits;
	u32 efer_reload;
};

/* Operations implemented by an in-kernel emulated device on an I/O bus. */
struct kvm_io_device {
	void (*read)(struct kvm_io_device *this,
		     gpa_t addr,
		     int len,
		     void *val);
	void (*write)(struct kvm_io_device *this,
		      gpa_t addr,
		      int len,
		      const void *val);
	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
	void (*destructor)(struct kvm_io_device *this);

	void             *private;	/* device-implementation state */
};

/* Thin call-through wrappers over the kvm_io_device ops. */
static inline void kvm_iodevice_read(struct kvm_io_device *dev,
				     gpa_t addr,
				     int len,
				     void *val)
{
	dev->read(dev, addr, len, val);
}

static inline void kvm_iodevice_write(struct kvm_io_device *dev,
				      gpa_t addr,
				      int len,
				      const void *val)
{
	dev->write(dev, addr, len, val);
}

static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
{
	return dev->in_range(dev, addr);
}

/* destructor is optional; only call it if the device provides one */
static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
	if (dev->destructor)
		dev->destructor(dev);
}

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * in one place.
 */
struct kvm_io_bus {
	int                   dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);

/* All per-virtual-cpu state. */
struct kvm_vcpu {
	struct kvm *kvm;		/* owning VM */
	struct preempt_notifier preempt_notifier;
	int vcpu_id;
	struct mutex mutex;
	int   cpu;			/* host cpu the vcpu last ran on */
	u64 host_tsc;
	struct kvm_run *run;		/* shared-with-userspace run structure */
	int interrupt_window_open;
	int guest_mode;
	unsigned long requests;		/* KVM_TLB_FLUSH etc., see above */
	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
	DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
	unsigned long rip;      /* needs vcpu_load_rsp_rip() */

	/* guest control registers */
	unsigned long cr0;
	unsigned long cr2;
	unsigned long cr3;
	gpa_t para_state_gpa;
	struct page *para_state_page;
	gpa_t hypercall_gpa;
	unsigned long cr4;
	unsigned long cr8;
	u64 pdptrs[4]; /* pae */
	u64 shadow_efer;
	u64 apic_base;
	struct kvm_lapic *apic;    /* kernel irqchip context */
#define VCPU_MP_STATE_RUNNABLE          0
#define VCPU_MP_STATE_UNINITIALIZED    1
#define VCPU_MP_STATE_INIT_RECEIVED     2
#define VCPU_MP_STATE_SIPI_RECEIVED     3
#define VCPU_MP_STATE_HALTED            4
	int mp_state;			/* one of VCPU_MP_STATE_* */
	int sipi_vector;
	u64 ia32_misc_enable_msr;

	struct kvm_mmu mmu;		/* shadow MMU for this vcpu */

	/* preallocated pools for page-fault handling — see KVM_NR_MEM_OBJS */
	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	gfn_t last_pt_write_gfn;
	int   last_pt_write_count;

	struct kvm_guest_debug guest_debug;

	/* host/guest FPU images swapped around guest entry */
	struct i387_fxsave_struct host_fx_image;
	struct i387_fxsave_struct guest_fx_image;
	int fpu_active;
	int guest_fpu_loaded;

	/* in-flight MMIO emulation state */
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_size;
	unsigned char mmio_data[8];
	gpa_t mmio_phys_addr;
	gva_t mmio_fault_cr2;
	struct kvm_pio_request pio;
	void *pio_data;
	wait_queue_head_t wq;		/* waited on while the vcpu is halted */

	int sigset_active;
	sigset_t sigset;

	struct kvm_stat stat;

	/* segment state saved while emulating real mode (Intel only) */
	struct {
		int active;
		u8 save_iopl;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
	} rmode;
	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
};

/* Redirects a range of guest frames onto another range (gfn aliasing). */
struct kvm_mem_alias {
	gfn_t base_gfn;
	unsigned long npages;
	gfn_t target_gfn;
};

/* A contiguous range of guest physical memory backed by host pages. */
struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	struct page **phys_mem;		/* one host page per guest frame */
	unsigned long *dirty_bitmap;	/* one bit per page; NULL if not tracking */
};

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?