hypervisor.h
来自「linux 内核源代码」· C头文件 代码 · 共 1,941 行 · 第 1/5 页
H
1,941 行
 * In all cases, error or not, the CPUs in the CPU list to which the
 * mondo has been successfully delivered will be indicated by having
 * their entry in CPU list updated with the value 0xffff.
 */
#define HV_FAST_CPU_MONDO_SEND		0x42

#ifndef __ASSEMBLY__
extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count,
					  unsigned long cpu_list_pa,
					  unsigned long mondo_block_pa);
#endif

/* cpu_myid()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_MYID
 * RET0:	status
 * RET1:	CPU ID
 * ERRORS:	No errors defined.
 *
 * Return the hypervisor ID handle for the current CPU.  Used by a
 * virtual CPU to discover its own identity.
 */
#define HV_FAST_CPU_MYID		0x16

/* cpu_state()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_STATE
 * ARG0:	CPU ID
 * RET0:	status
 * RET1:	state
 * ERRORS:	ENOCPU		Invalid CPU ID
 *
 * Retrieve the current state of the CPU with the given CPU ID.
 */
#define HV_FAST_CPU_STATE		0x17
#define  HV_CPU_STATE_STOPPED		0x01
#define  HV_CPU_STATE_RUNNING		0x02
#define  HV_CPU_STATE_ERROR		0x03

#ifndef __ASSEMBLY__
extern long sun4v_cpu_state(unsigned long cpuid);
#endif

/* cpu_set_rtba()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_SET_RTBA
 * ARG0:	RTBA
 * RET0:	status
 * RET1:	previous RTBA
 * ERRORS:	ENORADDR	Invalid RTBA real address
 *		EBADALIGN	RTBA is incorrectly aligned for a trap table
 *
 * Set the real trap base address of the local cpu to the given RTBA.
 * The supplied RTBA must be aligned on a 256 byte boundary.  Upon
 * success the previous value of the RTBA is returned in RET1.
 *
 * Note: This service does not affect %tba
 */
#define HV_FAST_CPU_SET_RTBA		0x18

/* cpu_get_rtba()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_GET_RTBA
 * RET0:	status
 * RET1:	previous RTBA
 * ERRORS:	No possible error.
 *
 * Returns the current value of RTBA in RET1.
 */
#define HV_FAST_CPU_GET_RTBA		0x19

/* MMU services.
 *
 * Layout of a TSB description for mmu_tsb_ctx{,non}0() calls.
 */
#ifndef __ASSEMBLY__
struct hv_tsb_descr {
	unsigned short		pgsz_idx;
	unsigned short		assoc;
	unsigned int		num_ttes;	/* in TTEs */
	unsigned int		ctx_idx;
	unsigned int		pgsz_mask;
	unsigned long		tsb_base;
	unsigned long		resv;
};
#endif
#define HV_TSB_DESCR_PGSZ_IDX_OFFSET	0x00
#define HV_TSB_DESCR_ASSOC_OFFSET	0x02
#define HV_TSB_DESCR_NUM_TTES_OFFSET	0x04
#define HV_TSB_DESCR_CTX_IDX_OFFSET	0x08
#define HV_TSB_DESCR_PGSZ_MASK_OFFSET	0x0c
#define HV_TSB_DESCR_TSB_BASE_OFFSET	0x10
#define HV_TSB_DESCR_RESV_OFFSET	0x18

/* Page size bitmask.  */
#define HV_PGSZ_MASK_8K			(1 << 0)
#define HV_PGSZ_MASK_64K		(1 << 1)
#define HV_PGSZ_MASK_512K		(1 << 2)
#define HV_PGSZ_MASK_4MB		(1 << 3)
#define HV_PGSZ_MASK_32MB		(1 << 4)
#define HV_PGSZ_MASK_256MB		(1 << 5)
#define HV_PGSZ_MASK_2GB		(1 << 6)
#define HV_PGSZ_MASK_16GB		(1 << 7)

/* Page size index.  The value given in the TSB descriptor must correspond
 * to the smallest page size specified in the pgsz_mask page size bitmask.
 */
#define HV_PGSZ_IDX_8K			0
#define HV_PGSZ_IDX_64K			1
#define HV_PGSZ_IDX_512K		2
#define HV_PGSZ_IDX_4MB			3
#define HV_PGSZ_IDX_32MB		4
#define HV_PGSZ_IDX_256MB		5
#define HV_PGSZ_IDX_2GB			6
#define HV_PGSZ_IDX_16GB		7

/* MMU fault status area.
 *
 * MMU related faults have their status and fault address information
 * placed into a memory region made available by privileged code.  Each
 * virtual processor must make a mmu_fault_area_conf() call to tell the
 * hypervisor where that processor's fault status should be stored.
 *
 * The fault status block is a multiple of 64-bytes and must be aligned
 * on a 64-byte boundary.
 */
#ifndef __ASSEMBLY__
struct hv_fault_status {
	unsigned long		i_fault_type;
	unsigned long		i_fault_addr;
	unsigned long		i_fault_ctx;
	unsigned long		i_reserved[5];
	unsigned long		d_fault_type;
	unsigned long		d_fault_addr;
	unsigned long		d_fault_ctx;
	unsigned long		d_reserved[5];
};
#endif
#define HV_FAULT_I_TYPE_OFFSET	0x00
#define HV_FAULT_I_ADDR_OFFSET	0x08
#define HV_FAULT_I_CTX_OFFSET	0x10
#define HV_FAULT_D_TYPE_OFFSET	0x40
#define HV_FAULT_D_ADDR_OFFSET	0x48
#define HV_FAULT_D_CTX_OFFSET	0x50

#define HV_FAULT_TYPE_FAST_MISS	1
#define HV_FAULT_TYPE_FAST_PROT	2
#define HV_FAULT_TYPE_MMU_MISS	3
#define HV_FAULT_TYPE_INV_RA	4
#define HV_FAULT_TYPE_PRIV_VIOL	5
#define HV_FAULT_TYPE_PROT_VIOL	6
#define HV_FAULT_TYPE_NFO	7
#define HV_FAULT_TYPE_NFO_SEFF	8
#define HV_FAULT_TYPE_INV_VA	9
#define HV_FAULT_TYPE_INV_ASI	10
#define HV_FAULT_TYPE_NC_ATOMIC	11
#define HV_FAULT_TYPE_PRIV_ACT	12
#define HV_FAULT_TYPE_RESV1	13
#define HV_FAULT_TYPE_UNALIGNED	14
#define HV_FAULT_TYPE_INV_PGSZ	15
/* Values 16 --> -2 are reserved.  */
#define HV_FAULT_TYPE_MULTIPLE	-1

/* Flags argument for mmu_{map,unmap}_addr(), mmu_demap_{page,context,all}(),
 * and mmu_{map,unmap}_perm_addr().
 */
#define HV_MMU_DMMU			0x01
#define HV_MMU_IMMU			0x02
#define HV_MMU_ALL			(HV_MMU_DMMU | HV_MMU_IMMU)

/* mmu_map_addr()
 * TRAP:	HV_MMU_MAP_ADDR_TRAP
 * ARG0:	virtual address
 * ARG1:	mmu context
 * ARG2:	TTE
 * ARG3:	flags (HV_MMU_{IMMU,DMMU})
 * ERRORS:	EINVAL		Invalid virtual address, mmu context, or flags
 *		EBADPGSZ	Invalid page size value
 *		ENORADDR	Invalid real address in TTE
 *
 * Create a non-permanent mapping using the given TTE, virtual
 * address, and mmu context.  The flags argument determines which
 * (data, or instruction, or both) TLB the mapping gets loaded into.
 *
 * The behavior is undefined if the valid bit is clear in the TTE.
 *
 * Note: This API call is for privileged code to specify temporary translation
 *       mappings without the need to create and manage a TSB.
 */

/* mmu_unmap_addr()
 * TRAP:	HV_MMU_UNMAP_ADDR_TRAP
 * ARG0:	virtual address
 * ARG1:	mmu context
 * ARG2:	flags (HV_MMU_{IMMU,DMMU})
 * ERRORS:	EINVAL		Invalid virtual address, mmu context, or flags
 *
 * Demaps the given virtual address in the given mmu context on this
 * CPU.  This function is intended to be used to demap pages mapped
 * with mmu_map_addr.  This service is equivalent to invoking
 * mmu_demap_page() with only the current CPU in the CPU list.  The
 * flags argument determines which (data, or instruction, or both) TLB
 * the mapping gets unmapped from.
 *
 * Attempting to perform an unmap operation for a previously defined
 * permanent mapping will have undefined results.
 */

/* mmu_tsb_ctx0()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_TSB_CTX0
 * ARG0:	number of TSB descriptions
 * ARG1:	TSB descriptions pointer
 * RET0:	status
 * ERRORS:	ENORADDR		Invalid TSB descriptions pointer or
 *					TSB base within a descriptor
 *		EBADALIGN		TSB descriptions pointer is not aligned
 *					to an 8-byte boundary, or TSB base
 *					within a descriptor is not aligned for
 *					the given TSB size
 *		EBADPGSZ		Invalid page size in a TSB descriptor
 *		EBADTSB			Invalid associativity or size in a TSB
 *					descriptor
 *		EINVAL			Invalid number of TSB descriptions, or
 *					invalid context index in a TSB
 *					descriptor, or index page size not
 *					equal to smallest page size in page
 *					size bitmask field.
 *
 * Configures the TSBs for the current CPU for virtual addresses with
 * context zero.  The TSB descriptions pointer is a pointer to an
 * array of the given number of TSB descriptions.
 *
 * Note: The maximum number of TSBs available to a virtual CPU is given by the
 *       mmu-max-#tsbs property of the cpu's corresponding "cpu" node in the
 *       machine description.
 */
#define HV_FAST_MMU_TSB_CTX0		0x20

#ifndef __ASSEMBLY__
extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
					unsigned long tsb_desc_ra);
#endif

/* mmu_tsb_ctxnon0()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_TSB_CTXNON0
 * ARG0:	number of TSB descriptions
 * ARG1:	TSB descriptions pointer
 * RET0:	status
 * ERRORS:	Same as for mmu_tsb_ctx0() above.
 *
 * Configures the TSBs for the current CPU for virtual addresses with
 * non-zero contexts.  The TSB descriptions pointer is a pointer to an
 * array of the given number of TSB descriptions.
 *
 * Note: A maximum of 16 TSBs may be specified in the TSB description list.
 */
#define HV_FAST_MMU_TSB_CTXNON0		0x21

/* mmu_demap_page()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_DEMAP_PAGE
 * ARG0:	reserved, must be zero
 * ARG1:	reserved, must be zero
 * ARG2:	virtual address
 * ARG3:	mmu context
 * ARG4:	flags (HV_MMU_{IMMU,DMMU})
 * RET0:	status
 * ERRORS:	EINVAL			Invalid virtual address, context, or
 *					flags value
 *		ENOTSUPPORTED		ARG0 or ARG1 is non-zero
 *
 * Demaps any page mapping of the given virtual address in the given
 * mmu context for the current virtual CPU.  Any virtually tagged
 * caches are guaranteed to be kept consistent.  The flags argument
 * determines which TLB (instruction, or data, or both) participate in
 * the operation.
 *
 * ARG0 and ARG1 are both reserved and must be set to zero.
 */
#define HV_FAST_MMU_DEMAP_PAGE		0x22

/* mmu_demap_ctx()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_DEMAP_CTX
 * ARG0:	reserved, must be zero
 * ARG1:	reserved, must be zero
 * ARG2:	mmu context
 * ARG3:	flags (HV_MMU_{IMMU,DMMU})
 * RET0:	status
 * ERRORS:	EINVAL			Invalid context or flags value
 *		ENOTSUPPORTED		ARG0 or ARG1 is non-zero
 *
 * Demaps all non-permanent virtual page mappings previously specified
 * for the given context for the current virtual CPU.  Any virtual
 * tagged caches are guaranteed to be kept consistent.  The flags
 * argument determines which TLB (instruction, or data, or both)
 * participate in the operation.
 *
 * ARG0 and ARG1 are both reserved and must be set to zero.
 */
#define HV_FAST_MMU_DEMAP_CTX		0x23

/* mmu_demap_all()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_DEMAP_ALL
 * ARG0:	reserved, must be zero
 * ARG1:	reserved, must be zero
 * ARG2:	flags (HV_MMU_{IMMU,DMMU})
 * RET0:	status
 * ERRORS:	EINVAL			Invalid flags value
 *		ENOTSUPPORTED		ARG0 or ARG1 is non-zero
 *
 * Demaps all non-permanent virtual page mappings previously specified
 * for the current virtual CPU.  Any virtual tagged caches are
 * guaranteed to be kept consistent.  The flags argument determines
 * which TLB (instruction, or data, or both) participate in the
 * operation.
 *
 * ARG0 and ARG1 are both reserved and must be set to zero.
 */
#define HV_FAST_MMU_DEMAP_ALL		0x24

#ifndef __ASSEMBLY__
extern void sun4v_mmu_demap_all(void);
#endif

/* mmu_map_perm_addr()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_MAP_PERM_ADDR
 * ARG0:	virtual address
 * ARG1:	reserved, must be zero
 * ARG2:	TTE
 * ARG3:	flags (HV_MMU_{IMMU,DMMU})
 * RET0:	status
 * ERRORS:	EINVAL			Invalid virtual address or flags value
 *		EBADPGSZ		Invalid page size value
 *		ENORADDR		Invalid real address in TTE
 *		ETOOMANY		Too many mappings (max of 8 reached)
 *
 * Create a permanent mapping using the given TTE and virtual address
 * for context 0 on the calling virtual CPU.  A maximum of 8 such
 * permanent mappings may be specified by privileged code.  Mappings
 * may be removed with mmu_unmap_perm_addr().
 *
 * The behavior is undefined if a TTE with the valid bit clear is given.
 *
 * Note: This call is used to specify address space mappings for which
 *       privileged code does not expect to receive misses.  For example,
 *       this mechanism can be used to map kernel nucleus code and data.
 */
#define HV_FAST_MMU_MAP_PERM_ADDR	0x25

#ifndef __ASSEMBLY__
extern unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
					     unsigned long set_to_zero,
					     unsigned long tte,
					     unsigned long flags);
#endif

/* mmu_fault_area_conf()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_FAULT_AREA_CONF
 * ARG0:	real address
 * RET0:	status
 * RET1:	previous mmu fault area real address
 * ERRORS:	ENORADDR		Invalid real address
 *		EBADALIGN		Invalid alignment for fault area
 *
 * Configure the MMU fault status area for the calling CPU.  A 64-byte
 * aligned real address specifies where MMU fault status information
 * is placed.  The return value is the previously specified area, or 0
 * for the first invocation.  Specifying a fault area at real address
 * 0 is not allowed.
 */
#define HV_FAST_MMU_FAULT_AREA_CONF	0x26

/* mmu_enable()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_ENABLE
 * ARG0:	enable flag
 * ARG1:	return target address
 * RET0:	status
 * ERRORS:	ENORADDR		Invalid real address when disabling
 *					translation.
 *		EBADALIGN		The return target address is not
 *					aligned to an instruction.
 *		EINVAL			The enable flag request the current
 *					operating mode (e.g. disable if already
 *					disabled)
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?