/*
 * pal.h — IA-64 Processor Abstraction Layer (PAL) call wrappers (excerpt).
 */
ia64_pal_bus_get_features (pal_bus_features_u_t *features_avail, pal_bus_features_u_t *features_status, pal_bus_features_u_t *features_control){ struct ia64_pal_retval iprv; PAL_CALL_PHYS(iprv, PAL_BUS_GET_FEATURES, 0, 0, 0); if (features_avail) features_avail->pal_bus_features_val = iprv.v0; if (features_status) features_status->pal_bus_features_val = iprv.v1; if (features_control) features_control->pal_bus_features_val = iprv.v2; return iprv.status;}/* Enables/disables specific processor bus features */static inline s64ia64_pal_bus_set_features (pal_bus_features_u_t feature_select){ struct ia64_pal_retval iprv; PAL_CALL_PHYS(iprv, PAL_BUS_SET_FEATURES, feature_select.pal_bus_features_val, 0, 0); return iprv.status;}/* Get detailed cache information */static inline s64ia64_pal_cache_config_info (u64 cache_level, u64 cache_type, pal_cache_config_info_t *conf){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_CACHE_INFO, cache_level, cache_type, 0); if (iprv.status == 0) { conf->pcci_status = iprv.status; conf->pcci_info_1.pcci1_data = iprv.v0; conf->pcci_info_2.pcci2_data = iprv.v1; conf->pcci_reserved = iprv.v2; } return iprv.status;}/* Get detailed cche protection information */static inline s64ia64_pal_cache_prot_info (u64 cache_level, u64 cache_type, pal_cache_protection_info_t *prot){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_CACHE_PROT_INFO, cache_level, cache_type, 0); if (iprv.status == 0) { prot->pcpi_status = iprv.status; prot->pcp_info[0].pcpi_data = iprv.v0 & 0xffffffff; prot->pcp_info[1].pcpi_data = iprv.v0 >> 32; prot->pcp_info[2].pcpi_data = iprv.v1 & 0xffffffff; prot->pcp_info[3].pcpi_data = iprv.v1 >> 32; prot->pcp_info[4].pcpi_data = iprv.v2 & 0xffffffff; prot->pcp_info[5].pcpi_data = iprv.v2 >> 32; } return iprv.status;}/* * Flush the processor instruction or data caches. *PROGRESS must be * initialized to zero before calling this for the first time.. 
*/static inline s64ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_CACHE_FLUSH, cache_type, invalidate, *progress); if (vector) *vector = iprv.v0; *progress = iprv.v1; return iprv.status;}/* Initialize the processor controlled caches */static inline s64ia64_pal_cache_init (u64 level, u64 cache_type, u64 rest){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, rest); return iprv.status;}/* Initialize the tags and data of a data or unified cache line of * processor controlled cache to known values without the availability * of backing memory. */static inline s64ia64_pal_cache_line_init (u64 physical_addr, u64 data_value){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_CACHE_LINE_INIT, physical_addr, data_value, 0); return iprv.status;}/* Read the data and tag of a processor controlled cache line for diags */static inline s64ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr){ struct ia64_pal_retval iprv; PAL_CALL_PHYS_STK(iprv, PAL_CACHE_READ, line_id.pclid_data, physical_addr, 0); return iprv.status;}/* Return summary information about the hierarchy of caches controlled by the processor */static inline s64ia64_pal_cache_summary (u64 *cache_levels, u64 *unique_caches){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_CACHE_SUMMARY, 0, 0, 0); if (cache_levels) *cache_levels = iprv.v0; if (unique_caches) *unique_caches = iprv.v1; return iprv.status;}/* Write the data and tag of a processor-controlled cache line for diags */static inline s64ia64_pal_cache_write (pal_cache_line_id_u_t line_id, u64 physical_addr, u64 data){ struct ia64_pal_retval iprv; PAL_CALL_PHYS_STK(iprv, PAL_CACHE_WRITE, line_id.pclid_data, physical_addr, data); return iprv.status;}/* Return the parameters needed to copy relocatable PAL procedures from ROM to memory */static inline s64ia64_pal_copy_info (u64 copy_type, u64 num_procs, u64 num_iopics, u64 *buffer_size, u64 
*buffer_align){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_COPY_INFO, copy_type, num_procs, num_iopics); if (buffer_size) *buffer_size = iprv.v0; if (buffer_align) *buffer_align = iprv.v1; return iprv.status;}/* Copy relocatable PAL procedures from ROM to memory */static inline s64ia64_pal_copy_pal (u64 target_addr, u64 alloc_size, u64 processor, u64 *pal_proc_offset){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_COPY_PAL, target_addr, alloc_size, processor); if (pal_proc_offset) *pal_proc_offset = iprv.v0; return iprv.status;}/* Return the number of instruction and data debug register pairs */static inline s64ia64_pal_debug_info (u64 *inst_regs, u64 *data_regs){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_DEBUG_INFO, 0, 0, 0); if (inst_regs) *inst_regs = iprv.v0; if (data_regs) *data_regs = iprv.v1; return iprv.status;}#ifdef TBD/* Switch from IA64-system environment to IA-32 system environment */static inline s64ia64_pal_enter_ia32_env (ia32_env1, ia32_env2, ia32_env3){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_ENTER_IA_32_ENV, ia32_env1, ia32_env2, ia32_env3); return iprv.status;}#endif/* Get unique geographical address of this processor on its bus */static inline s64ia64_pal_fixed_addr (u64 *global_unique_addr){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_FIXED_ADDR, 0, 0, 0); if (global_unique_addr) *global_unique_addr = iprv.v0; return iprv.status;}/* Get base frequency of the platform if generated by the processor */static inline s64ia64_pal_freq_base (u64 *platform_base_freq){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_FREQ_BASE, 0, 0, 0); if (platform_base_freq) *platform_base_freq = iprv.v0; return iprv.status;}/* * Get the ratios for processor frequency, bus frequency and interval timer to * to base frequency of the platform */static inline s64ia64_pal_freq_ratios (struct pal_freq_ratio *proc_ratio, struct pal_freq_ratio *bus_ratio, struct pal_freq_ratio *itc_ratio){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_FREQ_RATIOS, 
0, 0, 0); if (proc_ratio) *(u64 *)proc_ratio = iprv.v0; if (bus_ratio) *(u64 *)bus_ratio = iprv.v1; if (itc_ratio) *(u64 *)itc_ratio = iprv.v2; return iprv.status;}/* * Get the current hardware resource sharing policy of the processor */static inline s64ia64_pal_get_hw_policy (u64 proc_num, u64 *cur_policy, u64 *num_impacted, u64 *la){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_GET_HW_POLICY, proc_num, 0, 0); if (cur_policy) *cur_policy = iprv.v0; if (num_impacted) *num_impacted = iprv.v1; if (la) *la = iprv.v2; return iprv.status;}/* Make the processor enter HALT or one of the implementation dependent low * power states where prefetching and execution are suspended and cache and * TLB coherency is not maintained. */static inline s64ia64_pal_halt (u64 halt_state){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_HALT, halt_state, 0, 0); return iprv.status;}typedef union pal_power_mgmt_info_u { u64 ppmi_data; struct { u64 exit_latency : 16, entry_latency : 16, power_consumption : 28, im : 1, co : 1, reserved : 2; } pal_power_mgmt_info_s;} pal_power_mgmt_info_u_t;/* Return information about processor's optional power management capabilities. 
*/static inline s64ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf){ struct ia64_pal_retval iprv; PAL_CALL_STK(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0); return iprv.status;}/* Get the current P-state information */static inline s64ia64_pal_get_pstate (u64 *pstate_index, unsigned long type){ struct ia64_pal_retval iprv; PAL_CALL_STK(iprv, PAL_GET_PSTATE, type, 0, 0); *pstate_index = iprv.v0; return iprv.status;}/* Set the P-state */static inline s64ia64_pal_set_pstate (u64 pstate_index){ struct ia64_pal_retval iprv; PAL_CALL_STK(iprv, PAL_SET_PSTATE, pstate_index, 0, 0); return iprv.status;}/* Processor branding information*/static inline s64ia64_pal_get_brand_info (char *brand_info){ struct ia64_pal_retval iprv; PAL_CALL_STK(iprv, PAL_BRAND_INFO, 0, (u64)brand_info, 0); return iprv.status;}/* Cause the processor to enter LIGHT HALT state, where prefetching and execution are * suspended, but cache and TLB coherency is maintained. */static inline s64ia64_pal_halt_light (void){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_HALT_LIGHT, 0, 0, 0); return iprv.status;}/* Clear all the processor error logging registers and reset the indicator that allows * the error logging registers to be written. This procedure also checks the pending * machine check bit and pending INIT bit and reports their states. */static inline s64ia64_pal_mc_clear_log (u64 *pending_vector){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_MC_CLEAR_LOG, 0, 0, 0); if (pending_vector) *pending_vector = iprv.v0; return iprv.status;}/* Ensure that all outstanding transactions in a processor are completed or that any * MCA due to thes outstanding transaction is taken. 
*/static inline s64ia64_pal_mc_drain (void){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_MC_DRAIN, 0, 0, 0); return iprv.status;}/* Return the machine check dynamic processor state */static inline s64ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, offset, 0, 0); if (size) *size = iprv.v0; if (pds) *pds = iprv.v1; return iprv.status;}/* Return processor machine check information */static inline s64ia64_pal_mc_error_info (u64 info_index, u64 type_index, u64 *size, u64 *error_info){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_MC_ERROR_INFO, info_index, type_index, 0); if (size) *size = iprv.v0; if (error_info) *error_info = iprv.v1; return iprv.status;}/* Inform PALE_CHECK whether a machine check is expected so that PALE_CHECK willnot * attempt to correct any expected machine checks. */static inline s64ia64_pal_mc_expected (u64 expected, u64 *previous){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_MC_EXPECTED, expected, 0, 0); if (previous) *previous = iprv.v0; return iprv.status;}/* Register a platform dependent location with PAL to which it can save * minimal processor state in the event of a machine check or initialization * event. 
*/static inline s64ia64_pal_mc_register_mem (u64 physical_addr){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, 0, 0); return iprv.status;}/* Restore minimal architectural processor state, set CMC interrupt if necessary * and resume execution */static inline s64ia64_pal_mc_resume (u64 set_cmci, u64 save_ptr){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_MC_RESUME, set_cmci, save_ptr, 0); return iprv.status;}/* Return the memory attributes implemented by the processor */static inline s64ia64_pal_mem_attrib (u64 *mem_attrib){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_MEM_ATTRIB, 0, 0, 0); if (mem_attrib) *mem_attrib = iprv.v0 & 0xff; return iprv.status;}/* Return the amount of memory needed for second phase of processor * self-test and the required alignment of memory. */static inline s64ia64_pal_mem_for_test (u64 *bytes_needed, u64 *alignment){ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_MEM_FOR_TEST, 0, 0, 0); if (bytes_needed) *bytes_needed = iprv.v0; if (alignment) *alignment = iprv.v1; return iprv.status;}typedef union pal_perf_mon_info_u { u64 ppmi_data; struct { u64 generic : 8, width : 8, cycles : 8, retired : 8, reserved : 32; } pal_perf_mon_info_s;
/* (non-source residue from code-viewer page removed; file continues elsewhere) */