
📄 processor.h

📁 Xen virtual machine source package
#define ia64_get_kr(regnum)					\
({								\
	unsigned long r = 0;					\
								\
	switch (regnum) {					\
	    case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
	    case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
	    case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
	    case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
	    case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
	    case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
	    case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
	    case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
	    default: ia64_getreg_unknown_kr(); break;		\
	}							\
	r;							\
})

#define ia64_set_kr(regnum, r)					\
({								\
	switch (regnum) {					\
	    case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;	\
	    case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;	\
	    case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;	\
	    case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;	\
	    case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;	\
	    case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;	\
	    case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;	\
	    case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;	\
	    default: ia64_setreg_unknown_kr(); break;		\
	}							\
})

/*
 * The following three macros can't be inline functions because we don't have struct
 * task_struct at this point.
 */

/*
 * Return TRUE if task T owns the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#ifndef XEN
#define ia64_is_local_fpu_owner(t)								\
({												\
	struct task_struct *__ia64_islfo_task = (t);						\
	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
})
#endif

/*
 * Mark task T as owning the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_set_local_fpu_owner(t) do {						\
	struct task_struct *__ia64_slfo_task = (t);					\
	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();			\
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);		\
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs.  */
#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)

extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#ifdef XEN
extern void dump_stack(void);
#endif

#ifdef CONFIG_IA32_SUPPORT
extern void ia32_save_state (struct task_struct *task);
extern void ia32_load_state (struct task_struct *task);
#endif

#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)

/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}

static inline __u64
ia64_clear_ic (void)
{
	__u64 psr;

	psr = ia64_getreg(_IA64_REG_PSR);
	ia64_stop();
	ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
	ia64_srlz_i();
	return psr;
}
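/*
 * Usage sketch (not part of the original header): how a lazy-FPU
 * hand-off on a context switch might use the fph helpers above.
 * `prev' and `next' are hypothetical task pointers, and the sketch
 * assumes the task's thread struct carries an fph save area as in
 * Linux/ia64; locking and the XEN-specific variants are omitted, so
 * it is kept out of the build with #if 0.
 */
#if 0
static inline void
example_fph_handoff (struct task_struct *prev, struct task_struct *next)
{
	/* Caller must have preemption disabled (see comments above). */
	if (ia64_is_local_fpu_owner(prev))
		ia64_save_fpu(&prev->thread.fph[0]);	/* spill f32-f127 */
	ia64_drop_fpu(prev);		/* fph now invalid for prev on all CPUs */
	ia64_set_local_fpu_owner(next);	/* next owns this CPU's fph partition */
}
#endif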
/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
	ia64_stop();
	ia64_setreg(_IA64_REG_PSR_L, psr);
	ia64_srlz_d();
}

/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	if (target_mask & 0x1)
		ia64_itri(tr_num, pte);
	if (target_mask & 0x2)
		ia64_itrd(tr_num, pte);
}

/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
#ifdef XEN
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte, __u64 itir)
{
	ia64_setreg(_IA64_REG_CR_ITIR, itir);
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		ia64_itci(pte);
	if (target_mask & 0x2)
		ia64_itcd(pte);
}
#else
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		ia64_itci(pte);
	if (target_mask & 0x2)
		ia64_itcd(pte);
}
#endif

/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		ia64_ptri(vmaddr, (log_size << 2));
	if (target_mask & 0x2)
		ia64_ptrd(vmaddr, (log_size << 2));
}

/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
static inline void
ia64_set_iva (void *ivt_addr)
{
	ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
	ia64_srlz_i();
}

/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	ia64_setreg(_IA64_REG_CR_PTA, pta);
	ia64_srlz_i();
}

static inline void
ia64_eoi (void)
{
	ia64_setreg(_IA64_REG_CR_EOI, 0);
	ia64_srlz_d();
}

#define cpu_relax()	ia64_hint(ia64_hint_pause)

static inline int
ia64_get_irr(unsigned int vector)
{
	unsigned int reg = vector / 64;
	unsigned int bit = vector % 64;
	u64 irr;

	switch (reg) {
	case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
	case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
	case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
	case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
	}

	return test_bit(bit, &irr);
}

static inline void
ia64_set_lrr0 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR0, val);
	ia64_srlz_d();
}

static inline void
ia64_set_lrr1 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR1, val);
	ia64_srlz_d();
}

/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR.  UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}
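/*
 * Worked example (not part of the original header): ia64_unat_pos()
 * extracts bits 3..8 of the spill address, i.e. which 8-byte slot of
 * the enclosing 512-byte naturally aligned window was written, and
 * ia64_set_unat() files the NaT bit under that position.  The address
 * below is illustrative; kept out of the build with #if 0.
 */
#if 0
static inline void
example_unat_update (void)
{
	__u64 unat = 0;
	void *spill_addr = (void *) 0xe000000000000128UL;

	/* (0x128 >> 3) & 0x3f == 0x25, so the NaT bit lands in bit 37. */
	ia64_set_unat(&unat, spill_addr, 1);
	/* unat is now 1UL << 37 */
}
#endif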
/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}

/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })

static inline __u64
ia64_get_ivr (void)
{
	__u64 r;
	ia64_srlz_d();
	r = ia64_getreg(_IA64_REG_CR_IVR);
	ia64_srlz_d();
	return r;
}

#ifdef XEN
/* Get the page table address and control bits.  */
static inline __u64
ia64_get_pta (void)
{
	__u64 r;
	ia64_srlz_d();
	r = ia64_getreg(_IA64_REG_CR_PTA);
	ia64_srlz_d();
	return r;
}
#endif

static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
	__ia64_set_dbr(regnum, value);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
}

static inline __u64
ia64_get_dbr (__u64 regnum)
{
	__u64 retval;

	retval = __ia64_get_dbr(regnum);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
	return retval;
}

static inline __u64
ia64_rotr (__u64 w, __u64 n)
{
	return (w >> n) | (w << (64 - n));
}

#define ia64_rotl(w,n)	ia64_rotr((w), (64) - (n))

/*
 * Take a mapped kernel address and return the equivalent address
 * in the region 7 identity mapped virtual area.
 */
static inline void *
ia64_imva (void *addr)
{
	void *result;
	result = (void *) ia64_tpa(addr);
	return __va(result);
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE			L1_CACHE_BYTES

static inline void
prefetch (const void *x)
{
	ia64_lfetch(ia64_lfhint_none, x);
}

static inline void
prefetchw (const void *x)
{
	ia64_lfetch_excl(ia64_lfhint_none, x);
}

#define spin_lock_prefetch(x)	prefetchw(x)

extern unsigned long boot_option_idle_override;

#ifdef XEN
static inline unsigned int
ia64_get_cpl(unsigned long psr)
{
	return (psr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT;
}
#endif

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */
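A quick standalone check of the rotate helpers near the end of the header: ia64_rotl(w, n) is expressed as ia64_rotr(w, 64 - n), the usual trick for building a left rotate from a right-rotate primitive. The sketch below is a plain-C reimplementation that can be compiled and run outside the kernel; rotr64/rotl64 are local names (not the header's), with uint64_t standing in for __u64.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local re-implementations of the header's ia64_rotr/ia64_rotl;
 * like the originals, valid for 0 < n < 64. */
static uint64_t rotr64(uint64_t w, unsigned n) { return (w >> n) | (w << (64 - n)); }
static uint64_t rotl64(uint64_t w, unsigned n) { return rotr64(w, 64 - n); }

int main(void)
{
	uint64_t w = 0x0123456789abcdefULL;

	/* A left rotate by n is exactly a right rotate by 64 - n. */
	assert(rotl64(w, 8) == 0x23456789abcdef01ULL);
	assert(rotl64(w, 8) == rotr64(w, 56));
	printf("rotl64(w, 8) = 0x%016llx\n", (unsigned long long) rotl64(w, 8));
	return 0;
}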
