
📄 processor.h (page 1 of 2)

📁 Describes the Linux initialization process
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}

static inline void
ia64_fc (void *addr)
{
	__asm__ __volatile__ ("fc %0" :: "r"(addr) : "memory");
}

static inline void
ia64_sync_i (void)
{
	__asm__ __volatile__ (";; sync.i" ::: "memory");
}

static inline void
ia64_srlz_i (void)
{
	__asm__ __volatile__ (";; srlz.i ;;" ::: "memory");
}

static inline void
ia64_srlz_d (void)
{
	__asm__ __volatile__ (";; srlz.d" ::: "memory");
}

static inline __u64
ia64_get_rr (__u64 reg_bits)
{
	__u64 r;
	__asm__ __volatile__ ("mov %0=rr[%1]" : "=r"(r) : "r"(reg_bits) : "memory");
	return r;
}

static inline void
ia64_set_rr (__u64 reg_bits, __u64 rr_val)
{
	__asm__ __volatile__ ("mov rr[%0]=%1" :: "r"(reg_bits), "r"(rr_val) : "memory");
}

static inline __u64
ia64_get_dcr (void)
{
	__u64 r;
	__asm__ ("mov %0=cr.dcr" : "=r"(r));
	return r;
}

static inline void
ia64_set_dcr (__u64 val)
{
	__asm__ __volatile__ ("mov cr.dcr=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_lid (void)
{
	__u64 r;
	__asm__ ("mov %0=cr.lid" : "=r"(r));
	return r;
}

static inline void
ia64_invala (void)
{
	__asm__ __volatile__ ("invala" ::: "memory");
}

/*
 * Save the processor status flags in FLAGS and then clear the
 * interrupt collection and interrupt enable bits.
 */
#define ia64_clear_ic(flags)							\
	__asm__ __volatile__ ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;"	\
			      : "=r"(flags) :: "memory");

/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	__asm__ __volatile__ ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
	__asm__ __volatile__ ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
	if (target_mask & 0x1)
		__asm__ __volatile__ ("itr.i itr[%0]=%1"
				      :: "r"(tr_num), "r"(pte) : "memory");
	if (target_mask & 0x2)
		__asm__ __volatile__ (";;itr.d dtr[%0]=%1"
				      :: "r"(tr_num), "r"(pte) : "memory");
}

/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	__asm__ __volatile__ ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
	__asm__ __volatile__ ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		__asm__ __volatile__ ("itc.i %0;;" :: "r"(pte) : "memory");
	if (target_mask & 0x2)
		__asm__ __volatile__ (";;itc.d %0;;" :: "r"(pte) : "memory");
}

/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		__asm__ __volatile__ ("ptr.i %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
	if (target_mask & 0x2)
		__asm__ __volatile__ ("ptr.d %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
}

/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
static inline void
ia64_set_iva (void *ivt_addr)
{
	__asm__ __volatile__ ("mov cr.iva=%0;; srlz.i;;" :: "r"(ivt_addr) : "memory");
}
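/*
 * Illustrative sketch, not part of this header: how the helpers above
 * are typically combined to pin a mapping into a data translation
 * register.  Interrupt collection must be off while the TR is written,
 * hence ia64_clear_ic(); restoring PSR afterwards needs a helper along
 * the lines of ia64_set_psr(), which lies outside this excerpt.
 * MY_VADDR, MY_PTE, MY_PAGE_SHIFT and the TR slot are placeholders.
 */
#if 0	/* example only */
static void
example_pin_mapping (void)
{
	unsigned long psr;

	ia64_clear_ic(psr);	/* save PSR, clear psr.i and psr.ic */
	ia64_itr(0x2 /* dtr only */, 2 /* tr slot */,
		 MY_VADDR, MY_PTE, MY_PAGE_SHIFT);
	ia64_set_psr(psr);	/* hypothetical PSR-restore helper */
	ia64_srlz_i();
}
#endif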
/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	__asm__ __volatile__ ("mov cr.pta=%0;; srlz.i;;" :: "r"(pta) : "memory");
}

static inline __u64
ia64_get_cpuid (__u64 regnum)
{
	__u64 r;
	__asm__ ("mov %0=cpuid[%r1]" : "=r"(r) : "rO"(regnum));
	return r;
}

static inline void
ia64_eoi (void)
{
	__asm__ ("mov cr.eoi=r0;; srlz.d;;" ::: "memory");
}

static inline void
ia64_set_lrr0 (__u8 vector, __u8 masked)
{
	if (masked > 1)
		masked = 1;
	__asm__ __volatile__ ("mov cr.lrr0=%0;; srlz.d"
			      :: "r"((masked << 16) | vector) : "memory");
}

static inline void
ia64_set_lrr1 (__u8 vector, __u8 masked)
{
	if (masked > 1)
		masked = 1;
	__asm__ __volatile__ ("mov cr.lrr1=%0;; srlz.d"
			      :: "r"((masked << 16) | vector) : "memory");
}

static inline void
ia64_set_pmv (__u64 val)
{
	__asm__ __volatile__ ("mov cr.pmv=%0" :: "r"(val) : "memory");
}

static inline __u64
ia64_get_pmc (__u64 regnum)
{
	__u64 retval;
	__asm__ __volatile__ ("mov %0=pmc[%1]" : "=r"(retval) : "r"(regnum));
	return retval;
}

static inline void
ia64_set_pmc (__u64 regnum, __u64 value)
{
	__asm__ __volatile__ ("mov pmc[%0]=%1" :: "r"(regnum), "r"(value));
}

static inline __u64
ia64_get_pmd (__u64 regnum)
{
	__u64 retval;
	__asm__ __volatile__ ("mov %0=pmd[%1]" : "=r"(retval) : "r"(regnum));
	return retval;
}

static inline void
ia64_set_pmd (__u64 regnum, __u64 value)
{
	__asm__ __volatile__ ("mov pmd[%0]=%1" :: "r"(regnum), "r"(value));
}

/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR.  UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}
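/*
 * Worked example, not from the original source: a register spilled to
 * address 0xe0000000001234c8 occupies the 8-byte slot whose index is
 * ((0x...34c8 >> 3) & 0x3f) = 25, so ia64_unat_pos() returns 25 and
 * ia64_set_unat() updates bit 25 of the UNAT mask.
 */
#if 0	/* example only */
	__u64 unat = 0;
	void *spill_addr = (void *) 0xe0000000001234c8UL;

	ia64_set_unat(&unat, spill_addr, 1);	/* unat now has bit 25 set */
#endif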
/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct thread_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	/* XXX ouch: Linus, please pass the task pointer to thread_saved_pc() instead! */
	struct task_struct *p = (void *) ((unsigned long) t - IA64_TASK_THREAD_OFFSET);

	unw_init_from_blocked_task(&info, p);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}

/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; __asm__ ("mov %0=ip" : "=r" (_pc)); _pc; })

#define THREAD_SIZE	IA64_STK_OFFSET

/* NOTE: The task struct and the stacks are allocated together.  */
#define alloc_task_struct() \
	((struct task_struct *) __get_free_pages(GFP_KERNEL, IA64_TASK_STRUCT_LOG_NUM_PAGES))
#define free_task_struct(p)	free_pages((unsigned long)(p), IA64_TASK_STRUCT_LOG_NUM_PAGES)
#define get_task_struct(tsk)	atomic_inc(&virt_to_page(tsk)->count)

#define init_task	(init_task_union.task)
#define init_stack	(init_task_union.stack)

/*
 * Set the correctable machine check vector register
 */
static inline void
ia64_set_cmcv (__u64 val)
{
	__asm__ __volatile__ ("mov cr.cmcv=%0" :: "r"(val) : "memory");
}

/*
 * Read the correctable machine check vector register
 */
static inline __u64
ia64_get_cmcv (void)
{
	__u64 val;

	__asm__ ("mov %0=cr.cmcv" : "=r"(val) :: "memory");
	return val;
}

static inline __u64
ia64_get_ivr (void)
{
	__u64 r;
	__asm__ __volatile__ ("srlz.d;; mov %0=cr.ivr;; srlz.d;;" : "=r"(r));
	return r;
}

static inline void
ia64_set_tpr (__u64 val)
{
	__asm__ __volatile__ ("mov cr.tpr=%0" :: "r"(val));
}

static inline __u64
ia64_get_tpr (void)
{
	__u64 r;
	__asm__ ("mov %0=cr.tpr" : "=r"(r));
	return r;
}

static inline void
ia64_set_irr0 (__u64 val)
{
	__asm__ __volatile__ ("mov cr.irr0=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr0 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	__asm__ __volatile__ ("mov %0=cr.irr0" : "=r"(val));
	return val;
}

static inline void
ia64_set_irr1 (__u64 val)
{
	__asm__ __volatile__ ("mov cr.irr1=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr1 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	__asm__ __volatile__ ("mov %0=cr.irr1" : "=r"(val));
	return val;
}

static inline void
ia64_set_irr2 (__u64 val)
{
	__asm__ __volatile__ ("mov cr.irr2=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr2 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	__asm__ __volatile__ ("mov %0=cr.irr2" : "=r"(val));
	return val;
}

static inline void
ia64_set_irr3 (__u64 val)
{
	__asm__ __volatile__ ("mov cr.irr3=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr3 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	__asm__ __volatile__ ("mov %0=cr.irr3" : "=r"(val));
	return val;
}

static inline __u64
ia64_get_gp (void)
{
	__u64 val;

	__asm__ ("mov %0=gp" : "=r"(val));
	return val;
}

/* XXX remove the handcoded version once we have a sufficiently clever compiler... */
#ifdef SMART_COMPILER
# define ia64_rotr(w,n)				\
  ({						\
	__u64 _w = (w), _n = (n);		\
						\
	(_w >> _n) | (_w << (64 - _n));		\
  })
#else
# define ia64_rotr(w,n)							\
  ({									\
	__u64 result;							\
	asm ("shrp %0=%1,%1,%2" : "=r"(result) : "r"(w), "i"(n));	\
	result;								\
  })
#endif

#define ia64_rotl(w,n)	ia64_rotr((w),(64)-(n))

static inline __u64
ia64_thash (__u64 addr)
{
	__u64 result;
	asm ("thash %0=%1" : "=r"(result) : "r" (addr));
	return result;
}

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */
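A quick sanity check of the rotate macros near the end of the file. This is a minimal sketch, assuming the shrp-based variant is in use, so the rotate count must be a compile-time constant because of the "i" constraint:

	__u64 w = 0x8000000000000001UL;
	__u64 r = ia64_rotr(w, 1);	/* 0xc000000000000000: bit 0 wraps to bit 63 */
	__u64 l = ia64_rotl(w, 1);	/* 0x0000000000000003: bit 63 wraps to bit 0 */

Since ia64_rotl(w, n) simply expands to ia64_rotr(w, 64 - n), only the right rotate needs a hand-coded shrp implementation.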
