📄 gcc_intrin.h
字号:
ia64_intri_res; \})#define ia64_cmpxchg1_rel(ptr, new, old) \({ \ __u64 ia64_intri_res; \ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ ia64_intri_res; \})#define ia64_cmpxchg2_acq(ptr, new, old) \({ \ __u64 ia64_intri_res; \ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ ia64_intri_res; \})#define ia64_cmpxchg2_rel(ptr, new, old) \({ \ __u64 ia64_intri_res; \ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ \ asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ ia64_intri_res; \})#define ia64_cmpxchg4_acq(ptr, new, old) \({ \ __u64 ia64_intri_res; \ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ ia64_intri_res; \})#define ia64_cmpxchg4_rel(ptr, new, old) \({ \ __u64 ia64_intri_res; \ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ ia64_intri_res; \})#define ia64_cmpxchg8_acq(ptr, new, old) \({ \ __u64 ia64_intri_res; \ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ ia64_intri_res; \})#define ia64_cmpxchg8_rel(ptr, new, old) \({ \ __u64 ia64_intri_res; \ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ \ asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ ia64_intri_res; \})#define ia64_mf() asm volatile ("mf" ::: "memory")#define ia64_mfa() asm volatile ("mf.a" ::: "memory")#define ia64_invala() asm volatile ("invala" ::: "memory")#define ia64_thash(addr) \({ \ __u64 ia64_intri_res; \ asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \ ia64_intri_res; \})#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory")#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory");#ifdef HAVE_SERIALIZE_DIRECTIVE# define ia64_dv_serialize_data() asm volatile (".serialize.data");# define ia64_dv_serialize_instruction() asm volatile (".serialize.instruction");#else# define ia64_dv_serialize_data()# define ia64_dv_serialize_instruction()#endif#define ia64_nop(x) asm volatile ("nop %0"::"i"(x));#define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")#define ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \ :: "r"(trnum), "r"(addr) : "memory")#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \ :: "r"(trnum), "r"(addr) : "memory")#define ia64_tpa(addr) \({ \ __u64 ia64_pa; \ asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \ ia64_pa; \})#define __ia64_set_dbr(index, val) \ asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")#define ia64_set_ibr(index, val) \ asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")#define ia64_set_pkr(index, val) \ asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")#define ia64_set_pmc(index, val) \ asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")#define ia64_set_pmd(index, val) \ asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")#define ia64_set_rr(index, val) \ asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");#define ia64_get_cpuid(index) \({ \ __u64 ia64_intri_res; \ asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \ ia64_intri_res; \})#define __ia64_get_dbr(index) \({ \ __u64 ia64_intri_res; \ asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ ia64_intri_res; \})#define ia64_get_ibr(index) \({ \ __u64 ia64_intri_res; \ asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ ia64_intri_res; \})#define ia64_get_pkr(index) \({ \ __u64 ia64_intri_res; \ asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ ia64_intri_res; \})#define ia64_get_pmc(index) \({ \ __u64 ia64_intri_res; \ asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ ia64_intri_res; \})#define ia64_get_pmd(index) \({ \ __u64 ia64_intri_res; \ asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ ia64_intri_res; \})#define ia64_get_rr(index) \({ \ __u64 ia64_intri_res; \ asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \ ia64_intri_res; \})#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory")#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory")#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr))#define ia64_ptcga(addr, size) \do { \ asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \ ia64_dv_serialize_data(); \} while (0)#define ia64_ptcl(addr, size) \do { \ asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \ ia64_dv_serialize_data(); \} while (0)#define ia64_ptri(addr, size) \ asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")#define ia64_ptrd(addr, size) \ asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */#define ia64_lfhint_none 0#define ia64_lfhint_nt1 1#define ia64_lfhint_nt2 2#define ia64_lfhint_nta 3#define ia64_lfetch(lfhint, y) \({ \ switch (lfhint) { \ case ia64_lfhint_none: \ asm volatile ("lfetch [%0]" : : "r"(y)); \ break; \ case ia64_lfhint_nt1: \ asm volatile ("lfetch.nt1 [%0]" : : "r"(y)); \ break; \ case ia64_lfhint_nt2: \ asm volatile ("lfetch.nt2 [%0]" : : "r"(y)); \ break; \ case ia64_lfhint_nta: \ asm volatile ("lfetch.nta [%0]" : : "r"(y)); \ break; \ } \})#define ia64_lfetch_excl(lfhint, y) \({ \ switch (lfhint) { \ case ia64_lfhint_none: \ asm volatile ("lfetch.excl [%0]" :: "r"(y)); \ break; \ case ia64_lfhint_nt1: \ asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \ break; \ case ia64_lfhint_nt2: \ asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \ break; \ case ia64_lfhint_nta: \ asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \ break; \ } \})#define ia64_lfetch_fault(lfhint, y) \({ \ switch (lfhint) { \ case ia64_lfhint_none: \ asm volatile ("lfetch.fault [%0]" : : "r"(y)); \ break; \ case ia64_lfhint_nt1: \ asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y)); \ break; \ case ia64_lfhint_nt2: \ asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y)); \ break; \ case ia64_lfhint_nta: \ asm volatile ("lfetch.fault.nta [%0]" : : "r"(y)); \ break; \ } \})#define ia64_lfetch_fault_excl(lfhint, y) \({ \ switch (lfhint) { \ case ia64_lfhint_none: \ asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \ break; \ case ia64_lfhint_nt1: \ asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \ break; \ case ia64_lfhint_nt2: \ asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \ break; \ case ia64_lfhint_nta: \ asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \ break; \ } \})#define ia64_intrin_local_irq_restore(x) \do { \ asm volatile (";; cmp.ne p6,p7=%0,r0;;" \ "(p6) ssm psr.i;" \ "(p7) rsm psr.i;;" \ "(p6) srlz.d" \ :: "r"((x)) : "p6", "p7", "memory"); \} while (0)#ifdef XEN#include <asm/xengcc_intrin.h>#endif#endif /* _ASM_IA64_GCC_INTRIN_H */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -