xpseudo_asm_gcc.h
/* $Id: xpseudo_asm_gcc.h,v 1.8 2005/08/16 01:17:05 vasanth Exp $ */
/******************************************************************************
*
* Copyright (c) 2004 Xilinx, Inc.  All rights reserved.
*
* Xilinx, Inc.
* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" AS A
* COURTESY TO YOU.  BY PROVIDING THIS DESIGN, CODE, OR INFORMATION AS
* ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, APPLICATION OR
* STANDARD, XILINX IS MAKING NO REPRESENTATION THAT THIS IMPLEMENTATION
* IS FREE FROM ANY CLAIMS OF INFRINGEMENT, AND YOU ARE RESPONSIBLE
* FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE FOR YOUR IMPLEMENTATION.
* XILINX EXPRESSLY DISCLAIMS ANY WARRANTY WHATSOEVER WITH RESPECT TO
* THE ADEQUACY OF THE IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO
* ANY WARRANTIES OR REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE
* FROM CLAIMS OF INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE.
*
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xpseudo_asm_gcc.h
*
* This header file contains macros for using inline assembler code. It is
* written specifically for the GNU compiler.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver   Who  Date     Changes
* ----- ---- -------- -----------------------------------------------
* 1.00a ch   06/18/02 First release
* </pre>
*
******************************************************************************/

#ifndef XPSEUDO_ASM_H /* prevent circular inclusions */
#define XPSEUDO_ASM_H /* by using protection macros */

/***************************** Include Files ********************************/

#include <ppc-asm.h>
#include "xreg405.h"

#ifdef __cplusplus
extern "C" {
#endif

#define r2 2
#define r1 1

/************************** Constant Definitions ****************************/

/**************************** Type Definitions *******************************/

/***************** Macros (Inline Functions) Definitions ********************/

/* necessary for pre-processor */
#define stringify(s)    tostring(s)
#define tostring(s)     #s

/* pseudo assembler instructions */
#define mtgpr(rn, v)    __asm__ __volatile__(\
                            "mr " stringify(rn) ",%0\n"\
                            : : "r" (v)\
                        )

#define mfgpr(rn)       ({unsigned int rval; \
                          __asm__ __volatile__(\
                            "mr %0," stringify(rn) "\n"\
                            : "=r" (rval)\
                          );\
                          rval;\
                        })

#define mtspr(rn, v)    __asm__ __volatile__(\
                            "mtspr " stringify(rn) ",%0\n"\
                            : : "r" (v)\
                        )

#define mfspr(rn)       ({unsigned int rval; \
                          __asm__ __volatile__(\
                            "mfspr %0," stringify(rn) "\n"\
                            : "=r" (rval)\
                          );\
                          rval;\
                        })

#define mtdcr(rn, v)    __asm__ __volatile__(\
                            "mtdcr " stringify(rn) ",%0\n"\
                            : : "r" (v)\
                        )

#define mfdcr(rn)       ({unsigned int rval; \
                          __asm__ __volatile__(\
                            "mfdcr %0," stringify(rn) "\n"\
                            : "=r" (rval)\
                          );\
                          rval;\
                        })

#define mtmsr(v)        __asm__ __volatile__(\
                            "mtmsr %0\n"\
                            : : "r" (v)\
                        )

#define mfmsr()         ({unsigned int rval; \
                          __asm__ __volatile__(\
                            "mfmsr %0\n"\
                            : "=r" (rval)\
                          );\
                          rval;\
                        })

#define mtevpr(adr)     mtspr(XREG_SPR_EVPR, (adr))

#define iccci           __asm__ __volatile__("iccci 0,0\n")

#define isync           __asm__ __volatile__("isync\n")

#define icbi(adr)       __asm__ __volatile__("icbi 0,%0\n" : : "r" (adr))

#define dccci(adr)      __asm__ __volatile__("dccci 0,%0\n" : : "r" (adr))

#define dcbst(adr)      __asm__ __volatile__("dcbst 0,%0\n" : : "r" (adr))

#define dcbf(adr)       __asm__ __volatile__("dcbf 0,%0\n" : : "r" (adr))

#define dcbi(adr)       __asm__ __volatile__("dcbi 0,%0\n" : : "r" (adr))

#define dcread(adr)     ({register unsigned int rval; \
                          __asm__ __volatile__(\
                            "dcread %0,0,%1\n"\
                            : "=r" (rval) : "r" (adr)\
                          );\
                          rval;\
                        })

#define sync            __asm__ __volatile__("sync\n")
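/*
 * Usage sketch (editorial illustration, not part of the original header).
 * The variable names and the 0x00008000 MSR[EE] mask are assumptions; only
 * mfmsr(), mtmsr(), dcbf() and sync are taken from the macros above.
 *
 *   unsigned int old_msr = mfmsr();    // read the Machine State Register
 *   mtmsr(old_msr & ~0x00008000);      // clear MSR[EE] to mask external interrupts
 *   // ... critical section ...
 *   mtmsr(old_msr);                    // restore the saved MSR
 *
 *   dcbf(buf_addr);                    // flush the data-cache line holding buf_addr
 *   sync;                              // wait for the flush to complete
 */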
/* memory operations */
#define eieio           __asm__ __volatile__("eieio\n")

#define lbz(adr)        ({unsigned char rval; \
                          __asm__ __volatile__(\
                            "lbz %0,0(%1)\n"\
                            : "=r" (rval) : "b" (adr)\
                          );\
                          rval;\
                        })

#define lhz(adr)        ({unsigned short rval; \
                          __asm__ __volatile__(\
                            "lhz %0,0(%1)\n"\
                            : "=r" (rval) : "b" (adr)\
                          );\
                          rval;\
                        })

#define lwz(adr)        ({unsigned int rval; \
                          __asm__ __volatile__(\
                            "lwz %0,0(%1)\n"\
                            : "=r" (rval) : "b" (adr)\
                          );\
                          rval;\
                        })

#define stb(adr, val)   __asm__ __volatile__(\
                            "stb %0,0(%1)\n"\
                            : : "r" (val), "b" (adr)\
                        )

#define sth(adr, val)   __asm__ __volatile__(\
                            "sth %0,0(%1)\n"\
                            : : "r" (val), "b" (adr)\
                        )

#define stw(adr, val)   __asm__ __volatile__(\
                            "stw %0,0(%1)\n"\
                            : : "r" (val), "b" (adr)\
                        )

#define lhbrx(adr)      ({unsigned short rval; \
                          __asm__ __volatile__(\
                            "lhbrx %0,0,%1\n"\
                            : "=r" (rval) : "r" (adr)\
                          );\
                          rval;\
                        })

#define lwbrx(adr)      ({unsigned int rval; \
                          __asm__ __volatile__(\
                            "lwbrx %0,0,%1\n"\
                            : "=r" (rval) : "r" (adr)\
                          );\
                          rval;\
                        })

#define sthbrx(adr, val)    __asm__ __volatile__(\
                                "sthbrx %0,0,%1\n"\
                                : : "r" (val), "r" (adr)\
                            )

#define stwbrx(adr, val)    __asm__ __volatile__(\
                                "stwbrx %0,0,%1\n"\
                                : : "r" (val), "r" (adr)\
                            )

/* Blocking Data Read and Write to FSL no. id */
#define getfsl(val, id)     __asm__ __volatile__(\
                                "get %0, " #id : "=r" (val))
#define putfsl(val, id)     __asm__ __volatile__(\
                                "put %0, " #id :: "r" (val))

/* Non-blocking Data Read and Write to FSL no. id */
#define ngetfsl(val, id)    __asm__ __volatile__(\
                                "nget %0, " #id : "=r" (val))
#define nputfsl(val, id)    __asm__ __volatile__(\
                                "nput %0, " #id :: "r" (val))

/* Blocking Control Read and Write to FSL no. id */
#define cgetfsl(val, id)    __asm__ __volatile__(\
                                "cget %0, " #id : "=r" (val))
#define cputfsl(val, id)    __asm__ __volatile__(\
                                "cput %0, " #id :: "r" (val))

/* Non-blocking Control Read and Write to FSL no. id */
#define ncgetfsl(val, id)   __asm__ __volatile__(\
                                "ncget %0, " #id : "=r" (val))
#define ncputfsl(val, id)   __asm__ __volatile__(\
                                "ncput %0, " #id :: "r" (val))

/* Interruptible versions of the FSL access macros. On the PowerPC, even the
 * "blocking" versions are interruptible, so these simply map to the blocking
 * versions. */
#define getfsl_interruptible(val, id)   getfsl(val, id)
#define putfsl_interruptible(val, id)   putfsl(val, id)
#define cgetfsl_interruptible(val, id)  cgetfsl(val, id)
#define cputfsl_interruptible(val, id)  cputfsl(val, id)

#define fsl_isinvalid(invalid)  __asm__ __volatile__( \
                                    "mfspr\t%0,0x001\n\t" \
                                    "rlwinm\t%0,%0,3,31,31" : "=r" (invalid))

#define fsl_iserror(error)      __asm__ __volatile__( \
                                    "mfspr\t%0,0x001\n\t" \
                                    "rlwinm\t%0,%0,2,31,31" : "=r" (error))
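/*
 * FSL usage sketch (editorial illustration, not part of the original header).
 * The variable names are arbitrary; the FSL port numbers must be literal
 * constants because they are pasted into the instruction text by the
 * preprocessor.
 *
 *   unsigned int data, invalid;
 *   ngetfsl(data, 0);          // non-blocking read from FSL port 0
 *   fsl_isinvalid(invalid);    // nonzero if the read returned no data
 *   if (!invalid) {
 *       putfsl(data, 1);       // blocking write to FSL port 1
 *   }
 */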
/************** APU UDI FCM Level 2 Internal Macros **************/
/************** udi<n>fcm. Instruction Combinations **************/

/* udi0fcm. */
#define UDI0FCMCR_GPR_GPR_GPR(a, b, c) \
    __asm__ __volatile__("udi0fcm. %0,%1,%2" : "=r"(a) : "r"(b), "r"(c))
#define UDI0FCMCR_GPR_GPR_IMM(a, b, c) \
    __asm__ __volatile__("udi0fcm. %0,%1," #c : "=r"(a) : "r"(b))
#define UDI0FCMCR_GPR_IMM_IMM(a, b, c) \
    __asm__ __volatile__("udi0fcm. %0," #b "," #c : "=r"(a))
#define UDI0FCMCR_IMM_GPR_GPR(a, b, c) \
    __asm__ __volatile__("udi0fcm. " #a ",%0,%1" : : "r"(b), "r"(c))
#define UDI0FCMCR_IMM_IMM_GPR(a, b, c) \
    __asm__ __volatile__("udi0fcm. " #a "," #b ",%0" :: "r"(c))
#define UDI0FCMCR_IMM_IMM_IMM(a, b, c) \
    __asm__ __volatile__("udi0fcm. " #a "," #b "," #c)

/* udi1fcm. */
#define UDI1FCMCR_GPR_GPR_GPR(a, b, c) \
    __asm__ __volatile__("udi1fcm. %0,%1,%2" : "=r"(a) : "r"(b), "r"(c))
#define UDI1FCMCR_GPR_GPR_IMM(a, b, c) \
    __asm__ __volatile__("udi1fcm. %0,%1," #c : "=r"(a) : "r"(b))
#define UDI1FCMCR_GPR_IMM_IMM(a, b, c) \
    __asm__ __volatile__("udi1fcm. %0," #b "," #c : "=r"(a))
#define UDI1FCMCR_IMM_GPR_GPR(a, b, c) \
    __asm__ __volatile__("udi1fcm. " #a ",%0,%1" : : "r"(b), "r"(c))
#define UDI1FCMCR_IMM_IMM_GPR(a, b, c) \
    __asm__ __volatile__("udi1fcm. " #a "," #b ",%0" :: "r"(c))
#define UDI1FCMCR_IMM_IMM_IMM(a, b, c) \
    __asm__ __volatile__("udi1fcm. " #a "," #b "," #c)

/* udi2fcm. */
#define UDI2FCMCR_GPR_GPR_GPR(a, b, c) \
    __asm__ __volatile__("udi2fcm. %0,%1,%2" : "=r"(a) : "r"(b), "r"(c))
#define UDI2FCMCR_GPR_GPR_IMM(a, b, c) \
    __asm__ __volatile__("udi2fcm. %0,%1," #c : "=r"(a) : "r"(b))
#define UDI2FCMCR_GPR_IMM_IMM(a, b, c) \
    __asm__ __volatile__("udi2fcm. %0," #b "," #c : "=r"(a))
#define UDI2FCMCR_IMM_GPR_GPR(a, b, c) \
    __asm__ __volatile__("udi2fcm. " #a ",%0,%1" : : "r"(b), "r"(c))
#define UDI2FCMCR_IMM_IMM_GPR(a, b, c) \
    __asm__ __volatile__("udi2fcm. " #a "," #b ",%0" :: "r"(c))
#define UDI2FCMCR_IMM_IMM_IMM(a, b, c) \
    __asm__ __volatile__("udi2fcm. " #a "," #b "," #c)

/* udi3fcm. */
#define UDI3FCMCR_GPR_GPR_GPR(a, b, c) \
    __asm__ __volatile__("udi3fcm. %0,%1,%2" : "=r"(a) : "r"(b), "r"(c))
#define UDI3FCMCR_GPR_GPR_IMM(a, b, c) \
    __asm__ __volatile__("udi3fcm. %0,%1," #c : "=r"(a) : "r"(b))
#define UDI3FCMCR_GPR_IMM_IMM(a, b, c) \
    __asm__ __volatile__("udi3fcm. %0," #b "," #c : "=r"(a))
#define UDI3FCMCR_IMM_GPR_GPR(a, b, c) \
    __asm__ __volatile__("udi3fcm. " #a ",%0,%1" : : "r"(b), "r"(c))
#define UDI3FCMCR_IMM_IMM_GPR(a, b, c) \
    __asm__ __volatile__("udi3fcm. " #a "," #b ",%0" :: "r"(c))
#define UDI3FCMCR_IMM_IMM_IMM(a, b, c) \
    __asm__ __volatile__("udi3fcm. " #a "," #b "," #c)

/* udi4fcm. */
#define UDI4FCMCR_GPR_GPR_GPR(a, b, c) \
    __asm__ __volatile__("udi4fcm. %0,%1,%2" : "=r"(a) : "r"(b), "r"(c))
#define UDI4FCMCR_GPR_GPR_IMM(a, b, c) \
    __asm__ __volatile__("udi4fcm. %0,%1," #c : "=r"(a) : "r"(b))
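Below is a minimal usage sketch for the UDI FCM macros. It is added for illustration and is not part of the original header: the function name Udi0Apply and its operands are invented, and it assumes a PPC405 APU target whose FPGA fabric attaches an FCM behind the udi0fcm. opcode and a toolchain whose assembler accepts that mnemonic.

#include "xpseudo_asm_gcc.h"

/* Hypothetical wrapper: issue the user-defined APU instruction udi0fcm.
 * with two register operands and return the register result. What the
 * instruction actually computes is defined by the attached FCM hardware,
 * not by this code. */
static inline unsigned int Udi0Apply(unsigned int OpA, unsigned int OpB)
{
    unsigned int Result;

    UDI0FCMCR_GPR_GPR_GPR(Result, OpA, OpB);    /* Result <- FCM(OpA, OpB) */
    return Result;
}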