/* cpu-all.h (.svn-base copy) — recovered from a web code-viewer page */
/* * defines common to all virtual CPUs * * Copyright (c) 2003 Fabrice Bellard * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */#ifndef CPU_ALL_H#define CPU_ALL_H#if defined(__arm__) || defined(__sparc__) || defined(__mips__)#define WORDS_ALIGNED#endif/* some important defines: * * WORDS_ALIGNED : if defined, the host cpu can only make word aligned * memory accesses. * * WORDS_BIGENDIAN : if defined, the host cpu is big endian and * otherwise little endian. 
* * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet)) * * TARGET_WORDS_BIGENDIAN : same for target cpu */#include "bswap.h"#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)#define BSWAP_NEEDED#endif#ifdef BSWAP_NEEDEDstatic inline uint16_t tswap16(uint16_t s){ return bswap16(s);}static inline uint32_t tswap32(uint32_t s){ return bswap32(s);}static inline uint64_t tswap64(uint64_t s){ return bswap64(s);}static inline void tswap16s(uint16_t *s){ *s = bswap16(*s);}static inline void tswap32s(uint32_t *s){ *s = bswap32(*s);}static inline void tswap64s(uint64_t *s){ *s = bswap64(*s);}#elsestatic inline uint16_t tswap16(uint16_t s){ return s;}static inline uint32_t tswap32(uint32_t s){ return s;}static inline uint64_t tswap64(uint64_t s){ return s;}static inline void tswap16s(uint16_t *s){}static inline void tswap32s(uint32_t *s){}static inline void tswap64s(uint64_t *s){}#endif#if TARGET_LONG_SIZE == 4#define tswapl(s) tswap32(s)#define tswapls(s) tswap32s((uint32_t *)(s))#define bswaptls(s) bswap32s(s)#else#define tswapl(s) tswap64(s)#define tswapls(s) tswap64s((uint64_t *)(s))#define bswaptls(s) bswap64s(s)#endif/* NOTE: arm FPA is horrible as double 32 bit words are stored in big endian ! 
*/typedef union { float64 d;#if defined(WORDS_BIGENDIAN) \ || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT)) struct { uint32_t upper; uint32_t lower; } l;#else struct { uint32_t lower; uint32_t upper; } l;#endif uint64_t ll;} CPU_DoubleU;#ifdef TARGET_SPARCtypedef union { float128 q;#if defined(WORDS_BIGENDIAN) \ || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT)) struct { uint32_t upmost; uint32_t upper; uint32_t lower; uint32_t lowest; } l; struct { uint64_t upper; uint64_t lower; } ll;#else struct { uint32_t lowest; uint32_t lower; uint32_t upper; uint32_t upmost; } l; struct { uint64_t lower; uint64_t upper; } ll;#endif} CPU_QuadU;#endif/* CPU memory access without any memory or io remapping *//* * the generic syntax for the memory accesses is: * * load: ld{type}{sign}{size}{endian}_{access_type}(ptr) * * store: st{type}{size}{endian}_{access_type}(ptr, val) * * type is: * (empty): integer access * f : float access * * sign is: * (empty): for floats or 32 bit size * u : unsigned * s : signed * * size is: * b: 8 bits * w: 16 bits * l: 32 bits * q: 64 bits * * endian is: * (empty): target cpu endianness or 8 bit access * r : reversed target cpu endianness (not implemented yet) * be : big endian (not implemented yet) * le : little endian (not implemented yet) * * access_type is: * raw : host memory access * user : user mode access using soft MMU * kernel : kernel mode access using soft MMU */static inline int ldub_p(void *ptr){ return *(uint8_t *)ptr;}static inline int ldsb_p(void *ptr){ return *(int8_t *)ptr;}static inline void stb_p(void *ptr, int v){ *(uint8_t *)ptr = v;}/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the kernel handles unaligned load/stores may give better results, but it is a system wide setting : bad */#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)/* conservative code for little endian unaligned accesses */static inline int lduw_le_p(void *ptr){#ifdef __powerpc__ int 
val; __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr)); return val;#else uint8_t *p = ptr; return p[0] | (p[1] << 8);#endif}static inline int ldsw_le_p(void *ptr){#ifdef __powerpc__ int val; __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr)); return (int16_t)val;#else uint8_t *p = ptr; return (int16_t)(p[0] | (p[1] << 8));#endif}static inline int ldl_le_p(void *ptr){#ifdef __powerpc__ int val; __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr)); return val;#else uint8_t *p = ptr; return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);#endif}static inline uint64_t ldq_le_p(void *ptr){ uint8_t *p = ptr; uint32_t v1, v2; v1 = ldl_le_p(p); v2 = ldl_le_p(p + 4); return v1 | ((uint64_t)v2 << 32);}static inline void stw_le_p(void *ptr, int v){#ifdef __powerpc__ __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));#else uint8_t *p = ptr; p[0] = v; p[1] = v >> 8;#endif}static inline void stl_le_p(void *ptr, int v){#ifdef __powerpc__ __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));#else uint8_t *p = ptr; p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;#endif}static inline void stq_le_p(void *ptr, uint64_t v){ uint8_t *p = ptr; stl_le_p(p, (uint32_t)v); stl_le_p(p + 4, v >> 32);}/* float access */static inline float32 ldfl_le_p(void *ptr){ union { float32 f; uint32_t i; } u; u.i = ldl_le_p(ptr); return u.f;}static inline void stfl_le_p(void *ptr, float32 v){ union { float32 f; uint32_t i; } u; u.f = v; stl_le_p(ptr, u.i);}static inline float64 ldfq_le_p(void *ptr){ CPU_DoubleU u; u.l.lower = ldl_le_p(ptr); u.l.upper = ldl_le_p(ptr + 4); return u.d;}static inline void stfq_le_p(void *ptr, float64 v){ CPU_DoubleU u; u.d = v; stl_le_p(ptr, u.l.lower); stl_le_p(ptr + 4, u.l.upper);}#elsestatic inline int lduw_le_p(void *ptr){ return *(uint16_t *)ptr;}static inline int ldsw_le_p(void *ptr){ return *(int16_t *)ptr;}static inline int ldl_le_p(void *ptr){ return 
*(uint32_t *)ptr;}static inline uint64_t ldq_le_p(void *ptr){ return *(uint64_t *)ptr;}static inline void stw_le_p(void *ptr, int v){ *(uint16_t *)ptr = v;}static inline void stl_le_p(void *ptr, int v){ *(uint32_t *)ptr = v;}static inline void stq_le_p(void *ptr, uint64_t v){ *(uint64_t *)ptr = v;}/* float access */static inline float32 ldfl_le_p(void *ptr){ return *(float32 *)ptr;}static inline float64 ldfq_le_p(void *ptr){ return *(float64 *)ptr;}static inline void stfl_le_p(void *ptr, float32 v){ *(float32 *)ptr = v;}static inline void stfq_le_p(void *ptr, float64 v){ *(float64 *)ptr = v;}#endif#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)static inline int lduw_be_p(void *ptr){#if defined(__i386__) int val; asm volatile ("movzwl %1, %0\n" "xchgb %b0, %h0\n" : "=q" (val) : "m" (*(uint16_t *)ptr)); return val;#else uint8_t *b = (uint8_t *) ptr; return ((b[0] << 8) | b[1]);#endif}static inline int ldsw_be_p(void *ptr){#if defined(__i386__) int val; asm volatile ("movzwl %1, %0\n" "xchgb %b0, %h0\n" : "=q" (val) : "m" (*(uint16_t *)ptr)); return (int16_t)val;#else uint8_t *b = (uint8_t *) ptr; return (int16_t)((b[0] << 8) | b[1]);#endif}static inline int ldl_be_p(void *ptr){#if defined(__i386__) || defined(__x86_64__) int val; asm volatile ("movl %1, %0\n" "bswap %0\n" : "=r" (val) : "m" (*(uint32_t *)ptr)); return val;#else uint8_t *b = (uint8_t *) ptr; return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];#endif}static inline uint64_t ldq_be_p(void *ptr){ uint32_t a,b; a = ldl_be_p(ptr); b = ldl_be_p(ptr+4); return (((uint64_t)a<<32)|b);}static inline void stw_be_p(void *ptr, int v){#if defined(__i386__) asm volatile ("xchgb %b0, %h0\n" "movw %w0, %1\n" : "=q" (v) : "m" (*(uint16_t *)ptr), "0" (v));#else uint8_t *d = (uint8_t *) ptr; d[0] = v >> 8; d[1] = v;#endif}static inline void stl_be_p(void *ptr, int v){#if defined(__i386__) || defined(__x86_64__) asm volatile ("bswap %0\n" "movl %0, %1\n" : "=r" (v) : "m" (*(uint32_t *)ptr), "0" (v));#else 
uint8_t *d = (uint8_t *) ptr; d[0] = v >> 24; d[1] = v >> 16; d[2] = v >> 8; d[3] = v;#endif}static inline void stq_be_p(void *ptr, uint64_t v){ stl_be_p(ptr, v >> 32); stl_be_p(ptr + 4, v);}/* float access */static inline float32 ldfl_be_p(void *ptr){ union { float32 f; uint32_t i; } u; u.i = ldl_be_p(ptr); return u.f;}static inline void stfl_be_p(void *ptr, float32 v){ union { float32 f; uint32_t i; } u; u.f = v; stl_be_p(ptr, u.i);}static inline float64 ldfq_be_p(void *ptr){ CPU_DoubleU u; u.l.upper = ldl_be_p(ptr); u.l.lower = ldl_be_p(ptr + 4); return u.d;}static inline void stfq_be_p(void *ptr, float64 v){
/* (code-viewer page residue, not part of the source: keyboard-shortcut help —
   copy Ctrl+C, search Ctrl+F, fullscreen F11, toggle theme Ctrl+Shift+D,
   show shortcuts ?, larger font Ctrl+=, smaller font Ctrl+-) */