📄 exec-all.h
#if defined(__powerpc__)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (ASM_DATA_SECTION\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "b " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#elif defined(__i386__) && defined(USE_DIRECT_JUMP)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (".section .data\n"\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "jmp " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#else

/* jump to next block operations (more portable code, does not need
   cache flushing, but slower because of indirect jump) */
#define GOTO_TB(opname, tbparam, n)\
do {\
    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
    static void __attribute__((unused)) *__op_label ## n \
        __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n: ;\
dummy_label ## n: ;\
} while (0)

#endif

extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];

/* Per-architecture atomic test-and-set: each variant stores 1 in *p and
   returns the previous value, so 0 means the lock was free and has now
   been acquired. */

#ifdef __powerpc__
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
                          "0:    lwarx %0,0,%1\n"
                          "      xor. %0,%3,%0\n"
                          "      bne 1f\n"
                          "      stwcx. %2,0,%1\n"
                          "      bne- 0b\n"
                          "1:    "
                          : "=&r" (ret)
                          : "r" (p), "r" (1), "r" (0)
                          : "cr0", "memory");
    return ret;
}
#endif

#ifdef __i386__
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#endif

#ifdef __x86_64__
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#endif

#ifdef __s390__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                          "   jl    0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory");
    return ret;
}
#endif

#ifdef __alpha__
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0: mov 1,%2\n"
                          "   ldl_l %0,%1\n"
                          "   stl_c %2,%1\n"
                          "   beq %2,1f\n"
                          ".subsection 2\n"
                          "1: br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#endif

#ifdef __sparc__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#endif

#ifdef __arm__
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#endif

#ifdef __mc68000
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (*p)
                         : "cc", "memory");
    return ret;
}
#endif

#ifdef __ia64
#include <ia64intrin.h>

static inline int testandset (int *p)
{
    return __sync_lock_test_and_set (p, 1);
}
#endif
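/* Illustrative sketch, not part of the original header: on any target
   where GCC provides the atomic builtins, the same contract could be
   satisfied with the intrinsic the __ia64 variant above already uses.
   Kept under "#if 0" because it is an example, not a supported case. */
#if 0
static inline int testandset (int *p)
{
    /* atomically store 1 in *p and return the previous value */
    return __sync_lock_test_and_set(p, 1);
}
#endif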
typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
/* the full system emulator runs single-threaded, so the locks compile
   away to no-ops */
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif

extern spinlock_t tb_lock;

extern int tb_invalidated_flag;

#if !defined(CONFIG_USER_ONLY)

void tlb_fill(target_ulong addr, int is_write, int is_user,
              void *retaddr);

#define ACCESS_TYPE 3
#define MEMSUFFIX _code
#define env cpu_single_env

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif

#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    int is_user, index, pd;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
#if defined(TARGET_I386)
    is_user = ((env->hflags & HF_CPL_MASK) == 3);
#elif defined (TARGET_PPC)
    is_user = msr_pr;
#elif defined (TARGET_MIPS)
    is_user = ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM);
#elif defined (TARGET_SPARC)
    is_user = (env->psrs == 0);
#elif defined (TARGET_ARM)
    is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR);
#elif defined (TARGET_SH4)
    is_user = ((env->sr & SR_MD) == 0);
#else
#error unimplemented CPU
#endif
    /* a miss in the code TLB forces a fill via ldub_code() */
    if (__builtin_expect(env->tlb_table[is_user][index].addr_code !=
                         (addr & TARGET_PAGE_MASK), 0)) {
        ldub_code(addr);
    }
    pd = env->tlb_table[is_user][index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM) {
        cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x%08lx\n",
                  addr);
    }
    return addr + env->tlb_table[is_user][index].addend -
        (unsigned long)phys_ram_base;
}
#endif

#ifdef USE_KQEMU
#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))

int kqemu_init(CPUState *env);
int kqemu_cpu_exec(CPUState *env);
void kqemu_flush_page(CPUState *env, target_ulong addr);
void kqemu_flush(CPUState *env, int global);
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
void kqemu_cpu_interrupt(CPUState *env);
void kqemu_record_dump(void);

static inline int kqemu_is_ok(CPUState *env)
{
    return (env->kqemu_enabled &&
            (env->cr[0] & CR0_PE_MASK) &&
            !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
            (env->eflags & IF_MASK) &&
            !(env->eflags & VM_MASK) &&
            (env->kqemu_enabled == 2 ||
             ((env->hflags & HF_CPL_MASK) == 3 &&
              (env->eflags & IOPL_MASK) != IOPL_MASK)));
}
#endif
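/* Usage sketch, not part of the original header: callers that modify the
   translated-block data structures bracket the update with the tb_lock
   spinlock declared above.  The function below is hypothetical. */
#if 0
static void example_tb_update(void)
{
    spin_lock(&tb_lock);        /* spins via testandset() in user mode */
    /* ... walk or modify the TranslationBlock lists ... */
    spin_unlock(&tb_lock);      /* plain store resets the lock word */
}
#endif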