ngx_gcc_atomic_amd64.h

来自「nginx 反向代理0.7.1版本 用于实现反向代理」· C头文件 代码 · 共 82 行

H
82
字号
/*
 * Copyright (C) Igor Sysoev
 */


/*
 * GCC inline-assembly atomic primitives for the amd64 (x86-64)
 * architecture: compare-and-set, fetch-and-add, a compiler-level
 * memory barrier, and the spin-wait "pause" hint.
 */


/*
 * Prefix the instruction with "lock" only on multiprocessor builds;
 * on a uniprocessor a single read-modify-write instruction is already
 * atomic with respect to other code on the same CPU.
 */

#if (NGX_SMP)
#define NGX_SMP_LOCK  "lock;"
#else
#define NGX_SMP_LOCK
#endif


/*
 * "cmpxchgq  r, [m]":
 *
 *     if (rax == [m]) {
 *         zf = 1;
 *         [m] = r;
 *     } else {
 *         zf = 0;
 *         rax = [m];
 *     }
 *
 *
 * The "r" is any register, %rax (%r0) - %r15.
 * The "=a" and "a" are the %rax register.
 * Although we can return result in any register, we use "a" because it is
 * used in cmpxchgq anyway.  The result is actually in %al but not in %rax,
 * however as the code is inlined gcc can test %al as well as %rax.
 *
 * The "cc" means that flags were changed.
 */

/*
 * Atomic compare-and-set: if *lock still equals "old", store "set" into
 * it.  Returns 1 on success, 0 if *lock had already been changed by
 * another CPU (the sete instruction materializes the zero flag set by
 * cmpxchgq).
 */

static ngx_inline ngx_atomic_uint_t
ngx_atomic_cmp_set(ngx_atomic_t *lock, ngx_atomic_uint_t old,
    ngx_atomic_uint_t set)
{
    u_char  res;

    __asm__ volatile (

         NGX_SMP_LOCK
    "    cmpxchgq  %3, %1;   "
    "    sete      %0;       "

    : "=a" (res) : "m" (*lock), "a" (old), "r" (set) : "cc", "memory");

    return res;
}


/*
 * "xaddq  r, [m]":
 *
 *     temp = [m];
 *     [m] += r;
 *     r = temp;
 *
 *
 * The "+r" is any register, %rax (%r0) - %r15.
 * The "cc" means that flags were changed.
 */

/*
 * Atomic fetch-and-add: adds "add" to *value and returns the value that
 * *value held before the addition (xaddq exchanges the register operand
 * with the old memory contents, so "add" comes back holding it).
 */

static ngx_inline ngx_atomic_int_t
ngx_atomic_fetch_add(ngx_atomic_t *value, ngx_atomic_int_t add)
{
    __asm__ volatile (

         NGX_SMP_LOCK
    "    xaddq  %0, %1;   "

    : "+r" (add) : "m" (*value) : "cc", "memory");

    return add;
}


/*
 * Compiler-only barrier: the "memory" clobber stops gcc from caching
 * memory values in registers or reordering loads/stores across this
 * point; it emits no machine instruction.
 */
#define ngx_memory_barrier()    __asm__ volatile ("" ::: "memory")

/* Spin-loop hint: "pause" reduces power use and pipeline flushes while
   busy-waiting on a lock. */
#define ngx_cpu_pause()         __asm__ ("pause")

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?