📄 op_neon.h.svn-base
字号:
/* NOTE(review): dyngen-style QEMU micro-ops.  T0/T1/T2 are the dyngen
   operand temporaries, PARAM1/PARAM2 are immediate parameters patched in
   at translation time, and FORCE_RET() marks the op's end for code
   extraction -- all defined outside this chunk.  Statement shape matters
   to dyngen; do not restructure without rebuilding the translator.  */

/* NOTE(review): this chunk starts mid-function.  The opening of the
   preceding op (a widening unsigned 8-bit multiply, judging by the
   DO_MULL operand types, presumably mull_u8 -- confirm against the full
   file) is outside this view; the statements below are its tail.  Each
   u8 lane product becomes a u16 result, packed into the T0:T1 pair. */
uint32_t low;
uint32_t high;
DO_MULL(low, T0, T1, uint8_t, uint16_t);
DO_MULL(tmp, T0 >> 8, T1 >> 8, uint8_t, uint16_t);
low |= tmp << 16;
DO_MULL(high, T0 >> 16, T1 >> 16, uint8_t, uint16_t);
DO_MULL(tmp, T0 >> 24, T1 >> 24, uint8_t, uint16_t);
high |= tmp << 16;
T0 = low;
T1 = high;
FORCE_RET();
}

/* Widening signed 8-bit multiply: four s8 lanes of T0 times the
   corresponding s8 lanes of T1; the four 16-bit products are packed
   into T0 (low pair) and T1 (high pair). */
NEON_OP(mull_s8)
{
    uint32_t tmp;
    uint32_t low;
    uint32_t high;
    DO_MULL(low, T0, T1, int8_t, uint16_t);
    DO_MULL(tmp, T0 >> 8, T1 >> 8, int8_t, uint16_t);
    low |= tmp << 16;
    DO_MULL(high, T0 >> 16, T1 >> 16, int8_t, uint16_t);
    DO_MULL(tmp, T0 >> 24, T1 >> 24, int8_t, uint16_t);
    high |= tmp << 16;
    T0 = low;
    T1 = high;
    FORCE_RET();
}

/* Widening unsigned 16-bit multiply: two u16 lanes -> two u32 products
   in T0 (low lane) and T1 (high lane). */
NEON_OP(mull_u16)
{
    uint32_t low;
    uint32_t high;
    DO_MULL(low, T0, T1, uint16_t, uint32_t);
    DO_MULL(high, T0 >> 16, T1 >> 16, uint16_t, uint32_t);
    T0 = low;
    T1 = high;
    FORCE_RET();
}

/* Widening signed 16-bit multiply; same packing as mull_u16. */
NEON_OP(mull_s16)
{
    uint32_t low;
    uint32_t high;
    DO_MULL(low, T0, T1, int16_t, uint32_t);
    DO_MULL(high, T0 >> 16, T1 >> 16, int16_t, uint32_t);
    T0 = low;
    T1 = high;
    FORCE_RET();
}

/* Saturating signed 32-bit add of {T0,T1} with the pair stashed in
   env->vfp.scratch[0..1].  On overflow the result saturates to
   INT32_MAX/INT32_MIN and the sticky saturation flag env->QF is set. */
NEON_OP(addl_saturate_s32)
{
    uint32_t tmp;
    uint32_t res;
    tmp = env->vfp.scratch[0];
    res = T0 + tmp;
    /* Signed overflow iff both addends share a sign (second term) and
       the sum's sign differs from them (first term). */
    if (((res ^ T0) & SIGNBIT) && !((T0 ^ tmp) & SIGNBIT)) {
        env->QF = 1;
        /* (x >> 31) ^ 0x7fffffff yields 0x7fffffff for a positive
           operand and 0x80000000 for a negative one. */
        T0 = (T0 >> 31) ^ 0x7fffffff;
    } else {
        T0 = res;
    }
    tmp = env->vfp.scratch[1];
    res = T1 + tmp;
    if (((res ^ T1) & SIGNBIT) && !((T1 ^ tmp) & SIGNBIT)) {
        env->QF = 1;
        T1 = (T1 >> 31) ^ 0x7fffffff;
    } else {
        T1 = res;
    }
    FORCE_RET();
}

/* Saturating signed 64-bit add: the 64-bit value T1:T0 plus the 64-bit
   value scratch[1]:scratch[0]; saturates to INT64_MAX/INT64_MIN. */
NEON_OP(addl_saturate_s64)
{
    uint64_t src1;
    uint64_t src2;
    uint64_t res;
    src1 = T0 + ((uint64_t)T1 << 32);
    src2 = env->vfp.scratch[0] + ((uint64_t)env->vfp.scratch[1] << 32);
    res = src1 + src2;
    if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
        env->QF = 1;
        /* All-ones (negative operand) or all-zeros (positive), then the
           top word's sign bit is flipped to form INT64_MIN/INT64_MAX.
           Relies on arithmetic right shift of a signed value. */
        T0 = ~(int64_t)src1 >> 63;
        T1 = T0 ^ 0x80000000;
    } else {
        T0 = res;
        T1 = res >> 32;
    }
    FORCE_RET();
}

/* Saturating unsigned 64-bit add; unsigned overflow is detected by the
   sum wrapping below the first operand. */
NEON_OP(addl_saturate_u64)
{
    uint64_t src1;
    uint64_t src2;
    uint64_t res;
    src1 = T0 + ((uint64_t)T1 << 32);
    src2 = env->vfp.scratch[0] + ((uint64_t)env->vfp.scratch[1] << 32);
    res = src1 + src2;
    if (res < src1) {
        env->QF = 1;
        T0 = 0xffffffff;
        T1 = 0xffffffff;
    } else {
        T0 = res;
        T1 = res >> 32;
    }
    FORCE_RET();
}

/* Saturating signed 64-bit subtract: T1:T0 minus scratch[1]:scratch[0].
   Overflow iff the operands differ in sign and the result's sign
   differs from the minuend's. */
NEON_OP(subl_saturate_s64)
{
    uint64_t src1;
    uint64_t src2;
    uint64_t res;
    src1 = T0 + ((uint64_t)T1 << 32);
    src2 = env->vfp.scratch[0] + ((uint64_t)env->vfp.scratch[1] << 32);
    res = src1 - src2;
    if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
        env->QF = 1;
        T0 = ~(int64_t)src1 >> 63;
        T1 = T0 ^ 0x80000000;
    } else {
        T0 = res;
        T1 = res >> 32;
    }
    FORCE_RET();
}

/* Saturating unsigned 64-bit subtract; clamps to zero on underflow. */
NEON_OP(subl_saturate_u64)
{
    uint64_t src1;
    uint64_t src2;
    uint64_t res;
    src1 = T0 + ((uint64_t)T1 << 32);
    src2 = env->vfp.scratch[0] + ((uint64_t)env->vfp.scratch[1] << 32);
    if (src1 < src2) {
        env->QF = 1;
        T0 = 0;
        T1 = 0;
    } else {
        res = src1 - src2;
        T0 = res;
        T1 = res >> 32;
    }
    FORCE_RET();
}

/* Negate four 16-bit lanes held in T0 and T1 (two lanes each),
   wrapping modulo 2^16. */
NEON_OP(negl_u16)
{
    uint32_t tmp;
    tmp = T0 >> 16;
    tmp = -tmp;
    T0 = (-T0 & 0xffff) | (tmp << 16);
    tmp = T1 >> 16;
    tmp = -tmp;
    T1 = (-T1 & 0xffff) | (tmp << 16);
    FORCE_RET();
}

/* Negate two 32-bit lanes independently. */
NEON_OP(negl_u32)
{
    T0 = -T0;
    T1 = -T1;
    FORCE_RET();
}

/* Negate the 64-bit value T1:T0 as a whole. */
NEON_OP(negl_u64)
{
    uint64_t val;
    val = T0 | ((uint64_t)T1 << 32);
    val = -val;
    T0 = val;
    T1 = val >> 32;
    FORCE_RET();
}

/* Scalar operations. */
/* Duplicate the low 16 bits of T0 into both halves. */
NEON_OP(dup_low16)
{
    T0 = (T0 & 0xffff) | (T0 << 16);
    FORCE_RET();
}

/* Duplicate the high 16 bits of T0 into both halves. */
NEON_OP(dup_high16)
{
    T0 = (T0 >> 16) | (T0 & 0xffff0000);
    FORCE_RET();
}

/* Helper for VEXT: extract 32 bits from the T1:T0 pair starting at bit
   offset PARAM1 (0 < shift < 32 assumed; shift == 0 would make the
   second term shift by 32, which is undefined -- presumably the
   translator never emits that case; confirm in translate.c). */
NEON_OP(extract)
{
    int shift = PARAM1;
    T0 = (T0 >> shift) | (T1 << (32 - shift));
    FORCE_RET();
}

/* Pairwise add long.  Named type is source type. */
NEON_OP(paddl_s8)
{
    int8_t src1;
    int8_t src2;
    uint16_t result;
    src1 = T0 >> 24;
    src2 = T0 >> 16;
    result = (uint16_t)src1 + src2;
    src1 = T0 >> 8;
    src2 = T0;
    T0 = (uint16_t)((uint16_t)src1 + src2) | ((uint32_t)result << 16);
    FORCE_RET();
}

NEON_OP(paddl_u8)
{
    uint8_t src1;
    uint8_t src2;
    uint16_t result;
    src1 = T0 >> 24;
    src2 = T0 >> 16;
    result = (uint16_t)src1 + src2;
    src1 = T0 >> 8;
    src2 = T0;
    T0 = (uint16_t)((uint16_t)src1 + src2) | ((uint32_t)result << 16);
    FORCE_RET();
}

NEON_OP(paddl_s16)
{
    T0 = (uint32_t)(int16_t)T0 + (uint32_t)(int16_t)(T0 >> 16);
    FORCE_RET();
}

NEON_OP(paddl_u16)
{
    T0 = (uint32_t)(uint16_t)T0 + (uint32_t)(uint16_t)(T0 >> 16);
    FORCE_RET();
}

/* 32-bit pairwise adds produce a 64-bit result in T1:T0. */
NEON_OP(paddl_s32)
{
    int64_t tmp;
    tmp = (int64_t)(int32_t)T0 + (int64_t)(int32_t)T1;
    T0 = tmp;
    T1 = tmp >> 32;
    FORCE_RET();
}

NEON_OP(paddl_u32)
{
    uint64_t tmp;
    tmp = (uint64_t)T0 + (uint64_t)T1;
    T0 = tmp;
    T1 = tmp >> 32;
    FORCE_RET();
}

/* Count Leading Sign/Zero Bits. */
/* Leading-zero count of an 8-bit value (returns 8 for x == 0). */
static inline int do_clz8(uint8_t x)
{
    int n;
    for (n = 8; x; n--)
        x >>= 1;
    return n;
}

/* Leading-zero count of a 16-bit value (returns 16 for x == 0). */
static inline int do_clz16(uint16_t x)
{
    int n;
    for (n = 16; x; n--)
        x >>= 1;
    return n;
}

/* Per-lane CLZ on four u8 lanes of T0. */
NEON_OP(clz_u8)
{
    uint32_t result;
    uint32_t tmp;
    tmp = T0;
    result = do_clz8(tmp);
    result |= do_clz8(tmp >> 8) << 8;
    result |= do_clz8(tmp >> 16) << 16;
    result |= do_clz8(tmp >> 24) << 24;
    T0 = result;
    FORCE_RET();
}

/* Per-lane CLZ on two u16 lanes of T0. */
NEON_OP(clz_u16)
{
    uint32_t result;
    uint32_t tmp;
    tmp = T0;
    result = do_clz16(tmp);
    result |= do_clz16(tmp >> 16) << 16;
    T0 = result;
    FORCE_RET();
}

/* Per-lane count-leading-sign-bits: CLZ of the value (complemented if
   negative) minus one, excluding the sign bit itself. */
NEON_OP(cls_s8)
{
    uint32_t result;
    int8_t tmp;
    tmp = T0;
    result = do_clz8((tmp < 0) ? ~tmp : tmp) - 1;
    tmp = T0 >> 8;
    result |= (do_clz8((tmp < 0) ? ~tmp : tmp) - 1) << 8;
    tmp = T0 >> 16;
    result |= (do_clz8((tmp < 0) ? ~tmp : tmp) - 1) << 16;
    tmp = T0 >> 24;
    result |= (do_clz8((tmp < 0) ? ~tmp : tmp) - 1) << 24;
    T0 = result;
    FORCE_RET();
}

NEON_OP(cls_s16)
{
    uint32_t result;
    int16_t tmp;
    tmp = T0;
    result = do_clz16((tmp < 0) ? ~tmp : tmp) - 1;
    tmp = T0 >> 16;
    result |= (do_clz16((tmp < 0) ? ~tmp : tmp) - 1) << 16;
    T0 = result;
    FORCE_RET();
}

NEON_OP(cls_s32)
{
    int count;
    if ((int32_t)T0 < 0)
        T0 = ~T0;
    for (count = 32; T0 > 0; count--)
        T0 = T0 >> 1;
    T0 = count - 1;
    FORCE_RET();
}

/* Bit count. */
/* Classic SWAR popcount per byte: after the three steps each byte of
   T0 holds the population count of the corresponding input byte. */
NEON_OP(cnt_u8)
{
    T0 = (T0 & 0x55555555) + ((T0 >> 1) & 0x55555555);
    T0 = (T0 & 0x33333333) + ((T0 >> 2) & 0x33333333);
    T0 = (T0 & 0x0f0f0f0f) + ((T0 >> 4) & 0x0f0f0f0f);
    FORCE_RET();
}

/* Saturating negation. */
/* ??? Make these use NEON_VOP1 */
/* |x| saturating at INT8_MAX: INT8_MIN has no positive counterpart,
   so it clamps to 0x7f and sets QF. */
#define DO_QABS8(x) do { \
    if (x == (int8_t)0x80) { \
        x = 0x7f; \
        env->QF = 1; \
    } else if (x < 0) { \
        x = -x; \
    }} while (0)
NEON_OP(qabs_s8)
{
    neon_s8 vec;
    NEON_UNPACK(neon_s8, vec, T0);
    DO_QABS8(vec.v1);
    DO_QABS8(vec.v2);
    DO_QABS8(vec.v3);
    DO_QABS8(vec.v4);
    NEON_PACK(neon_s8, T0, vec);
    FORCE_RET();
}
#undef DO_QABS8

#define DO_QNEG8(x) do { \
    if (x == (int8_t)0x80) { \
        x = 0x7f; \
        env->QF = 1; \
    } else { \
        x = -x; \
    }} while (0)
NEON_OP(qneg_s8)
{
    neon_s8 vec;
    NEON_UNPACK(neon_s8, vec, T0);
    DO_QNEG8(vec.v1);
    DO_QNEG8(vec.v2);
    DO_QNEG8(vec.v3);
    DO_QNEG8(vec.v4);
    NEON_PACK(neon_s8, T0, vec);
    FORCE_RET();
}
#undef DO_QNEG8

#define DO_QABS16(x) do { \
    if (x == (int16_t)0x8000) { \
        x = 0x7fff; \
        env->QF = 1; \
    } else if (x < 0) { \
        x = -x; \
    }} while (0)
NEON_OP(qabs_s16)
{
    neon_s16 vec;
    NEON_UNPACK(neon_s16, vec, T0);
    DO_QABS16(vec.v1);
    DO_QABS16(vec.v2);
    NEON_PACK(neon_s16, T0, vec);
    FORCE_RET();
}
#undef DO_QABS16

#define DO_QNEG16(x) do { \
    if (x == (int16_t)0x8000) { \
        x = 0x7fff; \
        env->QF = 1; \
    } else { \
        x = -x; \
    }} while (0)
NEON_OP(qneg_s16)
{
    neon_s16 vec;
    NEON_UNPACK(neon_s16, vec, T0);
    DO_QNEG16(vec.v1);
    DO_QNEG16(vec.v2);
    NEON_PACK(neon_s16, T0, vec);
    FORCE_RET();
}
#undef DO_QNEG16

NEON_OP(qabs_s32)
{
    if (T0 == 0x80000000) {
        T0 = 0x7fffffff;
        env->QF = 1;
    } else if ((int32_t)T0 < 0) {
        T0 = -T0;
    }
    FORCE_RET();
}

NEON_OP(qneg_s32)
{
    if (T0 == 0x80000000) {
        T0 = 0x7fffffff;
        env->QF = 1;
    } else {
        T0 = -T0;
    }
    FORCE_RET();
}

/* Unary operations */
/* NEON_VOP1 (defined elsewhere) expands NEON_FN per lane; here it
   builds non-saturating absolute value for 4x s8 and 2x s16 lanes. */
#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src
NEON_VOP1(abs_s8, neon_s8, 4)
NEON_VOP1(abs_s16, neon_s16, 2)
NEON_OP(abs_s32)
{
    if ((int32_t)T0 < 0)
        T0 = -T0;
    FORCE_RET();
}
#undef NEON_FN

/* Transpose.  Argument order is rather strange to avoid special casing
   the translation code.  On input T0 = rm, T1 = rd.  On output
   T0 = rd, T1 = rm. */
NEON_OP(trn_u8)
{
    uint32_t rd;
    uint32_t rm;
    rd = ((T0 & 0x00ff00ff) << 8) | (T1 & 0x00ff00ff);
    rm = ((T1 & 0xff00ff00) >> 8) | (T0 & 0xff00ff00);
    T0 = rd;
    T1 = rm;
    FORCE_RET();
}

NEON_OP(trn_u16)
{
    uint32_t rd;
    uint32_t rm;
    rd = (T0 << 16) | (T1 & 0xffff);
    rm = (T1 >> 16) | (T0 & 0xffff0000);
    T0 = rd;
    T1 = rm;
    FORCE_RET();
}

/* Worker routines for zip and unzip. */
NEON_OP(unzip_u8)
{
    uint32_t rd;
    uint32_t rm;
    rd = (T0 & 0xff) | ((T0 >> 8) & 0xff00)
         | ((T1 << 16) & 0xff0000) | ((T1 << 8) & 0xff000000);
    rm = ((T0 >> 8) & 0xff) | ((T0 >> 16) & 0xff00)
         | ((T1 << 8) & 0xff0000) | (T1 & 0xff000000);
    T0 = rd;
    T1 = rm;
    FORCE_RET();
}

NEON_OP(zip_u8)
{
    uint32_t rd;
    uint32_t rm;
    rd = (T0 & 0xff) | ((T1 << 8) & 0xff00)
         | ((T0 << 16) & 0xff0000) | ((T1 << 24) & 0xff000000);
    rm = ((T0 >> 16) & 0xff) | ((T1 >> 8) & 0xff00)
         | ((T0 >> 8) & 0xff0000) | (T1 & 0xff000000);
    T0 = rd;
    T1 = rm;
    FORCE_RET();
}

NEON_OP(zip_u16)
{
    uint32_t tmp;
    tmp = (T0 & 0xffff) | (T1 << 16);
    T1 = (T1 & 0xffff0000) | (T0 >> 16);
    T0 = tmp;
    FORCE_RET();
}

/* Reciprocal/root estimate.  The real work is done in C helpers
   (declared elsewhere); these ops just marshal the operand. */
NEON_OP(recpe_u32)
{
    T0 = helper_recpe_u32(T0);
}

NEON_OP(rsqrte_u32)
{
    T0 = helper_rsqrte_u32(T0);
}

NEON_OP(recpe_f32)
{
    FT0s = helper_recpe_f32(FT0s);
}

NEON_OP(rsqrte_f32)
{
    FT0s = helper_rsqrte_f32(FT0s);
}

/* Table lookup.  This accesses the register file directly (inside
   helper_neon_tbl); PARAM1/PARAM2 are translation-time immediates. */
NEON_OP(tbl)
{
    helper_neon_tbl(PARAM1, PARAM2);
}

/* Broadcast the byte at bit offset PARAM1 of T0 into all four bytes. */
NEON_OP(dup_u8)
{
    T0 = (T0 >> PARAM1) & 0xff;
    T0 |= T0 << 8;
    T0 |= T0 << 16;
    FORCE_RET();
}

/* Helpers for element load/store. */
/* Insert T0 into T2 at bit offset PARAM1; PARAM2 masks the bits of T2
   to keep (i.e. everything outside the destination element). */
NEON_OP(insert_elt)
{
    int shift = PARAM1;
    uint32_t mask = PARAM2;
    T2 = (T2 & mask) | (T0 << shift);
    FORCE_RET();
}

/* Extract the element selected by mask PARAM2 from T2, right-aligned
   by shift PARAM1, into T0. */
NEON_OP(extract_elt)
{
    int shift = PARAM1;
    uint32_t mask = PARAM2;
    T0 = (T2 & mask) >> shift;
    FORCE_RET();
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -