
📄 dsputil_mmx.c

📁 Ingenic's early uC/OS system (only the early versions were not packaged into libraries), with MPLAYER, a file system, image decoding and browsing, an e-book reader, and audio recording. If you want to learn uC/OS and know a good thing when you see it, download this. (tags: russblock fmradio explore set)
💻 C
📖 Page 1 of 5
    SBUTTERFLY(h,b,c,dq,dqa)\
    SBUTTERFLY(e,g,b,dq,dqa)\
    SBUTTERFLY(d,f,g,dq,dqa)\
    SBUTTERFLY(a,e,f,qdq,dqa)\
    SBUTTERFLY(h,d,e,qdq,dqa)\
    "movdqa "#h", 16"#t"              \n\t"\
    "movdqa "#t", "#h"                \n\t"\
    SBUTTERFLY(h,b,d,qdq,dqa)\
    SBUTTERFLY(c,g,b,qdq,dqa)\
    "movdqa 16"#t", "#g"              \n\t"
#endif

#define LBUTTERFLY2(a1,b1,a2,b2)\
    "paddw " #b1 ", " #a1 "           \n\t"\
    "paddw " #b2 ", " #a2 "           \n\t"\
    "paddw " #b1 ", " #b1 "           \n\t"\
    "paddw " #b2 ", " #b2 "           \n\t"\
    "psubw " #a1 ", " #b1 "           \n\t"\
    "psubw " #a2 ", " #b2 "           \n\t"

#define HADAMARD8(m0, m1, m2, m3, m4, m5, m6, m7)\
        LBUTTERFLY2(m0, m1, m2, m3)\
        LBUTTERFLY2(m4, m5, m6, m7)\
        LBUTTERFLY2(m0, m2, m1, m3)\
        LBUTTERFLY2(m4, m6, m5, m7)\
        LBUTTERFLY2(m0, m4, m1, m5)\
        LBUTTERFLY2(m2, m6, m3, m7)\

#define HADAMARD48 HADAMARD8(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm6, %%mm7)

#define MMABS_MMX(a,z)\
    "pxor " #z ", " #z "              \n\t"\
    "pcmpgtw " #a ", " #z "           \n\t"\
    "pxor " #z ", " #a "              \n\t"\
    "psubw " #z ", " #a "             \n\t"

#define MMABS_MMX2(a,z)\
    "pxor " #z ", " #z "              \n\t"\
    "psubw " #a ", " #z "             \n\t"\
    "pmaxsw " #z ", " #a "            \n\t"

#define MMABS_SSSE3(a,z)\
    "pabsw " #a ", " #a "             \n\t"

#define MMABS_SUM(a,z, sum)\
    MMABS(a,z)\
    "paddusw " #a ", " #sum "         \n\t"

#define MMABS_SUM_8x8_NOSPILL\
    MMABS(%%xmm0, %%xmm8)\
    MMABS(%%xmm1, %%xmm9)\
    MMABS_SUM(%%xmm2, %%xmm8, %%xmm0)\
    MMABS_SUM(%%xmm3, %%xmm9, %%xmm1)\
    MMABS_SUM(%%xmm4, %%xmm8, %%xmm0)\
    MMABS_SUM(%%xmm5, %%xmm9, %%xmm1)\
    MMABS_SUM(%%xmm6, %%xmm8, %%xmm0)\
    MMABS_SUM(%%xmm7, %%xmm9, %%xmm1)\
    "paddusw %%xmm1, %%xmm0           \n\t"

#ifdef ARCH_X86_64
#define MMABS_SUM_8x8_SSE2 MMABS_SUM_8x8_NOSPILL
#else
#define MMABS_SUM_8x8_SSE2\
    "movdqa %%xmm7, (%1)              \n\t"\
    MMABS(%%xmm0, %%xmm7)\
    MMABS(%%xmm1, %%xmm7)\
    MMABS_SUM(%%xmm2, %%xmm7, %%xmm0)\
    MMABS_SUM(%%xmm3, %%xmm7, %%xmm1)\
    MMABS_SUM(%%xmm4, %%xmm7, %%xmm0)\
    MMABS_SUM(%%xmm5, %%xmm7, %%xmm1)\
    MMABS_SUM(%%xmm6, %%xmm7, %%xmm0)\
    "movdqa (%1), %%xmm2              \n\t"\
    MMABS_SUM(%%xmm2, %%xmm7, %%xmm1)\
    "paddusw %%xmm1, %%xmm0           \n\t"
#endif

#define LOAD4(o, a, b, c, d)\
    "movq "#o"(%1),    "#a"           \n\t"\
    "movq "#o"+8(%1),  "#b"           \n\t"\
    "movq "#o"+16(%1), "#c"           \n\t"\
    "movq "#o"+24(%1), "#d"           \n\t"\

#define STORE4(o, a, b, c, d)\
    "movq "#a", "#o"(%1)              \n\t"\
    "movq "#b", "#o"+8(%1)            \n\t"\
    "movq "#c", "#o"+16(%1)           \n\t"\
    "movq "#d", "#o"+24(%1)           \n\t"\

/* FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can get up to
 * about 100k on extreme inputs. But that's very unlikely to occur in natural video,
 * and it's even more unlikely to not have any alternative mvs/modes with lower cost. */
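
/* Illustrative sketch, not part of the original dsputil_mmx.c (helper names
 * are hypothetical): the branchless absolute value that MMABS_MMX above
 * performs on each 16-bit lane.  pcmpgtw builds an all-ones mask for
 * negative lanes, then xor + subtract is the two's-complement negate.
 * Like the MMX code, it leaves -32768 unchanged. */
static inline int16_t mmabs_scalar_sketch(int16_t a)
{
    int16_t z = a < 0 ? -1 : 0;    /* "pcmpgtw a, z": mask = (0 > a) */
    return (int16_t)((a ^ z) - z); /* "pxor z, a" then "psubw z, a"  */
}

/* Likewise a sketch of the HSUM_* reductions defined below: paddusw is an
 * unsigned saturating add, and a reduction tree of saturating adds over
 * non-negative lanes equals the exact sum clamped to 65535 -- the 64k
 * ceiling the FIXME above refers to. */
static inline int hsum_saturate_sketch(const uint16_t lanes[4])
{
    unsigned sum = lanes[0] + lanes[1] + lanes[2] + lanes[3];
    return sum > 0xFFFF ? 0xFFFF : (int)sum;
}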

#define HSUM_MMX(a, t, dst)\
    "movq "#a", "#t"                  \n\t"\
    "psrlq $32, "#a"                  \n\t"\
    "paddusw "#t", "#a"               \n\t"\
    "movq "#a", "#t"                  \n\t"\
    "psrlq $16, "#a"                  \n\t"\
    "paddusw "#t", "#a"               \n\t"\
    "movd "#a", "#dst"                \n\t"\

#define HSUM_MMX2(a, t, dst)\
    "pshufw $0x0E, "#a", "#t"         \n\t"\
    "paddusw "#t", "#a"               \n\t"\
    "pshufw $0x01, "#a", "#t"         \n\t"\
    "paddusw "#t", "#a"               \n\t"\
    "movd "#a", "#dst"                \n\t"\

#define HSUM_SSE2(a, t, dst)\
    "movhlps "#a", "#t"               \n\t"\
    "paddusw "#t", "#a"               \n\t"\
    "pshuflw $0x0E, "#a", "#t"        \n\t"\
    "paddusw "#t", "#a"               \n\t"\
    "pshuflw $0x01, "#a", "#t"        \n\t"\
    "paddusw "#t", "#a"               \n\t"\
    "movd "#a", "#dst"                \n\t"\

#define HADAMARD8_DIFF_MMX(cpu) \
static int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){\
    DECLARE_ALIGNED_8(uint64_t, temp[13]);\
    int sum;\
\
    assert(h==8);\
\
    DIFF_PIXELS_4x8(src1, src2, stride, temp[0]);\
\
    asm volatile(\
        HADAMARD48\
\
        "movq %%mm7, 96(%1)             \n\t"\
\
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)\
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)\
\
        "movq 96(%1), %%mm7             \n\t"\
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)\
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)\
\
        : "=r" (sum)\
        : "r"(temp)\
    );\
\
    DIFF_PIXELS_4x8(src1+4, src2+4, stride, temp[4]);\
\
    asm volatile(\
        HADAMARD48\
\
        "movq %%mm7, 96(%1)             \n\t"\
\
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)\
        STORE4(32, %%mm0, %%mm3, %%mm7, %%mm2)\
\
        "movq 96(%1), %%mm7             \n\t"\
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)\
        "movq %%mm7, %%mm5              \n\t"/*FIXME remove*/\
        "movq %%mm6, %%mm7              \n\t"\
        "movq %%mm0, %%mm6              \n\t"\
\
        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)\
\
        HADAMARD48\
        "movq %%mm7, 64(%1)             \n\t"\
        MMABS(%%mm0, %%mm7)\
        MMABS(%%mm1, %%mm7)\
        MMABS_SUM(%%mm2, %%mm7, %%mm0)\
        MMABS_SUM(%%mm3, %%mm7, %%mm1)\
        MMABS_SUM(%%mm4, %%mm7, %%mm0)\
        MMABS_SUM(%%mm5, %%mm7, %%mm1)\
        MMABS_SUM(%%mm6, %%mm7, %%mm0)\
        "movq 64(%1), %%mm2             \n\t"\
        MMABS_SUM(%%mm2, %%mm7, %%mm1)\
        "paddusw %%mm1, %%mm0           \n\t"\
        "movq %%mm0, 64(%1)             \n\t"\
\
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)\
        LOAD4(32, %%mm4, %%mm5, %%mm6, %%mm7)\
\
        HADAMARD48\
        "movq %%mm7, (%1)               \n\t"\
        MMABS(%%mm0, %%mm7)\
        MMABS(%%mm1, %%mm7)\
        MMABS_SUM(%%mm2, %%mm7, %%mm0)\
        MMABS_SUM(%%mm3, %%mm7, %%mm1)\
        MMABS_SUM(%%mm4, %%mm7, %%mm0)\
        MMABS_SUM(%%mm5, %%mm7, %%mm1)\
        MMABS_SUM(%%mm6, %%mm7, %%mm0)\
        "movq (%1), %%mm2               \n\t"\
        MMABS_SUM(%%mm2, %%mm7, %%mm1)\
        "paddusw 64(%1), %%mm0          \n\t"\
        "paddusw %%mm1, %%mm0           \n\t"\
\
        HSUM(%%mm0, %%mm1, %0)\
\
        : "=r" (sum)\
        : "r"(temp)\
    );\
    return sum&0xFFFF;\
}\
WARPER8_16_SQ(hadamard8_diff_##cpu, hadamard8_diff16_##cpu)
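
/* Illustrative sketch, not part of the original file (name is hypothetical),
 * of the quantity hadamard8_diff_* computes: the SATD of an 8x8 block --
 * take the pixel difference, apply a 2-D 8-point Hadamard transform (three
 * butterfly passes per dimension; the asm's LBUTTERFLY2 differs only in
 * coefficient sign/order, which the final absolute sum ignores), then total
 * the absolute coefficients.  The asm additionally saturates its partial
 * sums at 64k, per the FIXME above. */
static int hadamard8_diff_sketch(const uint8_t *src1, const uint8_t *src2, int stride)
{
    int m[8][8], i, j, k, l, sum = 0;

    for (i = 0; i < 8; i++)
        for (j = 0; j < 8; j++)
            m[i][j] = src1[i*stride + j] - src2[i*stride + j];

    for (k = 1; k < 8; k <<= 1)             /* butterflies along each row */
        for (i = 0; i < 8; i++)
            for (j = 0; j < 8; j += 2*k)
                for (l = j; l < j + k; l++) {
                    int a = m[i][l], b = m[i][l+k];
                    m[i][l]   = a + b;
                    m[i][l+k] = a - b;
                }

    for (k = 1; k < 8; k <<= 1)             /* butterflies along each column */
        for (j = 0; j < 8; j++)
            for (i = 0; i < 8; i += 2*k)
                for (l = i; l < i + k; l++) {
                    int a = m[l][j], b = m[l+k][j];
                    m[l][j]   = a + b;
                    m[l+k][j] = a - b;
                }

    for (i = 0; i < 8; i++)
        for (j = 0; j < 8; j++)
            sum += m[i][j] < 0 ? -m[i][j] : m[i][j];
    return sum;
}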

#define HADAMARD8_DIFF_SSE2(cpu) \
static int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){\
    DECLARE_ALIGNED_16(uint64_t, temp[4]);\
    int sum;\
\
    assert(h==8);\
\
    DIFF_PIXELS_8x8(src1, src2, stride, temp[0]);\
\
    asm volatile(\
        HADAMARD8(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7)\
        TRANSPOSE8(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7, (%1))\
        HADAMARD8(%%xmm0, %%xmm5, %%xmm7, %%xmm3, %%xmm6, %%xmm4, %%xmm2, %%xmm1)\
        MMABS_SUM_8x8\
        HSUM_SSE2(%%xmm0, %%xmm1, %0)\
        : "=r" (sum)\
        : "r"(temp)\
    );\
    return sum&0xFFFF;\
}\
WARPER8_16_SQ(hadamard8_diff_##cpu, hadamard8_diff16_##cpu)

#define MMABS(a,z)         MMABS_MMX(a,z)
#define HSUM(a,t,dst)      HSUM_MMX(a,t,dst)
HADAMARD8_DIFF_MMX(mmx)
#undef MMABS
#undef HSUM

#define MMABS(a,z)         MMABS_MMX2(a,z)
#define MMABS_SUM_8x8      MMABS_SUM_8x8_SSE2
#define HSUM(a,t,dst)      HSUM_MMX2(a,t,dst)
HADAMARD8_DIFF_MMX(mmx2)
HADAMARD8_DIFF_SSE2(sse2)
#undef MMABS
#undef MMABS_SUM_8x8
#undef HSUM

#ifdef HAVE_SSSE3
#define MMABS(a,z)         MMABS_SSSE3(a,z)
#define MMABS_SUM_8x8      MMABS_SUM_8x8_NOSPILL
HADAMARD8_DIFF_SSE2(ssse3)
#undef MMABS
#undef MMABS_SUM_8x8
#endif

#define DCT_SAD4(m,mm,o)\
    "mov"#m" "#o"+ 0(%1), "#mm"2      \n\t"\
    "mov"#m" "#o"+16(%1), "#mm"3      \n\t"\
    "mov"#m" "#o"+32(%1), "#mm"4      \n\t"\
    "mov"#m" "#o"+48(%1), "#mm"5      \n\t"\
    MMABS_SUM(mm##2, mm##6, mm##0)\
    MMABS_SUM(mm##3, mm##7, mm##1)\
    MMABS_SUM(mm##4, mm##6, mm##0)\
    MMABS_SUM(mm##5, mm##7, mm##1)\

#define DCT_SAD_MMX\
    "pxor %%mm0, %%mm0                \n\t"\
    "pxor %%mm1, %%mm1                \n\t"\
    DCT_SAD4(q, %%mm, 0)\
    DCT_SAD4(q, %%mm, 8)\
    DCT_SAD4(q, %%mm, 64)\
    DCT_SAD4(q, %%mm, 72)\
    "paddusw %%mm1, %%mm0             \n\t"\
    HSUM(%%mm0, %%mm1, %0)

#define DCT_SAD_SSE2\
    "pxor %%xmm0, %%xmm0              \n\t"\
    "pxor %%xmm1, %%xmm1              \n\t"\
    DCT_SAD4(dqa, %%xmm, 0)\
    DCT_SAD4(dqa, %%xmm, 64)\
    "paddusw %%xmm1, %%xmm0           \n\t"\
    HSUM(%%xmm0, %%xmm1, %0)

#define DCT_SAD_FUNC(cpu) \
static int sum_abs_dctelem_##cpu(DCTELEM *block){\
    int sum;\
    asm volatile(\
        DCT_SAD\
        :"=r"(sum)\
        :"r"(block)\
    );\
    return sum&0xFFFF;\
}

#define DCT_SAD       DCT_SAD_MMX
#define HSUM(a,t,dst) HSUM_MMX(a,t,dst)
#define MMABS(a,z)    MMABS_MMX(a,z)
DCT_SAD_FUNC(mmx)
#undef MMABS
#undef HSUM

#define HSUM(a,t,dst) HSUM_MMX2(a,t,dst)
#define MMABS(a,z)    MMABS_MMX2(a,z)
DCT_SAD_FUNC(mmx2)
#undef HSUM
#undef DCT_SAD

#define DCT_SAD       DCT_SAD_SSE2
#define HSUM(a,t,dst) HSUM_SSE2(a,t,dst)
DCT_SAD_FUNC(sse2)
#undef MMABS

#ifdef HAVE_SSSE3
#define MMABS(a,z)    MMABS_SSSE3(a,z)
DCT_SAD_FUNC(ssse3)
#undef MMABS
#endif
#undef HSUM
#undef DCT_SAD

static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int size){
    int sum;
    long i=size;
    asm volatile(
        "pxor %%mm4, %%mm4 \n"
        "1: \n"
        "sub $8, %0 \n"
        "movq (%2,%0), %%mm2 \n"
        "movq (%3,%0,2), %%mm0 \n"
        "movq 8(%3,%0,2), %%mm1 \n"
        "punpckhbw %%mm2, %%mm3 \n"
        "punpcklbw %%mm2, %%mm2 \n"
        "psraw $8, %%mm3 \n"
        "psraw $8, %%mm2 \n"
        "psubw %%mm3, %%mm1 \n"
        "psubw %%mm2, %%mm0 \n"
        "pmaddwd %%mm1, %%mm1 \n"
        "pmaddwd %%mm0, %%mm0 \n"
        "paddd %%mm1, %%mm4 \n"
        "paddd %%mm0, %%mm4 \n"
        "jg 1b \n"
        "movq %%mm4, %%mm3 \n"
        "psrlq $32, %%mm3 \n"
        "paddd %%mm3, %%mm4 \n"
        "movd %%mm4, %1 \n"
        :"+r"(i), "=r"(sum)
        :"r"(pix1), "r"(pix2)
    );
    return sum;
}
#endif //CONFIG_ENCODERS
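
/* Illustrative sketch, not part of the original file (name is hypothetical):
 * the scalar equivalent of ssd_int8_vs_int16_mmx above -- a sum of squared
 * differences between an int8_t and an int16_t array.  The MMX loop walks
 * backwards eight elements per iteration, widening bytes to words with a
 * punpck*bw/psraw $8 pair before psubw and pmaddwd. */
static int ssd_int8_vs_int16_sketch(const int8_t *pix1, const int16_t *pix2, int size)
{
    int sum = 0, i;
    for (i = 0; i < size; i++) {
        int d = pix1[i] - pix2[i];
        sum += d * d;
    }
    return sum;
}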

#define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
#define put_no_rnd_pixels16_mmx(a,b,c,d) put_pixels16_mmx(a,b,c,d)

#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
        "paddw " #m4 ", " #m3 "           \n\t" /* x1 */\
        "movq "MANGLE(ff_pw_20)", %%mm4   \n\t" /* 20 */\
        "pmullw " #m3 ", %%mm4            \n\t" /* 20x1 */\
        "movq "#in7", " #m3 "             \n\t" /* d */\
        "movq "#in0", %%mm5               \n\t" /* D */\
        "paddw " #m3 ", %%mm5             \n\t" /* x4 */\
        "psubw %%mm5, %%mm4               \n\t" /* 20x1 - x4 */\
        "movq "#in1", %%mm5               \n\t" /* C */\
        "movq "#in2", %%mm6               \n\t" /* B */\
        "paddw " #m6 ", %%mm5             \n\t" /* x3 */\
        "paddw " #m5 ", %%mm6             \n\t" /* x2 */\
        "paddw %%mm6, %%mm6               \n\t" /* 2x2 */\
        "psubw %%mm6, %%mm5               \n\t" /* -2x2 + x3 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm5  \n\t" /* -6x2 + 3x3 */\
        "paddw " #rnd ", %%mm4            \n\t" /* x2 */\
        "paddw %%mm4, %%mm5               \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
        "psraw $5, %%mm5                  \n\t"\
        "packuswb %%mm5, %%mm5            \n\t"\
        OP(%%mm5, out, %%mm7, d)

#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c
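
/* The listing breaks off above (this is page 1 of 5).  Illustrative sketch,
 * not part of the original file (name is hypothetical), of what one output
 * pixel of the QPEL_V_LOW / qpel16_h_lowpass code works out to: the MPEG-4
 * quarter-pel 8-tap lowpass with coefficients (-1, 3, -6, 20, 20, -6, 3, -1).
 * s[-3..4] are the source pixels around the half-pel position; rnd is the
 * rounding constant (16, or 15 for the no-rounding variants). */
static uint8_t qpel_lowpass_pixel_sketch(const uint8_t *s, int rnd)
{
    int x1 = s[ 0] + s[1];  /* centre pair, weight 20 */
    int x2 = s[-1] + s[2];  /* weight -6 */
    int x3 = s[-2] + s[3];  /* weight  3 */
    int x4 = s[-3] + s[4];  /* weight -1 */
    int v  = (20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5;
    return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v); /* packuswb clamp */
}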
