📄 dsputil_mmx.c
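For orientation, here is a plain-C sketch of the 4x4 byte transpose that the MMX transpose4x4() below performs with punpcklbw/punpcklwd/punpckhdq; the standalone transpose4x4_ref name and this scalar version are illustrative assumptions only and are not part of dsputil_mmx.c:

#include <stdint.h>

/* Illustrative reference only (not from the original file): writes
 * dst[i][j] = src[j][i] for a 4x4 byte block, i.e. the same result the
 * MMX transpose4x4() below produces with pack/unpack instructions. */
static void transpose4x4_ref(uint8_t *dst, const uint8_t *src,
                             int dst_stride, int src_stride)
{
    int i, j;
    for (i = 0; i < 4; i++)        /* destination row    */
        for (j = 0; j < 4; j++)    /* destination column */
            dst[i*dst_stride + j] = src[j*src_stride + i];
}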
"paddb %%mm1, %%mm6 \n\t"static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){ if(ENABLE_ANY_H263) { const int strength= ff_h263_loop_filter_strength[qscale]; asm volatile( H263_LOOP_FILTER "movq %%mm3, %1 \n\t" "movq %%mm4, %2 \n\t" "movq %%mm5, %0 \n\t" "movq %%mm6, %3 \n\t" : "+m" (*(uint64_t*)(src - 2*stride)), "+m" (*(uint64_t*)(src - 1*stride)), "+m" (*(uint64_t*)(src + 0*stride)), "+m" (*(uint64_t*)(src + 1*stride)) : "g" (2*strength), "m"(ff_pb_FC) ); }}static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){ asm volatile( //FIXME could save 1 instruction if done as 8x4 ... "movd %4, %%mm0 \n\t" "movd %5, %%mm1 \n\t" "movd %6, %%mm2 \n\t" "movd %7, %%mm3 \n\t" "punpcklbw %%mm1, %%mm0 \n\t" "punpcklbw %%mm3, %%mm2 \n\t" "movq %%mm0, %%mm1 \n\t" "punpcklwd %%mm2, %%mm0 \n\t" "punpckhwd %%mm2, %%mm1 \n\t" "movd %%mm0, %0 \n\t" "punpckhdq %%mm0, %%mm0 \n\t" "movd %%mm0, %1 \n\t" "movd %%mm1, %2 \n\t" "punpckhdq %%mm1, %%mm1 \n\t" "movd %%mm1, %3 \n\t" : "=m" (*(uint32_t*)(dst + 0*dst_stride)), "=m" (*(uint32_t*)(dst + 1*dst_stride)), "=m" (*(uint32_t*)(dst + 2*dst_stride)), "=m" (*(uint32_t*)(dst + 3*dst_stride)) : "m" (*(uint32_t*)(src + 0*src_stride)), "m" (*(uint32_t*)(src + 1*src_stride)), "m" (*(uint32_t*)(src + 2*src_stride)), "m" (*(uint32_t*)(src + 3*src_stride)) );}static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){ if(ENABLE_ANY_H263) { const int strength= ff_h263_loop_filter_strength[qscale]; DECLARE_ALIGNED(8, uint64_t, temp[4]); uint8_t *btemp= (uint8_t*)temp; src -= 2; transpose4x4(btemp , src , 8, stride); transpose4x4(btemp+4, src + 4*stride, 8, stride); asm volatile( H263_LOOP_FILTER // 5 3 4 6 : "+m" (temp[0]), "+m" (temp[1]), "+m" (temp[2]), "+m" (temp[3]) : "g" (2*strength), "m"(ff_pb_FC) ); asm volatile( "movq %%mm5, %%mm1 \n\t" "movq %%mm4, %%mm0 \n\t" "punpcklbw %%mm3, %%mm5 \n\t" "punpcklbw %%mm6, %%mm4 \n\t" "punpckhbw %%mm3, %%mm1 \n\t" "punpckhbw %%mm6, %%mm0 \n\t" "movq %%mm5, %%mm3 \n\t" "movq %%mm1, %%mm6 \n\t" "punpcklwd %%mm4, %%mm5 \n\t" "punpcklwd %%mm0, %%mm1 \n\t" "punpckhwd %%mm4, %%mm3 \n\t" "punpckhwd %%mm0, %%mm6 \n\t" "movd %%mm5, (%0) \n\t" "punpckhdq %%mm5, %%mm5 \n\t" "movd %%mm5, (%0,%2) \n\t" "movd %%mm3, (%0,%2,2) \n\t" "punpckhdq %%mm3, %%mm3 \n\t" "movd %%mm3, (%0,%3) \n\t" "movd %%mm1, (%1) \n\t" "punpckhdq %%mm1, %%mm1 \n\t" "movd %%mm1, (%1,%2) \n\t" "movd %%mm6, (%1,%2,2) \n\t" "punpckhdq %%mm6, %%mm6 \n\t" "movd %%mm6, (%1,%3) \n\t" :: "r" (src), "r" (src + 4*stride), "r" ((x86_reg) stride ), "r" ((x86_reg)(3*stride)) ); }}/* draw the edges of width 'w' of an image of size width, height this mmx version can only handle w==8 || w==16 */static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w){ uint8_t *ptr, *last_line; int i; last_line = buf + (height - 1) * wrap; /* left and right */ ptr = buf; if(w==8) { asm volatile( "1: \n\t" "movd (%0), %%mm0 \n\t" "punpcklbw %%mm0, %%mm0 \n\t" "punpcklwd %%mm0, %%mm0 \n\t" "punpckldq %%mm0, %%mm0 \n\t" "movq %%mm0, -8(%0) \n\t" "movq -8(%0, %2), %%mm1 \n\t" "punpckhbw %%mm1, %%mm1 \n\t" "punpckhwd %%mm1, %%mm1 \n\t" "punpckhdq %%mm1, %%mm1 \n\t" "movq %%mm1, (%0, %2) \n\t" "add %1, %0 \n\t" "cmp %3, %0 \n\t" " jb 1b \n\t" : "+r" (ptr) : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height) ); } else { asm volatile( "1: \n\t" "movd (%0), %%mm0 \n\t" "punpcklbw %%mm0, %%mm0 \n\t" "punpcklwd %%mm0, %%mm0 \n\t" "punpckldq %%mm0, %%mm0 \n\t" "movq %%mm0, -8(%0) \n\t" "movq %%mm0, -16(%0) 
\n\t" "movq -8(%0, %2), %%mm1 \n\t" "punpckhbw %%mm1, %%mm1 \n\t" "punpckhwd %%mm1, %%mm1 \n\t" "punpckhdq %%mm1, %%mm1 \n\t" "movq %%mm1, (%0, %2) \n\t" "movq %%mm1, 8(%0, %2) \n\t" "add %1, %0 \n\t" "cmp %3, %0 \n\t" " jb 1b \n\t" : "+r" (ptr) : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height) ); } for(i=0;i<w;i+=4) { /* top and bottom (and hopefully also the corners) */ ptr= buf - (i + 1) * wrap - w; asm volatile( "1: \n\t" "movq (%1, %0), %%mm0 \n\t" "movq %%mm0, (%0) \n\t" "movq %%mm0, (%0, %2) \n\t" "movq %%mm0, (%0, %2, 2) \n\t" "movq %%mm0, (%0, %3) \n\t" "add $8, %0 \n\t" "cmp %4, %0 \n\t" " jb 1b \n\t" : "+r" (ptr) : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w) ); ptr= last_line + (i + 1) * wrap - w; asm volatile( "1: \n\t" "movq (%1, %0), %%mm0 \n\t" "movq %%mm0, (%0) \n\t" "movq %%mm0, (%0, %2) \n\t" "movq %%mm0, (%0, %2, 2) \n\t" "movq %%mm0, (%0, %3) \n\t" "add $8, %0 \n\t" "cmp %4, %0 \n\t" " jb 1b \n\t" : "+r" (ptr) : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w) ); }}#define PAETH(cpu, abs3)\void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\{\ x86_reg i = -bpp;\ x86_reg end = w-3;\ asm volatile(\ "pxor %%mm7, %%mm7 \n"\ "movd (%1,%0), %%mm0 \n"\ "movd (%2,%0), %%mm1 \n"\ "punpcklbw %%mm7, %%mm0 \n"\ "punpcklbw %%mm7, %%mm1 \n"\ "add %4, %0 \n"\ "1: \n"\ "movq %%mm1, %%mm2 \n"\ "movd (%2,%0), %%mm1 \n"\ "movq %%mm2, %%mm3 \n"\ "punpcklbw %%mm7, %%mm1 \n"\ "movq %%mm2, %%mm4 \n"\ "psubw %%mm1, %%mm3 \n"\ "psubw %%mm0, %%mm4 \n"\ "movq %%mm3, %%mm5 \n"\ "paddw %%mm4, %%mm5 \n"\ abs3\ "movq %%mm4, %%mm6 \n"\ "pminsw %%mm5, %%mm6 \n"\ "pcmpgtw %%mm6, %%mm3 \n"\ "pcmpgtw %%mm5, %%mm4 \n"\ "movq %%mm4, %%mm6 \n"\ "pand %%mm3, %%mm4 \n"\ "pandn %%mm3, %%mm6 \n"\ "pandn %%mm0, %%mm3 \n"\ "movd (%3,%0), %%mm0 \n"\ "pand %%mm1, %%mm6 \n"\ "pand %%mm4, %%mm2 \n"\ "punpcklbw %%mm7, %%mm0 \n"\ "movq %6, %%mm5 \n"\ "paddw %%mm6, %%mm0 \n"\ "paddw %%mm2, %%mm3 \n"\ "paddw %%mm3, %%mm0 \n"\ "pand %%mm5, %%mm0 \n"\ "movq %%mm0, %%mm3 \n"\ "packuswb %%mm3, %%mm3 \n"\ "movd %%mm3, (%1,%0) \n"\ "add %4, %0 \n"\ "cmp %5, %0 \n"\ "jle 1b \n"\ :"+r"(i)\ :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\ "m"(ff_pw_255)\ :"memory"\ );\}#define ABS3_MMX2\ "psubw %%mm5, %%mm7 \n"\ "pmaxsw %%mm7, %%mm5 \n"\ "pxor %%mm6, %%mm6 \n"\ "pxor %%mm7, %%mm7 \n"\ "psubw %%mm3, %%mm6 \n"\ "psubw %%mm4, %%mm7 \n"\ "pmaxsw %%mm6, %%mm3 \n"\ "pmaxsw %%mm7, %%mm4 \n"\ "pxor %%mm7, %%mm7 \n"#define ABS3_SSSE3\ "pabsw %%mm3, %%mm3 \n"\ "pabsw %%mm4, %%mm4 \n"\ "pabsw %%mm5, %%mm5 \n"PAETH(mmx2, ABS3_MMX2)#ifdef HAVE_SSSE3PAETH(ssse3, ABS3_SSSE3)#endif#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\ "paddw " #m4 ", " #m3 " \n\t" /* x1 */\ "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\ "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\ "movq "#in7", " #m3 " \n\t" /* d */\ "movq "#in0", %%mm5 \n\t" /* D */\ "paddw " #m3 ", %%mm5 \n\t" /* x4 */\ "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\ "movq "#in1", %%mm5 \n\t" /* C */\ "movq "#in2", %%mm6 \n\t" /* B */\ "paddw " #m6 ", %%mm5 \n\t" /* x3 */\ "paddw " #m5 ", %%mm6 \n\t" /* x2 */\ "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\ "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\ "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\ "paddw " #rnd ", %%mm4 \n\t" /* x2 */\ "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\ "psraw $5, %%mm5 \n\t"\ "packuswb %%mm5, %%mm5 
\n\t"\ OP(%%mm5, out, %%mm7, d)#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ uint64_t temp;\\ asm volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "1: \n\t"\ "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\ "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\ "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\