deinterlace.c
        {
            for( ; p_out < p_out_end ; )
            {
                Merge( p_out, p_in, p_in + p_pic->p[i_plane].i_pitch,
                       p_pic->p[i_plane].i_pitch );
                p_out += p_pic->p[i_plane].i_pitch;
                p_in += p_pic->p[i_plane].i_pitch;
            }
        }
        else
        {
            for( ; p_out < p_out_end ; )
            {
                Merge( p_out, p_in, p_in + p_pic->p[i_plane].i_pitch,
                       p_pic->p[i_plane].i_pitch );
                p_out += p_pic->p[i_plane].i_pitch;
                p_in += 2*p_pic->p[i_plane].i_pitch;
            }
        }
        break;
        }
    }

    EndMerge();
}
#undef Merge

static void MergeGeneric( void *_p_dest, const void *_p_s1, const void *_p_s2,
                          size_t i_bytes )
{
    uint8_t *p_dest = (uint8_t *)_p_dest;
    const uint8_t *p_s1 = (const uint8_t *)_p_s1;
    const uint8_t *p_s2 = (const uint8_t *)_p_s2;
    uint8_t *p_end = p_dest + i_bytes - 8;

    /* Average 8 pixels per iteration. */
    while( p_dest < p_end )
    {
        *p_dest++ = ( (uint16_t)(*p_s1++) + (uint16_t)(*p_s2++) ) >> 1;
        *p_dest++ = ( (uint16_t)(*p_s1++) + (uint16_t)(*p_s2++) ) >> 1;
        *p_dest++ = ( (uint16_t)(*p_s1++) + (uint16_t)(*p_s2++) ) >> 1;
        *p_dest++ = ( (uint16_t)(*p_s1++) + (uint16_t)(*p_s2++) ) >> 1;
        *p_dest++ = ( (uint16_t)(*p_s1++) + (uint16_t)(*p_s2++) ) >> 1;
        *p_dest++ = ( (uint16_t)(*p_s1++) + (uint16_t)(*p_s2++) ) >> 1;
        *p_dest++ = ( (uint16_t)(*p_s1++) + (uint16_t)(*p_s2++) ) >> 1;
        *p_dest++ = ( (uint16_t)(*p_s1++) + (uint16_t)(*p_s2++) ) >> 1;
    }

    /* Trailing bytes. */
    p_end += 8;
    while( p_dest < p_end )
    {
        *p_dest++ = ( (uint16_t)(*p_s1++) + (uint16_t)(*p_s2++) ) >> 1;
    }
}

#if defined(CAN_COMPILE_MMXEXT)
static void MergeMMX( void *_p_dest, const void *_p_s1, const void *_p_s2,
                      size_t i_bytes )
{
    uint8_t *p_dest = (uint8_t *)_p_dest;
    const uint8_t *p_s1 = (const uint8_t *)_p_s1;
    const uint8_t *p_s2 = (const uint8_t *)_p_s2;
    uint8_t *p_end = p_dest + i_bytes - 8;

    while( p_dest < p_end )
    {
        __asm__ __volatile__( "movq %2,%%mm1;"
                              "pavgb %1, %%mm1;"
                              "movq %%mm1, %0"
                              : "=m" (*p_dest)
                              : "m" (*p_s1), "m" (*p_s2) );
        p_dest += 8;
        p_s1 += 8;
        p_s2 += 8;
    }

    p_end += 8;
    while( p_dest < p_end )
    {
        *p_dest++ = ( (uint16_t)(*p_s1++) + (uint16_t)(*p_s2++) ) >> 1;
    }
}
#endif

#if defined(CAN_COMPILE_SSE)
static void MergeSSE2( void *_p_dest, const void *_p_s1, const void *_p_s2,
                       size_t i_bytes )
{
    uint8_t *p_dest = (uint8_t *)_p_dest;
    const uint8_t *p_s1 = (const uint8_t *)_p_s1;
    const uint8_t *p_s2 = (const uint8_t *)_p_s2;
    uint8_t *p_end;

    /* Use C until the first source is 16-byte aligned (uintptr_t, not int,
     * so the test also works on 64-bit).  Shrink i_bytes accordingly so we
     * do not write past the end of the destination. */
    while( (uintptr_t)p_s1 % 16 )
    {
        *p_dest++ = ( (uint16_t)(*p_s1++) + (uint16_t)(*p_s2++) ) >> 1;
        i_bytes--;
    }

    p_end = p_dest + i_bytes - 16;
    while( p_dest < p_end )
    {
        __asm__ __volatile__( "movdqu %2,%%xmm1;"
                              "pavgb %1, %%xmm1;"
                              "movdqu %%xmm1, %0"
                              : "=m" (*p_dest)
                              : "m" (*p_s1), "m" (*p_s2) );
        p_dest += 16;
        p_s1 += 16;
        p_s2 += 16;
    }

    p_end += 16;
    while( p_dest < p_end )
    {
        *p_dest++ = ( (uint16_t)(*p_s1++) + (uint16_t)(*p_s2++) ) >> 1;
    }
}
#endif

#if defined(CAN_COMPILE_MMXEXT) || defined(CAN_COMPILE_SSE)
static void EndMMX( void )
{
    __asm__ __volatile__( "emms" :: );
}
#endif
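/* Note on rounding: the pavgb instruction used above (and vec_avg below)
 * rounds up, computing (a + b + 1) >> 1, while the C fallback truncates,
 * computing (a + b) >> 1.  The SIMD paths can therefore differ from
 * MergeGeneric by one LSB per pixel, which is invisible in practice.
 * A minimal self-contained check of the C averaging contract follows;
 * the test function is hypothetical and not part of the original file,
 * hence disabled. */
#if 0
#include <assert.h>
#include <string.h>
static void TestMergeGeneric( void )
{
    uint8_t a[16], b[16], out[16];
    memset( a, 10, sizeof(a) );
    memset( b, 21, sizeof(b) );
    MergeGeneric( out, a, b, sizeof(out) );
    /* Truncating average: (10 + 21) >> 1 == 15 (pavgb would yield 16). */
    for( unsigned i = 0; i < sizeof(out); i++ )
        assert( out[i] == 15 );
}
#endif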
#ifdef CAN_COMPILE_C_ALTIVEC
static void MergeAltivec( void *_p_dest, const void *_p_s1,
                          const void *_p_s2, size_t i_bytes )
{
    uint8_t *p_dest = (uint8_t *)_p_dest;
    uint8_t *p_s1   = (uint8_t *)_p_s1;
    uint8_t *p_s2   = (uint8_t *)_p_s2;
    uint8_t *p_end  = p_dest + i_bytes - 15;

    /* Use C until the first 16-byte-aligned destination pixel (uintptr_t,
     * not int, so the test also works on 64-bit). */
    while( (uintptr_t)p_dest & 0xF )
    {
        *p_dest++ = ( (uint16_t)(*p_s1++) + (uint16_t)(*p_s2++) ) >> 1;
    }

    if( ( (uintptr_t)p_s1 & 0xF ) | ( (uintptr_t)p_s2 & 0xF ) )
    {
        /* Unaligned source */
        vector unsigned char s1v, s2v, destv;
        vector unsigned char s1oldv, s2oldv, s1newv, s2newv;
        vector unsigned char perm1v, perm2v;

        perm1v = vec_lvsl( 0, p_s1 );
        perm2v = vec_lvsl( 0, p_s2 );
        s1oldv = vec_ld( 0, p_s1 );
        s2oldv = vec_ld( 0, p_s2 );

        while( p_dest < p_end )
        {
            s1newv = vec_ld( 16, p_s1 );
            s2newv = vec_ld( 16, p_s2 );
            s1v    = vec_perm( s1oldv, s1newv, perm1v );
            s2v    = vec_perm( s2oldv, s2newv, perm2v );
            s1oldv = s1newv;
            s2oldv = s2newv;
            destv  = vec_avg( s1v, s2v );
            vec_st( destv, 0, p_dest );

            p_s1   += 16;
            p_s2   += 16;
            p_dest += 16;
        }
    }
    else
    {
        /* Aligned source */
        vector unsigned char s1v, s2v, destv;

        while( p_dest < p_end )
        {
            s1v   = vec_ld( 0, p_s1 );
            s2v   = vec_ld( 0, p_s2 );
            destv = vec_avg( s1v, s2v );
            vec_st( destv, 0, p_dest );

            p_s1   += 16;
            p_s2   += 16;
            p_dest += 16;
        }
    }

    p_end += 15;
    while( p_dest < p_end )
    {
        *p_dest++ = ( (uint16_t)(*p_s1++) + (uint16_t)(*p_s2++) ) >> 1;
    }
}
#endif

/*****************************************************************************
 * RenderX: This algorithm works on an 8x8 block basis; it copies the top
 * field and applies a process to recreate the bottom field.
 * An 8x8 block is classified as:
 *  - progressive: a small (1,6,1) blend is applied
 *  - interlaced:
 *    * in the MMX version: motion estimation is done between the two
 *      fields; on a good match, motion compensation recreates the bottom
 *      field (with a small (1,6,1) blend)
 *    * otherwise: the bottom field is recreated by an edge-oriented
 *      interpolation
 *****************************************************************************/

/* XDeint8x8Detect: detect whether an 8x8 block is interlaced.
 * XXX: it needs access to an 8x10 area.
 * We use more than 8 lines to help with scrolling (text)
 * (and because XDeint8x8Frame uses line 9).
 * XXX: detection on smooth/uniform areas with noise doesn't work well,
 * but that is not really a problem because they don't have much detail
 * anyway. */
static inline int ssd( int a ) { return a*a; }

static inline int XDeint8x8DetectC( uint8_t *src, int i_src )
{
    int y, x;
    int ff, fr;
    int fc;

    /* Detect interlacing */
    fc = 0;
    for( y = 0; y < 7; y += 2 )
    {
        ff = fr = 0;
        for( x = 0; x < 8; x++ )
        {
            /* fr: SSD between adjacent lines (opposite fields),
             * ff: SSD between lines two apart (same field). */
            fr += ssd(src[      x] - src[1*i_src+x]) +
                  ssd(src[i_src+x] - src[2*i_src+x]);
            ff += ssd(src[      x] - src[2*i_src+x]) +
                  ssd(src[i_src+x] - src[3*i_src+x]);
        }
        if( ff < 6*fr/8 && fr > 32 )
            fc++;

        src += 2*i_src;
    }

    return fc < 1 ? VLC_FALSE : VLC_TRUE;
}

#ifdef CAN_COMPILE_MMXEXT
static inline int XDeint8x8DetectMMXEXT( uint8_t *src, int i_src )
{
    int y, x;
    int32_t ff, fr;
    int fc;

    /* Detect interlacing */
    fc = 0;
    pxor_r2r( mm7, mm7 );
    for( y = 0; y < 9; y += 2 )
    {
        ff = fr = 0;
        pxor_r2r( mm5, mm5 );
        pxor_r2r( mm6, mm6 );
        for( x = 0; x < 8; x += 4 )
        {
            movd_m2r( src[        x], mm0 );
            movd_m2r( src[1*i_src+x], mm1 );
            movd_m2r( src[2*i_src+x], mm2 );
            movd_m2r( src[3*i_src+x], mm3 );

            punpcklbw_r2r( mm7, mm0 );
            punpcklbw_r2r( mm7, mm1 );
            punpcklbw_r2r( mm7, mm2 );
            punpcklbw_r2r( mm7, mm3 );

            movq_r2r( mm0, mm4 );

            psubw_r2r( mm1, mm0 );
            psubw_r2r( mm2, mm4 );
            psubw_r2r( mm1, mm2 );
            psubw_r2r( mm1, mm3 );

            pmaddwd_r2r( mm0, mm0 );
            pmaddwd_r2r( mm4, mm4 );
            pmaddwd_r2r( mm2, mm2 );
            pmaddwd_r2r( mm3, mm3 );

            paddd_r2r( mm0, mm2 );
            paddd_r2r( mm4, mm3 );
            paddd_r2r( mm2, mm5 );
            paddd_r2r( mm3, mm6 );
        }

        movq_r2r( mm5, mm0 );
        psrlq_i2r( 32, mm0 );
        paddd_r2r( mm0, mm5 );
        movd_r2m( mm5, fr );

        movq_r2r( mm6, mm0 );
        psrlq_i2r( 32, mm0 );
        paddd_r2r( mm0, mm6 );
        movd_r2m( mm6, ff );

        if( ff < 6*fr/8 && fr > 32 )
            fc++;

        src += 2*i_src;
    }
    return fc;
}
#endif
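/* Usage sketch (hypothetical caller, not part of the original file): walk
 * a luma plane in 8x8 steps and count blocks the C detector classifies as
 * interlaced.  The y bound leaves room for the two extra lines the
 * detector reads below each block (see the 8x10 note above). */
#if 0
static int CountInterlacedBlocks( uint8_t *p_plane, int i_pitch,
                                  int i_width, int i_height )
{
    int i_count = 0;
    int x, y;
    for( y = 0; y + 10 <= i_height; y += 8 )
        for( x = 0; x + 8 <= i_width; x += 8 )
            if( XDeint8x8DetectC( &p_plane[y*i_pitch+x], i_pitch ) )
                i_count++;
    return i_count;
}
#endif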
/* XDeint8x8Frame: apply a small (1,6,1) blend between fields.
 * This won't destroy details, and helps if there is a bit of interlacing.
 * (It helps with panning to avoid flicker.)
 * (Uses 8x9 pixels.) */
#if 0
static inline void XDeint8x8FrameC( uint8_t *dst, int i_dst,
                                    uint8_t *src, int i_src )
{
    int y, x;

    /* Progressive */
    for( y = 0; y < 8; y += 2 )
    {
        memcpy( dst, src, 8 );
        dst += i_dst;

        for( x = 0; x < 8; x++ )
            dst[x] = (src[x] + 6*src[1*i_src+x] + src[2*i_src+x] + 4 ) >> 3;
        dst += 1*i_dst;

        src += 2*i_src;
    }
}
#endif

static inline void XDeint8x8MergeC( uint8_t *dst, int i_dst,
                                    uint8_t *src1, int i_src1,
                                    uint8_t *src2, int i_src2 )
{
    int y, x;

    /* Progressive */
    for( y = 0; y < 8; y += 2 )
    {
        memcpy( dst, src1, 8 );
        dst += i_dst;

        for( x = 0; x < 8; x++ )
            dst[x] = (src1[x] + 6*src2[x] + src1[i_src1+x] + 4 ) >> 3;
        dst += i_dst;

        src1 += i_src1;
        src2 += i_src2;
    }
}

#ifdef CAN_COMPILE_MMXEXT
static inline void XDeint8x8MergeMMXEXT( uint8_t *dst, int i_dst,
                                         uint8_t *src1, int i_src1,
                                         uint8_t *src2, int i_src2 )
{
    static const uint64_t m_4 = I64C(0x0004000400040004);
    int y, x;

    /* Progressive */
    pxor_r2r( mm7, mm7 );
    for( y = 0; y < 8; y += 2 )
    {
        for( x = 0; x < 8; x += 4 )
        {
            /* Copy the top-field line as-is. */
            movd_m2r( src1[x], mm0 );
            movd_r2m( mm0, dst[x] );

            movd_m2r( src2[x], mm1 );
            movd_m2r( src1[i_src1+x], mm2 );

            punpcklbw_r2r( mm7, mm0 );
            punpcklbw_r2r( mm7, mm1 );
            punpcklbw_r2r( mm7, mm2 );
            paddw_r2r( mm1, mm1 );
            movq_r2r( mm1, mm3 );
            paddw_r2r( mm3, mm3 );

            /* (src1 + 6*src2 + src1' + 4) >> 3 */
            paddw_r2r( mm2, mm0 );
            paddw_r2r( mm3, mm1 );
            paddw_m2r( m_4, mm1 );
            paddw_r2r( mm1, mm0 );
            psraw_i2r( 3, mm0 );

            packuswb_r2r( mm7, mm0 );
            movd_r2m( mm0, dst[i_dst+x] );
        }
        dst += 2*i_dst;
        src1 += i_src1;
        src2 += i_src2;
    }
}
#endif

/* For debug */
static inline void XDeint8x8Set( uint8_t *dst, int i_dst, uint8_t v )
{
    int y;
    for( y = 0; y < 8; y++ )
        memset( &dst[y*i_dst], v, 8 );
}

/* XDeint8x8FieldE: stupid (1,0,1) deinterlacing for blocks that miss a
 * neighbour.
 * (Uses 8x9 pixels.)
 * TODO: a better one for the inner part. */
static inline void XDeint8x8FieldEC( uint8_t *dst, int i_dst,
                                     uint8_t *src, int i_src )
{
    int y, x;

    /* Interlaced */
    for( y = 0; y < 8; y += 2 )
    {
        memcpy( dst, src, 8 );
        dst += i_dst;

        for( x = 0; x < 8; x++ )
            dst[x] = (src[x] + src[2*i_src+x] ) >> 1;
        dst += 1*i_dst;

        src += 2*i_src;
    }
}
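/* Worked example of the (1,6,1) blend in XDeint8x8MergeC:
 * dst = (src1 + 6*src2 + src1' + 4) >> 3, a weighted average with
 * rounding.  With src1 == src1' == 100 and src2 == 120 this gives
 * (100 + 720 + 100 + 4) >> 3 == 115.  The check below is hypothetical
 * and not part of the original file, hence disabled. */
#if 0
#include <assert.h>
#include <string.h>
static void TestXDeint8x8MergeC( void )
{
    /* Pitch 8: src1 needs 5 rows, src2 4 rows, dst 8 rows. */
    uint8_t src1[5*8], src2[4*8], dst[8*8];
    memset( src1, 100, sizeof(src1) );
    memset( src2, 120, sizeof(src2) );
    XDeint8x8MergeC( dst, 8, src1, 8, src2, 8 );
    /* Even rows are copied from src1; odd rows hold the blend. */
    assert( dst[0] == 100 && dst[8] == 115 );
}
#endif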