
📄 h264_altivec.c.svn-base

📁 mediastreamer2 is an open-source library for transporting media streams over a network
📖 Page 1 of 3
    }
}

static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                    const uint8_t * src2, int dst_stride,
                                    int src_stride1, int h)
{
    int i;
    vec_u8_t a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {
        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);
        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);
        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(a, b);

        edges = vec_perm(tmp2, tmp1, mask);
        align = vec_lvsr(0, dst);
        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0, dst);

        dst += dst_stride;
    }
}

static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                    const uint8_t * src2, int dst_stride,
                                    int src_stride1, int h)
{
    int i;
    vec_u8_t a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {
        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);
        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);
        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));

        edges = vec_perm(tmp2, tmp1, mask);
        align = vec_lvsr(0, dst);
        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0, dst);

        dst += dst_stride;
    }
}

/* Implemented but could be faster
#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
*/

H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)

/****************************************************************************
 * IDCT transform:
 ****************************************************************************/

#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)               \
    /* 1st stage */                                               \
    vz0 = vec_add(vb0,vb2);       /* temp[0] = Y[0] + Y[2] */     \
    vz1 = vec_sub(vb0,vb2);       /* temp[1] = Y[0] - Y[2] */     \
    vz2 = vec_sra(vb1,vec_splat_u16(1));                          \
    vz2 = vec_sub(vz2,vb3);       /* temp[2] = Y[1].1/2 - Y[3] */ \
    vz3 = vec_sra(vb3,vec_splat_u16(1));                          \
    vz3 = vec_add(vb1,vz3);       /* temp[3] = Y[1] + Y[3].1/2 */ \
    /* 2nd stage: output */                                       \
    va0 = vec_add(vz0,vz3);       /* x[0] = temp[0] + temp[3] */  \
    va1 = vec_add(vz1,vz2);       /* x[1] = temp[1] + temp[2] */  \
    va2 = vec_sub(vz1,vz2);       /* x[2] = temp[1] - temp[2] */  \
    va3 = vec_sub(vz0,vz3)        /* x[3] = temp[0] - temp[3] */
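/* For reference, a scalar sketch of one VEC_1D_DCT pass (the H.264 4x4
 * inverse-transform butterfly; variable names here are illustrative only):
 *
 *     z0 = y0 + y2;            x0 = z0 + z3;
 *     z1 = y0 - y2;            x1 = z1 + z2;
 *     z2 = (y1 >> 1) - y3;     x2 = z1 - z2;
 *     z3 = y1 + (y3 >> 1);     x3 = z0 - z3;
 *
 * The macro applies this butterfly to all four columns (or rows) of the
 * block at once; vec_sra(..., vec_splat_u16(1)) supplies the >>1 halving.
 */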
#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )

#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)                      \
    vdst_orig = vec_ld(0, dst);                               \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask);          \
    vdst_ss = (vec_s16_t) vec_mergeh(zero_u8v, vdst);         \
    va = vec_add(va, vdst_ss);                                \
    va_u8 = vec_packsu(va, zero_s16v);                        \
    va_u32 = vec_splat((vec_u32_t)va_u8, 0);                  \
    vec_ste(va_u32, element, (uint32_t*)dst);

static void ff_h264_idct_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    vec_s16_t va0, va1, va2, va3;
    vec_s16_t vz0, vz1, vz2, vz3;
    vec_s16_t vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8_t va_u8;
    vec_u32_t va_u32;
    vec_s16_t vdst_ss;
    const vec_u16_t v6us = vec_splat_u16(6);
    vec_u8_t vdst, vdst_orig;
    vec_u8_t vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;  /* add 32 as a DC-level for rounding */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}
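/* One 1-D pass of the 8x8 inverse transform. The even half (a0/a2/a4/a6)
 * is derived from SRC(0,2,4,6) and the odd half (a1/a3/a5/a7) from
 * SRC(1,3,5,7); the b0..b7 butterflies then fold the two halves into the
 * eight outputs, as spelled out by the scalar expressions quoted in the
 * comments below. The shift-count vectors onev and twov must be declared
 * by the caller (ff_h264_idct8_add_altivec does so). */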
#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,  d0, d1, d2, d3, d4, d5, d6, d7) {\
    /*        a0  = SRC(0) + SRC(4); */ \
    vec_s16_t a0v = vec_add(s0, s4);    \
    /*        a2  = SRC(0) - SRC(4); */ \
    vec_s16_t a2v = vec_sub(s0, s4);    \
    /*        a4  =           (SRC(2)>>1) - SRC(6); */ \
    vec_s16_t a4v = vec_sub(vec_sra(s2, onev), s6);    \
    /*        a6  =           (SRC(6)>>1) + SRC(2); */ \
    vec_s16_t a6v = vec_add(vec_sra(s6, onev), s2);    \
    /*        b0  =         a0 + a6; */ \
    vec_s16_t b0v = vec_add(a0v, a6v);  \
    /*        b2  =         a2 + a4; */ \
    vec_s16_t b2v = vec_add(a2v, a4v);  \
    /*        b4  =         a2 - a4; */ \
    vec_s16_t b4v = vec_sub(a2v, a4v);  \
    /*        b6  =         a0 - a6; */ \
    vec_s16_t b6v = vec_sub(a0v, a6v);  \
    /* a1 =  SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    /*        a1 =             (SRC(5)-SRC(3)) -  (SRC(7)  +  (SRC(7)>>1)); */ \
    vec_s16_t a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 =  SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    /*        a3 =             (SRC(7)+SRC(1)) -  (SRC(3)  +  (SRC(3)>>1)); */ \
    vec_s16_t a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\
    /* a5 =  SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    /*        a5 =             (SRC(7)-SRC(1)) +   SRC(5) +   (SRC(5)>>1); */ \
    vec_s16_t a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\
    /*        a7 =                SRC(5)+SRC(3) +  SRC(1) +   (SRC(1)>>1); */ \
    vec_s16_t a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\
    /*        b1 =                  (a7>>2)  +  a1; */ \
    vec_s16_t b1v = vec_add( vec_sra(a7v, twov), a1v); \
    /*        b3 =          a3 +        (a5>>2); */ \
    vec_s16_t b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    /*        b5 =                  (a3>>2)  -   a5; */ \
    vec_s16_t b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    /*        b7 =           a7 -        (a1>>2); */ \
    vec_s16_t b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    /* DST(0,    b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1,    b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2,    b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3,    b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4,    b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5,    b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6,    b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7,    b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}

#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */                                       \
    vec_u8_t hv = vec_ld( 0, dest );                           \
    vec_u8_t lv = vec_ld( 7, dest );                           \
    vec_u8_t dstv   = vec_perm( hv, lv, (vec_u8_t)perm_ldv );  \
    vec_s16_t idct_sh6 = vec_sra(idctv, sixv);                 \
    vec_u16_t dst16 = (vec_u16_t)vec_mergeh(zero_u8v, dstv);   \
    vec_s16_t idstsum = vec_adds(idct_sh6, (vec_s16_t)dst16);  \
    vec_u8_t idstsum8 = vec_packsu(zero_s16v, idstsum);        \
    vec_u8_t edgehv;                                           \
    /* unaligned store */                                      \
    vec_u8_t bodyv  = vec_perm( idstsum8, idstsum8, perm_stv );\
    vec_u8_t edgelv = vec_perm( sel, zero_u8v, perm_stv );     \
    lv    = vec_sel( lv, bodyv, edgelv );                      \
    vec_st( lv, 7, dest );                                     \
    hv    = vec_ld( 0, dest );                                 \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );              \
    hv    = vec_sel( hv, bodyv, edgehv );                      \
    vec_st( hv, 0, dest );                                     \
 }
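/* ALTIVEC_STORE_SUM_CLIP above is an unaligned read-modify-write: it loads
 * the two aligned vectors straddling dest (hv/lv), adds the >>6-scaled IDCT
 * residual to the zero-extended pixels with saturation (vec_adds/vec_packsu
 * clip to 0..255), then uses vec_sel with the permuted edge masks so that
 * only the 8 bytes belonging to this row are written back. */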
void ff_h264_idct8_add_altivec( uint8_t *dst, DCTELEM *dct, int stride ) {
    vec_s16_t s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16_t d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16_t idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8_t perm_ldv = vec_lvsl(0, dst);
    vec_u8_t perm_stv = vec_lvsr(8, dst);

    const vec_u16_t onev = vec_splat_u16(1);
    const vec_u16_t twov = vec_splat_u16(2);
    const vec_u16_t sixv = vec_splat_u16(6);

    const vec_u8_t sel = (vec_u8_t) AVV(0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1);
    LOAD_ZERO;

    dct[0] += 32; // rounding for the >>6 at the end

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0,  d1,  d2,  d3,  d4,  d5,  d6, d7 );

    IDCT8_1D_ALTIVEC(d0,  d1,  d2,  d3,  d4,  d5,  d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}

#define transpose4x16(r0, r1, r2, r3) {      \
    register vec_u8_t r4;                    \
    register vec_u8_t r5;                    \
    register vec_u8_t r6;                    \
    register vec_u8_t r7;                    \
                                             \
    r4 = vec_mergeh(r0, r2);  /*0, 2 set 0*/ \
