h264_template_altivec.c.svn-base
/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

//#define DEBUG_ALIGNMENT
#ifdef DEBUG_ALIGNMENT
/* abort if ptr is not 16-byte aligned */
#define ASSERT_ALIGNED(ptr) assert(!((unsigned long)ptr&0x0000000F));
#else
#define ASSERT_ALIGNED(ptr) ;
#endif

/* this code assumes that stride % 16 == 0 */

/* One output row of the 8-pixel-wide chroma MC, i.e. the H.264 bilinear
 * interpolation (A*a + B*b + C*c + D*d + 32) >> 6, on source samples
 * already widened to 16 bits. */
#define CHROMA_MC8_ALTIVEC_CORE \
        vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc2uc);\
        vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc3uc);\
\
        psum = vec_mladd(vA, vsrc0ssH, v32ss);\
        psum = vec_mladd(vB, vsrc1ssH, psum);\
        psum = vec_mladd(vC, vsrc2ssH, psum);\
        psum = vec_mladd(vD, vsrc3ssH, psum);\
        psum = vec_sr(psum, v6us);\
\
        vdst = vec_ld(0, dst);\
        ppsum = (vec_u8_t)vec_pack(psum, psum);\
        vfdst = vec_perm(vdst, ppsum, fperm);\
\
        OP_U8_ALTIVEC(fsum, vfdst, vdst);\
\
        vec_st(fsum, 0, dst);\
\
        vsrc0ssH = vsrc2ssH;\
        vsrc1ssH = vsrc3ssH;\
\
        dst += stride;\
        src += stride;

/* Degenerate case (x == 0 or y == 0): only two taps remain,
 * (A*a + E*b + 32) >> 6 with E = B + C. */
#define CHROMA_MC8_ALTIVEC_CORE_SIMPLE \
\
        vsrc0ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc0uc);\
        vsrc1ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc1uc);\
\
        psum = vec_mladd(vA, vsrc0ssH, v32ss);\
        psum = vec_mladd(vE, vsrc1ssH, psum);\
        psum = vec_sr(psum, v6us);\
\
        vdst = vec_ld(0, dst);\
        ppsum = (vec_u8_t)vec_pack(psum, psum);\
        vfdst = vec_perm(vdst, ppsum, fperm);\
\
        OP_U8_ALTIVEC(fsum, vfdst, vdst);\
\
        vec_st(fsum, 0, dst);\
\
        dst += stride;\
        src += stride;

void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
                                    int stride, int h, int x, int y) {
    POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1);
    DECLARE_ALIGNED_16(signed int, ABCD[4]) =
                        {((8 - x) * (8 - y)),
                         ((    x) * (8 - y)),
                         ((8 - x) * (    y)),
                         ((    x) * (    y))};
    register int i;
    vec_u8_t fperm;
    const vec_s32_t vABCD = vec_ld(0, ABCD);
    const vec_s16_t vA = vec_splat((vec_s16_t)vABCD, 1);
    const vec_s16_t vB = vec_splat((vec_s16_t)vABCD, 3);
    const vec_s16_t vC = vec_splat((vec_s16_t)vABCD, 5);
    const vec_s16_t vD = vec_splat((vec_s16_t)vABCD, 7);
    LOAD_ZERO;
    const vec_s16_t v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
    const vec_u16_t v6us  = vec_splat_u16(6);
    /* the 9 source bytes of a row fit in a single 16-byte load only
     * when src % 16 <= 7 */
    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    /* at src % 16 == 15, the one-byte-shifted permute would select
     * entirely from the second vector, so use it directly */
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;

    vec_u8_t vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
    vec_u8_t vsrc0uc, vsrc1uc;
    vec_s16_t vsrc0ssH, vsrc1ssH;
    vec_u8_t vsrcCuc, vsrc2uc, vsrc3uc;
    vec_s16_t vsrc2ssH, vsrc3ssH, psum;
    vec_u8_t vdst, ppsum, vfdst, fsum;

    POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);

    /* fperm merges the 8 result bytes into whichever half of the aligned
     * 16-byte destination vector dst actually points at */
    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8_t)AVV(0x10, 0x11, 0x12, 0x13,
                              0x14, 0x15, 0x16, 0x17,
                              0x08, 0x09, 0x0A, 0x0B,
                              0x0C, 0x0D, 0x0E, 0x0F);
    } else {
        fperm = (vec_u8_t)AVV(0x00, 0x01, 0x02, 0x03,
                              0x04, 0x05, 0x06, 0x07,
                              0x18, 0x19, 0x1A, 0x1B,
                              0x1C, 0x1D, 0x1E, 0x1F);
    }

    vsrcAuc = vec_ld(0, src);

    if (loadSecond)
        vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
        vsrc1uc = vsrcBuc;
    else
        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vsrc0ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc0uc);
    vsrc1ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc1uc);

    if (ABCD[3]) {
        if (!loadSecond) { // implies !reallyBadAlign
            for (i = 0 ; i < h ; i++) {
                vsrcCuc = vec_ld(stride + 0, src);
                vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
                vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

                CHROMA_MC8_ALTIVEC_CORE
            }
        } else {
            vec_u8_t vsrcDuc;
            for (i = 0 ; i < h ; i++) {
                vsrcCuc = vec_ld(stride +  0, src);
                vsrcDuc = vec_ld(stride + 16, src);
                vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
                if (reallyBadAlign)
                    vsrc3uc = vsrcDuc;
                else
                    vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

                CHROMA_MC8_ALTIVEC_CORE
            }
        }
    } else {
        const vec_s16_t vE = vec_add(vB, vC);
        if (ABCD[2]) { // x == 0, B == 0
            if (!loadSecond) { // implies !reallyBadAlign
                for (i = 0 ; i < h ; i++) {
                    vsrcCuc = vec_ld(stride + 0, src);
                    vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE

                    vsrc0uc = vsrc1uc;
                }
            } else {
                vec_u8_t vsrcDuc;
                for (i = 0 ; i < h ; i++) {
                    vsrcCuc = vec_ld(stride +  0, src);
                    vsrcDuc = vec_ld(stride + 15, src);
                    vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE

                    vsrc0uc = vsrc1uc;
                }
            }
        } else { // y == 0, C == 0
            if (!loadSecond) { // implies !reallyBadAlign
                for (i = 0 ; i < h ; i++) {
                    vsrcCuc = vec_ld(0, src);
                    vsrc0uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
                    vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE
                }
            } else {
                vec_u8_t vsrcDuc;
                for (i = 0 ; i < h ; i++) {
                    vsrcCuc = vec_ld( 0, src);
                    vsrcDuc = vec_ld(15, src);
                    vsrc0uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
                    if (reallyBadAlign)
                        vsrc1uc = vsrcDuc;
                    else
                        vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE
                }
            }
        }
    }
    POWERPC_PERF_STOP_COUNT(PREFIX_h264_chroma_mc8_num, 1);
}

#undef CHROMA_MC8_ALTIVEC_CORE

/* this code assumes stride % 16 == 0 */

/* Horizontal half-pel luma interpolation, 16 pixels wide: the H.264 6-tap
 * filter ((p0+p1)*20 - (pm1+p2)*5 + (pm2+p3) + 16) >> 5, saturated to u8. */
static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src,
                                                 int dstStride, int srcStride) {
    POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_h_lowpass_num, 1);
    register int i;

    LOAD_ZERO;
    const vec_u8_t permM2 = vec_lvsl(-2, src);
    const vec_u8_t permM1 = vec_lvsl(-1, src);
    const vec_u8_t permP0 = vec_lvsl(+0, src);
    const vec_u8_t permP1 = vec_lvsl(+1, src);
    const vec_u8_t permP2 = vec_lvsl(+2, src);
    const vec_u8_t permP3 = vec_lvsl(+3, src);
    const vec_s16_t v5ss  = vec_splat_s16(5);
    const vec_u16_t v5us  = vec_splat_u16(5);
    const vec_s16_t v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
    const vec_s16_t v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));

    vec_u8_t srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;

    register int align = ((((unsigned long)src) - 2) % 16);

    vec_s16_t srcP0A, srcP0B, srcP1A, srcP1B,
              srcP2A, srcP2B, srcP3A, srcP3B,
              srcM1A, srcM1B, srcM2A, srcM2B,
              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
              pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
              psumA, psumB, sumA, sumB;

    vec_u8_t sum, vdst, fsum;

    POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);

    for (i = 0 ; i < 16 ; i ++) {
        vec_u8_t srcR1 = vec_ld(-2, src);
        vec_u8_t srcR2 = vec_ld(14, src);

        /* for align 11-15 one of the taps starts exactly at the next
         * 16-byte line, so that vector is used directly and a third
         * load is pulled in where needed */
        switch (align) {
        default: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = vec_perm(srcR1, srcR2, permP3);
        } break;
        case 11: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = srcR2;
        } break;
        case 12: {
            vec_u8_t srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = srcR2;
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 13: {
            vec_u8_t srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = srcR2;
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 14: {
            vec_u8_t srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = srcR2;
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 15: {
            vec_u8_t srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = srcR2;
            srcP0 = vec_perm(srcR2, srcR3, permP0);
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        }

        /* widen all six taps to 16 bits, low (A) and high (B) halves */
        srcP0A = (vec_s16_t) vec_mergeh(zero_u8v, srcP0);
        srcP0B = (vec_s16_t) vec_mergel(zero_u8v, srcP0);
        srcP1A = (vec_s16_t) vec_mergeh(zero_u8v, srcP1);
        srcP1B = (vec_s16_t) vec_mergel(zero_u8v, srcP1);

        srcP2A = (vec_s16_t) vec_mergeh(zero_u8v, srcP2);
        srcP2B = (vec_s16_t) vec_mergel(zero_u8v, srcP2);
        srcP3A = (vec_s16_t) vec_mergeh(zero_u8v, srcP3);
        srcP3B = (vec_s16_t) vec_mergel(zero_u8v, srcP3);

        srcM1A = (vec_s16_t) vec_mergeh(zero_u8v, srcM1);
        srcM1B = (vec_s16_t) vec_mergel(zero_u8v, srcM1);
        srcM2A = (vec_s16_t) vec_mergeh(zero_u8v, srcM2);
        srcM2B = (vec_s16_t) vec_mergel(zero_u8v, srcM2);

        sum1A = vec_adds(srcP0A, srcP1A);
        sum1B = vec_adds(srcP0B, srcP1B);
        sum2A = vec_adds(srcM1A, srcP2A);
        sum2B = vec_adds(srcM1B, srcP2B);
        sum3A = vec_adds(srcM2A, srcP3A);
        sum3B = vec_adds(srcM2B, srcP3B);

        pp1A = vec_mladd(sum1A, v20ss, v16ss);
        pp1B = vec_mladd(sum1B, v20ss, v16ss);

        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);

        pp3A = vec_add(sum3A, pp1A);
        pp3B = vec_add(sum3B, pp1B);

        psumA = vec_sub(pp3A, pp2A);
        psumB = vec_sub(pp3B, pp2B);

        sumA = vec_sra(psumA, v5us);
        sumB = vec_sra(psumB, v5us);

        sum = vec_packsu(sumA, sumB);

        ASSERT_ALIGNED(dst);
        vdst = vec_ld(0, dst);

        OP_U8_ALTIVEC(fsum, sum, vdst);

        vec_st(fsum, 0, dst);

        src += srcStride;
        dst += dstStride;
    }
    POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
}
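/*
 * Usage sketch (an editor's illustration under stated assumptions, not a
 * verbatim copy of the including file): this template is never compiled on
 * its own. A wrapper such as h264_altivec.c defines OP_U8_ALTIVEC and the
 * PREFIX_* names, then #includes this file once per variant, so the same
 * body yields a "put" function (store the filtered result) and an "avg"
 * function (round-up average with dst via vec_avg). The wiring would look
 * roughly like:
 *
 *   #define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
 *   #define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)
 *
 *   #define OP_U8_ALTIVEC                        PUT_OP_U8_ALTIVEC
 *   #define PREFIX_h264_chroma_mc8_altivec       put_h264_chroma_mc8_altivec
 *   #define PREFIX_h264_chroma_mc8_num           altivec_put_h264_chroma_mc8_num
 *   #define PREFIX_h264_qpel16_h_lowpass_altivec put_h264_qpel16_h_lowpass_altivec
 *   #define PREFIX_h264_qpel16_h_lowpass_num     altivec_put_h264_qpel16_h_lowpass_num
 *   #include "h264_template_altivec.c"
 *   #undef OP_U8_ALTIVEC
 *   #undef PREFIX_h264_chroma_mc8_altivec
 *   #undef PREFIX_h264_chroma_mc8_num
 *   #undef PREFIX_h264_qpel16_h_lowpass_altivec
 *   #undef PREFIX_h264_qpel16_h_lowpass_num
 *
 *   // ...then the same block again with AVG_OP_U8_ALTIVEC and avg_* names.
 */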