⭐ 虫虫下载站

📄 dsputil_altivec.c

📁 There is now a lot of H.264 source code available
💻 C
📖 Page 1 of 5
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char *tv;
    vector unsigned char pixv;
    vector unsigned int sv;
    vector signed int sum;

    sv = (vector unsigned int)vec_splat_u32(0);

    s = 0;
    for (i = 0; i < 16; i++) {
        /* Read in the potentially unaligned pixels */
        tv = (vector unsigned char *) pix;
        pixv = vec_perm(tv[0], tv[1], vec_lvsl(0, pix));

        /* Square the values, and add them to our sum */
        sv = vec_msum(pixv, pixv, sv);

        pix += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sum = vec_sums((vector signed int) sv, (vector signed int) zero);
    sum = vec_splat(sum, 3);
    vec_ste(sum, 0, &s);

    return s;
}

/**
 * Sum of Squared Errors for an 8x8 block.
 * AltiVec-enhanced.
 * It's the sad8_altivec code above w/ squaring added.
 */
int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}
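/* [Editor's note] A minimal scalar sketch of what sse8_altivec above
 * computes: the sum of squared differences over an 8-pixel-wide, h-row
 * block. Because the operands are unsigned bytes, |a-b|^2 == (a-b)^2,
 * which is why the vector code can square the max/min difference.
 * This reference version is not part of the original file; sse8_ref
 * is a hypothetical name. */
static int sse8_ref(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, j, s = 0;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++) {
            int d = pix1[j] - pix2[j];
            s += d * d;                 /* accumulate squared error */
        }
        pix1 += line_size;              /* step to the next row */
        pix2 += line_size;
    }
    return s;
}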
/**
 * Sum of Squared Errors for a 16x16 block.
 * AltiVec-enhanced.
 * It's the sad16_altivec code above w/ squaring added.
 */
int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

int pix_sum_altivec(uint8_t * pix, int line_size)
{
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm, *pixv;
    vector unsigned char t1;
    vector unsigned int sad;
    vector signed int sumdiffs;

    int i;
    int s __attribute__((aligned(16)));

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < 16; i++) {
        /* Read the potentially unaligned 16 pixels into t1 */
        perm = vec_lvsl(0, pix);
        pixv = (vector unsigned char *) pix;
        t1 = vec_perm(pixv[0], pixv[1], perm);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t1, sad);

        pix += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
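/* [Editor's note] A scalar sketch of pix_sum_altivec above, for reference.
 * The vector version keeps four 32-bit partial sums (vec_sum4s), folds
 * them into one total with vec_sums, broadcasts it with vec_splat, and
 * stores a single element with vec_ste; the scalar equivalent is a plain
 * accumulation over the 16x16 block. pix_sum_ref is a hypothetical name,
 * not part of the original file. */
static int pix_sum_ref(uint8_t *pix, int line_size)
{
    int i, j, s = 0;
    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j++)
            s += pix[j];                /* add all 256 pixels */
        pix += line_size;
    }
    return s;
}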
void get_pixels_altivec(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
    vector signed short shorts;

    for (i = 0; i < 8; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, pixels);
        pixv = (vector unsigned char *) pixels;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts = (vector signed short)vec_mergeh(zero, bytes);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts, i*16, (vector signed short*)block);

        pixels += line_size;
    }
}
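/* [Editor's note] A scalar sketch of get_pixels_altivec above. The vector
 * code zero-extends 8 unsigned bytes to 8 signed shorts per row: on a
 * big-endian PowerPC, vec_mergeh(zero, bytes) interleaves a zero high
 * byte before each pixel byte, which amounts to a widening copy. The
 * sketch below assumes DCTELEM is a 16-bit type, as it is used elsewhere
 * in this file; get_pixels_ref is a hypothetical name. */
static void get_pixels_ref(DCTELEM *restrict block, const uint8_t *pixels,
                           int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[i * 8 + j] = (DCTELEM) pixels[j];  /* widen byte -> short */
        pixels += line_size;
    }
}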
void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
                         const uint8_t *s2, int stride)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
    vector signed short shorts1, shorts2;

    for (i = 0; i < 4; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;

        // The code below is a copy of the code above... This is a manual unroll.

        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;
    }
}

void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int i;
    /* step by 8: the loop body handles 8 bytes per iteration */
    for (i = 0; i + 7 < w; i += 8) {
        dst[i+0] += src[i+0];
        dst[i+1] += src[i+1];
        dst[i+2] += src[i+2];
        dst[i+3] += src[i+3];
        dst[i+4] += src[i+4];
        dst[i+5] += src[i+5];
        dst[i+6] += src[i+6];
        dst[i+7] += src[i+7];
    }
    for (; i < w; i++)
        dst[i+0] += src[i+0];
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register int i;
    register vector unsigned char vdst, vsrc;

    /* dst and src are 16 bytes-aligned (guaranteed) */
    for (i = 0; (i + 15) < w; i += 16) {
        vdst = vec_ld(i, (unsigned char*)dst);
        vsrc = vec_ld(i, (unsigned char*)src);
        vdst = vec_add(vsrc, vdst);
        vec_st(vdst, i, (unsigned char*)dst);
    }
    /* if w is not a multiple of 16, handle the remaining bytes in scalar */
    for (; i < w; i++) {
        dst[i] += src[i];
    }
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}

/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_pixels16_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int i;

POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);

    for (i = 0; i < h; i++) {
        *((uint32_t*)(block))    = LD32(pixels);
        *((uint32_t*)(block+4))  = LD32(pixels+4);
        *((uint32_t*)(block+8))  = LD32(pixels+8);
        *((uint32_t*)(block+12)) = LD32(pixels+12);
        pixels += line_size;
        block  += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);

#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    register int line_size_2 = line_size << 1;
    register int line_size_3 = line_size + line_size_2;
    register int line_size_4 = line_size << 2;

POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);

// hand-unrolling the loop by 4 gains about 15%
// minimum execution time goes from 74 to 60 cycles
// it's faster than -funroll-loops, but using
// -funroll-loops w/ this is bad - 74 cycles again.
// all this is on a 7450, tuning for the 7450
#if 0
    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
        pixelsv2 = vec_ld(16, (unsigned char*)pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char*)block);
        pixels += line_size;
        block  += line_size;
    }
#else
    for (i = 0; i < h; i += 4) {
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
