📄 block.c
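/*
 * Inter motion-compensation routines from T264: per-partition chroma MC,
 * and B-frame (bi-predictive) luma/chroma MC built on pre-interpolated
 * half-pel planes. The excerpt opens mid-way through the chroma MC switch,
 * in the second half of an MB_16x8 partition.
 */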
        vec = t->mb.vec[0][luma_index[8]];
        src_u = t->ref[list_index][vec.refno]->U + ((t->mb.mb_y << 3) + (vec.y >> 3)) * t->edged_stride_uv +
                (t->mb.mb_x << 3) + (vec.x >> 3) + 4 * t->edged_stride_uv;
        dst_u += 4 * 8;
        t->eighth_pixel_mc_u(src_u, t->edged_stride_uv, dst_u, vec.x, vec.y, 8, 4);
        src = t->ref[list_index][vec.refno]->V + ((t->mb.mb_y << 3) + (vec.y >> 3)) * t->edged_stride_uv +
              (t->mb.mb_x << 3) + (vec.x >> 3) + 4 * t->edged_stride_uv;
        dst += 4 * 8;
        t->eighth_pixel_mc_u(src, t->edged_stride_uv, dst, vec.x, vec.y, 8, 4);
        break;
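    /* MB_8x16: two 8x16 luma partitions, each mapping to a 4x8 chroma block
       (chroma is subsampled 2:1 each way, and the vectors carry 1/8-pel
       chroma precision, hence the >> 3 when deriving the integer position). */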
    case MB_8x16:
        vec = t->mb.vec[0][0];
        src_u = t->ref[list_index][vec.refno]->U + ((t->mb.mb_y << 3) + (vec.y >> 3)) * t->edged_stride_uv +
                (t->mb.mb_x << 3) + (vec.x >> 3);
        dst_u = pred_u;
        t->eighth_pixel_mc_u(src_u, t->edged_stride_uv, dst_u, vec.x, vec.y, 4, 8);
        src = t->ref[list_index][vec.refno]->V + ((t->mb.mb_y << 3) + (vec.y >> 3)) * t->edged_stride_uv +
              (t->mb.mb_x << 3) + (vec.x >> 3);
        dst = pred_v;
        t->eighth_pixel_mc_u(src, t->edged_stride_uv, dst, vec.x, vec.y, 4, 8);
        vec = t->mb.vec[0][luma_index[4]];
        src_u = t->ref[list_index][vec.refno]->U + ((t->mb.mb_y << 3) + (vec.y >> 3)) * t->edged_stride_uv +
                (t->mb.mb_x << 3) + (vec.x >> 3) + 4;
        dst_u += 4;
        t->eighth_pixel_mc_u(src_u, t->edged_stride_uv, dst_u, vec.x, vec.y, 4, 8);
        src = t->ref[list_index][vec.refno]->V + ((t->mb.mb_y << 3) + (vec.y >> 3)) * t->edged_stride_uv +
              (t->mb.mb_x << 3) + (vec.x >> 3) + 4;
        dst += 4;
        t->eighth_pixel_mc_u(src, t->edged_stride_uv, dst, vec.x, vec.y, 4, 8);
        break;
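    /* MB_8x8 / MB_8x8ref0: each 8x8 luma quadrant i is further split per
       submb_part; its chroma block is 4x4 at row i / 2 * 4, column i % 2 * 4
       of the 8-pixel-wide chroma prediction buffer (hence the * 32 row step). */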
    case MB_8x8:
    case MB_8x8ref0:
        for(i = 0; i < 4; i++)
        {
            switch(t->mb.submb_part[luma_index[4 * i]])
            {
            case MB_8x8:
                vec = t->mb.vec[0][luma_index[4 * i]];
                src = t->ref[list_index][vec.refno]->U + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                      (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4);
                dst = pred_u + i / 2 * 32 + i % 2 * 4;
                t->eighth_pixel_mc_u(src, t->edged_stride_uv, dst, vec.x, vec.y, 4, 4);
                src = t->ref[list_index][vec.refno]->V + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                      (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4);
                dst = pred_v + i / 2 * 32 + i % 2 * 4;
                t->eighth_pixel_mc_u(src, t->edged_stride_uv, dst, vec.x, vec.y, 4, 4);
                break;
            case MB_8x4:
                vec = t->mb.vec[0][luma_index[4 * i]];
                src_u = t->ref[list_index][vec.refno]->U + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                        (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4);
                dst_u = pred_u + i / 2 * 32 + i % 2 * 4;
                t->eighth_pixel_mc_u(src_u, t->edged_stride_uv, dst_u, vec.x, vec.y, 4, 2);
                src = t->ref[list_index][vec.refno]->V + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                      (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4);
                dst = pred_v + i / 2 * 32 + i % 2 * 4;
                t->eighth_pixel_mc_u(src, t->edged_stride_uv, dst, vec.x, vec.y, 4, 2);
                vec = t->mb.vec[0][luma_index[4 * i + 2]];
                src_u = t->ref[list_index][vec.refno]->U + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                        (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4) + 2 * t->edged_stride_uv;
                dst_u += 2 * 8;
                t->eighth_pixel_mc_u(src_u, t->edged_stride_uv, dst_u, vec.x, vec.y, 4, 2);
                src = t->ref[list_index][vec.refno]->V + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                      (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4) + 2 * t->edged_stride_uv;
                dst += 2 * 8;
                t->eighth_pixel_mc_u(src, t->edged_stride_uv, dst, vec.x, vec.y, 4, 2);
                break;
            case MB_4x8:
                vec = t->mb.vec[0][luma_index[4 * i]];
                src_u = t->ref[list_index][vec.refno]->U + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                        (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4);
                dst_u = pred_u + i / 2 * 32 + i % 2 * 4;
                t->eighth_pixel_mc_u(src_u, t->edged_stride_uv, dst_u, vec.x, vec.y, 2, 4);
                src = t->ref[list_index][vec.refno]->V + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                      (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4);
                dst = pred_v + i / 2 * 32 + i % 2 * 4;
                t->eighth_pixel_mc_u(src, t->edged_stride_uv, dst, vec.x, vec.y, 2, 4);
                vec = t->mb.vec[0][luma_index[4 * i + 1]];
                src_u = t->ref[list_index][vec.refno]->U + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                        (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4) + 2;
                dst_u += 2;
                t->eighth_pixel_mc_u(src_u, t->edged_stride_uv, dst_u, vec.x, vec.y, 2, 4);
                src = t->ref[list_index][vec.refno]->V + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                      (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4) + 2;
                dst += 2;
                t->eighth_pixel_mc_u(src, t->edged_stride_uv, dst, vec.x, vec.y, 2, 4);
                break;
            case MB_4x4:
                vec = t->mb.vec[0][luma_index[4 * i]];
                src_u = t->ref[list_index][vec.refno]->U + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                        (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4);
                dst_u = pred_u + i / 2 * 32 + i % 2 * 4;
                t->eighth_pixel_mc_u(src_u, t->edged_stride_uv, dst_u, vec.x, vec.y, 2, 2);
                src = t->ref[list_index][vec.refno]->V + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                      (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4);
                dst = pred_v + i / 2 * 32 + i % 2 * 4;
                t->eighth_pixel_mc_u(src, t->edged_stride_uv, dst, vec.x, vec.y, 2, 2);
                vec = t->mb.vec[0][luma_index[4 * i + 1]];
                src_u = t->ref[list_index][vec.refno]->U + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                        (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4) + 2;
                dst_u += 2;
                t->eighth_pixel_mc_u(src_u, t->edged_stride_uv, dst_u, vec.x, vec.y, 2, 2);
                src = t->ref[list_index][vec.refno]->V + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                      (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4) + 2;
                dst += 2;
                t->eighth_pixel_mc_u(src, t->edged_stride_uv, dst, vec.x, vec.y, 2, 2);
                vec = t->mb.vec[0][luma_index[4 * i + 2]];
                src_u = t->ref[list_index][vec.refno]->U + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                        (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4) + 2 * t->edged_stride_uv;
                dst_u += 2 * 8 - 2;
                t->eighth_pixel_mc_u(src_u, t->edged_stride_uv, dst_u, vec.x, vec.y, 2, 2);
                src = t->ref[list_index][vec.refno]->V + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                      (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4) + 2 * t->edged_stride_uv;
                dst += 2 * 8 - 2;
                t->eighth_pixel_mc_u(src, t->edged_stride_uv, dst, vec.x, vec.y, 2, 2);
                vec = t->mb.vec[0][luma_index[4 * i + 3]];
                src_u = t->ref[list_index][vec.refno]->U + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                        (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4) + 2 * t->edged_stride_uv + 2;
                dst_u += 2;
                t->eighth_pixel_mc_u(src_u, t->edged_stride_uv, dst_u, vec.x, vec.y, 2, 2);
                src = t->ref[list_index][vec.refno]->V + ((t->mb.mb_y << 3) + (vec.y >> 3) + i / 2 * 4) * t->edged_stride_uv +
                      (t->mb.mb_x << 3) + (vec.x >> 3) + (i % 2 * 4) + 2 * t->edged_stride_uv + 2;
                dst += 2;
                t->eighth_pixel_mc_u(src, t->edged_stride_uv, dst, vec.x, vec.y, 2, 2);
                break;
            default:
                break;
            }
        }
        break;
    default:
        break;
    }
    T264dec_mb_decode_uv(t, pred_u, pred_v);
}
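/*
 * The call sites above pass the chroma motion vector in 1/8-pel units:
 * vec.x >> 3 / vec.y >> 3 select the integer sample and the low three bits
 * are the fractional phase. Below is a minimal C sketch of what
 * eighth_pixel_mc_u plausibly computes -- the standard H.264 chroma
 * bilinear filter. This is illustrative only: the signature is inferred
 * from the call sites, the real T264 routine is an optimized function
 * pointer, and the fixed destination stride of 8 assumes the 8x8
 * pred_u/pred_v buffers used here.
 */
static void eighth_pixel_mc_u_ref(uint8_t* src, int32_t src_stride,
                                  uint8_t* dst, int32_t mvx, int32_t mvy,
                                  int32_t width, int32_t height)
{
    int32_t dx = mvx & 7;   /* horizontal fractional phase, 0..7 */
    int32_t dy = mvy & 7;   /* vertical fractional phase, 0..7 */
    int32_t x, y;

    for(y = 0; y < height; y++)
    {
        for(x = 0; x < width; x++)
        {
            /* bilinear blend of the four neighboring integer samples */
            dst[x] = (uint8_t)(((8 - dx) * (8 - dy) * src[x] +
                                dx * (8 - dy) * src[x + 1] +
                                (8 - dx) * dy * src[x + src_stride] +
                                dx * dy * src[x + src_stride + 1] + 32) >> 6);
        }
        src += src_stride;
        dst += 8;           /* chroma prediction buffers are 8 pixels wide */
    }
}

/*
 * Fractional-position lookup for luma quarter-pel MC, indexed [y][x] by the
 * quarter-pel phase of the vector. Judging from how it is used below, each
 * entry is { planeA, planeB, xA, yA, xB, yB }: Y[0] is the full-pel plane
 * and Y[1..3] the pre-interpolated half-pel planes; a quarter-pel sample is
 * the average of plane A at offset (xA, yA) and plane B at (xB, yB), and
 * when planeA == planeB the sample lies on one plane and is copied directly.
 */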
static const int8_t index[4][4][6] =
{
    {{0, 0, 0, 0, 0, 0}, {0, 1, 0, 0, 0, 0}, {1, 1, 0, 0, 0, 0}, {1, 0, 0, 0, 1, 0}},
    {{0, 2, 0, 0, 0, 0}, {1, 2, 0, 0, 0, 0}, {1, 3, 0, 0, 0, 0}, {1, 2, 0, 0, 1, 0}},
    {{2, 2, 0, 0, 0, 0}, {2, 3, 0, 0, 0, 0}, {3, 3, 0, 0, 0, 0}, {3, 2, 0, 0, 1, 0}},
    {{2, 0, 0, 0, 0, 1}, {2, 1, 0, 0, 0, 1}, {3, 1, 0, 0, 0, 1}, {1, 2, 0, 1, 1, 0}}
};
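/*
 * Chroma MC for a B macroblock coded as 4x4 blocks: each of the sixteen 4x4
 * luma blocks owns one 2x2 chroma block. List-0 prediction is written
 * directly into pred_u/pred_v; when list 1 is also used, its prediction
 * lands in the pred_u_l1/pred_v_l1 scratch buffers and the two are averaged.
 * At least one list is assumed active per block.
 */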
void
T264_mb4x4_interb_uv_mc(T264_t* t, T264_vector_t vecPredicted[2][16], uint8_t* pred_u, uint8_t* pred_v)
{
    DECLARE_ALIGNED_MATRIX(pred_u_l1, 8, 8, uint8_t, CACHE_SIZE);
    DECLARE_ALIGNED_MATRIX(pred_v_l1, 8, 8, uint8_t, CACHE_SIZE);
    T264_vector_t vec;
    uint8_t *src, *dst;
    int32_t i;
    int32_t j;
    int32_t idx;
    int32_t offset_src, offset_dst;
    uint8_t* dstv;

    for(i = 0; i < 4; ++i)
    {
        for(j = 0; j < 4; ++j)
        {   /* predict each 2x2 chroma block (one per 4x4 luma block) */
            idx = (i * 4) + j;
            offset_dst = ((i * 2) * 8) + (j << 1);
            vec = vecPredicted[0][idx];
            offset_src = ((t->mb.mb_y << 3) + ((i << 1) + (vec.y >> 3))) * t->edged_stride_uv + (t->mb.mb_x << 3) + (j << 1) + (vec.x >> 3);
            dstv = pred_v + offset_dst;
            dst = pred_u + offset_dst;
            if(vec.refno > -1)  /* list-0 prediction, straight into pred_u/pred_v */
            {
                src = t->ref[0][vec.refno]->U + offset_src;
                t->eighth_pixel_mc_u(src, t->edged_stride_uv, dst, vec.x, vec.y, 2, 2);
                src = t->ref[0][vec.refno]->V + offset_src;
                t->eighth_pixel_mc_u(src, t->edged_stride_uv, dstv, vec.x, vec.y, 2, 2);
            }
            vec = vecPredicted[1][idx];
            offset_src = ((t->mb.mb_y << 3) + ((i << 1) + (vec.y >> 3))) * t->edged_stride_uv + (t->mb.mb_x << 3) + (j << 1) + (vec.x >> 3);
            if(vec.refno > -1)  /* list-1 prediction */
            {
                if(vecPredicted[0][idx].refno > -1)
                {
                    /* both lists used: route L1 into scratch for the average below */
                    dst = pred_u_l1 + offset_dst;
                    dstv = pred_v_l1 + offset_dst;
                }
                src = t->ref[1][vec.refno]->U + offset_src;
                t->eighth_pixel_mc_u(src, t->edged_stride_uv, dst, vec.x, vec.y, 2, 2);
                src = t->ref[1][vec.refno]->V + offset_src;
                t->eighth_pixel_mc_u(src, t->edged_stride_uv, dstv, vec.x, vec.y, 2, 2);
            }
            if(dst != pred_u + offset_dst)
            {
                /* bi-prediction: average the L1 scratch block into the L0 block */
                t->pia[MB_2x2](dst, pred_u + offset_dst, 8, 8, pred_u + offset_dst, 8);
                t->pia[MB_2x2](dstv, pred_v + offset_dst, 8, 8, pred_v + offset_dst, 8);
            }
        }
    }
}
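/*
 * Luma MC for a B macroblock coded as 4x4 blocks: per block, the list-0
 * prediction is written to ref, the list-1 prediction goes to the
 * pred_16x16bi scratch when both lists are used, and the two are then
 * averaged in place.
 */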
void
T264_mb4x4_interb_mc(T264_t* t, T264_vector_t vec[2][16], uint8_t* ref)
{
    T264_vector_t vec0, vec1;
    uint8_t *tmp, *pred_tmp;
    int32_t x, y, i, j;
    int32_t list_index,
            block_idx = 0;
    int32_t offset1, offset2;
    DECLARE_ALIGNED_MATRIX_H(pred_16x16bi, 16, 16, uint8_t, CACHE_SIZE);

    for(i = 0; i < 4; i++)
    {
        for(j = 0; j < 4; ++j)
        {
            int32_t offset_base;
            vec0 = vec[0][block_idx];
            vec1 = vec[1][block_idx];
            x = (vec0.x & 3);   /* quarter-pel phase of the list-0 vector */
            y = (vec0.y & 3);
            // offset_base = luma_inverse_y[block_idx] * 16 * 4 + luma_inverse_x[block_idx] * 4;
            offset_base = i * 16 * 4 + j * 4;
            pred_tmp = ref + offset_base;
            if(vec0.refno > -1)
            {
                list_index = 0;
                if (index[y][x][0] == index[y][x][1])
                {
                    /* sample lies on a single pre-interpolated plane: plain copy */
                    offset1 = ((t->mb.mb_y << 4) + (vec0.y >> 2) + i * 4) * t->edged_stride + ((t->mb.mb_x << 4) + (vec0.x >> 2)) + j * 4;
                    tmp = t->ref[list_index][vec0.refno]->Y[index[y][x][0]] + offset1;
                    t->memcpy_stride_u(tmp, 4, 4, t->edged_stride, pred_tmp, 16);
                }
                else
                {
                    /* quarter-pel position: average two half-pel planes */
                    offset1 = ((t->mb.mb_y << 4) + (vec0.y >> 2) + index[y][x][3] + i * 4) * t->edged_stride + (t->mb.mb_x << 4) + (vec0.x >> 2) + index[y][x][2] + j * 4;
                    offset2 = ((t->mb.mb_y << 4) + (vec0.y >> 2) + index[y][x][5] + i * 4) * t->edged_stride + (t->mb.mb_x << 4) + (vec0.x >> 2) + index[y][x][4] + j * 4;
                    t->pia[MB_4x4](t->ref[list_index][vec0.refno]->Y[index[y][x][0]] + offset1,
                        t->ref[list_index][vec0.refno]->Y[index[y][x][1]] + offset2,
                        t->edged_stride, t->edged_stride, pred_tmp, 16);
                }
            }
            x = (vec1.x & 3);
            y = (vec1.y & 3);
            if(vec1.refno > -1)
            {
                list_index = 1;
                if(vec0.refno > -1)
                    pred_tmp = pred_16x16bi + offset_base;  /* keep L1 in scratch for bi-average */
                if (index[y][x][0] == index[y][x][1])
                {
                    offset1 = ((t->mb.mb_y << 4) + (vec1.y >> 2) + i * 4) * t->edged_stride + ((t->mb.mb_x << 4) + (vec1.x >> 2)) + j * 4;
                    tmp = t->ref[list_index][vec1.refno]->Y[index[y][x][0]] + offset1;
                    t->memcpy_stride_u(tmp, 4, 4, t->edged_stride, pred_tmp, 16);
                }
                else
                {
                    offset1 = ((t->mb.mb_y << 4) + (vec1.y >> 2) + index[y][x][3] + i * 4) * t->edged_stride + (t->mb.mb_x << 4) + (vec1.x >> 2) + index[y][x][2] + j * 4;
                    offset2 = ((t->mb.mb_y << 4) + (vec1.y >> 2) + index[y][x][5] + i * 4) * t->edged_stride + (t->mb.mb_x << 4) + (vec1.x >> 2) + index[y][x][4] + j * 4;
                    t->pia[MB_4x4](t->ref[list_index][vec1.refno]->Y[index[y][x][0]] + offset1,
                        t->ref[list_index][vec1.refno]->Y[index[y][x][1]] + offset2,
                        t->edged_stride, t->edged_stride, pred_tmp, 16);
                }
            }
            if(pred_tmp != ref + offset_base)
                t->pia[MB_4x4](pred_tmp, ref + offset_base, 16, 16, ref + offset_base, 16);
            ++block_idx;
        }
    }
}
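/*
 * t->pia[] ("pixel average", judging by its use here) holds one averaging
 * kernel per block-size enum; it combines two predictions with a rounded
 * mean, both for bi-prediction and for synthesizing quarter-pel samples
 * from two half-pel planes. A minimal sketch under that assumption -- width
 * and height are parameters here, whereas the real table presumably stores
 * a specialized (often SIMD) routine per block size. Note dst may alias
 * one of the sources, as in the in-place averages above; each sample is
 * read before it is written, so that is safe.
 */
static void pixel_avg_wxh(uint8_t* src1, uint8_t* src2,
                          int32_t stride1, int32_t stride2,
                          uint8_t* dst, int32_t dst_stride,
                          int32_t width, int32_t height)
{
    int32_t x, y;
    for(y = 0; y < height; y++)
    {
        for(x = 0; x < width; x++)
            dst[x] = (uint8_t)((src1[x] + src2[x] + 1) >> 1);   /* rounded mean */
        src1 += stride1;
        src2 += stride2;
        dst += dst_stride;
    }
}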
void
T264dec_mb_decode_interb_mc(T264_t* t, uint8_t* ref)
{
    T264_vector_t vec0, vec1;
    uint8_t *tmp, *pred_tmp;
    int32_t x, y, i;
    int32_t list_index;
    DECLARE_ALIGNED_MATRIX_H(pred_16x16bi, 16, 16, uint8_t, CACHE_SIZE);

    if(t->mb.is_copy)
        T264_mb4x4_interb_mc(t, t->mb.vec, ref);
    else
        switch(t->mb.mb_part)
        {
        case MB_16x16:
            vec0 = t->mb.vec[0][0];
            vec1 = t->mb.vec[1][0];
            x = (vec0.x & 3);
            y = (vec0.y & 3);
            pred_tmp = ref;
            if(vec0.refno > -1)
            {
                list_index = 0;
                if (index[y][x][0] == index[y][x][1])
                {
                    tmp = t->ref[list_index][vec0.refno]->Y[index[y][x][0]] + ((t->mb.mb_y << 4) + (vec0.y >> 2)) * t->edged_stride +
                          ((t->mb.mb_x << 4) + (vec0.x >> 2));
                    t->memcpy_stride_u(tmp, 16, 16, t->edged_stride, ref, 16);
                }
                else
                {
                    /* Reconstructed from the MB_4x4 path above (the original
                     * listing is cut off at this point by page residue):
                     * quarter-pel position, average two half-pel planes. */
                    t->pia[MB_16x16](t->ref[list_index][vec0.refno]->Y[index[y][x][0]] +
                            ((t->mb.mb_y << 4) + (vec0.y >> 2) + index[y][x][3]) * t->edged_stride +
                            (t->mb.mb_x << 4) + (vec0.x >> 2) + index[y][x][2],
                        t->ref[list_index][vec0.refno]->Y[index[y][x][1]] +
                            ((t->mb.mb_y << 4) + (vec0.y >> 2) + index[y][x][5]) * t->edged_stride +
                            (t->mb.mb_x << 4) + (vec0.x >> 2) + index[y][x][4],
                        t->edged_stride, t->edged_stride, ref, 16);
                }
            }
            /* ... the remainder of the function (the list-1 half of MB_16x16
             * and the other partition cases) is not present in the source. */