
inter_test.c
From: T.264 source code (VC-based development environment, latest version), language: C
                if (sub_sad < sub_sad_min)
                {
                    sub_sad_min = sub_sad;
                    t->mb.submb_part[i / 2 * 8 + i % 2 * 2 + 0] = mode;
                    t->mb.submb_part[i / 2 * 8 + i % 2 * 2 + 1] = mode;
                    t->mb.submb_part[i / 2 * 8 + i % 2 * 2 + 4] = mode;
                    t->mb.submb_part[i / 2 * 8 + i % 2 * 2 + 5] = mode;
                }
                else
                {
                    // restore current best mode
                    t->mb.vec_ref[VEC_LUMA + i / 2 * 16 + i % 2 * 2 + 0].vec = vec_bak[0];
                    t->mb.vec_ref[VEC_LUMA + i / 2 * 16 + i % 2 * 2 + 1].vec = vec_bak[1];
                    t->mb.vec_ref[VEC_LUMA + i / 2 * 16 + i % 2 * 2 + 8].vec = vec_bak[2];
                    t->mb.vec_ref[VEC_LUMA + i / 2 * 16 + i % 2 * 2 + 9].vec = vec_bak[3];

                    t->mb.vec[0][i / 2 * 8 + i % 2 * 2 + 0] = vec_bak[0];
                    t->mb.vec[0][i / 2 * 8 + i % 2 * 2 + 1] = vec_bak[1];
                    t->mb.vec[0][i / 2 * 8 + i % 2 * 2 + 4] = vec_bak[2];
                    t->mb.vec[0][i / 2 * 8 + i % 2 * 2 + 5] = vec_bak[3];
                }
            }

            sub_sad_all += sub_sad_min;
        }

        if (sub_sad_all < sad_min)
        {
            part = MB_8x8;
            sad_min = sub_sad_all;
        }
    }

    sad = T264_mode_decision_intra_y(t);
    // intra SAD is computed above, but the "0 &&" guard below disables choosing intra here
    if (0 && sad <= sad_min)
    {
        best_mode = t->mb.mb_mode;
        sad_min = sad;
    }
    else
    {
        t->mb.mb_part = part;
    }

    t->mb.mb_mode = best_mode;
    t->mb.sad = sad_min;

    return sad_min;
}

/*
    pmv candidates produced by get_pmv():
    0   median
    1   left
    2   top
    3   topright
    4   topleft
    5   0, 0
    6   last frame
 */
static void
get_pmv(T264_t* t, T264_vector_t* vec, int32_t part, int32_t idx, int32_t width, int32_t* n)
{
    int32_t count = 0;
    int32_t row;
    int32_t col;
    int32_t i;

    vec->refno = 0;
    T264_predict_mv(t, 0, idx, width, vec);
    
    idx = luma_index[idx];
    col = idx % 4;
    row = idx / 4;

    vec[1] = t->mb.vec_ref[VEC_LUMA - 1 + row * 8 + col].vec;   // left
    vec[2] = t->mb.vec_ref[VEC_LUMA - 8 + row * 8 + col].vec;   // top
    vec[3] = t->mb.vec_ref[VEC_LUMA - 8 + row * 8 + col + width].vec;   // top right
    vec[4] = t->mb.vec_ref[VEC_LUMA - 8 + row * 8 + col - 1].vec;       // left top

    for(i = 0 ; i < t->param.ref_num ; i ++)
    {
        if (i != vec[0].refno)
        {
            vec[5 + i].x = vec[0].x;
            vec[5 + i].y = vec[0].y;
        }
        else
        {
            vec[5 + i].x = vec[5 + i].y = 0;
        }
        vec[5 + i].refno = i;
    }
    *n = 5 + t->param.ref_num;
}

uint32_t
T264_mode_decision_inter_16x16p(_RW T264_t* t, search_data_t* s)
{
    T264_vector_t vec[5 + 10];  // NOTE: max 10 refs
    T264_search_context_t context;
    int32_t num;
    uint8_t is_skip = 0;    /* referenced only by the disabled SKIP experiments below */

    get_pmv(t, vec, MB_16x16, 0, 4, &num);

    context.height = 16;
    context.width  = 16;
    context.limit_x= t->param.search_x;
    context.limit_y= t->param.search_y;
    context.vec    = vec;
    context.vec_num= num;
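    /* offset of this macroblock's top-left luma sample in the edge-extended plane (stride: edged_stride) */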
    context.offset = (t->mb.mb_y << 4) * t->edged_stride + (t->mb.mb_x << 4);

    s->src[0] = t->mb.src_y;
    s->sad[0] = t->search(t, &context);
    s->vec[0] = context.vec_best;
    s->ref[0] = t->refl0[s->vec[0].refno];
    s->offset[0] = context.offset;
    s->vec_median[0] = vec[0];

    s->sad[0] = T264_quarter_pixel_search(t, s->src[0], s->ref[0], s->offset[0], &s->vec[0], &s->vec_median[0], s->sad[0], 16, 16, t->mb.pred_p16x16[0]);
    copy_nvec(&s->vec[0], &t->mb.vec[0][0], 4, 4, 4);
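    /*
     * The blocks below are disabled P_SKIP experiments: roughly, if the best
     * 16x16 vector equals the predicted vector with reference 0 (or is (0,0)
     * at the frame edge) and the luma residual quantizes to all zeros, the
     * macroblock would be coded as P_SKIP instead of P_L0.
     */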
	/* SKIP (disabled)
	if(t->mb.mb_x == 0 || t->mb.mb_y == 0)
	{
		if(s->vec[0].x == 0 && s->vec[0].y == 0 && s->vec[0].refno == 0)
			is_skip = 1;
	}
	else
	{
		if(s->vec[0].x == context.vec[0].x && s->vec[0].y == context.vec[0].y 
			&& s->vec[0].refno == 0 && context.vec[0].refno == 0)
			is_skip = 1;
	}
*/

/*

	//SKIP
	if(   ((t->mb.mb_neighbour & MB_LEFT) != MB_LEFT && s->vec[0].x == 0 && s->vec[0].y == 0)
       || ((t->mb.mb_neighbour & MB_TOP) != MB_TOP && s->vec[0].x == 0 && s->vec[0].y == 0)
//	   || (t->mb.vec_ref[VEC_LUMA - 1].vec.refno == 0 && t->mb.vec_ref[VEC_LUMA - 1].vec.x == 0 && t->mb.vec_ref[VEC_LUMA - 1].vec.y == 0 && s->vec[0].x == 0 && s->vec[0].y == 0)
//	   || (t->mb.vec_ref[VEC_LUMA - 8].vec.refno == 0 && t->mb.vec_ref[VEC_LUMA - 8].vec.x == 0 && t->mb.vec_ref[VEC_LUMA - 8].vec.y == 0 && s->vec[0].x == 0 && s->vec[0].y == 0)	
	   )
	{
		sad = T264_quarter_pixel_search(t, s->src[0], s->ref[0], s->offset[0], &s->vec[0], &s->vec_median[0], s->sad[0], 16, 16, t->mb.pred_p16x16);
        copy_nvec(&s->vec[0], &t->mb.vec[0][0], 4, 4, 4);
	    t->mb.mb_mode = P_SKIP;
		t->mb.mb_part = MB_16x16;
		t->mb.sad = sad;		
	}

	else if(s->vec[0].x == context.vec[0].x && s->vec[0].y == context.vec[0].y 
		&& s->vec[0].refno == 0 && context.vec[0].refno == 0)
	{
		//All residual zero?
		int32_t nz = 0;
		int32_t idx;
		sad = T264_quarter_pixel_search(t, s->src[0], s->ref[0], s->offset[0], &s->vec[0], &s->vec_median[0], s->sad[0], 16, 16, t->mb.pred_p16x16);
        copy_nvec(&s->vec[0], &t->mb.vec[0][0], 4, 4, 4);
		T264_encode_inter_y(t);
		for(idx = 0; idx < 16; idx++)
			nz += array_non_zero_count(t->mb.dct_y_z[idx], 16);
//		T264_encode_inter_uv(t);
//		for(idx = 0; idx < 8; idx++)
//			nz += array_non_zero_count(t->mb.dct_uv_z[idx / 4][idx % 4], 16);
		if(nz == 0)
		{
//            if ((t->mb.mb_neighbour & MB_LEFT) != MB_LEFT || (t->mb.mb_neighbour & MB_TOP) != MB_TOP ||
//                (t->mb.vec_ref[VEC_LUMA - 1].vec.refno == 0 && t->mb.vec_ref[VEC_LUMA - 1].vec.x == 0 && t->mb.vec_ref[VEC_LUMA - 1].vec.y == 0) ||
//                (t->mb.vec_ref[VEC_LUMA - 8].vec.refno == 0 && t->mb.vec_ref[VEC_LUMA - 8].vec.x == 0 && t->mb.vec_ref[VEC_LUMA - 8].vec.y == 0))
//            {
//                uint32_t flags = t->flags;
//                s->vec[0].x = s->vec[0].y = s->vec[0].refno = 0;
//                copy_nvec(&s->vec[0], &t->mb.vec[0][0], 4, 4, 4);
//                t->flags &= ~(USE_HALFPEL |USE_QUARTPEL);
//                sad = T264_quarter_pixel_search(t, s->src[0], s->ref[0], s->offset[0], &s->vec[0], &s->vec_median[0], s->sad[0], 16, 16, t->mb.pred_p16x16);
//                t->flags = flags;
//                T264_encode_inter_y(t);
//            }
            T264_encode_inter_uv(t);
		    t->mb.mb_mode = P_SKIP;
			t->mb.mb_part = MB_16x16;
			t->mb.sad = sad;

			if(context.vec[0].x !=0 && context.vec[0].y != 0)
				printf("MVx=%d,MVy=%d\n",context.vec[0].x,context.vec[0].y);
		}
		else
		{
			t->mb.mb_mode = P_L0;
			t->mb.mb_part = MB_16x16;
		}
	}
	*/


    return s->sad[0];
}

uint32_t
T264_mode_decision_inter_16x8p(_RW T264_t* t, search_data_t* s)
{
    T264_vector_t vec[5 + 10];  // NOTE: max 10 refs
    T264_search_context_t context;
    int32_t num;
    uint8_t old_part = t->mb.mb_part;

    t->mb.mb_part = MB_16x8;

    get_pmv(t, vec, MB_16x8, 0, 4, &num);

    context.height = 8;
    context.width  = 16;
    context.limit_x= t->param.search_x;
    context.limit_y= t->param.search_y;
    context.vec    = vec;
    context.vec_num= num;
    context.offset = (t->mb.mb_y << 4) * t->edged_stride + (t->mb.mb_x << 4);

    s->src[1] = t->mb.src_y;
    s->sad[1] = t->search(t, &context);
    s->vec[1] = context.vec_best;
    s->ref[1] = t->refl0[s->vec[1].refno];
    s->offset[1] = context.offset;
    s->vec_median[1] = vec[0];

    s->sad[1] = T264_quarter_pixel_search(t, s->src[1], s->ref[1], s->offset[1], &s->vec[1], &s->vec_median[1], s->sad[1], 16, 8, t->mb.pred_p16x16[1]);
    copy_nvec(&s->vec[1], &t->mb.vec[0][0], 4, 2, 4);
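    /* expose the top partition's result so MV prediction for the bottom partition can use it */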
    t->mb.vec_ref[VEC_LUMA + 8].vec = s->vec[1];
    get_pmv(t, vec, MB_16x8, luma_index[8], 4, &num);

    s->src[2] = t->mb.src_y + 8 * t->stride;
    context.offset += 8 * t->edged_stride;
    s->sad[2] = t->search(t, &context);
    s->vec[2] = context.vec_best;
    s->ref[2] = t->refl0[s->vec[2].refno];
    s->offset[2] = context.offset;
    s->vec_median[2] = vec[0];

    s->sad[2] = T264_quarter_pixel_search(t, s->src[2], s->ref[2], s->offset[2], &s->vec[2], &s->vec_median[2], s->sad[2], 16, 8, t->mb.pred_p16x16[1] + 16 * 8);
    copy_nvec(&s->vec[2], &t->mb.vec[0][8], 4, 2, 4);

    t->mb.mb_part = old_part;

    return s->sad[1] + s->sad[2];
}

uint32_t
T264_mode_decision_inter_8x16p(_RW T264_t * t, search_data_t* s)
{
    T264_vector_t vec[5 + 10];  // NOTE: max 10 refs
    T264_search_context_t context;
    int32_t num;
    uint8_t old_part = t->mb.mb_part;

    t->mb.mb_part = MB_8x16;
    get_pmv(t, vec, MB_8x16, 0, 2, &num);

    context.height = 16;
    context.width  = 8;
    context.limit_x= t->param.search_x;
    context.limit_y= t->param.search_y;
    context.vec    = vec;
    context.vec_num= num;
    context.offset = (t->mb.mb_y << 4) * t->edged_stride + (t->mb.mb_x << 4);

    s->src[3] = t->mb.src_y;
    s->sad[3] = t->search(t, &context);
    s->vec[3] = context.vec_best;
    s->ref[3] = t->refl0[s->vec[3].refno];
    s->offset[3] = context.offset;
    s->vec_median[3] = vec[0];

    s->sad[3] = T264_quarter_pixel_search(t, s->src[3], s->ref[3], s->offset[3], &s->vec[3], &s->vec_median[3], s->sad[3], 8, 16, t->mb.pred_p16x16[2]);
    copy_nvec(&s->vec[3], &t->mb.vec[0][0], 2, 4, 4);
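    /* expose the left partition's result so MV prediction for the right partition can use it */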
    t->mb.vec_ref[VEC_LUMA + 1].vec = s->vec[3];
    // debug trace (disabled)
    //printf("mb: %d, x: %d, y: %d, sad: %d\n", t->mb.mb_xy, s->vec[3].x, s->vec[3].y, s->sad[3]);

    get_pmv(t, vec, MB_8x16, luma_index[4], 2, &num);

    s->src[4] = t->mb.src_y + 8;
    context.offset += 8;
    s->sad[4] = t->search(t, &context);
    s->vec[4] = context.vec_best;
    s->ref[4] = t->refl0[s->vec[4].refno];
    s->offset[4] = context.offset;
    s->vec_median[4] = vec[0];

    s->sad[4] = T264_quarter_pixel_search(t, s->src[4], s->ref[4], s->offset[4], &s->vec[4], &s->vec_median[4], s->sad[4], 8, 16, t->mb.pred_p16x16[2] + 8);
    copy_nvec(&s->vec[4], &t->mb.vec[0][2], 2, 4, 4);

    t->mb.mb_part = old_part;

    // debug trace (disabled)
    //printf("mb: %d, x: %d, y: %d, sad: %d\n", t->mb.mb_xy, s->vec[4].x, s->vec[4].y, s->sad[4]);

    return s->sad[3] + s->sad[4];
}

uint32_t
T264_mode_decision_inter_8x8p(_RW T264_t * t, int32_t i, subpart_search_data_t* s)
{
    T264_vector_t vec[5 + 10];  // NOTE: max 10 refs
    T264_search_context_t context;
    int32_t num;

    get_pmv(t, vec, MB_8x8, luma_index[4 * i], 2, &num);

    context.height = 8;
    context.width  = 8;
    context.limit_x= t->param.search_x;
    context.limit_y= t->param.search_y;
    context.vec    = vec;
    context.vec_num= num;
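    /* i selects one of the four 8x8 quadrants: i / 2 is the quadrant row, i % 2 the quadrant column (8 pixels each) */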
    context.offset = ((t->mb.mb_y << 4) + i / 2 * 8) * t->edged_stride + (t->mb.mb_x << 4) + i % 2 * 8;

    s->src[i][0] = t->mb.src_y + (i / 2 * 8) * t->stride + i % 2 * 8;
    s->sad[i][0] = t->search(t, &context);
    s->vec[i][0] = context.vec_best;
    s->offset[i][0] = context.offset;
    s->ref[i][0] = t->refl0[s->vec[i][0].refno];
    s->vec_median[i][0] = vec[0];

    s->sad[i][0] = T264_quarter_pixel_search(t, s->src[i][0], s->ref[i][0], s->offset[i][0], &s->vec[i][0], &s->vec_median[i][0], s->sad[i][0], 8, 8, t->mb.pred_p16x16[3] + i / 2 * 16 * 8 + i % 2 * 8);

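    /*
     * Propagate the best 8x8 vector to every 4x4 position it covers:
     * vec_ref[] uses an 8-wide grid (offsets +0, +1, +8, +9), while
     * mb.vec[0][] uses a 4-wide grid (offsets +0, +1, +4, +5).
     */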
    t->mb.vec_ref[VEC_LUMA + i / 2 * 16 + i % 2 * 2 + 0].vec =
    t->mb.vec_ref[VEC_LUMA + i / 2 * 16 + i % 2 * 2 + 1].vec =
    t->mb.vec_ref[VEC_LUMA + i / 2 * 16 + i % 2 * 2 + 8].vec =
    t->mb.vec_ref[VEC_LUMA + i / 2 * 16 + i % 2 * 2 + 9].vec = s->vec[i][0];

    t->mb.vec[0][i / 2 * 8 + i % 2 * 2 + 0] = s->vec[i][0];
    t->mb.vec[0][i / 2 * 8 + i % 2 * 2 + 1] = s->vec[i][0];
    t->mb.vec[0][i / 2 * 8 + i % 2 * 2 + 4] = s->vec[i][0];
    t->mb.vec[0][i / 2 * 8 + i % 2 * 2 + 5] = s->vec[i][0];
    // debug trace for the 8x8 sub-partition search
    printf("mb: %d, x: %d, y: %d, mx: %d, my: %d, sad: %d\n", t->mb.mb_xy, s->vec[i][0].x, s->vec[i][0].y, vec[0].x, vec[0].y, s->sad[i][0]);

    return s->sad[i][0];
}
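
/*
 * Sketch only, not part of the original file: the search-context setup repeated
 * in the T264_mode_decision_inter_* functions above could be factored out along
 * these lines. The helper name is hypothetical; the field names follow the usage
 * above. Kept under #if 0 so it does not affect the build.
 */
#if 0
static void
setup_search_context(T264_t* t, T264_search_context_t* c,
                     T264_vector_t* vec, int32_t vec_num,
                     int32_t width, int32_t height)
{
    c->height  = height;
    c->width   = width;
    c->limit_x = t->param.search_x;
    c->limit_y = t->param.search_y;
    c->vec     = vec;
    c->vec_num = vec_num;
    /* base offset of the macroblock; sub-partition callers add their own
       row/column displacement on top of this */
    c->offset  = (t->mb.mb_y << 4) * t->edged_stride + (t->mb.mb_x << 4);
}
#endif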

uint32_t
T264_mode_decision_inter_8x4p(_RW T264_t * t, int32_t i, subpart_search_data_t* s)
{
    T264_vector_t vec[5 + 10];  // NOTE: max 10 refs
    T264_search_context_t context;
    int32_t num;

    get_pmv(t, vec, MB_8x4, luma_index[4 * i + 0], 2, &num);

    context.height = 4;
    context.width  = 8;
    context.limit_x= t->param.search_x;
    context.limit_y= t->param.search_y;
    context.vec    = vec;
    context.vec_num= num;
    context.offset = ((t->mb.mb_y << 4) + i / 2 * 8) * t->edged_stride + (t->mb.mb_x << 4) + i % 2 * 8;

    s->src[i][1] = t->mb.src_y + (i / 2 * 8) * t->stride + i % 2 * 8;
    s->sad[i][1] = t->search(t, &context);
    s->vec[i][1] = context.vec_best;
    s->offset[i][1] = context.offset;
    s->ref[i][1] = t->refl0[s->vec[i][1].refno];
    s->vec_median[i][1] = vec[0];

    s->sad[i][1] = T264_quarter_pixel_search(t, s->src[i][1], s->ref[i][1], s->offset[i][1], &s->vec[i][1], &s->vec_median[i][1], s->sad[i][1], 8, 4, t->mb.pred_p8x8 + i / 2 * 16 * 8 + i % 2 * 8);
    t->mb.vec_ref[VEC_LUMA + i / 2 * 16 + i % 2 * 2 + 0].vec =
    t->mb.vec_ref[VEC_LUMA + i / 2 * 16 + i % 2 * 2 + 1].vec = s->vec[i][1];
    t->mb.vec[0][i / 2 * 8 + i % 2 * 2 + 0] = s->vec[i][1];
    t->mb.vec[0][i / 2 * 8 + i % 2 * 2 + 1] = s->vec[i][1];
    get_pmv(t, vec, MB_8x4, luma_index[4 * i + 2], 2, &num);

    s->src[i][2] = s->src[i][1] + 4 * t->stride;
    context.offset += 4 * t->edged_stride;
    s->sad[i][2] = t->search(t, &context);
    s->vec[i][2] = context.vec_best;
