/* decinit.c -- H.264 decoder init/teardown glue around libavcodec. */
// ---------------------------------------------------------------------------
// Tail of the decoder init() routine (the function's opening lies before this
// chunk).  Configures a libavcodec context for the statically selected H.264
// decoder, wires optional direct-rendering callbacks, hands codec extradata
// from the BITMAPINFOHEADER to libavcodec, and opens the codec.
// Returns 1 on success, 0 on failure.
// ---------------------------------------------------------------------------
lavc_codec = &h264_decoder;
// Defensive check; with the static binding above it can never trigger.
if (!lavc_codec)
{
return 0;
}
// Enable direct rendering only for DR1-capable codecs -- and never for H.264
// (explicitly excluded by the id check).
if (lavc_codec->capabilities&CODEC_CAP_DR1 && lavc_codec->id != CODEC_ID_H264)
ctx->do_dr1=1;
// Age counters start "very old"; presumably consumed by the DR1
// buffer-recycling logic in get_buffer -- TODO confirm against that code.
ctx->b_age = ctx->ip_age[0]= ctx->ip_age[1]= 256*256*256*64;
ctx->ip_count = ctx->b_count= 0;
ctx->pic = avcodec_alloc_frame();
ctx->avctx = avcodec_alloc_context();
avctx = ctx->avctx;
if (ctx->do_dr1)
{
// Our buffers carry no guard band, so let lavc emulate edge pixels.
avctx->flags|= CODEC_FLAG_EMU_EDGE;
avctx->get_buffer=Get_Buffer;
avctx->release_buffer= release_buffer;
}
#ifdef CODEC_FLAG_NOT_TRUNCATED
avctx->flags|= CODEC_FLAG_NOT_TRUNCATED;
#endif
avctx->flags|= lavc_param_bitexact;
avctx->width = sh->disp_w;
avctx->height= sh->disp_h;
avctx->error_resilience= lavc_param_error_resilience;
#ifdef CODEC_FLAG2_FAST
avctx->flags2|= lavc_param_fast;
#endif
// Force the 'avc1' codec tag regardless of what the container supplied.
avctx->codec_tag=mmioFOURCC('a','v','c','1') ;
avctx->skip_top = lavc_param_skip_top;
avctx->skip_bottom= lavc_param_skip_bottom;
// Optional "<lowres>,<min_width>" option string: decode at reduced
// resolution, but only when the source is at least <min_width> pixels wide.
if (lavc_param_lowres_str != NULL)
{
sscanf(lavc_param_lowres_str, "%d,%d", &lavc_param_lowres, &lowres_w);
if (lavc_param_lowres < 1 || lavc_param_lowres > 16 || (lowres_w > 0 && avctx->width < lowres_w))
lavc_param_lowres = 0;
avctx->lowres = lavc_param_lowres;
}
// Build avctx->extradata for the codec.
switch (sh->format)
{
case mmioFOURCC('R', 'V', '1', '0'):
case mmioFOURCC('R', 'V', '1', '3'):
case mmioFOURCC('R', 'V', '2', '0'):
case mmioFOURCC('R', 'V', '3', '0'):
case mmioFOURCC('R', 'V', '4', '0'):
// RealVideo: lavc expects an 8-byte extradata blob plus sub_id.
avctx->extradata_size= 8;
avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
if (sh->bih->biSize!=sizeof(*sh->bih)+8)
{
// only 1 packet per frame & sub_id from fourcc
((uint32_t*)avctx->extradata)[0] = 0;
avctx->sub_id= ((uint32_t*)avctx->extradata)[1] = (sh->format == mmioFOURCC('R', 'V', '1', '3')) ? 0x10003001 : 0x10000000;
}
else
{
// has extra slice header (demux_rm or rm->avi streamcopy)
unsigned int* extrahdr=(unsigned int*)(sh->bih+1);
((uint32_t*)avctx->extradata)[0] = be2me_32(extrahdr[0]);
avctx->sub_id= extrahdr[1];
((uint32_t*)avctx->extradata)[1] = be2me_32(extrahdr[1]);
DP_DEC((M_TEXT("%X %X\n"), extrahdr[0], extrahdr[1]));
}
break;
default:
// Generic case: copy whatever trails the BITMAPINFOHEADER verbatim.
if (!sh->bih || sh->bih->biSize <= sizeof(BITMAPINFOHEADER))
break;
avctx->extradata_size = sh->bih->biSize-sizeof(BITMAPINFOHEADER);
avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
MEMCPY(avctx->extradata, sh->bih+1, avctx->extradata_size);
break;
}
if (avcodec_open(avctx, lavc_codec) < 0)
{
return 0;
}
// -3 == "aspect unknown"; presumably forces a reconfigure on the first
// decoded frame -- TODO confirm against the decode path.
ctx->last_aspect=-3;
return 1; //mpcodecs_config_vo(sh,sh->disp_w,sh->disp_h,IMGFMT_YV12);
}
/**
 * uninit -- tear down the libavcodec context created by init().
 *
 * @param sh  stream descriptor whose ->context holds our vd_ffmpeg_ctx.
 *
 * Fixes over the original:
 *  - extradata is now freed on BOTH success and failure of avcodec_close();
 *    the original freed it only on failure, leaking it on the normal path.
 *  - av_freep() takes the ADDRESS of the pointer to free; the original's
 *    av_freep(ctx) freed whatever ctx's first member pointed at instead of
 *    ctx itself.
 *  - the NULL guard on ctx now comes BEFORE ctx is dereferenced (the
 *    original checked `if (ctx)` only after using ctx->avctx / ctx->pic).
 */
STATIC_FUNC void uninit(sh_video_t *sh)
{
    vd_ffmpeg_ctx *ctx = sh->context;
    AVCodecContext *avctx;

    if (!ctx)               /* never initialised -- nothing to do */
        return;

    avctx = ctx->avctx;
    if (avctx)
    {
        avcodec_close(avctx);
        av_freep(&avctx->extradata);   /* unconditional: we allocated it in init() */
    }
    av_freep(&ctx->avctx);
    av_freep(&ctx->pic);
    av_freep(&ctx);
    sh->context = NULL;     /* defend against double-uninit */
}
/*
 * Get_Buffer -- lavc get_buffer callback installed when DR1 is enabled.
 * Stub in this build: it returns 0 unconditionally and never touches the
 * frame or the context.
 */
STATIC_FUNC int Get_Buffer(AVCodecContext *avctx, AVFrame *pic)
{
    (void)avctx;
    (void)pic;
    return 0;
}
/**
 * release_buffer -- lavc release_buffer callback for the DR1 path.
 *
 * Decrements the per-type in-flight buffer counters, frees the palette
 * plane allocated in get_buffer for 8-bpp images, and either hands
 * lavc-owned buffers back to avcodec_default_release_buffer() or clears the
 * plane pointers of user-owned (FF_BUFFER_TYPE_USER) buffers.
 *
 * Fix over the original: the debug print below already acknowledged that
 * mpi (pic->opaque) can be NULL, yet the counter-update branch dereferenced
 * mpi->flags unconditionally.  Both mpi uses are now guarded.
 */
STATIC_FUNC void release_buffer(struct AVCodecContext *avctx, AVFrame *pic)
{
    mp_image_t* mpi= pic->opaque;
    sh_video_t * sh = avctx->opaque;
    vd_ffmpeg_ctx *ctx = sh->context;
    int i;
    DP_DEC((M_TEXT("release buffer %d %d %d\n"), mpi ? mpi->flags&MP_IMGFLAG_PRESERVE : -99, ctx->ip_count, ctx->b_count));
    /* BUGFIX: guard mpi -- it may legitimately be NULL here. */
    if (mpi && ctx->ip_count <= 2 && ctx->b_count <= 1)
    {
        if (mpi->flags & MP_IMGFLAG_PRESERVE)
            ctx->ip_count--;
        else
            ctx->b_count--;
    }
    /* Palette support: free palette buffer allocated in get_buffer */
    if (mpi && (mpi->bpp == 8))
        av_freep(&mpi->planes[1]);
    /* Buffers we do not own go back to libavcodec's default pool. */
    if (pic->type != FF_BUFFER_TYPE_USER)
    {
        avcodec_default_release_buffer(avctx, pic);
        return;
    }
    /* User buffer: drop the plane pointers; the mp_image owns the memory. */
    for (i = 0; i < 4; i++)
    {
        pic->data[i] = NULL;
    }
    DP_DEC((M_TEXT("R%X %X\n"), pic->linesize[0], pic->data[0]));
}
vd_functions_t* mpvdec=NULL;
/*
 * Decode -- vd_functions_t decode entry point.  Stub in this build: actual
 * decoding is driven through h264_decode() below, so this always yields no
 * image.
 */
STATIC_FUNC mp_image_t*Decode(sh_video_t *sh,void* data,int len,int flags)
{
    (void)sh;
    (void)data;
    (void)len;
    (void)flags;
    return NULL;
}
// Decoder vtable exported to the player core.  Positional initialisers --
// the order (info, init, uninit, control, decode) must match the field
// layout of vd_functions_t.
vd_functions_t mpcodecs_vd_ffmpeg =
{
&info,
init,
uninit,
control,
Decode
};
/**
 * init_video -- bind the (single, statically chosen) H.264 codec to the
 * stream and initialise the decoder.
 *
 * @param sh_video   stream to initialise; requires a valid ->bih.
 * @param codecname  unused (kept for interface compatibility).
 * @param vfm        unused (kept for interface compatibility).
 * @param status     unused (kept for interface compatibility).
 * @return 1 on success (sh_video->inited set), 0 on failure.
 *
 * BUGFIX: the original wrapped this in `while (1)` and issued `continue`
 * when mpvdec->init() failed -- but with only one codec candidate and no
 * state change between iterations that retried the same codec forever
 * (infinite loop).  A failed init now returns 0.
 */
int init_video(sh_video_t *sh_video, char* codecname, char* vfm, int status)
{
    unsigned int orig_fourcc = sh_video->bih ? sh_video->bih->biCompression : 0;

    sh_video->codec = NULL;
    sh_video->vf_inited = 0;

    if (!sh_video->bih)
        return 0;

    /* restore original fourcc (may have been rewritten by an earlier probe) */
    sh_video->bih->biCompression = orig_fourcc;
    sh_video->codec = &h264codec;
    mpvdec = &mpcodecs_vd_ffmpeg;

    if (!mpvdec->init(sh_video))
        return 0;       /* init failed -- no other codec to fall back to */

    /* Yeah! We got it! */
    sh_video->inited = 1;
    return 1;
}
/**
 * uninit_video -- tear down the decoder bound by init_video().
 *
 * @param sh_video  stream to shut down; ->inited is cleared.
 *
 * BUGFIX: mpvdec is NULL until init_video() has run (see its definition);
 * the original dereferenced it unconditionally, crashing if uninit_video()
 * was called on a never-initialised stream.
 */
void uninit_video(sh_video_t *sh_video)
{
    if (mpvdec)
        mpvdec->uninit(sh_video);
    sh_video->inited = 0;
}
/**
 * h264_decode -- feed one input buffer to the H.264 decoder and, on a
 * decoded frame, submit it to the mae-driver output path.
 *
 * @param pInBuf      input buffer (pbuf/size/pts).
 * @param bytes_read  out: bytes consumed from pInBuf (0 when nothing was).
 * @return DIU_STATUS_OK when a frame was decoded and submitted,
 *         DIU_STATUS_NEEDMOREINPUT otherwise (including skipped frames).
 *
 * Fixes over the original:
 *  - *bytes_read is now initialised on every path (it was left untouched
 *    on the skip / need-more-input paths).
 *  - the YUV-dump debug path checks fopen() before writing (the original
 *    passed a possibly-NULL FILE* straight to fwrite()).
 *  - the stack AVFrame is zero-initialised instead of read from stale
 *    stack contents.
 */
int h264_decode(ibuf_t* pInBuf, unsigned int *bytes_read)
{
    int consumed_bytes = 0, got_picture = 0;
    vd_ffmpeg_ctx *ctx = sh_video.context;
    AVFrame pic = {0};      /* zero-init; the decoder fills it on success */
    AVCodecContext *avctx = ctx->avctx;
#ifdef TIME_FRAMES
    g_StartTime = timeGetTime();
#endif
    *bytes_read = 0;        /* defined value on every return path */
    /* If skipping would run past the next expected I-frame, stall until one
     * arrives instead of decoding stale references. */
    if (g_nSkipRate && g_nIFrameSpacing && (g_nSkipRate + g_nFramesSinceI >= g_nIFrameSpacing))
    {
        g_uiWaitForIFrame = TRUE;
    }
    if (g_uiWaitForIFrame == TRUE)
    {
        /* got_picture == -1 presumably tells h264_decode_frame() to drop
         * everything up to the next I-frame -- confirm against its source. */
        got_picture = -1;
        DP_DEC((M_TEXT("g_uiWaitForIFrame == TRUE\n")));
    }
    else
        got_picture = 0;
    DP_PERF((M_TEXT("Calling h264_decode_frame()\n")));
    consumed_bytes = h264_decode_frame(avctx, &pic, &got_picture, pInBuf->pbuf, pInBuf->size);
    DP_PERF((M_TEXT("Finished h264_decode_frame()\n")));
    if (consumed_bytes > 0)
    {
        g_uiWaitForIFrame = FALSE;
        *bytes_read = consumed_bytes;
        /* Get a new context from the mae-driver */
        request_mae_buffer_be(1);
        g_pWrapContext->encoded_picture_height = sh_video.disp_h;
        g_pWrapContext->encoded_picture_linesize = sh_video.disp_w;
        g_pWrapContext->desc_size = 0;
        g_pWrapContext->pts = pInBuf->pts;
        g_pWrapContext->dts = 0;
        g_pWrapContext->tnum = g_H264TNum;
        /* Track the observed I-frame cadence so the stall logic above knows
         * how far skipping may safely run ahead. */
        if (pic.pict_type == FF_I_TYPE)
        {
            DP_SYNC((M_TEXT("Frames since last I was %d\n"), g_nFramesSinceI));
            if (g_nFramesSinceI > g_nIFrameSpacing)
            {
                g_nIFrameSpacing = g_nFramesSinceI;
                DP_SYNC((M_TEXT("Setting IFrameSpacing to %d\n"), g_nIFrameSpacing));
            }
            g_nFramesSinceI = 0;
        }
        else /* non I frame: keep a count until the next I */
        {
            g_nFramesSinceI++;
        }
#ifdef TIME_FRAMES
        DP_ERROR((M_TEXT("Submitting %s%s, TNUm = %d, PTS = %d, DecodeTime = %d\n"), pic.pict_type == FF_I_TYPE ? TEXT("I-Frame") :
                  pic.pict_type == FF_P_TYPE ? TEXT("P-Frame") : pic.pict_type == FF_B_TYPE ? TEXT("B-Frame") :
                  pic.pict_type == FF_S_TYPE ? TEXT("S-Frame") : pic.pict_type == FF_SI_TYPE ? TEXT("SI-Frame") :
                  pic.pict_type == FF_SP_TYPE ? TEXT("SP-Frame") : TEXT("Unknown Frame"),
                  pic.key_frame == 1 ? TEXT(" [KF]") : TEXT(""), g_H264TNum, (DWORD)pInBuf->pts, timeGetTime()-g_StartTime));
#else
        DP_H264((M_TEXT("Submitting %s%s, TNUm = %d, PTS = %d\n"), pic.pict_type == FF_I_TYPE ? TEXT("I-Frame") :
                 pic.pict_type == FF_P_TYPE ? TEXT("P-Frame") : pic.pict_type == FF_B_TYPE ? TEXT("B-Frame") :
                 pic.pict_type == FF_S_TYPE ? TEXT("S-Frame") : pic.pict_type == FF_SI_TYPE ? TEXT("SI-Frame") :
                 pic.pict_type == FF_SP_TYPE ? TEXT("SP-Frame") : TEXT("Unknown Frame"),
                 pic.key_frame == 1 ? TEXT(" [KF]") : TEXT(""), g_H264TNum, (DWORD)pInBuf->pts));
#endif
#ifdef DUMP_YUV_DATA
        {
            FILE *fp;
            char szNum[10], szFileName[MAX_PATH] = "\\Hard Disk2\\ce_h264_yuv_";
            int size = (int)((sh_video.disp_w * sh_video.disp_h) * 1.5); /* 1 full Y + 2 quarters of U & V data */
            sprintf(szNum, "%02d", g_H264TNum);
            strcat(szFileName, szNum);
            strcat(szFileName, ".dmp");
            fp = fopen(szFileName, "wb");
            /* BUGFIX: fopen() can fail; never fwrite() through a NULL FILE*. */
            if (fp != NULL)
            {
                DP_H264((M_TEXT("Dumping H.264 YUV data...\n")));
                fwrite(pic.data[0], size, sizeof(char), fp);
                fclose(fp);
            }
        }
#endif
        submit_mae_buffer_be();
        g_H264TNum++;
        return DIU_STATUS_OK;
    }
    else if (consumed_bytes == -2)
    {
        /* -2 indicates a skipped frame */
        g_nFramesSinceI++;
        g_nSkipCount++;
#ifdef TIME_FRAMES
        DP_ERROR((M_TEXT("Skipping Frame PTS = %d, Decode Time = %d\n"), (DWORD)pInBuf->pts, timeGetTime()-g_StartTime));
#else
        DP_SYNC((M_TEXT("Skipping Frame PTS = %d, total skipped(%d)\n"), (DWORD)pInBuf->pts, g_nSkipCount));
#endif
    }
    return DIU_STATUS_NEEDMOREINPUT;
}