video.c

            p_pic->i_nb_fields = 2 + p_sys->p_ff_pic->repeat_pict;
            p_pic->b_progressive = !p_sys->p_ff_pic->interlaced_frame;
            p_pic->b_top_field_first = p_sys->p_ff_pic->top_field_first;

            return p_pic;
        }
        else
        {
            p_dec->pf_vout_buffer_del( p_dec, p_pic );
        }
    }

    block_Release( p_block );
    return NULL;
}

/*****************************************************************************
 * EndVideo: decoder destruction
 *****************************************************************************
 * This function is called when the thread ends after a successful
 * initialization.
 *****************************************************************************/
void EndVideoDec( decoder_t *p_dec )
{
    decoder_sys_t *p_sys = p_dec->p_sys;

    if( p_sys->p_ff_pic ) av_free( p_sys->p_ff_pic );
    free( p_sys->p_buffer_orig );
}

/*****************************************************************************
 * ffmpeg_InitCodec: setup codec extra initialization data for ffmpeg
 *****************************************************************************/
static void ffmpeg_InitCodec( decoder_t *p_dec )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    int i_size = p_dec->fmt_in.i_extra;

    if( !i_size ) return;

    if( p_sys->i_codec_id == CODEC_ID_SVQ3 )
    {
        uint8_t *p;

        p_sys->p_context->extradata_size = i_size + 12;
        p = p_sys->p_context->extradata =
            malloc( p_sys->p_context->extradata_size );
        if( !p )
            return;

        memcpy( &p[0],  "SVQ3", 4 );
        memset( &p[4], 0, 8 );
        memcpy( &p[12], p_dec->fmt_in.p_extra, i_size );

        /* Now remove all atoms before the SMI one */
        if( p_sys->p_context->extradata_size > 0x5a &&
            strncmp( (char*)&p[0x56], "SMI ", 4 ) )
        {
            uint8_t *psz = &p[0x52];

            while( psz < &p[p_sys->p_context->extradata_size - 8] )
            {
                int i_size = GetDWBE( psz );
                if( i_size <= 1 )
                {
                    /* FIXME handle 1 as long size */
                    break;
                }
                if( !strncmp( (char*)&psz[4], "SMI ", 4 ) )
                {
                    memmove( &p[0x52], psz,
                             &p[p_sys->p_context->extradata_size] - psz );
                    break;
                }

                psz += i_size;
            }
        }
    }
    else if( p_dec->fmt_in.i_codec == VLC_FOURCC( 'R', 'V', '1', '0' ) ||
             p_dec->fmt_in.i_codec == VLC_FOURCC( 'R', 'V', '1', '3' ) ||
             p_dec->fmt_in.i_codec == VLC_FOURCC( 'R', 'V', '2', '0' ) )
    {
        if( p_dec->fmt_in.i_extra == 8 )
        {
            p_sys->p_context->extradata_size = 8;
            p_sys->p_context->extradata = malloc( 8 );
            if( p_sys->p_context->extradata )
            {
                memcpy( p_sys->p_context->extradata,
                        p_dec->fmt_in.p_extra,
                        p_dec->fmt_in.i_extra );
                p_sys->p_context->sub_id = ((uint32_t*)p_dec->fmt_in.p_extra)[1];

                msg_Warn( p_dec, "using extra data for RV codec sub_id=%08x",
                          p_sys->p_context->sub_id );
            }
        }
    }
    else
    {
        p_sys->p_context->extradata_size = i_size;
        p_sys->p_context->extradata =
            malloc( i_size + FF_INPUT_BUFFER_PADDING_SIZE );
        if( p_sys->p_context->extradata )
        {
            memcpy( p_sys->p_context->extradata,
                    p_dec->fmt_in.p_extra, i_size );
            memset( &((uint8_t*)p_sys->p_context->extradata)[i_size],
                    0, FF_INPUT_BUFFER_PADDING_SIZE );
        }
    }
}
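
/* Illustrative sketch (not part of video.c): the SVQ3 branch in
 * ffmpeg_InitCodec() above walks QuickTime-style atoms (a 32-bit big-endian
 * size followed by a 4-character type) to locate the "SMI " atom inside the
 * extradata it has just assembled.  A minimal standalone scanner for that
 * layout could look like the code below; find_atom(), read_be32() and their
 * parameters are hypothetical names chosen only for this example, and
 * read_be32() mirrors what VLC's GetDWBE() macro does. */

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Read a 32-bit big-endian value, as GetDWBE() does in VLC. */
static uint32_t read_be32( const uint8_t *p )
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
}

/* Return the offset of the first atom named 'type' (4 chars), or -1. */
static ptrdiff_t find_atom( const uint8_t *p_buf, size_t i_buf,
                            const char type[4] )
{
    size_t i_offset = 0;

    while( i_offset + 8 <= i_buf )
    {
        uint32_t i_size = read_be32( &p_buf[i_offset] );
        if( i_size <= 1 )
            break;                  /* 0 or 64-bit "largesize": stop, as above */
        if( !memcmp( &p_buf[i_offset + 4], type, 4 ) )
            return (ptrdiff_t)i_offset;
        if( i_size > i_buf - i_offset )
            break;                  /* truncated atom */
        i_offset += i_size;
    }
    return -1;
}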

/*****************************************************************************
 * ffmpeg_CopyPicture: copy a picture from ffmpeg internal buffers to a
 * picture_t structure (when not in direct rendering mode).
 *****************************************************************************/
static void ffmpeg_CopyPicture( decoder_t *p_dec,
                                picture_t *p_pic, AVFrame *p_ff_pic )
{
    decoder_sys_t *p_sys = p_dec->p_sys;

    if( TestFfmpegChroma( p_sys->p_context->pix_fmt, -1 ) == VLC_SUCCESS )
    {
        int i_plane, i_size, i_line;
        uint8_t *p_dst, *p_src;
        int i_src_stride, i_dst_stride;

        for( i_plane = 0; i_plane < p_pic->i_planes; i_plane++ )
        {
            p_src = p_ff_pic->data[i_plane];
            p_dst = p_pic->p[i_plane].p_pixels;
            i_src_stride = p_ff_pic->linesize[i_plane];
            i_dst_stride = p_pic->p[i_plane].i_pitch;

            i_size = __MIN( i_src_stride, i_dst_stride );
            for( i_line = 0; i_line < p_pic->p[i_plane].i_visible_lines;
                 i_line++ )
            {
                vlc_memcpy( p_dst, p_src, i_size );
                p_src += i_src_stride;
                p_dst += i_dst_stride;
            }
        }
    }
    else
    {
        msg_Err( p_dec, "don't know how to convert chroma %i",
                 p_sys->p_context->pix_fmt );
        p_dec->b_error = 1;
    }
}
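
/* Illustrative sketch (not part of video.c): ffmpeg_CopyPicture() above
 * copies each plane row by row because the decoder's linesize and the
 * picture_t pitch may differ (either can be padded).  A minimal standalone
 * version of that per-plane copy, using hypothetical names, might look like
 * this. */

#include <stdint.h>
#include <string.h>

static void copy_plane( uint8_t *p_dst, int i_dst_pitch,
                        const uint8_t *p_src, int i_src_pitch,
                        int i_visible_lines )
{
    /* Copy only the bytes both buffers are guaranteed to hold per row,
     * exactly as __MIN( i_src_stride, i_dst_stride ) does above. */
    int i_width = i_src_pitch < i_dst_pitch ? i_src_pitch : i_dst_pitch;

    for( int i_line = 0; i_line < i_visible_lines; i_line++ )
    {
        memcpy( p_dst, p_src, i_width );
        p_src += i_src_pitch;
        p_dst += i_dst_pitch;
    }
}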

/*****************************************************************************
 * ffmpeg_GetFrameBuf: callback used by ffmpeg to get a frame buffer.
 *****************************************************************************
 * It is used for direct rendering as well as to get the right PTS for each
 * decoded picture (even in indirect rendering mode).
 *****************************************************************************/
static void ffmpeg_SetFrameBufferPts( decoder_t *p_dec, AVFrame *p_ff_pic );

static int ffmpeg_GetFrameBuf( struct AVCodecContext *p_context,
                               AVFrame *p_ff_pic )
{
    decoder_t *p_dec = (decoder_t *)p_context->opaque;
    decoder_sys_t *p_sys = p_dec->p_sys;
    picture_t *p_pic;

    /* Set picture PTS */
    ffmpeg_SetFrameBufferPts( p_dec, p_ff_pic );

    /* */
    p_ff_pic->opaque = 0;

    /* Not much to do in indirect rendering mode */
    if( !p_sys->b_direct_rendering )
    {
        return avcodec_default_get_buffer( p_context, p_ff_pic );
    }

    /* Some codecs set pix_fmt only after the 1st frame has been decoded,
     * so this check is necessary. */
    if( GetVlcChroma( &p_dec->fmt_out.video, p_context->pix_fmt ) != VLC_SUCCESS ||
        p_sys->p_context->width % 16 || p_sys->p_context->height % 16 )
    {
        msg_Dbg( p_dec, "disabling direct rendering" );
        p_sys->b_direct_rendering = 0;
        return avcodec_default_get_buffer( p_context, p_ff_pic );
    }
    p_dec->fmt_out.i_codec = p_dec->fmt_out.video.i_chroma;

    /* Get a new picture */
    //p_sys->p_vout->render.b_allow_modify_pics = 0;
    p_pic = ffmpeg_NewPictBuf( p_dec, p_sys->p_context );
    if( !p_pic )
    {
        p_sys->b_direct_rendering = 0;
        return avcodec_default_get_buffer( p_context, p_ff_pic );
    }
    p_sys->p_context->draw_horiz_band = NULL;

    p_ff_pic->opaque = (void*)p_pic;
    p_ff_pic->type = FF_BUFFER_TYPE_USER;
    p_ff_pic->data[0] = p_pic->p[0].p_pixels;
    p_ff_pic->data[1] = p_pic->p[1].p_pixels;
    p_ff_pic->data[2] = p_pic->p[2].p_pixels;
    p_ff_pic->data[3] = NULL; /* alpha channel but I'm not sure */

    p_ff_pic->linesize[0] = p_pic->p[0].i_pitch;
    p_ff_pic->linesize[1] = p_pic->p[1].i_pitch;
    p_ff_pic->linesize[2] = p_pic->p[2].i_pitch;
    p_ff_pic->linesize[3] = 0;

    if( p_ff_pic->reference != 0 )
    {
        p_dec->pf_picture_link( p_dec, p_pic );
    }

    /* FIXME what is that, should give good value */
    p_ff_pic->age = 256*256*256*64; // FIXME FIXME from ffmpeg

    return 0;
}

static int ffmpeg_ReGetFrameBuf( struct AVCodecContext *p_context,
                                 AVFrame *p_ff_pic )
{
    decoder_t *p_dec = (decoder_t *)p_context->opaque;
    int i_ret;

    /* */
    p_ff_pic->pts = AV_NOPTS_VALUE;

    /* We always use default reget function, it works perfectly fine */
    i_ret = avcodec_default_reget_buffer( p_context, p_ff_pic );

    /* Set picture PTS if avcodec_default_reget_buffer didn't set it (through a
     * ffmpeg_GetFrameBuf call) */
    if( !i_ret && p_ff_pic->pts == AV_NOPTS_VALUE )
        ffmpeg_SetFrameBufferPts( p_dec, p_ff_pic );

    return i_ret;
}

static void ffmpeg_SetFrameBufferPts( decoder_t *p_dec, AVFrame *p_ff_pic )
{
    decoder_sys_t *p_sys = p_dec->p_sys;

    /* Set picture PTS */
    if( p_sys->input_pts )
    {
        p_ff_pic->pts = p_sys->input_pts;
    }
    else if( p_sys->input_dts )
    {
        /* Some demuxers only set the dts so let's try to find a useful
         * timestamp from this */
        if( !p_sys->p_context->has_b_frames || !p_sys->b_has_b_frames ||
            !p_ff_pic->reference || !p_sys->i_pts )
        {
            p_ff_pic->pts = p_sys->input_dts;
        }
        else
        {
            p_ff_pic->pts = 0;
        }
    }
    else
    {
        p_ff_pic->pts = 0;
    }

    if( p_sys->i_pts ) /* make sure 1st frame has a pts > 0 */
    {
        p_sys->input_pts = p_sys->input_dts = 0;
    }
}

static void ffmpeg_ReleaseFrameBuf( struct AVCodecContext *p_context,
                                    AVFrame *p_ff_pic )
{
    decoder_t *p_dec = (decoder_t *)p_context->opaque;
    picture_t *p_pic;

    if( !p_ff_pic->opaque )
    {
        avcodec_default_release_buffer( p_context, p_ff_pic );
        return;
    }

    p_pic = (picture_t*)p_ff_pic->opaque;

    p_ff_pic->data[0] = NULL;
    p_ff_pic->data[1] = NULL;
    p_ff_pic->data[2] = NULL;
    p_ff_pic->data[3] = NULL;

    if( p_ff_pic->reference != 0 )
    {
        p_dec->pf_picture_unlink( p_dec, p_pic );
    }
}

static void ffmpeg_NextPts( decoder_t *p_dec, int i_block_rate )
{
    decoder_sys_t *p_sys = p_dec->p_sys;

    if( p_sys->i_pts <= 0 ) return;

    /* interpolate the next PTS */
    if( p_dec->fmt_in.video.i_frame_rate > 0 &&
        p_dec->fmt_in.video.i_frame_rate_base > 0 )
    {
        p_sys->i_pts += INT64_C(1000000) *
            (2 + p_sys->p_ff_pic->repeat_pict) *
            p_dec->fmt_in.video.i_frame_rate_base *
            i_block_rate / INPUT_RATE_DEFAULT /
            (2 * p_dec->fmt_in.video.i_frame_rate);
    }
    else if( p_sys->p_context->time_base.den > 0 )
    {
        p_sys->i_pts += INT64_C(1000000) *
            (2 + p_sys->p_ff_pic->repeat_pict) *
            p_sys->p_context->time_base.num *
            i_block_rate / INPUT_RATE_DEFAULT /
            (2 * p_sys->p_context->time_base.den);
    }
}
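
/* Illustrative sketch (not part of video.c): ffmpeg_NextPts() above advances
 * i_pts by the duration of the frame just decoded, counted as
 * (2 + repeat_pict) half-frame periods.  Ignoring the playback-rate scaling
 * (the i_block_rate / INPUT_RATE_DEFAULT factor in the real code), the
 * increment in microseconds is:
 *
 *     1000000 * (2 + repeat_pict) * frame_rate_base / (2 * frame_rate)
 *
 * e.g. 25 fps, repeat_pict = 0  ->  1000000 * 2 * 1 / 50 = 40000 us
 *      25 fps, repeat_pict = 1  ->  1000000 * 3 * 1 / 50 = 60000 us
 *
 * The helper below is a hypothetical standalone version of that formula. */

#include <stdint.h>

static int64_t pts_increment_us( int i_frame_rate, int i_frame_rate_base,
                                 int i_repeat_pict )
{
    if( i_frame_rate <= 0 || i_frame_rate_base <= 0 )
        return 0;
    return INT64_C(1000000) * (2 + i_repeat_pict) * i_frame_rate_base
           / (2 * i_frame_rate);
}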