📄 video.c
字号:
if( p_sys->i_late_frames > 0 && mdate() - p_sys->i_late_frames_start > I64C(5000000) ) { if( p_sys->i_pts ) { msg_Err( p_dec, "more than 5 seconds of late video -> " "dropping frame (computer too slow ?)" ); p_sys->i_pts = 0; /* To make sure we recover properly */ } block_Release( p_block ); p_sys->i_late_frames--; return NULL; } if( p_block->i_pts > 0 || p_block->i_dts > 0 ) { p_sys->input_pts = p_block->i_pts; p_sys->input_dts = p_block->i_dts; /* Make sure we don't reuse the same timestamps twice */ p_block->i_pts = p_block->i_dts = 0; } /* TODO implement it in a better way */ /* A good idea could be to decode all I pictures and see for the other */ if( p_sys->b_hurry_up && p_sys->i_late_frames > 4 ) { b_drawpicture = 0; if( p_sys->i_late_frames < 8 ) { p_sys->p_context->hurry_up = 2; } else { /* picture too late, won't decode * but break picture until a new I, and for mpeg4 ...*/ p_sys->i_late_frames--; /* needed else it will never be decrease */ block_Release( p_block ); p_sys->i_buffer = 0; return NULL; } } else { b_drawpicture = 1; p_sys->p_context->hurry_up = 0; } if( p_sys->p_context->width <= 0 || p_sys->p_context->height <= 0 ) { p_sys->p_context->hurry_up = 5; } /* * Do the actual decoding now */ /* Check if post-processing was enabled */ p_sys->b_pp = p_sys->b_pp_async; /* Don't forget that ffmpeg requires a little more bytes * that the real frame size */ if( p_block->i_buffer > 0 ) { p_sys->i_buffer = p_block->i_buffer; if( p_sys->i_buffer + FF_INPUT_BUFFER_PADDING_SIZE > p_sys->i_buffer_orig ) { free( p_sys->p_buffer_orig ); p_sys->i_buffer_orig = p_block->i_buffer + FF_INPUT_BUFFER_PADDING_SIZE; p_sys->p_buffer_orig = malloc( p_sys->i_buffer_orig ); } p_sys->p_buffer = p_sys->p_buffer_orig; p_sys->i_buffer = p_block->i_buffer; p_dec->p_vlc->pf_memcpy( p_sys->p_buffer, p_block->p_buffer, p_block->i_buffer ); memset( p_sys->p_buffer + p_block->i_buffer, 0, FF_INPUT_BUFFER_PADDING_SIZE ); p_block->i_buffer = 0; } while( p_sys->i_buffer > 0 ) { int 
i_used, b_gotpicture; picture_t *p_pic; i_used = avcodec_decode_video( p_sys->p_context, p_sys->p_ff_pic, &b_gotpicture, p_sys->p_buffer, p_sys->i_buffer ); if( i_used < 0 ) { msg_Warn( p_dec, "cannot decode one frame (%d bytes)", p_sys->i_buffer ); block_Release( p_block ); return NULL; } else if( i_used > p_sys->i_buffer ) { i_used = p_sys->i_buffer; } /* Consumed bytes */ p_sys->i_buffer -= i_used; p_sys->p_buffer += i_used; /* Nothing to display */ if( !b_gotpicture ) { if( i_used == 0 ) { break; } continue; } /* Update frame late count*/ if( p_sys->i_pts && p_sys->i_pts <= mdate() ) { p_sys->i_late_frames++; if( p_sys->i_late_frames == 1 ) p_sys->i_late_frames_start = mdate(); } else { p_sys->i_late_frames = 0; } if( !b_drawpicture || p_sys->p_ff_pic->linesize[0] == 0 ) { /* Do not display the picture */ continue; } if( !p_sys->p_ff_pic->opaque ) { /* Get a new picture */ p_pic = ffmpeg_NewPictBuf( p_dec, p_sys->p_context ); if( !p_pic ) { block_Release( p_block ); return NULL; } /* Fill p_picture_t from AVVideoFrame and do chroma conversion * if needed */ ffmpeg_CopyPicture( p_dec, p_pic, p_sys->p_ff_pic ); } else { p_pic = (picture_t *)p_sys->p_ff_pic->opaque; } /* Set the PTS */ if( p_sys->p_ff_pic->pts ) p_sys->i_pts = p_sys->p_ff_pic->pts; /* Sanity check (seems to be needed for some streams ) */ if( p_sys->p_ff_pic->pict_type == FF_B_TYPE ) { p_sys->b_has_b_frames = VLC_TRUE; } /* Send decoded frame to vout */ if( p_sys->i_pts ) { p_pic->date = p_sys->i_pts; /* interpolate the next PTS */ if( p_sys->p_context->frame_rate > 0 ) { p_sys->i_pts += I64C(1000000) * (2 + p_sys->p_ff_pic->repeat_pict) * p_sys->p_context->frame_rate_base / (2 * p_sys->p_context->frame_rate); } return p_pic; } else { p_dec->pf_vout_buffer_del( p_dec, p_pic ); } } block_Release( p_block ); return NULL;}/***************************************************************************** * EndVideo: decoder destruction 
*****************************************************************************
 * This function is called when the thread ends after a successful
 * initialization.
 *****************************************************************************/
void E_(EndVideoDec)( decoder_t *p_dec )
{
    decoder_sys_t *p_sys = p_dec->p_sys;

    /* NOTE(review): the AVFrame is released with plain free(); if it was
     * obtained from avcodec_alloc_frame() it should normally go through
     * av_free() — confirm against the allocation site (outside this chunk). */
    if( p_sys->p_ff_pic ) free( p_sys->p_ff_pic );

#ifdef LIBAVCODEC_PP
    /* Tear down the optional libpostproc post-processing state */
    E_(ClosePostproc)( p_dec, p_sys->p_pp );
#endif

    /* Padded bitstream copy buffer owned by the decoder */
    free( p_sys->p_buffer_orig );
}

/*****************************************************************************
 * ffmpeg_CopyPicture: copy a picture from ffmpeg internal buffers to a
 * picture_t structure (when not in direct rendering mode).
 *****************************************************************************
 * If the codec's pix_fmt maps to a VLC chroma, the planes are copied
 * row by row (honouring the differing source/destination pitches),
 * optionally routed through libpostproc.  Otherwise a software conversion
 * to I420 is attempted via img_convert() for the few formats listed;
 * anything else is reported as an error on the decoder.
 *****************************************************************************/
static void ffmpeg_CopyPicture( decoder_t *p_dec,
                                picture_t *p_pic, AVFrame *p_ff_pic )
{
    decoder_sys_t *p_sys = p_dec->p_sys;

    if( ffmpeg_PixFmtToChroma( p_sys->p_context->pix_fmt ) )
    {
        int i_plane, i_size, i_line;
        uint8_t *p_dst, *p_src;
        int i_src_stride, i_dst_stride;

#ifdef LIBAVCODEC_PP
        /* Post-process straight into the destination picture when enabled */
        if( p_sys->p_pp && p_sys->b_pp )
            E_(PostprocPict)( p_dec, p_sys->p_pp, p_pic, p_ff_pic );
        else
#endif
        /* Plain per-plane copy; strides may differ so copy line by line,
         * never more than the smaller of the two pitches */
        for( i_plane = 0; i_plane < p_pic->i_planes; i_plane++ )
        {
            p_src = p_ff_pic->data[i_plane];
            p_dst = p_pic->p[i_plane].p_pixels;
            i_src_stride = p_ff_pic->linesize[i_plane];
            i_dst_stride = p_pic->p[i_plane].i_pitch;

            i_size = __MIN( i_src_stride, i_dst_stride );
            for( i_line = 0; i_line < p_pic->p[i_plane].i_lines; i_line++ )
            {
                p_dec->p_vlc->pf_memcpy( p_dst, p_src, i_size );
                p_src += i_src_stride;
                p_dst += i_dst_stride;
            }
        }
    }
    else
    {
        AVPicture dest_pic;
        int i;

        /* we need to convert to I420 */
        switch( p_sys->p_context->pix_fmt )
        {
        case PIX_FMT_YUV410P:
        case PIX_FMT_YUV411P:
        case PIX_FMT_PAL8:
            /* Point the AVPicture at the destination planes and let
             * ffmpeg do the format conversion */
            for( i = 0; i < p_pic->i_planes; i++ )
            {
                dest_pic.data[i] = p_pic->p[i].p_pixels;
                dest_pic.linesize[i] = p_pic->p[i].i_pitch;
            }
            img_convert( &dest_pic, PIX_FMT_YUV420P,
                         (AVPicture *)p_ff_pic,
                         p_sys->p_context->pix_fmt,
                         p_sys->p_context->width,
                         p_sys->p_context->height );
            break;
        default:
            msg_Err( p_dec, "don't know how to convert chroma %i",
                     p_sys->p_context->pix_fmt );
            p_dec->b_error = 1;
            break;
        }
    }
}

/*****************************************************************************
 * ffmpeg_GetFrameBuf: callback used by ffmpeg to get a frame buffer.
 *****************************************************************************
 * It is used for direct rendering as well as to get the right PTS for each
 * decoded picture (even in indirect rendering mode).
 *****************************************************************************/
static int ffmpeg_GetFrameBuf( struct AVCodecContext *p_context,
                               AVFrame *p_ff_pic )
{
    decoder_t *p_dec = (decoder_t *)p_context->opaque;
    decoder_sys_t *p_sys = p_dec->p_sys;
    picture_t *p_pic;

    /* Set picture PTS */
    if( p_sys->input_pts )
    {
        p_ff_pic->pts = p_sys->input_pts;
    }
    else if( p_sys->input_dts )
    {
        /* Some demuxers only set the dts so let's try to find a useful
         * timestamp from this */
        if( !p_context->has_b_frames || !p_sys->b_has_b_frames ||
            !p_ff_pic->reference )
        {
            p_ff_pic->pts = p_sys->input_dts;
        }
        else p_ff_pic->pts = 0;
    }
    else p_ff_pic->pts = 0;

    /* Timestamps are consumed here; the caller zeroed them on the block
     * already, this clears the decoder-side copies */
    p_sys->input_pts = p_sys->input_dts = 0;

    p_ff_pic->opaque = 0;

    /* Not much to do in indirect rendering mode */
    if( !p_sys->b_direct_rendering || p_sys->b_pp )
    {
        return avcodec_default_get_buffer( p_context, p_ff_pic );
    }

    /* Some codecs set pix_fmt only after the 1st frame has been decoded,
     * so this check is necessary.  Direct rendering also requires
     * 16-aligned dimensions; otherwise fall back permanently to ffmpeg's
     * own buffers. */
    if( !ffmpeg_PixFmtToChroma( p_context->pix_fmt ) ||
        p_sys->p_context->width % 16 || p_sys->p_context->height % 16 )
    {
        msg_Dbg( p_dec, "disabling direct rendering" );
        p_sys->b_direct_rendering = 0;
        return avcodec_default_get_buffer( p_context, p_ff_pic );
    }

    /* Get a new picture */
    //p_sys->p_vout->render.b_allow_modify_pics = 0;
    p_pic = ffmpeg_NewPictBuf( p_dec, p_sys->p_context );
    if( !p_pic )
    {
        /* No vout picture available: give up on direct rendering */
        p_sys->b_direct_rendering = 0;
        return avcodec_default_get_buffer( p_context, p_ff_pic );
    }
    p_sys->p_context->draw_horiz_band = NULL;

    /* Hand the vout picture's planes to ffmpeg; p_pic is recovered later
     * through the opaque pointer */
    p_ff_pic->opaque = (void*)p_pic;
    p_ff_pic->type = FF_BUFFER_TYPE_USER;
    p_ff_pic->data[0] = p_pic->p[0].p_pixels;
    p_ff_pic->data[1] = p_pic->p[1].p_pixels;
    p_ff_pic->data[2] = p_pic->p[2].p_pixels;
    p_ff_pic->data[3] = NULL; /* alpha channel but I'm not sure */

    p_ff_pic->linesize[0] = p_pic->p[0].i_pitch;
    p_ff_pic->linesize[1] = p_pic->p[1].i_pitch;
    p_ff_pic->linesize[2] = p_pic->p[2].i_pitch;
    p_ff_pic->linesize[3] = 0;

    /* Reference frames must stay alive while ffmpeg keeps them;
     * balanced by pf_picture_unlink in ffmpeg_ReleaseFrameBuf */
    if( p_ff_pic->reference != 0 )
    {
        p_dec->pf_picture_link( p_dec, p_pic );
    }

    /* FIXME what is that, should give good value */
    p_ff_pic->age = 256*256*256*64; // FIXME FIXME from ffmpeg

    return 0;
}

/*****************************************************************************
 * ffmpeg_ReleaseFrameBuf: callback used by ffmpeg to release a frame buffer.
 *****************************************************************************
 * Counterpart of ffmpeg_GetFrameBuf: detaches the vout picture from the
 * AVFrame and drops the reference taken for ffmpeg-held reference frames.
 * Frames that were allocated by ffmpeg itself (no opaque) are returned to
 * the default release path.
 *****************************************************************************/
static void ffmpeg_ReleaseFrameBuf( struct AVCodecContext *p_context,
                                    AVFrame *p_ff_pic )
{
    decoder_t *p_dec = (decoder_t *)p_context->opaque;
    picture_t *p_pic;

    if( !p_ff_pic->opaque )
    {
        /* Buffer came from avcodec_default_get_buffer */
        avcodec_default_release_buffer( p_context, p_ff_pic );
        return;
    }

    p_pic = (picture_t*)p_ff_pic->opaque;

    p_ff_pic->data[0] = NULL;
    p_ff_pic->data[1] = NULL;
    p_ff_pic->data[2] = NULL;
    p_ff_pic->data[3] = NULL;

    if( p_ff_pic->reference != 0 )
    {
        p_dec->pf_picture_unlink( p_dec, p_pic );
    }
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -