📄 umc_mpeg2_dec_pic.cpp
字号:
// --- tail of a frame-level decode routine whose head is above this chunk ---
// (the enclosing function begins before the visible region of the file)
} else if(PictureHeader.picture_coding_type == B_PICTURE) {
    // A B-picture has no usable references yet: no P-picture seen so far and
    // the GOP is open (or flagged broken) -> request more data instead.
    if(!sequenceHeader.first_p_occure &&
       (!sequenceHeader.closed_gop || sequenceHeader.broken_link ))
        umcRes = UMC_NOT_ENOUGH_DATA;
}
}
// not verified with field pictures
if(UMC_OK == umcRes) {
    // Decide whether this picture must be skipped.
    // NOTE(review): the literals 3 and 2 presumably correspond to B_PICTURE
    // and P_PICTURE respectively - confirm against the FrameType enum.
    if(PictureHeader.picture_coding_type == 3 && sequenceHeader.is_skipped_b
       || !sequenceHeader.first_p_occure
       || ((PictureHeader.picture_coding_type == 3) && m_bNoBframes)
       || ((PictureHeader.picture_coding_type == 2) && m_bNoPframes))
    {
        // A frame previously marked "died" is resumed rather than skipped,
        // provided references exist and B/P skipping is not forced by flags.
        if(sequenceHeader.bdied && sequenceHeader.first_p_occure &&
           !(((PictureHeader.picture_coding_type == 3) && m_bNoBframes) ||
             ((PictureHeader.picture_coding_type == 2) && m_bNoPframes)))
        {
            sequenceHeader.bdied = 0;
            goto x30;
        }
        if(sequenceHeader.is_skipped_b > 0)
            sequenceHeader.is_skipped_b--;
        if(sequenceHeader.first_p_occure) {
            // Count the skip and remember it so the next call can resume.
            sequenceHeader.num_of_skipped++;
            sequenceHeader.bdied = 1;
            //input->SetDataSize(0);
            umcRes = UMC_NOT_ENOUGH_DATA;
        }
    }
}
x30:
//int retVal;
if(UMC_OK == umcRes) {
    double dTime;
    umcRes = DecodePicture();
    // Derive the presentation time of the frame being output.
    CalculateFrameTime(input->GetTime(), &dTime);
    if(lpVData) {
        lpVData->SetTime(dTime);
        // No external color converter: hand the raw planes/pitches out.
        if (NULL == m_lpConverter) {
            lpVData->SetDest( m_Convert.lpSource0, m_Convert.lpSource2, m_Convert.lpSource1);
            lpVData->SetPitch( m_Convert.PitchSource0, m_Convert.PitchSource2, m_Convert.PitchSource1);
        }
    }
}
}
// Nothing is ready for retrieval yet.
if(video->frame_buffer.retrieve < 0) {
    //input->SetDataSize(0);
    umcRes = UMC_NOT_ENOUGH_DATA;
}
}
// We don't advance PTS for input data - we don't want to guess it
// without decoding.
return (umcRes);
}
//VIDEO_FRAME_BUFFER *GetNextFrame(void* strV_cur, struct sMediaData *output)

// compute and store PT for input, return PT of the frame to be out
// called after field_buffer_index switching
// in_time:   timestamp supplied with the input data (<= 0 means "absent")
// out_time:  receives the presentation time of the frame to be output,
//            or -1 when no frame is pending (retrieve < 0)
void MPEG2VideoDecoderBase::CalculateFrameTime(double in_time, double * out_time)
{
    IppVideoContext* video = &Video[0];
    int index;
    if(PictureHeader.picture_structure == FRAME_PICTURE ||
       PictureHeader.field_buffer_index == 1)
    {
        // Save time provided for the frame, ignore for second field
        video->frame_buffer.frame_p_c_n [video->frame_buffer.curr_index].frame_time = in_time;
        // Compute current time, ignoring second field, don't recalc if absent
        if(in_time > 0) {
            if(PictureHeader.picture_coding_type == B_PICTURE)
                sequenceHeader.stream_time = in_time;
            else {
                // Reference picture: rewind from its (reordered) time by the
                // temporal-reference distance. // can become wrong
                sequenceHeader.stream_time = in_time - sequenceHeader.delta_frame_time *
                    (PictureHeader.temporal_reference - sequenceHeader.stream_time_temporal_reference);
            }
        }
    }
    // Compute time to be out
    index = video->frame_buffer.retrieve;
    if(index>=0 && video->frame_buffer.frame_p_c_n [index].frame_time < 0) {
        // Frame to be out hasn't proper time
        if(PictureHeader.picture_coding_type == B_PICTURE ||
           m_lFlags & FLAG_VDEC_REORDER) {
            // use current time
            *out_time = sequenceHeader.stream_time;
        } else {
            // compute next ref_frame, really curr_time + IPdistance
            *out_time = sequenceHeader.stream_time + sequenceHeader.delta_frame_time *
                (PictureHeader.temporal_reference - sequenceHeader.stream_time_temporal_reference);
        }
    } else if(index>=0) {
        // The pending frame carried its own timestamp - use it verbatim.
        *out_time = video->frame_buffer.frame_p_c_n [index].frame_time;
    } else {
        // No frame pending retrieval.
        *out_time = -1;
    }
    // Update current time after second field
    if(PictureHeader.field_buffer_index == 0)
        sequenceHeader.stream_time += sequenceHeader.delta_frame_time;
}

// Latch the plane pointers and pitches of the frame pending retrieval into
// the conversion context(s) and announce the new frame to the converter.
void MPEG2VideoDecoderBase::PrepareConvertFrame()
{
    IppVideoContext *video = &Video[0];
    // set source pointer(s)
    m_Convert.lpSource0 = video->frame_buffer.frame_p_c_n[video->frame_buffer.retrieve].Y_comp_data;
    m_Convert.lpSource2 =
video->frame_buffer.frame_p_c_n[video->frame_buffer.retrieve].U_comp_data;
    // NOTE(review): lpSource2 receives the U plane and lpSource1 the V plane,
    // i.e. the converter slots are filled in Y/V/U order - confirm this is
    // the order the converter expects.
    m_Convert.lpSource1 = video->frame_buffer.frame_p_c_n[video->frame_buffer.retrieve].V_comp_data;
    m_Convert.PitchSource0 = video->frame_buffer.Y_comp_pitch;
    m_Convert.PitchSource2 = video->frame_buffer.U_comp_pitch;
    m_Convert.PitchSource1 = video->frame_buffer.V_comp_pitch;
    // Two-picture mode: mirror the same planes/pitches into the preview context.
    if (m_bTwoPictures) {
        m_ConvertPreview.lpSource0 = video->frame_buffer.frame_p_c_n[video->frame_buffer.retrieve].Y_comp_data;
        m_ConvertPreview.lpSource2 = video->frame_buffer.frame_p_c_n[video->frame_buffer.retrieve].U_comp_data;
        m_ConvertPreview.lpSource1 = video->frame_buffer.frame_p_c_n[video->frame_buffer.retrieve].V_comp_data;
        m_ConvertPreview.PitchSource0 = video->frame_buffer.Y_comp_pitch;
        m_ConvertPreview.PitchSource2 = video->frame_buffer.U_comp_pitch;
        m_ConvertPreview.PitchSource1 = video->frame_buffer.V_comp_pitch;
    }
    if (NULL != m_lpConverter) {
        m_lpConverter->BeginFrame(&m_Convert);
        if (m_bTwoPictures)
            m_lpConverter->BeginFrame(&m_ConvertPreview);
    }
}

// Run the color converter once a displayable picture is complete: either a
// frame picture, or the second field of a field pair. No-op otherwise.
void MPEG2VideoDecoderBase::FinalizeConvertFrame()
{
    if (NULL != m_lpConverter) {
        if((PictureHeader.picture_structure == IPPVC_FRAME_PICTURE) ||
           (PictureHeader.field_buffer_index == 1)) {
            m_lpConverter->ConvertFrame(&m_Convert);
        }
    }
}

// Decode all slices assigned to one worker thread.
// Loops slice by slice until DecodeSliceHeader stops returning UMC_OK
// (its failure terminates the picture data for this thread).
// Returns UMC_OK unconditionally; the caller only checks thread 0's result.
Status MPEG2VideoDecoderBase::DecodeSlices(int threadID)
{
    Status umcRes;
    IppVideoContext *video = &Video[threadID];
    sSlice *pSlice = &Video[threadID].slice;
    umcRes = DecodeSliceHeader(threadID);
    if(umcRes != UMC_OK)
        return (umcRes);
    int lSliceNum;
    do {
        do {
            lSliceNum = pSlice->slice_vertical_position;
            // NOTE(review): DecodeSlice's status is immediately overwritten by
            // the DecodeSliceHeader call below, so per-slice decode errors are
            // swallowed - reading the next header resynchronizes instead.
            umcRes = DecodeSlice(threadID);
            // decode next header to know vertical position
            // advance to next slice if error in data
            umcRes = DecodeSliceHeader(threadID);
            if(umcRes != UMC_OK)
                break;
        } while(lSliceNum == pSlice->slice_vertical_position);
    } while( umcRes >= UMC_OK );
    return (UMC_OK); //umcRes;
}

// Decode one coded picture, splitting the slice data across worker threads
// when more than one thread is configured.
Status MPEG2VideoDecoderBase::DecodePicture()
{
    IppVideoContext* video = &Video[0];
    int i;
    Status umcRes ;//= DecodePictureHeader();
#ifdef
KEEP_HISTORY
    // Clear the per-macroblock history map for the current frame.
    memset(video->frame_buffer.frame_p_c_n[video->frame_buffer.ind_his_curr].frame_history, 0,
           sequenceHeader.mb_width*sequenceHeader.mb_height);
#endif
    // A frame is pending retrieval: latch its planes for conversion/output.
    if(Video[0].frame_buffer.retrieve >= 0)
        PrepareConvertFrame();
    Video[0].slice.slice_vertical_position = 1;
    int saveNumberOfThreads = m_nNumberOfThreads;
    if(m_nNumberOfThreads > 1)
    {
        // Pre-scan the picture data for slice start codes (00 00 01 xx with
        // 0 < xx < 0xB0) so the bitstream can be partitioned between threads.
#define MAX_START_CODES 1024
        Ipp8u *start_ptr = GET_BYTE_PTR(video->bs);
        Ipp8u *end_ptr = GET_END_PTR(video->bs)-3;
        Ipp8u *ptr = start_ptr;
        Ipp8u *prev_ptr;
        int curr_thread;
        int len = end_ptr - start_ptr;
        int j, start_count = 0;
        int start_pos[MAX_START_CODES];
        for(start_count = 0; start_count < MAX_START_CODES; start_count++)
        {
            int code;
            while(ptr<end_ptr && (ptr[0] || ptr[1] || ptr[2]!=1))
                ptr++;
            // NOTE(review): ptr[3] is read BEFORE the ptr>=end_ptr check; when
            // no start code remains this reads the byte at end_ptr+3. Harmless
            // only if the buffer extends beyond GET_END_PTR()-3 - verify.
            code = ptr[3];
            if(ptr>=end_ptr) {
                ptr = GET_END_PTR(video->bs);
                break;
            }
            if(code > 0 && code<0xb0) { // start of slice
                start_pos[start_count] = ptr - start_ptr;
                ptr+=4;
            } else {
                break;
            }
        }
        len = (ptr - start_ptr);
        prev_ptr = start_ptr;
        curr_thread = 1; // 0th will be last
        // Distribute [start_ptr, start_ptr+len) among the threads, cutting
        // only at recorded slice start codes.
        // NOTE(review): if start_count == 0 the first start_pos[j] read below
        // is of an uninitialized element - confirm callers guarantee at least
        // one slice start code in the picture data.
        for(i=0, j=0; i<m_nNumberOfThreads; i++)
        {
            int approx = len * (i+1) / m_nNumberOfThreads;
            if(start_pos[j] > approx) {
                m_nNumberOfThreads --; // no data for thread - covered by previous
                continue;
            }
            while(j<start_count && start_pos[j] < approx)
                j++;
            if(j==start_count) { // it will be last thread -> to 0th
                SET_PTR(Video[0].bs, prev_ptr)
                m_nNumberOfThreads = curr_thread;
                break;
            }
            // Hand this segment to a worker: share the frame buffer state and
            // point its bitstream at [prev_ptr, start of next segment).
            Video[curr_thread].frame_buffer = Video[0].frame_buffer;
            INIT_BITSTREAM(Video[curr_thread].bs, prev_ptr, start_ptr + start_pos[j]);
            curr_thread ++;
            prev_ptr = start_ptr + start_pos[j];
        }
    }
    // Wake the worker threads, then decode thread 0's share on this thread.
    for (i = 1;i < m_nNumberOfThreads;i += 1) {
        vm_event_signal(m_lpStartEvent + i);
    }
    umcRes = DecodeSlices(0);
    // wait additional thread(s)
    for (i = 1;i < m_nNumberOfThreads;i += 1)
        vm_event_wait(m_lpStopEvent + i);
    if(umcRes == UMC_END_OF_STREAM)
        return umcRes;
    video = &Video[0];
    UNGET_BITS_32(video->bs)
    m_nNumberOfThreads = saveNumberOfThreads; // restore, could have been decreased
// Field pictures alternate between two field buffers: flip the index and,
// when only the first field has been decoded, report the frame incomplete.
if(PictureHeader.picture_structure != IPPVC_FRAME_PICTURE) {
    PictureHeader.field_buffer_index ^= 1;
    if (PictureHeader.field_buffer_index == 1) {
        return UMC_NOT_ENOUGH_DATA;
    }
}
// Count a decoded frame once per complete frame (frame picture, or after
// the second field of a pair).
if(umcRes == UMC_OK &&
   (PictureHeader.picture_structure == IPPVC_FRAME_PICTURE ||
    PictureHeader.field_buffer_index == 0)) {
    m_decodedFrameNum++;
}
// Emit/convert the pending output frame. With reordering enabled one frame
// is held back (threshold 1); otherwise output as soon as one is decoded.
if(Video[0].frame_buffer.retrieve >= 0 &&
   m_decodedFrameNum > ((m_lFlags & FLAG_VDEC_REORDER)?1:0) ) {
    m_decodedFrameNum--;
    FinalizeConvertFrame();
}
sequenceHeader.frame_count++;
return (UMC_OK);
}

// Parse the picture header from the bitstream: 10-bit temporal_reference,
// 3-bit picture_coding_type, then a 16-bit field read into 'code' and
// otherwise unused here (NOTE(review): presumably vbv_delay per
// ISO/IEC 13818-2 - confirm).
// NOTE: this definition continues past the end of the visible chunk.
Status MPEG2VideoDecoderBase::DecodePictureHeader()
{
    unsigned int code;
    IppVideoContext *video = &Video[0];
    sPictureHeader *pPic = &PictureHeader;
    // These survive the memset below: the field index is restored into the
    // new header, and the previous picture type validates the second field.
    int field_buffer_index_save = PictureHeader.field_buffer_index;
    FrameType picture_coding_type_save = PictureHeader.picture_coding_type;
    if (GET_REMAINED_BYTES(video->bs) < 4) {
        // return header back
        UNGET_BITS_32(video->bs)
        return (UMC_NOT_ENOUGH_DATA);
    }
    memset(&PictureHeader, 0, sizeof(PictureHeader));
    // save-restore the field. To be move out of the structure
    PictureHeader.field_buffer_index = field_buffer_index_save;
    GET_BITS(video->bs, 10, PictureHeader.temporal_reference)
    GET_TO9BITS(video->bs, 3, *(int*)&(PictureHeader.picture_coding_type))
    GET_BITS(video->bs, 16 ,code)
    VM_ASSERT(PictureHeader.picture_coding_type > 0 &&
              PictureHeader.picture_coding_type < 5);
    if(PictureHeader.picture_coding_type == D_PICTURE) {
        sequenceHeader.first_i_occure = 1; // no refs in this case
    }
    else if(PictureHeader.field_buffer_index != 0) {
        // second field must be the same, except IP
        if(picture_coding_type_save != PictureHeader.picture_coding_type
            && picture_coding_type_save != I_PICTURE
            && PictureHeader.picture_coding_type != P_PICTURE)
        {
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -