// umc_h264_gen_enc.cpp
m_pAllocEncoderInst = NULL;
}
if (m_pParsedFrameData)
{
H264_Free(m_pParsedFrameData);
m_pParsedFrameData = NULL;
}
Free_Core_Memory();
}
// This function is called when the encoder's Start_Sequence function is
// called. This signals that dimension dependent variables such as frame
// buffers should now be allocated.
template <class PixType, class CoeffsType> Status H264CoreEncoder<PixType,CoeffsType>::Allocate_Buffers()
{
    Ipp32s iFrame;
    Ipp32s iBFrmsShared;
    Status status = UMC_OK;
    do {
        m_QPMapSize = m_HeightInMBs * m_WidthInMBs;
        // Number of bitstream objects needed: one per slice, doubled for
        // field (interlaced) coding when coding_type == 1.
        // TODO fix for PicAFF/AFRM.
        const Ipp32s numBitstreams = m_info.num_slices * ((m_info.coding_type == 1) + 1);
        // Allocate space for the bitstream and a bit stream copy (used for IPB)
        if (!m_bScratchBufferAllocated) {
            // Notice the actual non-padded frame dimensions are used. We had this
            // clarified with the ITU.
            Ipp32u uSize = GetBsBufferMaxSize(m_info.info.clip_info.width, m_info.info.clip_info.height);
            // Notice the encoder's internal bitstream buffer is 2x the standard size
            m_pAllocEncoderInst = (Ipp8u *)H264_Allocate(uSize * numBitstreams * 2, false);
            if (NULL == m_pAllocEncoderInst) {
                status = UMC_ERR_ALLOC;
                break;
            }
            // the bit stream buffer is 32 byte aligned.
            m_pBitStream = H264_ALIGN(m_pAllocEncoderInst, 32);
            m_bScratchBufferAllocated = true;
        }
        // Allocate the array of bitstream object pointers, one per slice (x2 for fields).
        m_pbitstreams = (CH264pBs<PixType,CoeffsType> **)H264_Allocate(numBitstreams * sizeof(CH264pBs<PixType,CoeffsType> *), false);
        if (m_pbitstreams == NULL) {
            status = UMC_ERR_ALLOC;
            break;
        }
        // Carve the shared scratch buffer into equal-sized chunks, one per
        // bitstream object.
        Ipp32s chunk_size = GetBsBufferMaxSize(m_info.info.clip_info.width, m_info.info.clip_info.height);
        Ipp32s i;
        Ipp8u *ptr = m_pBitStream;
        for (i = 0; i < numBitstreams; i++) {
            m_pbitstreams[i] = new CH264pBs<PixType,CoeffsType>(ptr,
                                                                chunk_size,
                                                                m_info.chroma_format_idc,
                                                                status); // ctor reports failure through 'status'
            ptr += chunk_size;
            if (!m_pbitstreams[i]) {
                status = UMC_ERR_ALLOC;
                break;
            } else if (status != UMC_OK) {
                // Object was constructed but its constructor failed internally.
                delete m_pbitstreams[i];
                m_pbitstreams[i] = NULL;
                break;
            }
        }
        if (status != UMC_OK) {
            // Roll back the bitstream objects created so far.
            for (i = i - 1; i >= 0; i--) {
                delete m_pbitstreams[i];
                m_pbitstreams[i] = NULL;
            }
            // NOTE(review): m_pbitstreams was allocated with H264_Allocate but is
            // released with ippsFree — confirm both map to the same allocator.
            ippsFree(m_pbitstreams);
            // BUG FIX: null the pointer so the Deallocate_Buffers() call at the
            // bottom of this function cannot free it a second time.
            m_pbitstreams = NULL;
            break;
        }
        // Hand each slice its bitstream object.
        for (i = 0; i < numBitstreams; i++) {
            m_Slices[i].m_pbitstream = m_pbitstreams[i];
        }
        m_bs1 = m_pbitstreams[0]; // m_bs1 is the main stream.
        // Determine how much space we need for the per-slice reference picture
        // lists (two lists, L0 and L1, per slice).
        Ipp32u uMaxNumSlices = (MAX_SLICE_NUM < 0) ?
            m_HeightInMBs * m_WidthInMBs :
            MIN(MAX_SLICE_NUM, m_HeightInMBs * m_WidthInMBs);
        Ipp32u uRefPicListSize = 2 * uMaxNumSlices * sizeof(EncoderRefPicList<PixType>);
        // +7 and +YUV_ALIGNMENT leave room for alignment padding.
        Ipp32u totalSize = Ipp32u(uRefPicListSize + 7 + YUV_ALIGNMENT);
        Ipp32s len = MAX(1, totalSize);
        m_pParsedFrameData = ippsMalloc_8u(len);
        if (!m_pParsedFrameData) {
            // BUG FIX: previously 'return UMC_ERR_ALLOC;' here, which skipped the
            // Deallocate_Buffers() cleanup below and leaked the scratch buffer and
            // every bitstream object allocated above. Break out of the do/while
            // instead so the common error path runs.
            status = UMC_ERR_ALLOC;
            break;
        }
        ippsZero_8u(m_pParsedFrameData, len);
        // (Removed unused local 'pAlignedParsedData' — it was computed with
        // H264_ALIGN but never referenced in this function.)
        Allocate_Core_Memory(m_info.num_slices, m_PaddedSize, m_bs1);
        // Configure the rate controller from the widest bit depth in use.
        Ipp32s bitDepth = IPP_MAX(m_info.bit_depth_aux, IPP_MAX(m_info.bit_depth_chroma, m_info.bit_depth_luma));
        Ipp32s chromaSampling = m_info.chroma_format_idc;
        Ipp32s alpha = m_info.aux_format_idc ? 1 : 0;
        switch (m_info.rate_controls.method) {
        case H264_RCM_CBR:
            avbr.Init(8, 4, 8, m_info.info.bitrate, m_info.info.framerate, m_info.info.clip_info.width, m_info.info.clip_info.height, bitDepth, chromaSampling, alpha);
            break;
        case H264_RCM_VBR:
            //avbr.Init(32, m_info.info.bitrate, m_info.info.framerate, m_info.info.clip_info.width, m_info.info.clip_info.height, bitDepth, chromaSampling, alpha);
            avbr.Init(0, 0, 0, m_info.info.bitrate, m_info.info.framerate, m_info.info.clip_info.width, m_info.info.clip_info.height, bitDepth, chromaSampling, alpha);
            break; // BUG FIX: was missing; previously fell through into 'default'.
        default:
            break;
        }
        // Count the B frames in the GOP profile: the input frame queue must hold
        // all of them plus one P frame.
        for (iFrame = iBFrmsShared = 0; iFrame < profile_frequency; iFrame++) {
            if (eFrameType[iFrame] == BPREDPIC) {
                iBFrmsShared++;
            }
        }
        // reset the index into the Profile
        m_iProfileIndex = 0;
    } while (0);
    // if we have an error creating or allocating above then
    // cleanup any objects that were successfully created.
    if (status != UMC_OK) Deallocate_Buffers();
    return status;
}
//
// CompressFrame
// This function drives the compression of one frame
//
template <class PixType, class CoeffsType> Status H264CoreEncoder<PixType,CoeffsType>::CompressFrame(
EnumPicCodType & ePictureType,
EnumPicClass & ePic_Class,
MediaData* dst)
{
Status status = UMC_OK;
Ipp32s slice;
Ipp32s frame_recode = 0;
bool bufferOverflowFlag = false;
bool buffersNotFull = true;
// fprintf(stderr,"frame=%d\n", m_uFrames_Num);
if( m_Analyse & ANALYSE_RECODE_FRAME /*&& m_pReconstructFrame == NULL //could be size change*/){ //make init reconstruct buffer
m_pReconstructFrame = new H264EncoderFrame<PixType>( m_pCurrentFrame, memAlloc
#if defined ALPHA_BLENDING_H264
, m_SeqParamSet.aux_format_idc
#endif
);
if( m_pReconstructFrame->allocate(m_PaddedSize, m_info.num_slices)) return UMC::UMC_ERR_ALLOC;
m_pReconstructFrame->m_bottom_field_flag[0] = m_pCurrentFrame->m_bottom_field_flag[0];
m_pReconstructFrame->m_bottom_field_flag[1] = m_pCurrentFrame->m_bottom_field_flag[1];
}
do{
m_is_cur_pic_afrm = (Ipp32s)(m_pCurrentFrame->m_PictureStructureForDec==AFRM_STRUCTURE);
for (m_field_index=0;m_field_index<=(Ipp8u)(m_pCurrentFrame->m_PictureStructureForDec<FRM_STRUCTURE);m_field_index++)
{
EnumSliceType default_slice_type;
bool startPicture = true;
#if defined ALPHA_BLENDING_H264
bool alpha = true; // Is changed to the opposite at the beginning.
do { // First iteration primary picture, second -- alpha (when present)
alpha = !alpha;
if(!alpha) {
#endif // ALPHA_BLENDING_H264
// Now set-up some things depending on Picture Class
switch (ePic_Class){
case IDR_PIC:
if (m_field_index){
ePic_Class = REFERENCE_PIC;
break;
}
//if (m_uFrames_Num == 0) //temoporaly disabled
{
SetSequenceParameters(); // These only get (re)set on IDR pictures.
SetPictureParameters();
}
// Toggle the idr_pic_id on and off so that adjacent IDRs will have different values
// This is done here because it is done per frame and not per slice.
//FPV m_SliceHeader.idr_pic_id ^= 0x1;
m_SliceHeader.idr_pic_id++;
break;
case REFERENCE_PIC: // swap reference frames before encoding the new reference frame.
//SwapReferencePtrs();
break;
case DISPOSABLE_PIC: // Do Nothing right now?
break;
default: // Unsupported Picture Class
break;
}
// Initialization before encoding all rows.
m_PicType = ePictureType;
if (m_PicType == PREDPIC) {
default_slice_type = PREDSLICE;
} else if (m_PicType == INTRAPIC) {
default_slice_type = INTRASLICE;
} else { // BPREDPIC
default_slice_type = BPREDSLICE;
}
m_PicParamSet.chroma_format_idc = m_SeqParamSet.chroma_format_idc;
m_PicParamSet.bit_depth_luma = m_SeqParamSet.bit_depth_luma;
#if defined ALPHA_BLENDING_H264
}else {
m_cpb.switchToAuxiliary();
m_dpb.switchToAuxiliary();
m_PicParamSet.chroma_format_idc = 0;
m_PicParamSet.bit_depth_luma = m_SeqParamSet.bit_depth_aux;
}
#endif // ALPHA_BLENDING_H264
if( !(m_Analyse & ANALYSE_RECODE_FRAME) ) m_pReconstructFrame = m_pCurrentFrame;
// reset bitstream object before begin compression
Ipp32s i;
for(i = 0; i < m_info.num_slices*((m_info.coding_type == 1) + 1); i++) { //TODO fix for PicAFF/AFRM
m_pbitstreams[i]->Reset();
}
#if defined ALPHA_BLENDING_H264
if(!alpha)
#endif // ALPHA_BLENDING_H264
{
SetSliceHeaderCommon(m_pCurrentFrame); // Set up the values in the slice header
if( default_slice_type == BPREDSLICE ){
if( m_Analyse & ANALYSE_ME_AUTO_DIRECT){
if( m_SliceHeader.direct_spatial_mv_pred_flag )
m_SliceHeader.direct_spatial_mv_pred_flag = m_DirectTypeStat[0] > ((545*m_DirectTypeStat[1])>>9) ? 0:1;
else
m_SliceHeader.direct_spatial_mv_pred_flag = m_DirectTypeStat[1] > ((545*m_DirectTypeStat[0])>>9) ? 1:0;
m_DirectTypeStat[0]=m_DirectTypeStat[1]=0;
}else{
m_SliceHeader.direct_spatial_mv_pred_flag = m_info.direct_pred_mode & 0x1;
}
}
status = encodeFrameHeader(m_bs1, dst, (ePic_Class == IDR_PIC), startPicture); // Write frame header code into bitstream buffer.
if (status != UMC_OK) goto done;
}
status = Start_Picture(&ePic_Class, ePictureType);
if (status != UMC_OK) goto done;
Ipp32s threadId = 0;
Ipp32s slice_qp_delta_default = m_Slices[0].m_slice_qp_delta;
UpdateRefPicListCommon();
#if defined _OPENMP
vm_thread_priority mainTreadPriority = vm_get_current_thread_priority();
#pragma omp parallel for private(slice)
#endif // _OPENMP
for (slice = (Ipp32s)m_info.num_slices*m_field_index; slice < m_info.num_slices*(m_field_index+1); slice++){
#if defined _OPENMP
vm_set_current_thread_priority(mainTreadPriority);
#endif // _OPENMP
m_Slices[slice].m_slice_qp_delta = (Ipp8s)slice_qp_delta_default;
m_Slices[slice].m_slice_number = slice;
m_Slices[slice].m_slice_type = default_slice_type; // Pass to core encoder
UpdateRefPicList(m_Slices + slice, m_pCurrentFrame->GetRefPicLists(slice), m_SliceHeader, &m_ReorderInfoL0, &m_ReorderInfoL1);
/* if (m_SliceHeader.MbaffFrameFlag){
//UpdateRefPicList(&m_pRefPicList[slice], m_SliceHeader, &m_ReorderInfoL0, &m_ReorderInfoL1);
   (remainder of the file truncated in this copy; web-page scrape residue removed)