⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 umc_h264_dec.h

📁 这是在PC下基于IPP库的示例代码,从网上下载IPP库之后,设置相关参数即可编译该代码.
💻 H
📖 第 1 页 / 共 5 页
字号:
        // Per-slice decoding state. Mostly distance-scale factors used for
        // B-slice "direct" motion-vector scaling (see the scaling-ratio note
        // further down in this class); the AFF variants are indexed per
        // field parity of the current MB and both reference pictures.
        struct SliceData {
            Ipp32s        DistScaleFactor[MAX_NUM_REF_FRAMES];
            Ipp32s        DistScaleFactorMV[MAX_NUM_REF_FRAMES];
            Ipp32s        DistScaleFactorAFF[2][2][2][MAX_NUM_REF_FRAMES]; // [curmb field],[ref1field],[ref0field]
            Ipp32s        DistScaleFactorMVAFF[2][2][2][MAX_NUM_REF_FRAMES]; // [curmb field],[ref1field],[ref0field]
            // Members below were retired from this struct but kept for reference:
//            Ipp32u        num_ref_idx_l0_active;            // num list 0 ref pics used to decode the slice
//            Ipp32u        num_ref_idx_l1_active;            // num list 0 ref pics used to decode the slice
//            Ipp8u        pic_parameter_set_id;            // of pic param set used for this slice
//            Ipp8u        disable_deblocking_filter_idc;        // deblock filter control, 0=filter all edges
//            Ipp8s        slice_alpha_c0_offset;                // deblock filter c0, alpha table offset
//            Ipp8s        slice_beta_offset;                    // deblock filter beta table offset
//           Ipp8u        luma_log2_weight_denom;            // luma weighting denominator
//            Ipp8u        chroma_log2_weight_denom;        // chroma weighting denominator
//            Ipp8s        chroma_qp_index_offset;            // offset to add to QP for chroma
            EnumSliceCodType slice_type;        // I/P/B/SP/SI coding type of the slice
        };    // SliceData

        Ipp8u                         *m_pParsedData;
        Ipp8u                         *m_pParsedDataNew;
            // This points to a huge, monolithic buffer that contains data
            // derived from parsing the current frame.
            // Logically this information belongs in the H264DecoderFrame class.
            // However, algorithmically, we only need to keep around this
            // information for the most recent reference frame.
            // Thus, as a space saving optimization, we
            // allocate this information in the Decoder class rather than
            // in the H264DecoderFrame class.
Ipp32s                          m_parsedDataLength;     // byte size of the m_pParsedData allocation
sDimensions                     m_paddedParsedDataSize; // padded frame dimensions the buffer was sized for
H264DecoderLocalMacroblockDescriptor m_mbinfo; //Local MB Data
H264DecoderMBAddr *next_mb_tables[3];//0 linear scan 1 mbaff linear scan 2 - bitstream defined scan
// Position of the macroblock currently being decoded.
Ipp32s                          m_CurMBAddr;
Ipp32s                          m_PairMBAddr;           // address of the paired MB (MBAFF pair — TODO confirm)
Ipp32s                          m_CurMB_X,m_CurMB_Y;
Ipp32s                          mb_width,mb_height;     // frame size in macroblock units
Ipp32s                          m_CurMB_QP;
#ifdef USE_SEI
Ipp8u                           m_FrameProcessingStage;
#endif
H264DecoderCurrentMacroblockDescriptor m_cur_mb;
bool                            m_bHasSEI;
Ipp8u                           m_SEITargetSPS;
    // forward declaration of internal types
    // Member-function pointer type used to dispatch among the deblocking
    // implementations of the enclosing H264VideoDecoder class.
    typedef void (H264VideoDecoder::*DeblockingFunction)(Ipp32u nMBAddr);

protected:
        H264SeqParamSet   m_SeqParamSet[MAX_NUM_SEQ_PARAM_SETS];// Sequence parameter sets read from the bitstream.
        H264SEIPayLoad    m_SEIPayLoads[MAX_NUM_SEQ_PARAM_SETS];
        H264PicParamSet   m_PicParamSet[MAX_NUM_PIC_PARAM_SETS];// Picture parameter sets read from the bitstream.
// Statically allocated, over-sized coefficient scratch buffer; Coeffs()
// returns the address rounded up to DEFAULT_ALIGN_VALUE alignment.
struct H264DecoderStaticCoeffs
        {
            Ipp16s m_CoeffsBuffer[16 * 27 + DEFAULT_ALIGN_VALUE]; // (Ipp16s []) array of blocks to decode
            Ipp16s *Coeffs()
            {
                return align_pointer<Ipp16s *> (m_CoeffsBuffer, DEFAULT_ALIGN_VALUE);
            }
        } m_pCoeffBlocksBufStatic;

// Temporary pixel scratch area; Pixels() returns an aligned pointer offset
// into the buffer (48*4+16 bytes in, presumably to leave room for a border
// above/left — TODO confirm).
struct H264DecoderTemporalPixels
        {
            Ipp8u m_PixelsBuffer[24 * 48 + DEFAULT_ALIGN_VALUE]; // (Ipp8u []) pixel scratch buffer
            Ipp8u *Pixels()
            {
                return align_pointer<Ipp8u *> (m_PixelsBuffer+48*4+16, DEFAULT_ALIGN_VALUE);
            }
        } m_pTemporalPixels;

        // Write/read cursors into the coefficient buffer.
        Ipp16s               *m_pCoeffBlocksWrite;
        Ipp16s               *m_pCoeffBlocksRead;

        H264SliceHeader             m_CurSliceHeader;       // header of the slice being decoded
        AdaptiveMarkingInfo         m_AdaptiveMarkingInfo;  // MMCO reference-marking commands — TODO confirm

        // At least one sequence parameter set and one picture parameter
        // set must have been read before a picture can be decoded.
        bool           m_bSeqParamSetRead;
        bool           m_bPicParamSetRead;
        //bool           sp_for_switch_flag;
        bool           m_NALRefIDC[2];

        // Keep track of which parameter set is in use.
        Ipp8s          m_CurrentSeqParamSet;
        Ipp8s          m_CurrentPicParamSet;
        bool           m_bDPBSizeChanged;
        bool           m_bSeqParamSetChanged;
        bool           m_WaitForDR;

        // Maps a reference index to its field parity via pFields.
        // Returns -1 for an invalid (negative) reference index; otherwise
        // asserts the stored parity is non-negative and returns it.
        Ipp8s          GetReferenceField(Ipp8s *pFields,Ipp8s RefIndex)
        {
            if (RefIndex<0)
            {
                return -1;
            }
            else
            {
                VM_ASSERT(pFields[RefIndex]>=0);
                return pFields[RefIndex];
            }
        }

        Ipp64f         m_initialTR;
        Ipp32s         m_dpbSize;      // decoded picture buffer size
        // This is the value at which the TR Wraps around for this
        // particular sequence.
Ipp32s         m_MaxLongTermFrameIdx;
        // Forward and backward scaling ratios used in B frame direct mode
        // motion vector scaling.
        // Indicates whether we are delaying the display of frames
        // due to the fact that there might be B frames in the video
        // sequence.  Initially, latency is disabled.  However, if a
        // B frame has been seen or if the appropriate user interface
        // is used, then latency is enabled.
        bool           m_bNeedToCheckMBSliceEdges;
        // We allocate this object only once (in our constructor),
        // and use it to parse all of our bitstreams.
        H264Bitstream  *m_pBitStream;
        // Declare space for our YUV reference frames and B frames.
        // We keep a doubly-linked list of reference frames (i.e.,
        // non B-frames).  Currently the list contains at most two decoded
        // reference frames.  An additional single buffer is maintained for
        // decoding B frames, and this buffer is not linked into the reference
        // frame list (although its previous() and future() pointers will
        // point into the reference frame list).
            // At Start_Sequence, all (2) of our reference frame buffers are
            // available, and are on this list.
        H264DecoderFrameList  m_H264DecoderFramesList;
            // This is the buffer that we decode B frames into.
        H264DecoderFrame     *m_pCurrentFrame;
            // This points to either m_BFrame, or one of the frames in our
            // reference frame list.  It is the frame that we are currently
            // parsing in Decode().  It's previous() and future() methods
            // can be used to access the frames it is predicted from.
            // After a successful call to Decode(), this remains a valid
            // pointer, so that the application can use custom messages to
            // extract bitstream information for the just-decoded picture.
H264DecoderFrame     *m_pDisplayFrame;
            // This points to either m_BFrame, or one of the frames in our
            // reference frame list.  It is the frame that we are going to
            // return from our Decode() invocation (or that we have returned
            // from our most recent Decode() invocation, if we're outside
            // the context of Decode()).  Due to B-frame latency, this may
            // not be the frame we are actually decoding.  Due to post
            // processing, the YUV data that we eventually display might
            // actually reside in one of our post processing buffers.

#define NUM_COEF_POS_ELEMENTS (27*16*2)
            // 1 DC, 16 luma, 8 chroma, 2 chroma DC 4x4 for 1 macroblock
#define COEF_POS_EOB 0x20           // end of block flag
#define COEF_POS_16BIT_COEF 0x10    // next coefficient is 16 bits flag

        Ipp32u                *m_pMBIntraTypes;
        // Buffers (within m_pParsedFrameData) used to store list 0 and
        // list 1 reference index for each 4x4 subblock of a frame.
            // The differential coding of intra-coding types uses the intra
            // type of the above and left subblocks as predictors.
#define NUM_INTRA_TYPE_ELEMENTS 16
#define NUM_MVFLAG_ELEMENTS 16
            // Buffer (within m_pParsedData) used to store the number of
            // non-zero coefficients in a 4x4 block. Contains only the
            // data for the bottom blocks of one row of macroblocks,
            // 4 luma, 4 chroma.
            // Buffer used to store the number of non-zero coefficients in
            // a 4x4 block, containing the data for the right column of
            // 8 blocks of one macroblock (4 luma, 4 chroma).

        // NOTE(review): the comment below says "Array of SliceData structs",
        // but m_CurSliceInfo is declared as a single SliceData — the array
        // wording likely predates a refactor; verify against callers.
        SliceData                 m_CurSliceInfo;
            // Array of SliceData structs used to store slice-level info about
            // each slice of the picture being decoded.
// Indexes are assigned as new slices are encountered in the picture and
// are stored in MBInfo for each MB to map MBs to slices. Allocated for
// worst case number of slices (number of MBs).
        Ipp32s                     m_prev_dquant;
        Ipp8u                      m_field_index;           // 0/1: which field of a field pair — TODO confirm
        // Error-resilience state for damaged/partial input buffers.
        Ipp8u                      m_broken_buffer;
        Ipp32s                     m_broken_buffer_start_mb;
        Ipp32s                     m_broken_buffer_start_slice;
        // Sizes of the L0/L1/long-term reference picture lists.
        Ipp32s                     m_NumFramesInL0List;
        Ipp32s                     m_NumFramesInL1List;
        Ipp32s                     m_NumFramesInLTList;
        Ipp8u                      m_NumShortEntriesInList;
        Ipp8u                      m_NumLongEntriesInList;
        Ipp8u                      m_FrameNumGapPresented;
        bool                       m_SkipThisPic;
        bool                       m_PreviousPicSkipped;
        // NOTE(review): the comment below describes a 2-D
        // [slice number][ref index] layout, but the declarations are 1-D
        // arrays of MAX_NUM_REF_FRAMES — verify before relying on it.
        PredWeightTable             m_pPredWeight_L0[MAX_NUM_REF_FRAMES];
        PredWeightTable             m_pPredWeight_L1[MAX_NUM_REF_FRAMES];
            // Array of PredWeightTable structs used to store prediction weights
            // for list 0 and list 1 references. Array is 2-dimensional:
            // [slice number][ref index], there are structs for MAX_NUM_REF_FRAMES
            // for each slice.
        Ipp8u                         *m_pMBMap;

private:
        /////////////////////
        //                 //
        // Private methods //
        //                 //
        /////////////////////

        Status     AllocateParsedData( const sDimensions&, bool exactSizeRequested);
            // Reallocate m_pParsedData, if necessary, and initialize all the
            // various pointers that point into it.  This gets called
            // during Start_Sequence, and at each Decode invocation, based
            // on the incoming image dimensions.
// If exactSizeRequested is false, then any existing            // allocated buffer, even if way too big, will be reused without            // being deallocated.  Othwerwise, any existing allocated buffer            // will be first deallocated if is not exactly the size needed.        void        DeallocateParsedData();        // Decode frame        Status DecodeFrame(MediaData *dst, MediaData* in);        // Prepare buffers & convert frame to output buffer        Status OutputFrame(MediaData *dst, Ipp32u nStage);#ifdef USE_SEI        Status     OutputHalfFrame(H264DecoderFrame *pDisplayFrame,MediaData *dst,Ipp8u WhichField);        Status     OutputReverseFrame(H264DecoderFrame *pDisplayFrame,MediaData *dst);#endif // USE_SEI        // Convert frame to output buffer        Status ConvertFrame(MediaData *dst);        // Get oldest frame to display        H264DecoderFrame *GetFrameToDisplay(Ipp32u nStage);            // outputAFrame is a worker method used to perform            // output color conversion to the application's destination            // buffer.  pDisplayFrame must be a non-NULL pointer to the            // image to be displayed.        Status     PrepareDecodeBuffers(RefPicListReorderInfo *,RefPicListReorderInfo *);        Status     ProcessTailNALUnits();            // prepareDecodeBuffers is called at the beginning of Decode(),            // to set up m_pCurrentFrame, m_pDisplayFrame and m_pMBInfo for            // the decoding of the incoming frame. If we have to display a frame prior to            // decoding, the display is handled here and m_pDisplayframe            // will be left NULL.        
// Decode-and-reconstruct a segment of unumMBs macroblocks.  One variant
        // per combination of entropy coder (CAVLC/CABAC), profile
        // (HP presumably = High Profile — TODO confirm) and frame/field
        // coding (_FLD suffix).
        Status      DecRecSegment_CAVLC(Ipp32u unumMBs);
        Status      DecRecSegment_HP_CAVLC(Ipp32u unumMBs);
        Status      DecRecSegment_CABAC(Ipp32u unumMBs);
        Status      DecRecSegment_HP_CABAC(Ipp32u unumMBs);
        Status      DecRecSegment_CAVLC_FLD(Ipp32u unumMBs);
        Status      DecRecSegment_HP_CAVLC_FLD(Ipp32u unumMBs);
        Status      DecRecSegment_CABAC_FLD(Ipp32u unumMBs);
        Status      DecRecSegment_HP_CABAC_FLD(Ipp32u unumMBs);

        // Forward declaration of internal type(s)
        struct DeblockingParameters;
        struct DeblockingParametersMBAFF;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -