
skl_mpg4i.h

MPEG-4 codec

inline void SKL_MB::Set_Field_DCT(int fDCT)
{
  Field_DCT = fDCT;
  if (fDCT>0) {
    Y2 = Y1 + BpS;
    YBpS = 2*BpS;
  }
  else {
    Y2 = Y1 + BpS8;
    YBpS = BpS;
  }
}

inline void SKL_MB::Set_No_Field()
{
  if (Field_DCT>0) Field_DCT = 0;
  Field_Pred = 0;
}

inline
void SKL_MB::Copy_16To8(SKL_INT16 In[6*64]) const
{
  VOL->Quant_Ops.IDct_Put(In+0*64, Y1,   YBpS);
  VOL->Quant_Ops.IDct_Put(In+1*64, Y1+8, YBpS);
  VOL->Quant_Ops.IDct_Put(In+2*64, Y2,   YBpS);
  VOL->Quant_Ops.IDct_Put(In+3*64, Y2+8, YBpS);
  VOL->Quant_Ops.IDct_Put(In+4*64, U,    BpS);
  VOL->Quant_Ops.IDct_Put(In+5*64, V,    BpS);
}

inline
void SKL_MB::Add_16To8(SKL_INT16 In[6*64]) const
{
  if (Cbp&0x20) VOL->Quant_Ops.IDct_Add(In+0*64, Y1,   YBpS);
  if (Cbp&0x10) VOL->Quant_Ops.IDct_Add(In+1*64, Y1+8, YBpS);
  if (Cbp&0x08) VOL->Quant_Ops.IDct_Add(In+2*64, Y2,   YBpS);
  if (Cbp&0x04) VOL->Quant_Ops.IDct_Add(In+3*64, Y2+8, YBpS);
  if (Cbp&0x02) VOL->Quant_Ops.IDct_Add(In+4*64, U,    BpS);
  if (Cbp&0x01) VOL->Quant_Ops.IDct_Add(In+5*64, V,    BpS);
}

//////////////////////////////////////////////////////////
// SKL_MB : MV prediction
//////////////////////////////////////////////////////////

inline void SKL_MB::Store_Zero_MV() const
{
  SKL_ZERO_MV(MVs[0]);
  SKL_ZERO_MV(MVs[1]);
  SKL_ZERO_MV(MVs[MV_Stride+0]);
  SKL_ZERO_MV(MVs[MV_Stride+1]);
}

inline void SKL_MB::Store_16x16_MV() const
{
  SKL_COPY_MV(MVs[1],           MVs[0]);
  SKL_COPY_MV(MVs[MV_Stride+0], MVs[0]);
  SKL_COPY_MV(MVs[MV_Stride+1], MVs[0]);
}

  // All of Section 7.7.2 boils down to averaging field MVs after
  // they've been used to form the block prediction.
  // However, the vertical component for a field is always even,
  // and we internally don't store it multiplied by 2,
  // => we don't need the DIV2RND for the y-component.

#define DIV2RND(x) ( ((x)>>1) | ((x)&1) )

inline void SKL_MB::Store_16x8_MV(SKL_MV *Dst, SKL_MV MV[2]) const
{
  const int Tmp = MV[0][0] + MV[1][0];
  Dst[0][0] = DIV2RND(Tmp);
  Dst[0][1] = MV[0][1] + MV[1][1];  // no DIV2RND here
  SKL_COPY_MV(Dst[1], Dst[0]);
}

#undef DIV2RND
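
/** @internal
    Illustration of what the DIV2RND macro above computes for a few averaged
    horizontal field-MV sums: an arithmetic halve, with the low (half-pel) bit
    ORed back in, so odd sums always land on an odd result while even sums
    halve exactly.  A minimal sketch; it assumes C++11 static_assert and
    arithmetic right shift of negative values, which the surrounding code
    relies on as well.
    @code
    #define DIV2RND(x) ( ((x)>>1) | ((x)&1) )
    static_assert(DIV2RND( 4) ==  2, "even sums halve exactly");
    static_assert(DIV2RND( 5) ==  3, "odd sums keep the half-pel bit set");
    static_assert(DIV2RND( 6) ==  3, "6/2 is already odd");
    static_assert(DIV2RND(-3) == -1, "-3/2 with arithmetic shift");
    #undef DIV2RND
    @endcode
 */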

//////////////////////////////////////////////////////////
// Quarter-pixel interpolation
//////////////////////////////////////////////////////////
// case 0 (pt a):                copy
// case 2 (pt b):                h-pass
// case 1/3 (pts e/f):           h-pass + h-avrg
// case 8 (pt c):                                  v-pass
// case 10 (pt d):               h-pass          + v-pass
// case 9/11 (pts k/l):          h-pass + h-avrg + v-pass
// case 4/12 (pts g/m):                            v-pass + v-avrg
// case 6/14 (pts i/o):          h-pass          + v-pass + v-avrg
// case 5/13/7/15 (pts h/n/j/p): h-pass + h-avrg + v-pass + v-avrg
//////////////////////////////////////////////////////////

inline
void SKL_MB::Predict_16x16_QP(SKL_BYTE * const Dst, const SKL_BYTE *Src,
                              const SKL_MV MV,
                              const SKL_MB_FUNCS * const Ops) const
{
  const int Quads = (MV[0]&3) | ((MV[1]&3)<<2);
  Src += (MV[1]>>2)*BpS + (MV[0]>>2);
  switch(Quads) {
    default:
    case 0:  Ops->HP_16x8[0](Dst, Src, BpS);
             Ops->HP_16x8[0](Dst+BpS8, Src+BpS8, BpS);   break;
    case 1:  Ops->H_Pass_Avrg(Dst, Src, 16, BpS);        break;
    case 2:  Ops->H_Pass(Dst, Src, 16, BpS);             break;
    case 3:  Ops->H_Pass_Avrg_Up(Dst, Src, 16, BpS);     break;
    case 4:  Ops->V_Pass_Avrg(Dst, Src, 16, BpS);        break;
    case 5:  Ops->H_LowPass_Avrg(YTmp, Src, 17, BpS);
             Ops->V_Pass_Avrg(Dst, YTmp, 16, BpS);       break;
    case 6:  Ops->H_LowPass(YTmp, Src, 17, BpS);
             Ops->V_Pass_Avrg(Dst, YTmp, 16, BpS);       break;
    case 7:  Ops->H_LowPass_Avrg_Up(YTmp, Src, 17, BpS);
             Ops->V_Pass_Avrg(Dst, YTmp, 16, BpS);       break;
    case 8:  Ops->V_Pass(Dst, Src, 16, BpS);             break;
    case 9:  Ops->H_LowPass_Avrg(YTmp, Src, 17, BpS);
             Ops->V_Pass(Dst, YTmp, 16, BpS);            break;
    case 10: Ops->H_LowPass(YTmp, Src, 17, BpS);
             Ops->V_Pass(Dst, YTmp, 16, BpS);            break;
    case 11: Ops->H_LowPass_Avrg_Up(YTmp, Src, 17, BpS);
             Ops->V_Pass(Dst, YTmp, 16, BpS);            break;
    case 12: Ops->V_Pass_Avrg_Up(Dst, Src, 16, BpS);     break;
    case 13: Ops->H_LowPass_Avrg(YTmp, Src, 17, BpS);
             Ops->V_Pass_Avrg_Up(Dst, YTmp, 16, BpS);    break;
    case 14: Ops->H_LowPass(YTmp, Src, 17, BpS);
             Ops->V_Pass_Avrg_Up(Dst, YTmp, 16, BpS);    break;
    case 15: Ops->H_LowPass_Avrg_Up(YTmp, Src, 17, BpS);
             Ops->V_Pass_Avrg_Up(Dst, YTmp, 16, BpS);    break;
  }
}
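
/** @internal
    Worked example for the Quads dispatch used by the Predict_*_QP routines:
    how a quarter-pel MV splits into an integer source offset plus one of the
    16 fractional cases.  A minimal sketch; it assumes arithmetic right shift
    of negative components, as the code above does.
    @code
    const int mv_x = 5, mv_y = -3;                     // quarter-pel units
    const int Quads = (mv_x & 3) | ((mv_y & 3) << 2);  // 1 | (1<<2) == 5
    // Src is advanced by (mv_y>>2)*BpS + (mv_x>>2) == -BpS + 1, and
    // case 5 runs h-pass + h-avrg followed by v-pass + v-avrg
    // (the pts h/n/j/p row of the table above).
    @endcode
 */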

inline
void SKL_MB::Predict_16x8_Field_QP(SKL_BYTE * const Dst, const SKL_BYTE *Src,
                                   const SKL_MV MV,
                                   const SKL_MB_FUNCS * const Ops) const
{
  const int Quads = (MV[0]&3) | ((MV[1]&3)<<2);
  Src += ((MV[1]>>1)&~1)*BpS + (MV[0]>>2);
  switch(Quads) {
    default:
    case 0:  Ops->HP_16x8[0](Dst, Src, 2*BpS);              break;
    case 1:  Ops->H_Pass_Avrg(Dst, Src, 8, 2*BpS);          break;
    case 2:  Ops->H_Pass(Dst, Src, 8, 2*BpS);               break;
    case 3:  Ops->H_Pass_Avrg_Up(Dst, Src, 8, 2*BpS);       break;
    case 4:  Ops->V_Pass_Avrg_8(Dst, Src, 16, 2*BpS);       break;
    case 5:  Ops->H_Pass_Avrg(YTmp, Src, 9, 2*BpS);
             Ops->V_Pass_Avrg_8(Dst, YTmp, 16, 2*BpS);      break;
    case 6:  Ops->H_Pass(YTmp, Src, 9, 2*BpS);
             Ops->V_Pass_Avrg_8(Dst, YTmp, 16, 2*BpS);      break;
    case 7:  Ops->H_Pass_Avrg_Up(YTmp, Src, 9, 2*BpS);
             Ops->V_Pass_Avrg_8(Dst, YTmp, 16, 2*BpS);      break;
    case 8:  Ops->V_Pass_8(Dst, Src, 16, 2*BpS);            break;
    case 9:  Ops->H_Pass_Avrg(YTmp, Src, 9, 2*BpS);
             Ops->V_Pass_8(Dst, YTmp, 16, 2*BpS);           break;
    case 10: Ops->H_Pass(YTmp, Src, 9, 2*BpS);
             Ops->V_Pass_8(Dst, YTmp, 16, 2*BpS);           break;
    case 11: Ops->H_Pass_Avrg_Up(YTmp, Src, 9, 2*BpS);
             Ops->V_Pass_8(Dst, YTmp, 16, 2*BpS);           break;
    case 12: Ops->V_Pass_Avrg_Up_8(Dst, Src, 16, 2*BpS);    break;
    case 13: Ops->H_Pass_Avrg(YTmp, Src, 9, 2*BpS);
             Ops->V_Pass_Avrg_Up_8(Dst, YTmp, 16, 2*BpS);   break;
    case 14: Ops->H_Pass(YTmp, Src, 9, 2*BpS);
             Ops->V_Pass_Avrg_Up_8(Dst, YTmp, 16, 2*BpS);   break;
    case 15: Ops->H_Pass_Avrg_Up(YTmp, Src, 9, 2*BpS);
             Ops->V_Pass_Avrg_Up_8(Dst, YTmp, 16, 2*BpS);   break;
  }
}

inline
void SKL_MB::Predict_8x8_QP(SKL_BYTE * const Dst, const SKL_BYTE *Src,
                            const SKL_MV MV,
                            const SKL_MB_FUNCS * const Ops) const
{
  const int Quads = (MV[0]&3) | ((MV[1]&3)<<2);
  Src += (MV[1]>>2)*BpS + (MV[0]>>2);
  switch(Quads) {
    default:
    case 0:  Ops->HP_8x8[0](Dst, Src, BpS);                 break;
    case 1:  Ops->H_Pass_Avrg_8(Dst, Src, 8, BpS);          break;
    case 2:  Ops->H_Pass_8(Dst, Src, 8, BpS);               break;
    case 3:  Ops->H_Pass_Avrg_Up_8(Dst, Src, 8, BpS);       break;
    case 4:  Ops->V_Pass_Avrg_8(Dst, Src, 8, BpS);          break;
    case 5:  Ops->H_LowPass_Avrg_8(YTmp, Src, 9, BpS);
             Ops->V_Pass_Avrg_8(Dst, YTmp, 8, BpS);         break;
    case 6:  Ops->H_LowPass_8(YTmp, Src, 9, BpS);
             Ops->V_Pass_Avrg_8(Dst, YTmp, 8, BpS);         break;
    case 7:  Ops->H_LowPass_Avrg_Up_8(YTmp, Src, 9, BpS);
             Ops->V_Pass_Avrg_8(Dst, YTmp, 8, BpS);         break;
    case 8:  Ops->V_Pass_8(Dst, Src, 8, BpS);               break;
    case 9:  Ops->H_LowPass_Avrg_8(YTmp, Src, 9, BpS);
             Ops->V_Pass_8(Dst, YTmp, 8, BpS);              break;
    case 10: Ops->H_LowPass_8(YTmp, Src, 9, BpS);
             Ops->V_Pass_8(Dst, YTmp, 8, BpS);              break;
    case 11: Ops->H_LowPass_Avrg_Up_8(YTmp, Src, 9, BpS);
             Ops->V_Pass_8(Dst, YTmp, 8, BpS);              break;
    case 12: Ops->V_Pass_Avrg_Up_8(Dst, Src, 8, BpS);       break;
    case 13: Ops->H_LowPass_Avrg_8(YTmp, Src, 9, BpS);
             Ops->V_Pass_Avrg_Up_8(Dst, YTmp, 8, BpS);      break;
    case 14: Ops->H_LowPass_8(YTmp, Src, 9, BpS);
             Ops->V_Pass_Avrg_Up_8(Dst, YTmp, 8, BpS);      break;
    case 15: Ops->H_LowPass_Avrg_Up_8(YTmp, Src, 9, BpS);
             Ops->V_Pass_Avrg_Up_8(Dst, YTmp, 8, BpS);      break;
  }
}

//////////////////////////////////////////////////////////
// encoder specific.
//////////////////////////////////////////////////////////

typedef void (*SKL_DECIMATE_INTRA_FUNC)(const SKL_QUANT_DSP *This,
                                        SKL_INT16 *Out, SKL_INT16 *In,
                                        const SKL_QUANTIZER M,
                                        SKL_INT32 Scale, SKL_INT32 DC_Scale);

typedef int (*SKL_DECIMATE_INTER_FUNC)(const SKL_QUANT_DSP *This,
                                       SKL_INT16 *Out, SKL_INT16 *In,
                                       const SKL_QUANTIZER M,
                                       SKL_INT32 Scale,
                                       int PSNR_Limit);
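
/** @internal
    Sketch of one way the decimate typedefs above can be used: a routine with
    the SKL_DECIMATE_INTER_FUNC signature kept behind a function pointer, e.g.
    to allow CPU-specific variants.  My_Decimate_Inter_C and the commented
    call are hypothetical names for illustration only.
    @code
    static int My_Decimate_Inter_C(const SKL_QUANT_DSP *This,
                                   SKL_INT16 *Out, SKL_INT16 *In,
                                   const SKL_QUANTIZER M,
                                   SKL_INT32 Scale, int PSNR_Limit);

    SKL_DECIMATE_INTER_FUNC Decimate_Inter_Ptr = My_Decimate_Inter_C;
    // later: int Ret = Decimate_Inter_Ptr(Dsp, Out, In, M, Scale, Limit);
    @endcode
 */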

/** @internal
    It's a cursor (only one instance).
 */
struct SKL_MB_ENC : public SKL_MB
{
  protected:
    static const int Map_To_Type[SKL_MAP_LAST];

    void Substract_Prediction();
    void Substract_Field_Prediction();
    int Select_DC_AC_Pred(SKL_INT16 In[6*64], int Pred_Dirs[6]);

    int Texture_Bits;
    int MV_Bits;

  public:
    SKL_MB_ENC(const SKL_MP4_I * const VOL)
      : SKL_MB(VOL)
      , Texture_Bits(0)
      , MV_Bits(0)
    {}

    int B_Type;
    int dQuant;
    SKL_MV dMVs[4];     /**< de-predicted MVs */
    int Last[6];        /**< last non-zero DCT coeff, for INTER MBs only */

    void Set_Type() {
      const int Map_Type = Map[Pos].Type;
      MB_Type = Map_To_Type[ Map_Type ];
      MC_Sel  = (Map_Type==SKL_MAP_GMC);
      if (MB_Type!=SKL_MB_SKIPPED) Set_Final_Params();
    }
    void Set_Final_Params();

    void Decimate_Intra(SKL_INT16 Out[12*64]) const;
    void Decimate_Inter(SKL_INT16 Out[12*64]);
    void Decimate_Inter_GMC(SKL_INT16 Out[12*64]);
    void Decimate_Reduced_Intra(SKL_INT16 Out[12*64]) const;
    void Decimate_Reduced_Inter(SKL_INT16 Out[12*64]);

    void Encode_Intra(SKL_FBB * const Bits, SKL_INT16 In[12*64], int Is_I_VOP);
    void Encode_Inter(SKL_FBB * const Bits, SKL_INT16 In[12*64], int Fwd_Code);
    void Encode_Inter_B(SKL_FBB * const Bits, SKL_INT16 In[12*64],
                        int Fwd_Code, int Bwd_Code);

    void Copy_8To16(SKL_INT16 Out[6*64]) const;
    void Diff_8To16(SKL_INT16 Out[6*64]) const;
    void Copy_8To16_Downsampled(SKL_INT16 Out[6*64]) const;
    void Diff_8To16_Downsampled(SKL_INT16 Out[6*64]) const;

    int Get_Texture_Bits() const { return Texture_Bits; }
    int Get_MV_Bits()      const { return MV_Bits; }
};

  // Warning: slicing problem. Inherit from SKL_MP4_I first
  // so that the cast to SKL_MP4_I* is ok.

class SKL_MP4_ENC_I : public SKL_MP4_I, private SKL_MP4_ENC
{
  private:
      // bit I/O buffer
    size_t          _Buf_Size;
    SKL_BYTE       *_Buf;
    size_t          _Buf_Len;
    void Check_Buf_Size(size_t Needed_Size);
    void Clear_Buf();

    SKL_MP4_PIC    *_In_Pic;

  private:
    int _Need_VOL_Header;
    int _Key_VOL_Headers;
    int _Emit_SEQ_Codes;
    int _Inter_Coding_Threshold;
    int _Use_Trellis;
    SKL_UINT32 Evaluate_Cost(const SKL_INT16 *C, const int * const Zigzag,
                             int Max, int Lambda) const;
    int Trellis_Quantize(SKL_INT16 * const Out, const int Q,
                         const int * const Zigzag, int Non_Zero) const;

    SKL_MP4_ANALYZER *_Analyzer;
    SKL_MP4_ANALYZER *_Dflt_Analyzer;   // use default impl.

    void Write_I_VOP(SKL_FBB * const Bits);
    void Write_P_VOP(SKL_FBB * const Bits);
    void Write_B_VOP(SKL_FBB * const Bits);
    void Write_S_VOP(SKL_FBB * const Bits);
    void Write_Reduced_I_VOP(SKL_FBB * const Bits);
    void Write_Reduced_P_VOP(SKL_FBB * const Bits);
    void Write_VOL_Header(SKL_FBB * const Bits) const;
    void Write_VOP_Header(SKL_FBB * const Bits, const SKL_MP4_FRAME *VOP) const;

    void Code_Frame(SKL_BYTE * const Buf, int Max_Len, const SKL_MP4_FRAME * const Frame);
    void Alloc_Aux();
    void Clear_Aux();

      /* Input analysis and frame-coding preparation */
    void Setup_VOL_Params();                        // init VOL-dependent params: qpel, ...
    void Setup_Frame_Params(SKL_MP4_FRAME *Frame);  // init frame-specific params: Quant, FCode, ...

  public:
    SKL_MP4_ENC_I(SKL_MEM_I *Mem);
    ~SKL_MP4_ENC_I();

    void Set_Trellis_Usage(const int Just_Do_It);

    int Decimate_Inter(SKL_INT16 * const Out, SKL_INT16 * const In, const SKL_INT32 Q) const;
    void Decimate_Intra(SKL_INT16 * const Out, SKL_INT16 * const In, SKL_INT32 Q, SKL_INT32 DC_Q) const;

    SKL_MP4_PIC *Prepare_For_Input(int Width, int Height);
    int Encode(SKL_BYTE *Buf, int Max_Len);

  public:
      // Top class' implementation.
    virtual const SKL_MP4_PIC *Prepare_Next_Frame(int Width, int Height);
    virtual const SKL_MP4_PIC *Get_Next_Frame() const;
    virtual const SKL_MP4_PIC *Get_Last_Coded_Frame() const;
    virtual int Encode();
    virtual int Finish_Encoding();
    virtual const SKL_BYTE *Get_Bits() const;
    virtual int Get_Bits_Length() const;
    virtual SKL_MEM_I *Set_Memory_Manager(SKL_MEM_I *Mem=0);
    virtual void Set_CPU(SKL_CPU_FEATURE Cpu = SKL_CPU_DETECT);
    virtual void Set_Custom_Matrix(int Intra, const SKL_BYTE *M=0);
    virtual SKL_MP4_ANALYZER *Set_Analyzer(SKL_MP4_ANALYZER *Analyzer=0);
    virtual SKL_MP4_ANALYZER *Get_Analyzer() const;
    virtual void Set_Slicer(SKL_MP4_SLICER Slicer, SKL_ANY Slicer_Data=0);
    virtual void Get_All_Frames(SKL_MP4_PIC *Pic) const;
    virtual void Set_Debug_Level(int Level=0);
    virtual int Ioctl(SKL_CST_STRING Param);
};

//////////////////////////////////////////////////////////

#endif  /* _SKL_MPG4I_H_ */
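
/* Usage sketch for the encoder interface declared above, one frame at a time.
   Only the member signatures come from this header; the call order and the
   names Mem, W, H, Buf and Max_Len are assumptions for illustration.

     SKL_MP4_ENC_I Enc(Mem);                           // Mem: an SKL_MEM_I*
     SKL_MP4_PIC *Pic = Enc.Prepare_For_Input(W, H);   // input picture to fill
     // ... copy the next source frame into Pic ...
     Enc.Encode(Buf, Max_Len);                         // presumably codes one VOP into Buf
     // or, through the generic base interface:
     //   Enc.Encode();  Enc.Get_Bits();  Enc.Get_Bits_Length();
     Enc.Finish_Encoding();
*/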
