⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 vdecoder.cc

📁 PIXIL is a small footprint operating environment, complete with PDA PIM applications, a browser and
💻 CC
📖 第 1 页 / 共 5 页
字号:
      // Allocate the three decoder picture buffers (forward reference,
      // backward reference, current B-picture) and create their image
      // planes from 'specs'.
      d_last = new DecodedImageData; d_last->m_image.Create(specs);
      d_next = new DecodedImageData; d_next->m_image.Create(specs);
      d_curr = new DecodedImageData; d_curr->m_image.Create(specs);
      d_last->m_width  = d_next->m_width  = d_curr->m_width  = specs.width;
      d_last->m_height = d_next->m_height = d_curr->m_height = specs.height;

      // Internal (possibly padded) line widths of the luma and chroma planes.
      // All three buffers are created from the same specs, so querying d_last
      // is representative.
      bytesperline_lum = d_last->m_image.AskBitmap(Image<Pixel>::Bitmap_Y ).AskInternalWidth();
      bytesperline_chr = d_last->m_image.AskBitmap(Image<Pixel>::Bitmap_Cr).AskInternalWidth();
    }
}


/* Fill one entry of the motion-compensation source-pointer tables.

   sp_tempref - which pointer set to fill: LAST (forward predictor) or
                NEXT (backward predictor)
   n          - index within that pointer set
   predref    - which decoded buffer the data physically comes from
                (LAST -> d_last, otherwise d_next)
   field      - true: point at a single field of the buffer,
                false: point at the full frame
   topfield   - for field access, selects top (true) or bottom (false) field
*/
inline void VideoDecoder::SetSPOffsets(TempRef sp_tempref,int n,  // predictor of which direction
				       TempRef predref,bool field,bool topfield) // physical location
{
  Pixel **spy,**spcr,**spcb;

  // Select the destination pointer triple (Y/Cr/Cb) to update.
  if (sp_tempref==LAST) { spy=&sp_last_y[n]; spcr=&sp_last_cr[n]; spcb=&sp_last_cb[n]; }
  else                  { spy=&sp_next_y[n]; spcr=&sp_next_cr[n]; spcb=&sp_next_cb[n]; }

  // Select the decoded image buffer the prediction data is read from.
  DecodedImageData* buf;
  if (predref==LAST) { buf=d_last; } else { buf=d_next; }

  if (field)
    {
      *spy  = buf->m_image.AskBitmap(Image<Pixel>::Bitmap_Y ).AskField(topfield)[0];
      *spcr = buf->m_image.AskBitmap(Image<Pixel>::Bitmap_Cr).AskField(topfield)[0];
      *spcb = buf->m_image.AskBitmap(Image<Pixel>::Bitmap_Cb).AskField(topfield)[0];
    }
  else
    {
      *spy  = buf->m_image.AskFrameY()[0];
      *spcr = buf->m_image.AskFrameU()[0];
      *spcb = buf->m_image.AskFrameV()[0];
    }
}

// NOTE(review): mid-file includes; only required by the (currently disabled)
// timing-statistics code at the top of PostPictureHeader() below.
#include <sys/time.h>
#include <unistd.h>

/* Semantic actions performed after a complete picture header has been
   decoded: MPEG-2 consistency checks, quantization-matrix reordering,
   display of the previously decoded reference picture, and selection of
   the target buffer and prediction pointers for the picture about to be
   decoded. */
void VideoDecoder::PostPictureHeader(const SystemTimingInformation& timing)
{
  if (0)  // disabled: wall-clock timing statistics between picture headers
  {
    struct timeval tv;
    static struct timeval last_tv;
    gettimeofday(&tv,NULL);

    static int cnt=0;

    long sec_diff = tv.tv_sec - last_tv.tv_sec;
    long udiff = sec_diff * 1000000;
    udiff += tv.tv_usec-last_tv.tv_usec;

    if (cnt>0)
      cout << cnt << " " << udiff << "\n";
    cnt++;
    last_tv=tv;
  }

  // Make some consistency checks to see if input stream is MPEG-2 compliant.
  if (d_IsMPEG2)
    {
      if (1) //options.WarnOnFalseMPEG1Fields)
        {
          // In MPEG-2 the MPEG-1 compatibility fields must carry fixed
          // values (full-pel flags 0, unused fcode bits '7'). Violations
          // are currently ignored (warnings commented out).
          if (d_pichdr.m_fullpel_fw != 0)
            {
              //Error(ErrSev_Warning,
	      //"Error in input stream (ignored): fullpel-forward motionvector requested.\n");
            }

          if (d_pichdr.m_fullpel_bw != 0)
            {
              //Error(ErrSev_Warning,
	      //"Error in input stream (ignored): fullpel-backward motionvector requested.\n");
            }

          if (d_pichdr.m_fcode[0][0] != 7)
            {
              //Error(ErrSev_Warning,
	      //"Error in input stream (ignored): forward fcode is not '7'\n");
            }

          if (d_pichdr.m_fcode[1][0] != 7)
            {
              //Error(ErrSev_Warning,
	      //"Error in input stream (ignored): backward fcode is not '7'\n");
            }
        }
    }

  // Reset the DC predictor to the mid-value of the current DC precision.
  d_dc_pred = (1<<(d_pichdr.m_intra_dc_precision-1));

#ifndef NDEBUG
  if (d_options.Trace_PicH)
    {
      if (timing.HasPTS) cout << "Picture-Header-PTS: " << timing.pts << endl;
    }
#endif

  // Order quantization matrix coefficients: bring the zig-zag ordered
  // matrices into the scan order used by the bitstream (alternate scan
  // or plain zig-zag).
  if (d_pichdr.m_alternate_scan)
    {
      for (int i=0;i<64;i++)
	{
	  d_quant_bs.m_LumIntra[i] = d_quant_zz.m_LumIntra[AlternateFromZigZag[i]];
	  d_quant_bs.m_LumInter[i] = d_quant_zz.m_LumInter[AlternateFromZigZag[i]];
	  d_quant_bs.m_ChrIntra[i] = d_quant_zz.m_ChrIntra[AlternateFromZigZag[i]];
	  d_quant_bs.m_ChrInter[i] = d_quant_zz.m_ChrInter[AlternateFromZigZag[i]];
	}
    }
  else
    {
      memcpy(d_quant_bs.m_LumIntra,d_quant_zz.m_LumIntra,64*sizeof(int));
      memcpy(d_quant_bs.m_LumInter,d_quant_zz.m_LumInter,64*sizeof(int));
      memcpy(d_quant_bs.m_ChrIntra,d_quant_zz.m_ChrIntra,64*sizeof(int));
      memcpy(d_quant_bs.m_ChrInter,d_quant_zz.m_ChrInter,64*sizeof(int));
    }

  d_scalability_mode = None;

  // Bottom-field pictures start one buffer line further down.
  if (d_pichdr.m_picture_structure == PICSTRUCT_BottomField)
    d_field_offset = 1;
  else
    d_field_offset = 0;

  // ------------------- Now the Picture-Header is completely decoded. ---------------------
  // ------------------- Semantic actions follow. ------------------------------------------

  Assert(d_sink);

  // A frame picture always starts a new frame; a field picture only when it
  // is the first field of its frame.
  const bool IsANewFrame = (d_pichdr.m_picture_structure == PICSTRUCT_FramePicture || d_FirstFieldInFrame);

  /* Do not display images of disabled types.
   */
  if (IsANewFrame)
    {
      d_skip_this_frame =
	(!d_options.DecodeB && d_pichdr.m_picture_coding_type == PICTYPE_B) ||
	(!d_options.DecodeP && d_pichdr.m_picture_coding_type != PICTYPE_I);
	
      // /* ShallISkipThis() */ false;
    }

  if (d_skip_this_frame)
    {
      // Keep field-parity bookkeeping consistent even for skipped pictures.
      d_FirstFieldInFrame = !d_FirstFieldInFrame;
      return;
    }
  
  /* If a new I- or P-image starts, show the last decoded I- or P- frame and
     move to d_last-buffer.  */
  if (IsANewFrame &&
      (d_pichdr.m_picture_coding_type == PICTYPE_I ||
       d_pichdr.m_picture_coding_type == PICTYPE_P))
    {
      if (!d_next_IsEmpty)
        {
	  d_next->m_may_modify=false;
	  d_sink->BeginPicture(d_next);
          d_sink->ShowAllMBRows(d_next); // Show last decoded I- or P-image.
	  d_sink->FinishedPicture();
	  d_next->FreePictureData();     // Free the additional information data.
	  swap(d_last,d_next);    // Old forward prediction image becomes new backward prediction.
        }
    }

  // Decode a new picture.
  if (d_pichdr.m_picture_structure == PICSTRUCT_FramePicture)
    {
      if (d_pichdr.m_picture_coding_type == PICTYPE_I ||
          d_pichdr.m_picture_coding_type == PICTYPE_P)
        {
	  // Reference (I/P) frame pictures are decoded into d_next.
	  d_next->m_timing = timing;

          ptr_y  = d_next->m_image.AskFrameY();
          ptr_cr = d_next->m_image.AskFrameU();
          ptr_cb = d_next->m_image.AskFrameV();

	  if (d_pichdr.m_picture_coding_type==PICTYPE_P)
	    {
	      Assert(d_last);
	      SetSPOffsets(LAST,0,       // forward prediction gets its data from
			   LAST,false);  // the last frame
	    }

          lineskip_lum = bytesperline_lum;
          lineskip_chr = bytesperline_chr;
          d_next_IsEmpty=false;
        }
      else  // B-picture
        {
	  // B-frame pictures are decoded into d_curr and shown immediately.
	  d_curr->m_timing = timing;

          ptr_y  = d_curr->m_image.AskFrameY();
          ptr_cr = d_curr->m_image.AskFrameU();
          ptr_cb = d_curr->m_image.AskFrameV();

	  SetSPOffsets(LAST,0, LAST,false);  // forward prediction from last frame
	  SetSPOffsets(NEXT,0, NEXT,false);  // backward prediction from next frame

          lineskip_lum = bytesperline_lum;
          lineskip_chr = bytesperline_chr;

          d_BFrameAvailable=true;

	  d_sink->BeginPicture(d_curr);
        }

      d_FirstFieldInFrame=true;
    }
  // ---------------- field pictures ---------------
  else if (d_pichdr.m_picture_coding_type == PICTYPE_I ||
	   d_pichdr.m_picture_coding_type == PICTYPE_P)
    {
      if (d_FirstFieldInFrame)
        {
          // First field of an I/P field pair: decode into the selected
          // field of d_next.
          bool topfield = (d_pichdr.m_picture_structure==PICSTRUCT_TopField);

          ptr_y  = d_next->m_image.AskBitmap(Image<Pixel>::Bitmap_Y).AskField(topfield);
          ptr_cr = d_next->m_image.AskBitmap(Image<Pixel>::Bitmap_U).AskField(topfield);
          ptr_cb = d_next->m_image.AskBitmap(Image<Pixel>::Bitmap_V).AskField(topfield);

	  Assert(IsANewFrame);
	  d_next->m_timing = timing;

	  if (d_pichdr.m_picture_coding_type == PICTYPE_P)
	    {
	      SetSPOffsets(LAST,0, LAST,true,true);  // prediction from both fields of
	      SetSPOffsets(LAST,1, LAST,true,false); // the last frame
	    }

          // Field lines are interleaved in the frame buffer, hence twice the
          // per-line stride.
          lineskip_lum = bytesperline_lum*2;
          lineskip_chr = bytesperline_chr*2;

          d_next_IsEmpty=false;
          d_FirstFieldInFrame=false;
        }
      else
        {
          // Second field of an I/P field pair.
          bool topfield = (d_pichdr.m_picture_structure==PICSTRUCT_TopField);

          ptr_y  = d_next->m_image.AskBitmap(Image<Pixel>::Bitmap_Y).AskField(topfield);
          ptr_cr = d_next->m_image.AskBitmap(Image<Pixel>::Bitmap_U).AskField(topfield);
          ptr_cb = d_next->m_image.AskBitmap(Image<Pixel>::Bitmap_V).AskField(topfield);

	  if (d_pichdr.m_picture_coding_type == PICTYPE_P)
	    {
	      // The opposite-parity field comes from the frame currently
	      // being decoded (d_next), the same-parity field from the
	      // previous frame (d_last).
	      if (topfield)
		{
		  SetSPOffsets(LAST,0, LAST,true,true);  // top-field prediction from last frame
		  SetSPOffsets(LAST,1, NEXT,true,false); // bottom-field prediction from current frame
		}
	      else
		{
		  SetSPOffsets(LAST,0, NEXT,true,true);  // top-field prediction from current frame
		  SetSPOffsets(LAST,1, LAST,true,false); // bottom-field prediction from last frame
		}
	    }

          lineskip_lum = bytesperline_lum*2;
          lineskip_chr = bytesperline_chr*2;

          //d_next_IsEmpty=false;
          d_FirstFieldInFrame=true;
        }
    }
  else // B-Field
    {
      bool topfield = (d_pichdr.m_picture_structure==PICSTRUCT_TopField);

      ptr_y  = d_curr->m_image.AskBitmap(Image<Pixel>::Bitmap_Y).AskField(topfield);
      ptr_cr = d_curr->m_image.AskBitmap(Image<Pixel>::Bitmap_U).AskField(topfield);
      ptr_cb = d_curr->m_image.AskBitmap(Image<Pixel>::Bitmap_V).AskField(topfield);

      SetSPOffsets(LAST,0, LAST,true,true);  // forward prediction from last frame
      SetSPOffsets(NEXT,0, NEXT,true,true);
      SetSPOffsets(LAST,1, LAST,true,false); // backward prediction from next frame
      SetSPOffsets(NEXT,1, NEXT,true,false);

      lineskip_lum = bytesperline_lum*2;
      lineskip_chr = bytesperline_chr*2;

      if (d_FirstFieldInFrame)
	{ d_sink->BeginPicture(d_curr); }

      if (!d_FirstFieldInFrame)
        d_BFrameAvailable=true;

      // NOTE(review): despite the indentation, this toggle is NOT inside the
      // 'if' above -- it runs unconditionally, which is what keeps the field
      // parity alternating.
        d_FirstFieldInFrame = !d_FirstFieldInFrame;
    }

  // Remember the start pointers of the picture now being decoded.
  sp_curr_y  = ptr_y[0];
  sp_curr_cb = ptr_cb[0];
  sp_curr_cr = ptr_cr[0];

  // create PictureData if needed
  if (1) // d_sink->NeedsPictureData(d_pichdr.m_picture_coding_type))
    {
      // I/P pictures live in d_next, B pictures in d_curr.
      DecodedImageData* img2decode=NULL;
      if (d_pichdr.m_picture_coding_type == PICTYPE_I ||
	  d_pichdr.m_picture_coding_type == PICTYPE_P)
	{ img2decode=d_next; }
      else
	{ img2decode=d_curr; }

      // Only allocate per-macroblock data if the sink asks for it.
      int mbw=0,mbh=0;
      if (d_sink->NeedsMBData(d_pichdr.m_picture_coding_type))
	{ mbw=d_MBWidth; mbh=d_MBHeight; }

      // Choose which of the two per-picture slots receives this picture's
      // header/data. NOTE(review): d_FirstFieldInFrame has already been
      // updated by the branches above, so its meaning here is inverted
      // relative to the field just processed -- verify intended mapping.
      if (d_pichdr.m_picture_structure == PICSTRUCT_FramePicture ||
	  d_FirstFieldInFrame)
	{
	  Assert(img2decode->m_picdata1==NULL);
	  if (mbw) d_picdata = img2decode->m_picdata1 = PictureData::GetPictureData(mbw,mbh);
	  img2decode->m_pichdr1 = d_pichdr;
	}
      else
	{
	  Assert(img2decode->m_picdata2==NULL);
	  if (mbw) d_picdata = img2decode->m_picdata2 = PictureData::GetPictureData(mbw,mbh);
	  img2decode->m_pichdr2 = d_pichdr;
	}

      // *((PictureHeader*)d_picdata) = d_pichdr;
    }
  else
    { d_picdata=NULL; }

  /*
    cout << "Lineskip Lum: " << lineskip_lum << " Lineskip Chr: " << lineskip_chr << endl;
    cout << "BPL Lum: " << bytesperline_lum << " BPL Chr: " << bytesperline_chr << endl << endl;
  */
}


/* Consume extension-start and user-data packets from the input queue.
   Any other packet is pushed back and the function returns.
   'n' is currently unused. */
void VideoDecoder::DecodeExtensions(int n)
{
  for (;;)
    {
      SysPacket_Packet* pck = GetNextPacket();
      if (pck==NULL)
        return;

      MemBitstreamReader bs(pck->data.AskContents() , pck->data.AskLength());

      switch (bs.PeekBits(32))
        {
        case STARTCODE_EXTENSION_START:
          // TODO: extension data is currently discarded, not decoded.
          delete pck;
          break;

        case STARTCODE_USER_DATA:
          delete pck;
          break;

        default:
          // Not an extension/user-data packet: hand it back and stop.
          PushbackPacket(pck);
          return;
        }
    }
}


/* Advance the reference pixel pointers by the integer part of the motion
   vector for frame-based prediction (the lowest vector bit is the halfpel
   fraction and is discarded here). Chroma displacements are scaled
   according to the chroma subsampling format. */
void VideoDecoder::SetReferencePtrsFrm(struct PixPtrs_const& ptrs,const struct MotionVector& mv)
{
  ptrs.y  += ( mv.m_habs   >>1) + ( mv.m_vabs   >>1)*lineskip_lum;

  if (d_ChromaFormat==CHROMA_420)
    {
      // 4:2:0 -- chroma subsampled in both directions.
      ptrs.cr += ((mv.m_habs/2)>>1) + 
((mv.m_vabs/2)>>1)*lineskip_chr;
      ptrs.cb += ((mv.m_habs/2)>>1) + ((mv.m_vabs/2)>>1)*lineskip_chr;
    }
  else if (d_ChromaFormat==CHROMA_422)
    {
      // 4:2:2 -- chroma subsampled horizontally only.
      ptrs.cr += ((mv.m_habs/2)>>1) + (mv.m_vabs>>1)*lineskip_chr;
      ptrs.cb += ((mv.m_habs/2)>>1) + (mv.m_vabs>>1)*lineskip_chr;
    }
  else
    {
      // 4:4:4 -- chroma at full resolution.
      ptrs.cr += (mv.m_habs>>1) + (mv.m_vabs>>1)*lineskip_chr;
      ptrs.cb += (mv.m_habs>>1) + (mv.m_vabs>>1)*lineskip_chr;
    }
}


/* Same as SetReferencePtrsFrm, but for field-based prediction: vertical
   steps use twice the line stride because the two fields are interleaved
   in the frame buffer. */
void VideoDecoder::SetReferencePtrsFld(struct PixPtrs_const& ptrs,const struct MotionVector& mv)
{
  ptrs.y  += ( mv.m_habs   >>1) + ( mv.m_vabs   >>1)*lineskip_lum*2;

  if (d_ChromaFormat==CHROMA_420)
    {
      ptrs.cr += ((mv.m_habs/2)>>1) + ((mv.m_vabs/2)>>1)*lineskip_chr*2;
      ptrs.cb += ((mv.m_habs/2)>>1) + ((mv.m_vabs/2)>>1)*lineskip_chr*2;
    }
  else if (d_ChromaFormat==CHROMA_422)
    {
      ptrs.cr += ((mv.m_habs/2)>>1) + (mv.m_vabs>>1)*lineskip_chr*2;
      ptrs.cb += ((mv.m_habs/2)>>1) + (mv.m_vabs>>1)*lineskip_chr*2;
    }
  else
    {
      ptrs.cr += (mv.m_habs>>1) + (mv.m_vabs>>1)*lineskip_chr*2;
      ptrs.cb += (mv.m_habs>>1) + (mv.m_vabs>>1)*lineskip_chr*2;
    }
}


#if 0  // disabled dequantization helpers (kept for reference)
inline int DequantizeIntra(int value,int qscale,int matrix)
{
#if MMX_DCT
  int deq_value = value*qscale*matrix;
#else
  int deq_value = value*qscale*matrix/16;
#endif

  return deq_value;
}

inline int DequantizeInter(int value,int qscale,int matrix)
{
#if MMX_DCT
  int sign = (value>0 ? 1 : -1);
  int deq_value = (2*value+sign) * matrix * qscale / 2;
#else
  int sign = (value>0 ? 
1 : -1);
  int deq_value = (2*value+sign) * matrix * qscale / 32;
#endif

  return deq_value;
}
#endif


/* Derive the luma/chroma halfpel interpolation flags from a single
   (unidirectional) motion vector: the lowest bit of each vector component
   selects halfpel interpolation in that direction. Flags go into bits
   0 (vertical) and 1 (horizontal). */
void VideoDecoder::SetHalfPelFlags1(struct MotionCompensation_SglMB::MCData& mcdata,
				    const struct MotionVector& mv)
{
  mcdata.LumaHalfFlags    = ( mv.m_vabs   &1);    // set MC_Last_HalfV
  mcdata.LumaHalfFlags   |= ( mv.m_habs   &1)<<1; // set MC_Last_HalfH
  mcdata.ChromaHalfFlags  = ((mv.m_vabs/2)&1);    // set MC_Last_HalfV
  mcdata.ChromaHalfFlags |= ((mv.m_habs/2)&1)<<1; // set MC_Last_HalfH
}


/* Derive the halfpel interpolation flags for bidirectional prediction:
   bits 0/1 carry the forward (last) flags, bits 2/3 the backward (next)
   flags. */
void VideoDecoder::SetHalfPelFlags2(struct MotionCompensation_SglMB::MCData& mcdata,
				    const struct MotionVector& lastmv,
				    const struct MotionVector& nextmv)
{
  mcdata.LumaHalfFlags    = ( lastmv.m_vabs   &1);    // set MC_Last_HalfV
  mcdata.LumaHalfFlags   |= ( lastmv.m_habs   &1)<<1; // set MC_Last_HalfH
  mcdata.ChromaHalfFlags  = ((lastmv.m_vabs/2)&1);    // set MC_Last_HalfV
  mcdata.ChromaHalfFlags |= ((lastmv.m_habs/2)&1)<<1; // set MC_Last_HalfH

  mcdata.LumaHalfFlags   |= ( nextmv.m_vabs   &1)<<2; // set MC_Next_HalfV
  mcdata.LumaHalfFlags   |= ( nextmv.m_habs   &1)<<3; // set MC_Next_HalfH
  mcdata.ChromaHalfFlags |= ((nextmv.m_vabs/2)&1)<<2; // set MC_Next_HalfV
  mcdata.ChromaHalfFlags |= ((nextmv.m_habs/2)&1)<<3; // set MC_Next_HalfH

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -