
📄 scal_dec_frame.c

📁 Speech compression algorithms
💻 C
📖 Page 1 of 5
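This first page of the listing appears to come from the scalable T/F (AAC/TwinVQ) decoder of the MPEG-4 Audio reference software. It shows the tail of the layer/mode setup routine, the complete LTP spectrum-reconstruction helper ltp_scal_reconstruct(), and the opening part of aacScaleableFlexMuxDecode(), which reads the FlexMux access units, decodes the TwinVQ core layer if present, and starts parsing the first AAC scalable layer header. The listing is cut off mid-function at the page boundary.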
        commonWindow[i] = 1;
        break;
      default:
        CommonExit(-1, "wrong channel config ");
        break;
      }
    }

  *lastAacLay = nLowRateChan ? nAacLay : nAacLay - 1;
  if (aac == 0) *lastAacLay = -1;
  *lowRateChannelPresent = nLowRateChan;
  *firstStLay = nFirstStereo;
}

void
ltp_scal_reconstruct(WINDOW_SEQUENCE blockType,
                     WINDOW_SHAPE windowShape,
                     WINDOW_SHAPE windowShapePrev,
                     int num_channels,
                     double **p_reconstructed_spectrum,
                     int blockSizeSamples,
                     int med_win_in_long,
                     int short_win_in_long,
                     int nr_of_sfb,
                     int msMask[8][60],
                     NOK_LT_PRED_STATUS **nok_lt_status,
                     Info *info, TNS_frame_info **tns_info,
                     QC_MOD_SELECT qc_select)
{
  int i, j, ch;
  Float tmpbuffer[BLOCK_LEN_LONG];
  NOK_LT_PRED_STATUS *nok_ltp;

  /*
   * Following piece of code reconstructs the spectrum of the
   * first mono (or stereo, if no mono present) layer for LTP.
   */
  if(num_channels == 2)
    doInverseMsMatrix(info, p_reconstructed_spectrum[0],
                      p_reconstructed_spectrum[1], msMask);

  for(ch = 0; ch < num_channels; ch++)
  {
    nok_ltp = nok_lt_status[ch];

    for(i = 0; i < blockSizeSamples; i++)
      tmpbuffer[i] = p_reconstructed_spectrum[ch][i];

    /* Add the LTP contribution to the reconstructed spectrum. */
    nok_lt_predict(info, blockType, windowShape, windowShapePrev,
                   nok_ltp->sbk_prediction_used, nok_ltp->sfb_prediction_used,
                   nok_ltp, nok_ltp->weight, nok_ltp->delay, tmpbuffer,
                   blockSizeSamples, blockSizeSamples / med_win_in_long,
                   blockSizeSamples / short_win_in_long, tns_info[ch], qc_select);

    for(i = 0; i < blockSizeSamples; i++)
      p_reconstructed_spectrum[ch][i] = tmpbuffer[i];

    /* TNS synthesis filtering. */
    info->islong = (blockType != EIGHT_SHORT_SEQUENCE);
    for(i = j = 0; i < tns_info[ch]->n_subblocks; i++)
    {
      tns_decode_subblock(tmpbuffer + j, nr_of_sfb,
                          info->sbk_sfb_top[i], info->islong,
                          &tns_info[ch]->info[i], qc_select);
      j += info->bins_per_sbk[i];
    }

    /* Update the time domain buffer of LTP. */
    nok_ltp_buf_update(blockType, windowShape, windowShapePrev,
                       nok_ltp, tmpbuffer, blockSizeSamples,
                       blockSizeSamples / med_win_in_long,
                       blockSizeSamples / short_win_in_long,
                       short_win_in_long);
  }

  /*
   * The reconstructed spectrum of the lowest layer is processed as
   * Mid/Side in the upper layers.
   */
  if(num_channels == 2)
    doMsMatrix(info, p_reconstructed_spectrum[0],
               p_reconstructed_spectrum[1], msMask);
}

static int debugDiff = 0;

void aacScaleableFlexMuxDecode ( BsBitStream*       fixed_stream,
                                 BsBitStream*       gc_WRstream[],
                                 WINDOW_SEQUENCE    windowSequence[MAX_TIME_CHANNELS],
                                 int*               flexMuxBits,
                                 double*            spectral_line_vector[MAX_TF_LAYER][MAX_TIME_CHANNELS],
                                 int                output_select,
                                 Info**             sfbInfo,
                                 int                numChannels,
                                 int                lopLong,
                                 int                lopShort,
                                 float              normBw,
                                 WINDOW_SHAPE*      windowShape,
                                 QC_MOD_SELECT      qc_select,
                                 HANDLE_RESILIENCE  hResilience,
                                 HANDLE_BUFFER      hVm,
                                 HANDLE_BUFFER      hHcrSpecData,
                                 HANDLE_HCR         hHcrInfo,
                                 FRAME_DATA*        fd,
                                 TF_DATA*           tfData,
                                 ntt_DATA*          nttData,
                                 HANDLE_EP_INFO     hEpInfo,
                                 HANDLE_CONCEALMENT hConcealment,
                                 NOK_LT_PRED_STATUS **nok_lt_status,
                                 int                med_win_in_long,
                                 int                short_win_in_long,
                                 PRED_TYPE          pred_type)
{
  BsBitStream*   layer_stream;
  unsigned int   x, j;
  static int     lowRateChannelPresent;
  static enum CORE_CODEC coreCodecIdx;
  static int     nrGranules = 3;
  static int     lastAacLay = 0;
  static int     firstAacLay = 0;
  static int     layNumChan[MAX_TF_LAYER];
  static int     firstStLay;
  static int     lastMonoLay;
  static WINDOW_SHAPE windowShapePrev = WS_FHG;
  unsigned long  ultmp, codedBlockType;
  int            diffControl[MAX_TIME_CHANNELS][60] = {{0x0}};
  int            commonWindow[MAX_TF_LAYER];
  int            aacLayer = 0;
  int            layer;
  int            ch, calcMonoLay;
  double*        low_spec;
  int            diffControlBands;
  int            i;
  int            tmp = 0;
  int            msMask[8][60] = {{0x0}};
  int            msMaskPres = 0;
  int            shortFssWidth;
  TNS_frame_info tns_info[MAX_TIME_CHANNELS];
  TNS_frame_info tns_info_ext[MAX_TIME_CHANNELS];
  byte           max_sfb[Winds];
  static byte    sfbCbMap[MAX_TF_LAYER][MAX_TIME_CHANNELS][MAXBANDS] = {{{0x0}}};
  int            groupInfo = 0;
  int            tns_data_present[2];
  int            tns_ext_present[2];
  int            used_bits[4][2];
  int            decodedBits;
  int            coreDecBits = 0;
  ntt_INDEX      index;
  ntt_INDEX      index_scl;
  int            coreChannels;
  unsigned long  totalLength;
  unsigned long  AUlength[8];
  int            tvqTnsPresent;

  /* init */
  decodedBits        = 0;
  tvqTnsPresent      = 0;
  coreChannels       = 0;
  tns_ext_present[0] = 0;
  tns_ext_present[1] = 0;

  index.nttDataBase    = nttData->nttDataBase;
  index_scl.nttDataScl = nttData->nttDataScl;
  index.numChannel     = numChannels;
  index_scl.numChannel = numChannels;
  index.block_size_samples =
    ( fd->od->ESDescriptor[0]->DecConfigDescr.audioSpecificConfig.
      specConf.TFSpecificConfig.frameLength.value ? 960 : 1024 );
  index_scl.block_size_samples = index.block_size_samples;

  /*********************/
  for ( x = 0; x < fd->od->streamCount.value; x++ ) {
    unsigned int AUindex; /* AUindex = scaleable layer number, since we have no stream map table */
    totalLength = 0;
    getAccessUnit ( fixed_stream, fd->layer[x].bitBuf, &AUindex, &totalLength,
                    fd->od->ESDescriptor[x]);
    if (x != AUindex) CommonExit(-1, "\nError in FlexMuxDecode");
    AUlength[x] = totalLength;
  }
  *flexMuxBits = BsCurrentBit(fixed_stream);

  layer = 0;
  getModeFromOD(fd->od, &lowRateChannelPresent, layNumChan, &lastAacLay,
                &firstStLay, &coreCodecIdx, commonWindow);

  if (output_select > lastAacLay) output_select = lastAacLay;
  if (output_select < 0)          output_select = 0; /* YB 980508 */

  if (firstStLay >= 0)
    lastMonoLay = firstStLay - 1; /* debug */
  else
    lastMonoLay = lastAacLay;     /* debug */

  if (output_select > lastMonoLay) {
    calcMonoLay = lastMonoLay;
  } else {
    calcMonoLay = output_select;
  }

  /* open the first layer bitbuffer to read:
     can be an AAC or TwinVQ bitbuffer as core */
  layer_stream = BsOpenBufferRead(fd->layer[layer].bitBuf);

  /*
      According to the new bitstream syntax (98/11/12),
      there is no more ics_reserved_bit for scalable
  */
  BsGetBit( layer_stream, &ultmp, 2); /* window_sequence */
  codedBlockType = (int)ultmp;
  decodedBits += 2;
  switch ( codedBlockType )
    {
    case 0:
      windowSequence[MONO_CHAN] = ONLY_LONG_SEQUENCE;
      break;
    case 1:
      windowSequence[MONO_CHAN] = LONG_START_SEQUENCE;
      break;
    case 2:
      windowSequence[MONO_CHAN] = EIGHT_SHORT_SEQUENCE;
      break;
    case 3:
      windowSequence[MONO_CHAN] = LONG_STOP_SEQUENCE;
      break;
    default:
      CommonExit(-1, "wrong blocktype %d", codedBlockType);
    }

  BsGetBit( layer_stream, &ultmp, 1);
  windowShape[0] = (WINDOW_SHAPE)ultmp; /* window shape */
  decodedBits += 1;

  if (numChannels == 2) {
    windowSequence[1] = windowSequence[MONO_CHAN];
    windowShape[1]    = windowShape[MONO_CHAN];
  }

  /* decode core layer if present */
  if (lowRateChannelPresent)
    {
      coreDecBits = decodedBits;
      firstAacLay = 1;
      switch (coreCodecIdx)
        {
        case NTT_TVQ:
#if 0
          tvqAUDecode( numChannels,
                       frameData, /* config data, obj descr. etc. */
                       tfData,
                       hFault,
                       qc_select,
                       &index, &index_scl,
                       fixed_stream);
          mat_usedNumBit = tfData->decoded_bits;
          windowSequence[MONO_CHAN] = tfData->windowSequence[MONO_CHAN];
          windowShape[MONO_CHAN]    = tfData->windowShape[MONO_CHAN];
          if(numChannels == 2){
            windowSequence[1] = windowSequence[MONO_CHAN];
            windowShape[1]    = windowShape[MONO_CHAN];
          }
#else
          index.w_type          = (WINDOW_SEQUENCE)codedBlockType;
          index.ms_mask         = 0;
          index.group_code      = 0x7F; /* default */
          index.last_max_sfb[0] = 0;
          /* ... HP */
          ntt_headerdec( -1, layer_stream, &index, sfbInfo, &decodedBits,
                         tns_info, nok_lt_status, pred_type,
                         hResilience, hVm, hEpInfo );

          coreDecBits  = decodedBits;
          coreDecBits += ntt_BitUnPack ( layer_stream,
                                         BsBufferNumBit(fd->layer[0].bitBuf) -
                                         decodedBits,
                                         windowSequence[MONO_CHAN],
                                         &index );
          ntt_vq_decoder(&index, spectral_line_vector[0], sfbInfo);

          totalLength = BsBufferNumBit(fd->layer[0].bitBuf);
          BsGetSkip(layer_stream, (totalLength - coreDecBits));
          BsCloseRemove ( layer_stream, 1 );

          layer++;
          layer_stream = BsOpenBufferRead(fd->layer[layer].bitBuf);
          coreChannels = 1; /* currently only 1 channel is supported */

          /*-- long term predictor --*/
          if(pred_type == NOK_LTP)
            {
              Float current_frame[BLOCK_LEN_LONG];
              Info *info = sfbInfo[index.w_type];

              for(ch = 0; ch < numChannels; ch++)
                {
                  for (i = 0; i < info->bins_per_bk; i++)
                    current_frame[i] = spectral_line_vector[0][ch][i];

                  nok_lt_predict (sfbInfo[index.w_type],
                                  index.w_type,
                                  *windowShape,
                                  windowShapePrev,
                                  nok_lt_status[ch]->sbk_prediction_used,
                                  nok_lt_status[ch]->sfb_prediction_used,
                                  nok_lt_status[ch],
                                  nok_lt_status[ch]->weight,
                                  nok_lt_status[ch]->delay,
                                  current_frame,
                                  info->bins_per_bk,
                                  info->bins_per_bk / med_win_in_long,
                                  info->bins_per_bk / short_win_in_long,
                                  (tvqTnsPresent) ? &tns_info[ch] : NULL,
                                  qc_select);

                  windowShapePrev = *windowShape;

                  /* This is passed to upper layers. */
                  for (i = 0; i < info->bins_per_bk; i++)
                    spectral_line_vector[0][ch][i] = current_frame[i];

                  /* TNS synthesis filtering. */
                  for(i = j = 0; i < tns_info[ch].n_subblocks; i++)
                    {
                      tns_decode_subblock(current_frame + j,
                                          index.max_sfb[0],
                                          info->sbk_sfb_top[i],
                                          info->islong,
                                          &(tns_info[ch].info[i]),
                                          qc_select);
                      j += info->bins_per_sbk[i];
                    }

                  nok_ltp_buf_update(index.w_type, *windowShape,
                                     *windowShape, nok_lt_status[ch],
                                     current_frame, info->bins_per_bk,
                                     info->bins_per_bk / med_win_in_long,
                                     info->bins_per_bk / short_win_in_long,
                                     short_win_in_long);
                }
            }
#endif
          break;
        default:
          CommonExit(-1, "\n wrong core coder");
          break;
        }
      decodedBits = 0;
    }

  if (lastAacLay >= 0) {
    if (windowSequence[MONO_CHAN] == EIGHT_SHORT_SEQUENCE) {
      BsGetBit( layer_stream, &ultmp, 4);        /* max_sfb */
      decodedBits += 4;
      max_sfb[firstAacLay] = ultmp;
      BsGetBit( layer_stream, &ultmp, 7);        /* scale_factor_grouping */
      groupInfo = ultmp;
      decodedBits += 7;
      sfbInfo[windowSequence[MONO_CHAN]]->num_groups =
        decode_grouping( ultmp, sfbInfo[windowSequence[MONO_CHAN]]->group_len );
    } else {
      BsGetBit( layer_stream, &ultmp, 6);        /* max_sfb */
      decodedBits += 6;
      max_sfb[firstAacLay] = ultmp;
    }
    /* end of ics_info() */

    /* rest of aac_scaleable_main_header */

    if( layNumChan[firstAacLay] == 2 ) {
      int g, sfb, win;
      int gwin;
      BsGetBit( layer_stream, &ultmp, 2);     /* ms_mask_present ? */
      decodedBits += 2;
      msMaskPres = ultmp;
      if (msMaskPres == 1) {
        win = 0;
        for (g = 0; g < sfbInfo[windowSequence[MONO_CHAN]]->num_groups; g++) {
          for (sfb = 0; sfb < max_sfb[firstAacLay]; sfb++) {
            BsGetBit( layer_stream, &ultmp, 1);    /* ms_mask */
            decodedBits += 1;
            msMask[win][sfb] = ultmp;
          }
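Two details of the header parsing above are easy to miss in the flattened listing. First, for an EIGHT_SHORT_SEQUENCE the 7-bit scale_factor_grouping field is expanded by decode_grouping() into a number of window groups and their lengths. The following is a hypothetical stand-alone sketch of that expansion (the reference routine's exact signature may differ): read MSB first, a set bit means short window i+1 stays in the same group as window i, a cleared bit starts a new group.

#include <stdio.h>

/* Sketch of scale_factor_grouping expansion for 8 short windows.
 * Returns the number of groups and fills group_len[] with the
 * number of short windows in each group.  (Illustrative only.)
 */
static int decode_grouping_sketch(unsigned long scale_factor_grouping,
                                  int group_len[8])
{
    int win, num_groups = 0;

    group_len[0] = 1;                                    /* window 0 always opens group 0 */
    for (win = 1; win < 8; win++) {
        if (scale_factor_grouping & (1UL << (7 - win)))  /* bit for window 'win', MSB first */
            group_len[num_groups]++;                     /* extend the current group */
        else
            group_len[++num_groups] = 1;                 /* start a new group */
    }
    return num_groups + 1;
}

int main(void)
{
    int group_len[8];
    int g, n = decode_grouping_sketch(0x5D, group_len);  /* 1011101b */

    printf("%d groups:", n);                             /* prints "3 groups: 2 4 2" */
    for (g = 0; g < n; g++)
        printf(" %d", group_len[g]);
    printf("\n");
    return 0;
}

Second, the ms_mask bits read at the end of the listing mark, per scalefactor band, where the stereo pair is carried as mid/side. As a generic illustration (not the doInverseMsMatrix()/doMsMatrix() routines used above, whose direction and scaling may differ), standard AAC M/S reconstruction for a masked band is simply L = M + S, R = M - S:

/* Hypothetical per-band M/S reconstruction controlled by ms_mask;
 * group/window bookkeeping is omitted for brevity.
 */
static void apply_ms_sketch(double *left, double *right,
                            const int *sfb_top,   /* cumulative band boundaries */
                            const int *ms_mask,   /* one flag per scalefactor band */
                            int num_sfb)
{
    int sfb, i, start = 0;

    for (sfb = 0; sfb < num_sfb; sfb++) {
        if (ms_mask[sfb]) {
            for (i = start; i < sfb_top[sfb]; i++) {
                double mid  = left[i];   /* mid carried in the left channel */
                double side = right[i];  /* side carried in the right channel */
                left[i]  = mid + side;   /* L = M + S */
                right[i] = mid - side;   /* R = M - S */
            }
        }
        start = sfb_top[sfb];
    }
}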
