/* 📄 dec_tf.c */
/* 字号: ("font size:") — web-viewer chrome, not part of the original source */
if (frameData->od->ESDescriptor[0]->DecConfigDescr.audioSpecificConfig.audioDecoderType.value){ /* Celp core */ /* int MPE_conf=frameData->od->ESDescriptor[0]->DecConfigDescr\ */ /* .audioSpecificConfig.specConf.celpSpecificConfig.MPE_Configuration.value; */ int coreDelay= frameData->od->ESDescriptor[1]->DecConfigDescr\ .audioSpecificConfig.specConf.TFSpecificConfig.coreCoderDelay.value; int samplIdxCore= frameData->od->ESDescriptor[0]\ ->DecConfigDescr.audioSpecificConfig.samplingFreqencyIndex.value; int samplIdxTF= frameData->od->ESDescriptor[1]\ ->DecConfigDescr.audioSpecificConfig.samplingFreqencyIndex.value; float delayS[MAX_CORE_DELAY]; tfData->samplFreqFacCore= samplFreqIndex[samplIdxTF]/samplFreqIndex[samplIdxCore]; tfData->tfSamplRate=samplFreqIndex[samplIdxTF]; tfData->mdct_overlap_buffer=(double**)malloc(2*sizeof(double*)); tfData->mdct_overlap_buffer[0] = (double*)malloc(1024*sizeof(double)); tfData->coreModuloBuffer = CreateFloatModuloBuffer(coreDelay + (lpcData->frameNumSample * tfData->samplFreqFacCore * 4)); AddFloatModuloBufferValues( tfData->coreModuloBuffer,delayS , coreDelay); }}/***************************************************************************************** *** *** Function: DecTfFrame *** *** Purpose: processes a block of time signal input samples into a bitstream *** based on T/F encoding *** *** Description: *** *** *** Parameters: *** *** *** Return Value: returns the number of used bits *** *** **** MPEG-4 VM **** *** ****************************************************************************************/void DecTfFrame ( BsBitBuffer* bitBuf, /* in: bit stream frame */ float** sampleBuf, /* out: frameNumSample audio samples */ int* usedNumBit, /* out: num bits used for this frame */ int numChannels, int mainDebugLevel, HANDLE_FAULT_TOLERANT hFault, /**TM990217*/ HANDLE_RESILIENCE hResilience, /* in: handler for error resilience info */ HANDLE_BUFFER hVm, HANDLE_BUFFER hHcrSpecData, HANDLE_HCR hHcrInfo, FRAME_DATA* frameData, 
TF_DATA* tfData, ntt_DATA* nttData, HANDLE_EP_INFO hEpInfo, HANDLE_CONCEALMENT hConcealment) { WINDOW_SEQUENCE windowSequence[MAX_TIME_CHANNELS]/* = {ONLY_LONG_SEQUENCE} */; unsigned long ultmp; int decoded_bits = 0; int i; int i_ch; /* added 971010 YT */ BsBitStream* fixed_stream; /* son_NBCpp */ BsBitStream* gc_stream[MAX_TIME_CHANNELS]; BsBitStream* gc_WRstream[MAX_TIME_CHANNELS]; BsBitBuffer* gcBitBuf[MAX_TIME_CHANNELS]; /* bit buffer for gain_control_data() */ static WINDOW_SHAPE windowShape[2] = {WS_FHG, WS_FHG}; static WINDOW_SHAPE prev_windowShape[2] = {WS_FHG, WS_FHG}; /* YB : 971113 */ /* VQ: Variables for NTT_VQ decoder */ ntt_INDEX ntt_index; ntt_INDEX ntt_index_scl; static int InitFlag = 1; int isampf; int ntt_NSclLay, ntt_NSclLayDec; PRED_TYPE pred_type; int mat_usedNumBit =0; /* T.Ishikawa 980526 */ pred_type=tfData->pred_type; if(qc_select == NTT_VQ_SYS || qc_select == NTT_VQ){ ntt_index_scl.nttDataScl = nttData->nttDataScl; ntt_index.nttDataBase = nttData->nttDataBase; ntt_index.nttDataScl = nttData->nttDataScl; block_size_samples = tfData->block_size_samples; ntt_index.block_size_samples = block_size_samples; ntt_index_scl.block_size_samples = block_size_samples; ntt_index.numChannel = max_ch; ntt_index_scl.numChannel = max_ch; } if(qc_select == NTT_VQ_SYS){ ntt_NSclLay = frameData->od->streamCount.value-1; ntt_NSclLayDec = tfData->output_select; ntt_index.nttDataScl->ntt_NSclLay = ntt_NSclLay; ntt_index_scl.nttDataScl->ntt_NSclLay = ntt_NSclLay; switch (frameData->od->ESDescriptor[0]->DecConfigDescr.audioSpecificConfig.samplingFreqencyIndex.value) { case 0: isampf=96; break; case 1: isampf=88; break; case 2: isampf=64; break; case 3: isampf=48; break; case 4: isampf=44; break; case 5: isampf=32; break; case 6: isampf=24; break; case 7: isampf=22; break; case 8: isampf=16; break; case 9: isampf=12; break; case 10: isampf=11; break; case 11: isampf=8; break; case 12: case 13: case 14: case 15: break; } ntt_index.isampf = isampf; 
ntt_index_scl.isampf = isampf; } else if(qc_select == NTT_VQ){ ntt_NSclLay = ntt_NSclLay_gl; ntt_NSclLayDec = ntt_NSclLayDec_gl; ntt_index.nttDataScl->ntt_NSclLay = ntt_NSclLay; ntt_index_scl.nttDataScl->ntt_NSclLay = ntt_NSclLay; isampf = sampling_rate_decoded/1000; ntt_index.isampf = isampf; ntt_index_scl.isampf = isampf; } ResetReadBitCnt ( hVm ); fixed_stream = BsOpenBufferRead(bitBuf); /* son_NBCpp */ for(i_ch=0;i_ch<MAX_TIME_CHANNELS;i_ch++){ gcBitBuf[i_ch] = BsAllocBuffer(4096); gc_WRstream[i_ch] = BsOpenBufferWrite(gcBitBuf[i_ch]); gc_stream[i_ch] = BsOpenBufferRead(gcBitBuf[i_ch]); } /* for i_ch...*/ /* get sync word */ /* a 7 bit alternative to the strange MPEG-1 audio syncword */ /* KEMAL: syncword makes problems with qc_aac, because it needs byte_alignment, which would be done in AacDecodeFrame() */ if (qc_select!=AAC_SCALABLE) { if( (transport_stream != AAC_RAWDATA_TSTREAM) && (transport_stream != NO_TSTREAM) && (transport_stream != NO_SYNCWORD) ) { BsGetBit( fixed_stream, &ultmp, 7 ); decoded_bits += 7; if( ultmp != 0x6e ) { CommonExit( 1," Wrong Syncword %d ", ultmp); } } } /* inverse Q&C */ switch(qc_select) { case MDCT_VALUES_16BIT: /* debug option unpack MDCT output values */ { double scal; unsigned long iscal; int i; /* get window type */ BsGetBit( fixed_stream, &ultmp, 4 ); decoded_bits += 4; windowSequence[MONO_CHAN] = (WINDOW_SEQUENCE)ultmp; if (mainDebugLevel) { printf("\nblocktype %ld",ultmp); } /* get block scaling factor */ BsGetBit( fixed_stream, &iscal, 12 ); decoded_bits += 12; scal = pow( 2.0, -(((int)iscal)-2048) ); for( i=0; i<block_size_samples; i++ ) { BsGetBit( fixed_stream, &ultmp, 16 ); decoded_bits += 16; spectral_line_vector[0][MONO_CHAN][i] = (double)(((int)ultmp)-32768) * scal; } } break; case AAC_QC: { byte max_sfb[Winds]; byte dummy[2][MAXBANDS]; /* needed for PNS in scalaeble case ! 
*/ decoded_bits += AacDecodeFrame ( fixed_stream, gc_WRstream, spectral_line_vector[0], windowSequence, windowShape, MULTICHANNEL, max_sfb, numChannels, 0, sfbInfo,dummy, hResilience, hVm, hHcrSpecData, hHcrInfo, hEpInfo, hConcealment, qc_select, nok_lt_status); } break; case AAC_SCALABLE: { if (frameData==NULL){ CommonExit(1,"no frameData"); } else { aacScaleableFlexMuxDecode ( fixed_stream, gc_WRstream, &windowSequence[MONO_CHAN], &decoded_bits, spectral_line_vector, scalOutSelect, sfbInfo, numChannels, lopLong, lopShort, normBw, &windowShape[MONO_CHAN], qc_select, hResilience, hVm, hHcrSpecData, hHcrInfo, frameData, tfData, nttData, hEpInfo, hConcealment, nok_lt_status, med_win_in_long, short_win_in_long, pred_type); } } break; case NTT_VQ: { int ntt_available_bits,tomo_tmp, i, j; /* get block type */ decoded_bits=0; BsGetBit( fixed_stream, &ultmp, 2); windowSequence[MONO_CHAN] = (WINDOW_SEQUENCE)ultmp; windowSequence[max_ch-1] = (WINDOW_SEQUENCE)ultmp; ntt_index.w_type=windowSequence[MONO_CHAN]; decoded_bits += 2; BsGetBit( fixed_stream, &ultmp, 1); windowShape[MONO_CHAN] = (WINDOW_SHAPE)ultmp; windowShape[max_ch-1] = (WINDOW_SHAPE)ultmp; decoded_bits += 1; /* window_shape */ ntt_index.ms_mask = 0; ntt_index.group_code = 0x7F ; /*default */ ntt_index.last_max_sfb[0] =0; /* block-wise MS stereo */ ntt_headerdec( -1, fixed_stream, &ntt_index, sfbInfo, &decoded_bits, tns_info, nok_lt_status, pred_type, hResilience, hVm, hEpInfo ); ntt_available_bits = /*frameNumBit*/ ntt_index.nttDataBase->ntt_NBITS_FR - decoded_bits; if ( mainDebugLevel>2 ) { printf("___ntt_available_bits %d %d %d\n",ntt_available_bits, ntt_index.nttDataBase->ntt_NBITS_FR,decoded_bits); } /*--- base decoder ---*/ { float current_frame[ntt_T_FR_MAX]; int itmp; /* bit unpacking */ tomo_tmp=decoded_bits; decoded_bits += ntt_BitUnPack ( fixed_stream, ntt_available_bits, windowSequence[MONO_CHAN], &ntt_index ); if ( mainDebugLevel>2 ) { printf("<<<< BaseLayer(OLD) BitUnPack %d\n", decoded_bits 
-tomo_tmp); } /* decoding tools*/ ntt_vq_decoder(&ntt_index, spectral_line_vector[0], sfbInfo); /*-- long term predictor --*/ if(pred_type == NOK_LTP) { Info *info = sfbInfo[ntt_index.w_type]; for(i_ch = 0; i_ch < max_ch; i_ch++) { for (itmp = 0; itmp < block_size_samples; itmp++) current_frame[itmp] = spectral_line_vector[0][i_ch][itmp]; if(nok_lt_status[i_ch]->sbk_prediction_used[0]) nok_lt_predict (sfbInfo[ntt_index.w_type], ntt_index.w_type, windowShape[i_ch], prev_windowShape[i_ch], nok_lt_status[i_ch]->sbk_prediction_used, nok_lt_status[i_ch]->sfb_prediction_used, nok_lt_status[i_ch], nok_lt_status[i_ch]->weight, nok_lt_status[i_ch]->delay, current_frame, block_size_samples, block_size_samples/med_win_in_long, block_size_samples/short_win_in_long, &tns_info[i_ch], qc_select); /* This is passed to upper layers. */ for (itmp = 0; itmp < block_size_samples; itmp++) spectral_line_vector[0][i_ch][itmp] = current_frame[itmp]; /* TNS synthesis filtering. */ for(i = j = 0; i < tns_info[i_ch].n_subblocks; i++) { tns_decode_subblock(current_frame + j, ntt_index.max_sfb[0], info->sbk_sfb_top[i], info->islong, &(tns_info[i_ch].info[i]), qc_select); j += info->bins_per_sbk[i]; } nok_ltp_buf_update(ntt_index.w_type, windowShape[MONO_CHAN], prev_windowShape[i_ch], nok_lt_status[i_ch], current_frame, block_size_samples, block_size_samples/med_win_in_long, block_size_samples/short_win_in_long, short_win_in_long); } } } /*--- scalable decoders ---*/ { int iscl, iii, jjj; for(iii=0; iii<8; iii++){ for(jjj=0; jjj<ntt_index.max_sfb[0]; jjj++){ ntt_index_scl.msMask[iii][jjj] = ntt_index.msMask[iii][jjj]; } } ntt_index_scl.w_type = windowSequence[MONO_CHAN]; ntt_index_scl.group_code = ntt_index.group_code ; ntt_index_scl.pf = ntt_index.pf ; ntt_index_scl.max_sfb[0] = ntt_index.max_sfb[0]; for (iscl=0; iscl<ntt_NSclLay; iscl++){ ntt_index_scl.last_max_sfb[iscl+1] = ntt_index_scl.max_sfb[
/*
 * ⌨️ 快捷键说明 (web-viewer keyboard-shortcut help — not part of the original source):
 *   复制代码 (Copy code):        Ctrl + C
 *   搜索代码 (Search code):      Ctrl + F
 *   全屏模式 (Fullscreen mode):  F11
 *   切换主题 (Toggle theme):     Ctrl + Shift + D
 *   显示快捷键 (Show shortcuts): ?
 *   增大字号 (Increase font):    Ctrl + =
 *   减小字号 (Decrease font):    Ctrl + -
 */