📄 scal_enc_frame.c
        timeSig[MONO_CHAN][i] *= 0.5;
      }
      for( ch=1; ch<omi.ch_no_max; ch++ ) {
        /* copy S-spectrum */
        for( i=0; i<blockSizeSamples; i++ ) {
          diffSpectrum[ch][i] = p_spectrum[ch][i];
        }
      }
    }

    switch (coreCodecIdx) {
    case CC_G729:
    case CC_CELP_MPEG4_60:
      EncodeCore( layerStream,
                  timeSig[MONO_CHAN],
                  coreDecodedSampleBuf[MONO_CHAN],
                  blockSizeSamples,
                  downsamplFac,
                  coreCodecIdx );
#if 0
      mdct_core( NULL,
                 blockType[MONO_CHAN],
                 windowShape,
                 coreDecodedSampleBuf[MONO_CHAN],
                 coreSpectrum[MONO_CHAN],
                 0,
                 samplRate,
                 blockSizeSamples );
#else
      mdct_core( NULL,
                 coreDecodedSampleBuf[MONO_CHAN],
                 coreSpectrum[MONO_CHAN],
                 0,
                 /* samplRate, */
                 blockSizeSamples );
#endif
      break;

    case NTT_TVQ:
      /* assume that NTT_VQ has the same FS as AAC,
         so we don't have to apply sampling rate conversion */
      {
        int j, ch;
        double nttTimeSigAr[MAX_TIME_CHANNELS][1024];
        double dlpc_spectrum[ntt_T_FR_MAX];
        int used_bits;
        double *nttTimeSig[MAX_TIME_CHANNELS];
        double *nttTimeSigInit[MAX_TIME_CHANNELS];
        double *nttCoreSpectrum[MAX_TIME_CHANNELS];

        for (i=0; i<omi.ch_no_core; i++) {
          /* TVQ doesn't need the time signal, this is only for the interface */
          nttTimeSigInit[i]  = nttTimeSigAr[i];
          nttTimeSig[i]      = nttTimeSigAr[i];
          nttCoreSpectrum[i] = coreSpectrum[i];
          /* copy time signal */
          for (j=0; j<1024; j++)
            nttTimeSigAr[i][j] = (double)timeSig[i][j];
        }

        ntt_index->group_code       = 0x7f;
        ntt_index->last_max_sfb[0]  = 0;
        ntt_index->restore_flag_tns = 0;
        ntt_index->w_type           = blockType[MONO_CHAN];

        ntt_select_ms( p_spectrum, ntt_index, sfb_width_table, nr_of_sfb, -1 );

        /* this belongs to ICS info, but has to be written within the first TF layer */
        used_bits = 0;
        BsPutBit(layerStream, (int)blockType[MONO_CHAN], 2);   /* window sequence */
        BsPutBit(layerStream, (int)windowShape[MONO_CHAN], 1); /* window shape */
        used_bits += 3;

#if 1
        if(pred_type == NOK_LTP)
          for(ch = 0; ch < omi.ch_no_core; ch++) {
            for(i = 0; i < blockSizeSamples; i++)
              baselayer_spectrum[ch][i] = p_spectrum[ch][i];

            nok_ltp_enc(p_spectrum[ch], nok_tmp_DTimeSigBuf[ch], blockType[ch],
                        windowShape[ch], windowShapePrev[ch], blockSizeSamples,
                        blockSizeSamples/med_win_in_long,
                        blockSizeSamples/short_win_in_long,
                        sfb_offset, nr_of_sfb[ch], &nok_lt_status[ch],
                        tnsInfo[ch], qc_select);
          }
#endif

        if(tvq_debug_level>5)
          fprintf(stderr, "MSMSMS %5d \n", ntt_index->ms_mask);

        ntt_headerenc( -1, layerStream, ntt_index, &used_bits, nr_of_sfb,
                       tnsInfo, nok_lt_status, pred_type );

        if(tvq_debug_level>2)
          fprintf(stderr, "end tns_enc used_bits %5d\n", used_bits);

        ntt_vq_coder( nttTimeSigInit,
                      nttTimeSig,
                      p_spectrum,
                      NULL,
                      NTT_PW_INTERNAL,
                      blockType[MONO_CHAN],
                      ntt_index,
                      param_ntt,
                      sfb_width_table,
                      nr_of_sfb,
                      ntt_index->nttDataBase->ntt_NBITS_FR - used_bits, /* T.Ishikawa 981118 */
                      dlpc_spectrum,
                      nttCoreSpectrum );

        /* pack the TVQ layer in the flexmux stream */
        if(tvq_debug_level>2)
          fprintf(stderr, " %5d\n", BsCurrentBit(layerStream));

        ntt_BitPack(ntt_index, layerStream);

        core_coder_bits = BsCurrentBit(layerStream);
        if(tvq_debug_level>2)
          fprintf(stderr, "core_coder_bits %5d\n", core_coder_bits);

        /* write to flexmux PDU */
        BsClose(layerStream);
        writeFlexMuxPDU(lay, mainStream, dynpartBuf);
        layerBits = BsCurrentBit(mainStream);

        /*
         * Restore the spectrum of the base layer and update the LTP buffer.
         */
        if(pred_type == NOK_LTP) {
          double baselayer_rec_spectrum[BLOCK_LEN_LONG];

          for(ch = 0; ch < omi.ch_no_core; ch++) {
            for(i = 0; i < blockSizeSamples; i++)
              baselayer_rec_spectrum[i] = nttCoreSpectrum[ch][i];

            /* Add the LTP contribution to the reconstructed spectrum. */
            nok_ltp_reconstruct(baselayer_rec_spectrum, blockType[ch], sfb_offset,
                                nr_of_sfb[ch], blockSizeSamples/short_win_in_long,
                                &nok_lt_status[ch]);

            /* This is passed to the scalable encoder. */
            for(i = 0; i < blockSizeSamples; i++)
              nttCoreSpectrum[ch][i] = baselayer_rec_spectrum[i];

            for(i = 0; i < blockSizeSamples; i++)
              p_spectrum[ch][i] = baselayer_spectrum[ch][i];

            /* TNS synthesis filtering. */
            if(ntt_index->tvq_tns_enable)
              TnsEncode2(nr_of_sfb[ch], nr_of_sfb[ch], blockType[ch], sfb_offset,
                         baselayer_rec_spectrum, tnsInfo[MONO_CHAN], 1);

            /* Update the time buffer of LTP. */
            nok_ltp_update(baselayer_rec_spectrum, blockType[ch], windowShape[ch],
                           windowShapePrev[ch], blockSizeSamples,
                           blockSizeSamples/med_win_in_long,
                           blockSizeSamples/short_win_in_long,
                           &nok_lt_status[ch]);
          }
        }

#if 1
        /* Scalable encoder */
        {
          int iscl;
          int iii, jjj;

          for(iii=0; iii<8; iii++){
            for(jjj=0; jjj<ntt_index->max_sfb[0]; jjj++){
              ntt_index_scl->msMask[iii][jjj] = ntt_index->msMask[iii][jjj];
            }
          }
          ntt_index_scl->max_sfb[0] = ntt_index->max_sfb[0];
          ntt_index_scl->w_type     = ntt_index->w_type;
          ntt_index_scl->group_code = ntt_index->group_code;
          ntt_index_scl->pf         = ntt_index->pf;

          if(tvq_debug_level>5)
            fprintf(stderr, "LLL %5d %5d %5d %5d \n",
                    ntt_index_scl->max_sfb[0], ntt_index_scl->w_type,
                    ntt_index_scl->group_code, ntt_index_scl->pf);
          if(tvq_debug_level>5)
            fprintf(stderr, "LLL %5d ntt_NSclLay \n",
                    ntt_index->nttDataScl->ntt_NSclLay);

          for (iscl=0; iscl<ntt_index->nttDataScl->ntt_NSclLay; iscl++){
            tvqScalEnc( p_spectrum, ntt_index, ntt_index_scl, param_ntt,
                        mainStream, tnsInfo, sfb_width_table, nr_of_sfb,
                        nttCoreSpectrum, pred_type, iscl );
          }

          for(ch = 0; ch < omi.ch_no_core; ch++) {
            /* for(i = 0; i < blockSizeSamples; i++)
                 nttCoreSpectrum[ch][i] = p_reconstructed_spectrum[ch][i]; */
            for(i = 0; i < blockSizeSamples; i++)
              p_reconstructed_spectrum[ch][i] = nttCoreSpectrum[ch][i];
          }

          layerBits = BsCurrentBit(mainStream);
          lay += ntt_index->nttDataScl->ntt_NSclLay;
        }
#endif
      }
      break;

    default:
      CommonExit( 1, "Encode scalab.: core coder not yet supported" );
    }
    lay++;

    if (tnsInfo[MONO_CHAN]) {
      int sfb, k, sfb_offset[ 100 ];
      /* calc. sfb offset table */
      for (sfb=k=0; sfb<nr_of_sfb[MONO_CHAN]; sfb++) {
        sfb_offset[sfb] = k;
        k += sfb_width_table[MONO_CHAN][sfb];
      }
      sfb_offset[sfb] = k;
      TnsEncode2(nr_of_sfb[MONO_CHAN],      /* Number of bands per window */
                 nr_of_sfb[MONO_CHAN],      /* max_sfb */
                 blockType[MONO_CHAN],      /* block type */
                 sfb_offset,                /* Scalefactor band offset table */
                 coreSpectrum[MONO_CHAN],   /* Spectral data array */
                 tnsInfo[MONO_CHAN],        /* TNS info */
                 0);                        /* Analysis filter. */
    }

    if (omi.tf_layers > 0) {
      CalcFssControl( coreSpectrum[MONO_CHAN],
                      p_spectrum[MONO_CHAN],
                      diffSpectrum[MONO_CHAN],
                      blockType[MONO_CHAN],
                      FssControl[MONO_CHAN],
                      blockSizeSamples,
                      sfb_width_table[MONO_CHAN],
                      samplRate,
                      diffContrSimu );
    }
  } else {
    for( ch=0; ch<omi.ch_no_max; ch++ ) {
      for( i=0; i<blockSizeSamples; i++ ) {
        if(pred_type == NOK_LTP)
          diffSpectrum[ch][i] = baselayer_spectrum[ch][i];
        else
          diffSpectrum[ch][i] = p_spectrum[ch][i];
      }
    }
  }

  if (omi.tf_layers > 0) {
    /* first T/F layer */
    frameBits -= layerBits;
    flexmuxOverhead = 3*8*(1 + (frameBits/2040));
    frameBits -= flexmuxOverhead;
    layerStream = BsOpenBufferWrite(dynpartBuf);

    if (qc_select == AAC_SYS) {
      aacEncodeScalHeader( layerStream,
                           NULL,
                           blockType[MONO_CHAN],
                           blockType[MONO_CHAN]==EIGHT_SHORT_SEQUENCE ?
                             nr_of_sfb[MONO_CHAN]*num_window_groups :
                             nr_of_sfb[MONO_CHAN],          /* max_sfb */
                           omi.ch_no_tf[0],
                           omi.ch_no_max,
                           omi.ch_no_core,
                           samplRate,
                           FssControl,
                           ms_mask,
                           num_window_groups,
                           window_group_length,
                           tnsInfo[MONO_CHAN],
                           frameData,
                           qc_select,
                           nok_lt_status );
    }

    rem_bits = frameBits - BsCurrentBit(layerStream);

    fullBandwidth = (int)( samplRate/2 );
    if (fullBandwidth>14000)
      fullBandwidth = 14000;

    {
      int available_bits;
      int limitedBandwidth;
      int _nr_of_sfb[MAX_TIME_CHANNELS];
      memcpy( _nr_of_sfb, nr_of_sfb, sizeof(int)*MAX_TIME_CHANNELS );

      available_bits   = rem_bits/omi.tf_layers;
      limitedBandwidth = fullBandwidth/omi.tf_layers;

      ubits = tf_encode_spectrum_aac( /*diffSpectrum*/ p_spectrum,
                                      energy,
                                      allowed_dist,
                                      blockType,
                                      sfb_width_table,
                                      _nr_of_sfb,
                                      available_bits,
                                      0,
                                      padding_limit,
                                      layerStream,
                                      var_stream,
                                      NULL,   /* write to layerStream directly after the header */
                                      omi.ch_no_tf[tfLayer],
                                      p_reconstructed_spectrum,
                                      useShortWindows,
                                      windowShape,
                                      aacAllowScalefacs,
                                      samplRate,
                                      qc_select,
                                      pred_type,
                                      nok_lt_status,
                                      nok_bwp_status,
                                      blockSizeSamples,
                                      num_window_groups,
                                      window_group_length,
                                      NULL,
                                      limitedBandwidth );

      /* Update the LTP buffers of the lowest layer. */
      if(coreCodecIdx == NO_CORE && pred_type == NOK_LTP)
        ltp_scal_reconstruct(blockType[MONO_CHAN], windowShape[MONO_CHAN],
                             windowShapePrev[MONO_CHAN], omi.ch_no_max,
                             p_reconstructed_spectrum[0],
                             p_reconstructed_spectrum[1],
                             num_window_groups, window_group_length,
                             blockSizeSamples, med_win_in_long,
                             short_win_in_long, sfb_offset, _nr_of_sfb,
                             nok_lt_status, tnsInfo);

      rem_bits -= ubits;
    }

    /* pack one AAC layer into a flexMux packet and write it to the bitstream */
    BsClose(layerStream);
    writeFlexMuxPDU(lay, mainStream, dynpartBuf);
    layerBits = BsCurrentBit(mainStream);
    lay++;
  }

  /* other T/F layers */
  for( ; lay<lastLayer; lay++ ) {
    tfLayer++;
    layerStream = BsOpenBufferWrite(dynpartBuf);

    for( ch=0; ch<omi.ch_no_tf[tfLayer]; ch++ ) {
      for( i=0; i<blockSizeSamples; i++ ) {
        /* for now just always the difference signal */
        diffSpectrum[ch][i] -= p_reconstructed_spectrum[ch][i];
      }
      for( i=0; i<8; i++ ) {
        msFssControl[ch][i] = DC_DIFF;
      }
    }

    {
      int hbits = -BsCurrentBit( layerStream );
      aacEncodeLayerHeader( layerStream,
                            blockType[MONO_CHAN],
                            nr_of_sfb[MONO_CHAN],   /* max_sfb */
                            nr_of_sfb[MONO_CHAN],   /* max_sfb_prev_layer */
                            msFssControl,
                            ms_mask,
                            (omi.ch_no_tf[tfLayer]>omi.ch_no_tf[tfLayer-1]),
                            omi.ch_no_tf[tfLayer],
                            frameData );
      hbits += BsCurrentBit( layerStream );
      rem_bits -= hbits;
    }

    { /* HP 971120 */
      int _nr_of_sfb[MAX_TIME_CHANNELS];
      memcpy( _nr_of_sfb, nr_of_sfb, sizeof(int)*MAX_TIME_CHANNELS );

      rem_bits -= tf_encode_spectrum_aac( diffSpectrum,
                                          energy,
                                          allowed_dist,
                                          blockType,
                                          sfb_width_table,
                                          _nr_of_sfb,
                                          (rem_bits/(omi.tf_layers-tfLayer)),
                                          0,
                                          padding_limit,
                                          layerStream,
                                          var_stream,
                                          NULL,
                                          omi.ch_no_tf[tfLayer],
                                          p_reconstructed_spectrum,
                                          useShortWindows,
                                          windowShape,
                                          aacAllowScalefacs,
                                          samplRate,
                                          qc_select,
                                          pred_type,
                                          nok_lt_status,
                                          nok_bwp_status,
                                          blockSizeSamples,
                                          num_window_groups,
                                          window_group_length,
                                          NULL,
                                          fullBandwidth/(omi.tf_layers-tfLayer) );
    }

    BsClose(layerStream);
    writeFlexMuxPDU(lay, mainStream, dynpartBuf);
    layerBits = BsCurrentBit(mainStream);
  }

  return( BsCurrentBit(mainStream) );
}
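The layer loops above divide whatever remains of the frame budget evenly over the T/F layers that are still to be written: the first AAC layer gets rem_bits/omi.tf_layers and limitedBandwidth = fullBandwidth/omi.tf_layers, and each further enhancement layer gets rem_bits/(omi.tf_layers-tfLayer) and fullBandwidth/(omi.tf_layers-tfLayer), so the coded bandwidth widens layer by layer. The following stand-alone sketch illustrates only that budgeting scheme; it is not part of scal_enc_frame.c, the function and variable names are hypothetical, and the real encoder subtracts the bits actually consumed by a layer rather than the nominal share.

#include <stdio.h>

/* Hypothetical illustration of the per-layer bit/bandwidth split used above. */
static void sketch_layer_budget(int frame_bits, int sampl_rate, int tf_layers)
{
    int full_bandwidth = sampl_rate / 2;            /* Nyquist bandwidth */
    int rem_bits = frame_bits;
    int layer;

    if (full_bandwidth > 14000)                     /* clipped to 14 kHz as above */
        full_bandwidth = 14000;

    for (layer = 0; layer < tf_layers; layer++) {
        int layers_left = tf_layers - layer;
        int layer_bits  = rem_bits / layers_left;       /* even share of what is left   */
        int layer_bw    = full_bandwidth / layers_left; /* divisor shrinks, so the      */
                                                        /* bandwidth grows per layer    */

        printf("layer %d: %d bits, up to %d Hz\n", layer, layer_bits, layer_bw);

        /* the real code subtracts the bits actually used by the layer */
        rem_bits -= layer_bits;
    }
}

int main(void)
{
    /* e.g. 12000 bits per frame, 48 kHz sampling rate, 3 T/F layers */
    sketch_layer_budget(12000, 48000, 3);
    return 0;
}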