📄 aac_enc_api_fp.c
  AACEnc_com *state_com;
  int sfb_offset[MAX_SECTION_NUMBER];
  sEnc_individual_channel_stream ics[2];
  sQuantizationBlock quantization_block;
  sEnc_single_channel_element sce;
  sEnc_channel_pair_element cpe;
  sBitsreamBuffer BS;
  sBitsreamBuffer *pBS = &BS;
  int win_seq;
  int win_shape;
  int i, j, numCh, ch, procCh, sfb;
  int available_bits, bits_in_buf;
  int bits_from_buffer, com_additional_bits, used_bits, save_bits;
  int additional_bits[2], max_bits_in_buf;
  int max_sfb_pred = 0;

  if (!inPointer || !outPointer)
    return AAC_NULL_PTR;

  INIT_BITSTREAM(pBS, outPointer)

  state_com = &(state->com);

  ippsZero_32f(inSignal + 2048, 2048);

  for (i = 0; i < 7; i++) {
    ics[0].scale_factor_grouping[i] = 0;
    ics[1].scale_factor_grouping[i] = 0;
  }

  ics[0].pHuffTables = (void**)&state_com->huffman_tables;
  ics[1].pHuffTables = (void**)&state_com->huffman_tables;
  ics[0].audioObjectType = state_com->audioObjectType;
  ics[1].audioObjectType = state_com->audioObjectType;
  ics[0].predictor_data_present = 0;
  ics[1].predictor_data_present = 0;

  sce.p_individual_channel_stream = &ics[0];
  cpe.p_individual_channel_stream_0 = &ics[0];
  cpe.p_individual_channel_stream_1 = &ics[1];

  numCh = state_com->m_channel_number;
  ippsDeinterleave_16s(inPointer, numCh, 1024, state_com->buff);

  /* Encode one syntactic element per iteration: SCE/LFE cover one channel,
     CPE covers two. */
  for (ch = 0; ch < numCh; ch += procCh) {
    procCh = 1;
    if (state_com->chInfo[ch].element_id == ID_CPE)
      procCh = 2;

    for (i = 0; i < procCh; i++) {
      ippsConvert_16s32f(state_com->buff[ch+i],
                         state->m_buff_pointers[3*(ch+i)+state_com->m_buff_next_index],
                         1024);

      if (state_com->chInfo[ch].element_id != ID_LFE) {
        state->psychoacoustic_block_com.input_data[0] =
          state->m_buff_pointers[3*(ch+i)+state_com->m_buff_curr_index];
        state->psychoacoustic_block_com.input_data[1] =
          state->m_buff_pointers[3*(ch+i)+state_com->m_buff_next_index];
        Psychoacoustic(&state->psychoacoustic_block[ch+i],
                       &state->psychoacoustic_block_com);
      } else {
        state->psychoacoustic_block[ch+i].block_type = ONLY_LONG_SEQUENCE;
      }

      ics[i].windows_sequence = state->psychoacoustic_block[ch+i].block_type;
      ics[i].window_shape = 1;
    }

    /* available_bits counting */
    available_bits = state_com->chInfo[ch].mean_bits;
    bits_in_buf = state_com->chInfo[ch].bits_in_buf;
    max_bits_in_buf = state_com->chInfo[ch].max_bits_in_buf;

    used_bits = 3; /* Syntactic element ID */
    /* First pass (last argument 0): count element side-info bits without
       writing to the bitstream. */
    if (procCh == 1) {
      used_bits += enc_single_channel_element(&sce,
        state_com->chInfo[ch].element_instance_tag, pBS, 0);
    } else {
      if (ics[0].windows_sequence == ics[1].windows_sequence) {
        cpe.common_window = 1;
      } else {
        cpe.common_window = 0;
      }
      used_bits += enc_channel_pair_element(&cpe,
        state_com->chInfo[ch].element_instance_tag, pBS, 0);
    }

    /* Bit-reservoir handling: drain the buffer when it is more than 90% full,
       otherwise spend only part of the nominal budget and save the rest. */
    if (bits_in_buf > 0.9 * max_bits_in_buf) {
      available_bits += bits_in_buf - (int)(0.9 * max_bits_in_buf);
      bits_from_buffer = (int)(0.9 * max_bits_in_buf);
    } else {
      available_bits = (int)(available_bits * 0.85);
      bits_from_buffer = bits_in_buf;
    }

    available_bits -= used_bits;
    available_bits /= procCh;

    /* Extra bits per channel, driven by perceptual entropy (PE). */
    com_additional_bits = 0;
    for (i = 0; i < procCh; i++) {
      additional_bits[i] =
        (int)((state->psychoacoustic_block[ch+i].curr_frame_PE - 600) * 1.5);
      if (additional_bits[i] < 0) {
        additional_bits[i] = 0;
      } else if (additional_bits[i] > 1.5 * available_bits) {
        additional_bits[i] = (int)(1.5 * available_bits);
      } else if ((ics[i].windows_sequence == EIGHT_SHORT_SEQUENCE) &&
                 (additional_bits[i] < 0.5 * available_bits)) {
        additional_bits[i] = (int)(0.5 * available_bits);
      }
      com_additional_bits += additional_bits[i];
    }

    for (i = 0; i < procCh; i++) {
      /* Scale the request down if the reservoir cannot cover it. */
      if (com_additional_bits > bits_from_buffer) {
        additional_bits[i] = (int)((float)additional_bits[i] *
          ((float)bits_from_buffer / (float)com_additional_bits));
      }

      quantization_block.available_bits = available_bits + additional_bits[i];
      //quantization_block.available_bits =
      //  (state_com->chInfo[ch].mean_bits - used_bits) / procCh;
      if (quantization_block.available_bits > 768 * 8) {
        quantization_block.available_bits = 768 * 8;
      } else if (quantization_block.available_bits < 0) {
        quantization_block.available_bits = 0;
      }

      win_seq = ics[i].windows_sequence;
      win_shape = ics[i].window_shape;

      /*
      if (pBlock->block_type == EIGHT_SHORT_SEQUENCE) {
        state->quantization_block[i].p_smr = pBlock->smr_short[pBlock->nb_curr_index];
      } else {
        state->quantization_block[i].p_smr = pBlock->smr_long[pBlock->nb_curr_index];
      }
      */

      ics[i].num_sfb = state_com->real_num_sfb[win_seq];
      if (state_com->chInfo[ch+i].element_id != ID_LFE) {
        ics[i].max_sfb = state_com->real_max_sfb[win_seq];
      } else {
        ics[i].max_sfb = state_com->real_max_sfb_lfe[win_seq];
      }
      ics[i].sfb_offset = state_com->sfb_offset[win_seq];

      FilterbankEnc(&(state->filterbank_block),
                    state->m_buff_pointers[3*(ch+i)+state_com->m_buff_prev_index],
                    state->m_buff_pointers[3*(ch+i)+state_com->m_buff_curr_index],
                    win_seq, win_shape,
                    state_com->chInfo[ch+i].prev_window_shape,
                    mdct_line, 0);

      /* Long-term prediction (AAC LTP object type, long windows only). */
      p_mdct_line_pred = NULL;
      if (state_com->audioObjectType == AOT_AAC_LTP) {
        if (win_seq != EIGHT_SHORT_SEQUENCE) {
          max_sfb_pred = ics[i].max_sfb;
          if (max_sfb_pred > MAX_LTP_SFB_LONG)
            max_sfb_pred = MAX_LTP_SFB_LONG;

          ippsCopy_32f(state->m_buff_pointers[3*(ch+i)+state_com->m_buff_prev_index],
                       inSignal, 1024);
          ippsCopy_32f(state->m_buff_pointers[3*(ch+i)+state_com->m_buff_curr_index],
                       inSignal + 1024, 1024);

          ltpEncode(inSignal, state->ltp_buff[ch+i], predictedBuf,
                    &(ics[i].ltp_lag), &(ics[i].ltp_coef),
                    state->corrFft, state->corrBuff);

          if (ics[i].ltp_lag >= 0) {
            FilterbankEnc(&(state->filterbank_block),
                          predictedBuf, predictedBuf + 1024,
                          win_seq, win_shape,
                          state_com->chInfo[ch+i].prev_window_shape,
                          predictedSpectrum, 0);
            ippsZero_32f(predictedSpectrum + state_com->sfb_offset[win_seq][max_sfb_pred],
                         1024 - state_com->sfb_offset[win_seq][max_sfb_pred]);
            p_mdct_line_pred = mdct_line_pred;
            ippsSub_32f(predictedSpectrum, mdct_line, p_mdct_line_pred, 1024);
          }
        }
      }

      quantization_block.common_scalefactor_update =
        &(state_com->chInfo[ch+i].common_scalefactor_update);
      quantization_block.last_frame_common_scalefactor =
        &(state_com->chInfo[ch+i].last_frame_common_scalefactor);

      ics[i].num_window_groups = 1;
      ics[i].len_window_group[0] = 1;
      p_mdct_line = mdct_line;

      /* short block interleave */
      if (win_seq == EIGHT_SHORT_SEQUENCE) {
        int *len_window_group = ics[i].len_window_group;
        int *scale_factor_grouping = ics[i].scale_factor_grouping;
        int *tmp_sfb_offset = state_com->sfb_offset[win_seq];
        int num_window_groups = 1;
        int max_sfb = ics[i].max_sfb;
        Ipp32f *ptrIn = mdct_line;
        Ipp32f *ptrOut = mdct_line_i;
        int g, sfb, w, ind;

        /* Derive the window groups from the scale factor grouping bits. */
        for (j = 0; j < 7; j++) {
          if (scale_factor_grouping[j] == 0) {
            len_window_group[num_window_groups] = 1;
            num_window_groups++;
          } else {
            len_window_group[num_window_groups - 1]++;
          }
        }
        ics[i].num_window_groups = num_window_groups;

        if (num_window_groups != 8) {
          /* Regroup the spectrum: within each group, concatenate every sfb
             across the group's windows, and rebuild sfb_offset accordingly. */
          sfb_offset[0] = 0;
          ind = 1;
          for (g = 0; g < num_window_groups; g++) {
            for (sfb = 0; sfb < max_sfb; sfb++) {
              int sfb_start = tmp_sfb_offset[sfb];
              int sfb_end = tmp_sfb_offset[sfb+1];
              int sfb_width = sfb_end - sfb_start;
              sfb_offset[ind] = sfb_offset[ind - 1] + sfb_width * len_window_group[g];
              ind++;
              for (j = 0; j < len_window_group[g]; j++) {
                for (w = 0; w < sfb_width; w++) {
                  *ptrOut = ptrIn[w + sfb_start + 128 * j];
                  ptrOut++;
                }
              }
            }
            ptrIn += 128 * len_window_group[g];
          }
          ics[i].sfb_offset = sfb_offset;
          p_mdct_line = mdct_line_i;
          ics[i].num_sfb = max_sfb;
        }
      }

      Quantization(&quantization_block, &ics[i], p_mdct_line, p_mdct_line_pred);
      //Quantization(&quantization_block, &ics[i], p_mdct_line, 0);

      if (state_com->audioObjectType == AOT_AAC_LTP) {
        if (win_seq != EIGHT_SHORT_SEQUENCE) {
          ics[i].predictor_data_present = ics[i].ltp_data_present;
          if (ics[i].predictor_data_present) {
            /* Zero the predicted spectrum in bands where LTP was rejected. */
            for (sfb = 0; sfb < max_sfb_pred; sfb++) {
              if (!ics[i].ltp_long_used[sfb]) {
                int begin = ics[i].sfb_offset[sfb];
                int end = ics[i].sfb_offset[sfb+1];
                for (j = begin; j < end; j++) {
                  predictedSpectrum[j] = 0;
                }
              }
            }
          }
        }
        ltpBufferUpdate(state->ltp_buff[ch+i], state->ltp_overlap[ch+i],
                        predictedSpectrum, &ics[i], &(state->filterbank_block),
                        state_com->sfb_offset[EIGHT_SHORT_SEQUENCE],
                        state_com->chInfo[ch+i].prev_window_shape,
                        ics[i].predictor_data_present);
      }

      state_com->chInfo[ch+i].prev_window_shape = win_shape;
    }

    GET_BITS_COUNT(pBS, save_bits)

    /* Put bits into bitstream (second pass, last argument 1). */
    if (state_com->chInfo[ch].element_id == ID_SCE) {
      PUT_BITS(pBS, ID_SCE, 3);
      enc_single_channel_element(&sce,
        state_com->chInfo[ch].element_instance_tag, pBS, 1);
    } else if (state_com->chInfo[ch].element_id == ID_LFE) {
      PUT_BITS(pBS, ID_LFE, 3);
      enc_single_channel_element(&sce,
        state_com->chInfo[ch].element_instance_tag, pBS, 1);
    } else {
      if (cpe.common_window) {
        if (state_com->audioObjectType == AOT_AAC_LTP) {
          if (ics[1].predictor_data_present) {
            ics[0].predictor_data_present = 1;
          }
        }
      }
      PUT_BITS(pBS, ID_CPE, 3);
      enc_channel_pair_element(&cpe,
        state_com->chInfo[ch].element_instance_tag, pBS, 1);
    }

    /* Update the bit reservoir with the bits actually spent. */
    GET_BITS_COUNT(pBS, used_bits)
    used_bits -= save_bits;
    state_com->chInfo[ch].bits_in_buf += state_com->chInfo[ch].mean_bits - used_bits;
  }

  PUT_BITS(pBS, ID_END, 3);
  SAVE_BITSTREAM(pBS)
  Byte_alignment(pBS);

  /* Rotate the prev/curr/next indices of the three-frame input buffer. */
  state_com->m_buff_prev_index++;
  if (state_com->m_buff_prev_index == 3) state_com->m_buff_prev_index = 0;
  state_com->m_buff_curr_index++;
  if (state_com->m_buff_curr_index == 3) state_com->m_buff_curr_index = 0;
  state_com->m_buff_next_index++;
  if (state_com->m_buff_next_index == 3) state_com->m_buff_next_index = 0;

  state->psychoacoustic_block_com.prev_prev_f_r_index = state_com->m_buff_prev_index;
  state->psychoacoustic_block_com.prev_f_r_index = state_com->m_buff_curr_index;
  state->psychoacoustic_block_com.current_f_r_index = state_com->m_buff_next_index;
  state->psychoacoustic_block_com.nb_curr_index++;
  state->psychoacoustic_block_com.nb_curr_index &= 1;
  state->psychoacoustic_block_com.nb_prev_index++;
  state->psychoacoustic_block_com.nb_prev_index &= 1;

  /* Report the frame size in bytes. */
  GET_BITS_COUNT(pBS, (*encodedBytes))
  *encodedBytes >>= 3;

  state_com->m_frame_number++;

  return AAC_OK;
}

/********************************************************************/

AACStatus aacencClose(AACEnc *state)
{
  if (state == NULL)
    return AAC_OK;

  FreePsychoacousticCom(&state->psychoacoustic_block_com);
  FreeFilterbank(&state->filterbank_block);
  FreeHuffmanTables((IppsVLCEncodeSpec_32s**)(&state->com.huffman_tables));
  ippsFree(state->com.real_state);

  return AAC_OK;
}

/********************************************************************/

AACStatus aacencGetSampleFrequencyIndex(int *freq_index, AACEnc *state)
{
  if (!state)
    return AAC_NULL_PTR;

  *freq_index = state->com.sampling_frequency_index;

  return AAC_OK;
}

/********************************************************************/

AACStatus aacencGetDuration(float *p_duration, AACEnc *state)
{
  float duration;

  if (!p_duration || !state)
    return AAC_NULL_PTR;

  /* 1024 output samples per encoded frame. */
  duration = (float)(state->com.m_frame_number) * 1024;
  *p_duration = duration / (float)(state->com.m_sampling_frequency);

  return AAC_OK;
}

/********************************************************************/
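For orientation, below is a minimal caller-side sketch of the query and teardown entry points defined above. It is an illustration, not part of the encoder source: report_and_close is a hypothetical helper, the encoder instance is assumed to have been created and fed its input frames elsewhere (that setup code is not part of this excerpt), and the encoder's own public header is assumed to be included for the AACEnc/AACStatus declarations.

#include <stdio.h>
/* Include the encoder's public API header here (it provides AACEnc, AACStatus,
   aacencGetSampleFrequencyIndex, aacencGetDuration and aacencClose). */

/* Hypothetical helper: `state` must point to an encoder that has already
   processed its input frames. */
static void report_and_close(AACEnc *state)
{
  int   freq_index = 0;
  float duration   = 0.0f;

  /* Each getter returns AAC_OK on success. */
  if (aacencGetSampleFrequencyIndex(&freq_index, state) == AAC_OK)
    printf("sampling_frequency_index: %d\n", freq_index);

  if (aacencGetDuration(&duration, state) == AAC_OK)
    printf("encoded duration: %.3f s\n", duration); /* frames * 1024 / sample rate */

  /* Releases the psychoacoustic, filterbank and Huffman table resources. */
  aacencClose(state);
}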