📄 specrec.c
    count = faad_get_ts() - count;
    hDecoder->requant_cycles += count;
#endif

    /* pns decoding */
    pns_decode(ics, NULL, hDecoder->frameLength, 0, hDecoder->object_type);

#ifdef MAIN_DEC
    /* MAIN object type prediction */
    if (hDecoder->object_type == MAIN)
    {
        /* intra channel prediction */
        ic_prediction(ics, hDecoder->pred_stat[channel], hDecoder->frameLength,
                      hDecoder->sf_index);

        /* In addition, for scalefactor bands coded by perceptual noise
           substitution the predictors belonging to the corresponding spectral
           coefficients are reset.
        */
        pns_reset_pred_state(ics, hDecoder->pred_stat[channel]);
    }
#endif

#ifdef LTP_DEC
    if (is_ltp_ot(hDecoder->object_type))
    {
#ifdef LD_DEC
        if (hDecoder->object_type == LD)
        {
            if (ics->ltp.data_present)
            {
                if (ics->ltp.lag_update)
                    hDecoder->ltp_lag[channel] = ics->ltp.lag;
            }
            ics->ltp.lag = hDecoder->ltp_lag[channel];
        }
#endif

        /* long term prediction */
        lt_prediction(ics, &(ics->ltp), hDecoder->lt_pred_stat[channel], hDecoder->fb,
                      ics->window_shape, hDecoder->window_shape_prev[channel],
                      hDecoder->sf_index, hDecoder->object_type, hDecoder->frameLength);
    }
#endif

    /* tns decoding */
    tns_decode_frame(ics, &(ics->tns), hDecoder->sf_index, hDecoder->object_type,
                     hDecoder->frameLength);

    /* drc decoding */
    if (hDecoder->drc->present)
    {
        if (!hDecoder->drc->exclude_mask[channel] || !hDecoder->drc->excluded_chns_present)
            drc_decode(hDecoder->drc, ics->buffer);
    }

    /* filter bank */
#ifdef SSR_DEC
    if (hDecoder->object_type != SSR)
    {
#endif
#ifdef LIBPAAC
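        /* LIBPAAC build: the filter bank appears to be delegated to an external
           hook (hDecoder->Lib.Filter) provided by this fork; the #else branch
           below is the regular ifilter_bank() path. This note is an assumption
           based on the visible usage, not on upstream FAAD2. */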
        hDecoder->Lib.Filter(&hDecoder->Lib, ics->window_sequence, ics->window_shape,
                             hDecoder->window_shape_prev[channel],
                             hDecoder->time_out[channel], hDecoder->fb_intermed[channel],
                             hDecoder->d.buffer);
#else
        ifilter_bank(hDecoder->fb, ics->window_sequence, ics->window_shape,
                     hDecoder->window_shape_prev[channel], ics->buffer,
                     hDecoder->time_out[channel], hDecoder->fb_intermed[channel],
                     hDecoder->d.buffer, hDecoder->object_type, hDecoder->frameLength);
#endif
#ifdef SSR_DEC
    } else {
        ssr_decode(&(ics->ssr), hDecoder->fb, ics->window_sequence, ics->window_shape,
                   hDecoder->window_shape_prev[channel], ics->buffer,
                   hDecoder->time_out[channel], hDecoder->ssr_overlap[channel],
                   hDecoder->ipqf_buffer[channel], hDecoder->prev_fmd[channel],
                   hDecoder->frameLength);
    }
#endif

    /* save window shape for next frame */
    hDecoder->window_shape_prev[channel] = ics->window_shape;

#ifdef LTP_DEC
    if (is_ltp_ot(hDecoder->object_type))
    {
        lt_update_state(hDecoder->lt_pred_stat[channel], hDecoder->time_out[channel],
                        hDecoder->fb_intermed[channel], hDecoder->frameLength,
                        hDecoder->object_type);
    }
#endif

#ifdef SBR_DEC
    if (((hDecoder->sbr_present_flag == 1) || (hDecoder->forceUpSampling == 1)) &&
        hDecoder->sbr_alloced[hDecoder->fr_ch_ele])
    {
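        /* SBR decoding. safestack() is a helper specific to this build; judging
           from its use here, it is assumed to invoke sbrDecode for the current
           channel element via hDecoder->stack and to return the decoder's error
           code (nonzero on failure). */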
        retval = safestack(sbrDecode, hDecoder, ics, &hDecoder->stack);
        if (retval > 0)
            return retval;
    } else if (((hDecoder->sbr_present_flag == 1) || (hDecoder->forceUpSampling == 1)) &&
               !hDecoder->sbr_alloced[hDecoder->fr_ch_ele]) {
        return 23;
    }
#endif

    /* copy L to R when no PS is used */
#if (defined(PS_DEC) || defined(DRM_PS))
    if ((hDecoder->ps_used[hDecoder->fr_ch_ele] == 0) &&
        (hDecoder->element_output_channels[hDecoder->fr_ch_ele] == 2))
    {
        uint8_t ele = hDecoder->fr_ch_ele;
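        /* When SBR output was produced for this element, time_out holds
           2 * frameLength samples per channel (the upsampled frame), so the
           copy must cover twice the core frame length. */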
        uint16_t frame_size = (hDecoder->sbr_alloced[ele]) ? 2 : 1;
        frame_size *= hDecoder->frameLength * sizeof(real_t);

        memcpy(hDecoder->time_out[channel + 1], hDecoder->time_out[channel], frame_size);
    }
#endif
    return 0;
}

uint8_t reconstruct_channel_pair(NeAACDecHandle hDecoder, ic_stream *ics1,
                                 ic_stream *ics2)
{
    uint8_t retval;
    uint8_t channel = hDecoder->channel;
    uint8_t paired_channel = hDecoder->paired_channel;
#ifdef PROFILE
    int64_t count = faad_get_ts();
#endif
    if (hDecoder->element_alloced[hDecoder->fr_ch_ele] == 0)
    {
        retval = allocate_channel_pair(hDecoder, channel, paired_channel);
        if (retval > 0)
            return retval;

        hDecoder->element_alloced[hDecoder->fr_ch_ele] = 1;
    }

    ics1->buffer = hDecoder->time_out[channel];
    ics2->buffer = hDecoder->time_out[paired_channel];
    /* dequantisation and scaling */
    retval = quant_to_spec(hDecoder, ics1, hDecoder->d.spec_data[0], hDecoder->frameLength);
    if (retval > 0)
        return retval;
    retval = quant_to_spec(hDecoder, ics2, hDecoder->d.spec_data[1], hDecoder->frameLength);
    if (retval > 0)
        return retval;

#ifdef PROFILE
    count = faad_get_ts() - count;
    hDecoder->requant_cycles += count;
#endif

    /* pns decoding */
    if (ics1->ms_mask_present)
    {
        pns_decode(ics1, ics2, hDecoder->frameLength, 1, hDecoder->object_type);
    } else {
        pns_decode(ics1, NULL, hDecoder->frameLength, 0, hDecoder->object_type);
        pns_decode(ics2, NULL, hDecoder->frameLength, 0, hDecoder->object_type);
    }

    /* mid/side decoding */
    ms_decode(ics1, ics2, hDecoder->frameLength);

#if 0
    {
        int i;
        for (i = 0; i < 1024; i++)
        {
            //printf("%d\n", ics1->buffer[i]);
            printf("0x%.8X\n", ics1->buffer[i]);
        }
        for (i = 0; i < 1024; i++)
        {
            //printf("%d\n", ics2->buffer[i]);
            printf("0x%.8X\n", ics2->buffer[i]);
        }
    }
#endif

    /* intensity stereo decoding */
    is_decode(ics1, ics2, hDecoder->frameLength);

#if 0
    {
        int i;
        for (i = 0; i < 1024; i++)
        {
            printf("%d\n", ics1->buffer[i]);
            //printf("0x%.8X\n", ics1->buffer[i]);
        }
        for (i = 0; i < 1024; i++)
        {
            printf("%d\n", ics2->buffer[i]);
            //printf("0x%.8X\n", ics2->buffer[i]);
        }
    }
#endif

#ifdef MAIN_DEC
    /* MAIN object type prediction */
    if (hDecoder->object_type == MAIN)
    {
        /* intra channel prediction */
        ic_prediction(ics1, hDecoder->pred_stat[channel], hDecoder->frameLength,
                      hDecoder->sf_index);
        ic_prediction(ics2, hDecoder->pred_stat[paired_channel], hDecoder->frameLength,
                      hDecoder->sf_index);

        /* In addition, for scalefactor bands coded by perceptual noise
           substitution the predictors belonging to the corresponding spectral
           coefficients are reset.
        */
        pns_reset_pred_state(ics1, hDecoder->pred_stat[channel]);
        pns_reset_pred_state(ics2, hDecoder->pred_stat[paired_channel]);
    }
#endif

#ifdef LTP_DEC
    if (is_ltp_ot(hDecoder->object_type))
    {
        ltp_info *ltp1 = &(ics1->ltp);
        ltp_info *ltp2 = (hDecoder->common_window) ? &(ics2->ltp2) : &(ics2->ltp);
#ifdef LD_DEC
        if (hDecoder->object_type == LD)
        {
            if (ltp1->data_present)
            {
                if (ltp1->lag_update)
                    hDecoder->ltp_lag[channel] = ltp1->lag;
            }
            ltp1->lag = hDecoder->ltp_lag[channel];

            if (ltp2->data_present)
            {
                if (ltp2->lag_update)
                    hDecoder->ltp_lag[paired_channel] = ltp2->lag;
            }
            ltp2->lag = hDecoder->ltp_lag[paired_channel];
        }
#endif

        /* long term prediction */
        lt_prediction(ics1, ltp1, hDecoder->lt_pred_stat[channel], hDecoder->fb,
                      ics1->window_shape, hDecoder->window_shape_prev[channel],
                      hDecoder->sf_index, hDecoder->object_type, hDecoder->frameLength);
        lt_prediction(ics2, ltp2, hDecoder->lt_pred_stat[paired_channel], hDecoder->fb,
                      ics2->window_shape, hDecoder->window_shape_prev[paired_channel],
                      hDecoder->sf_index, hDecoder->object_type, hDecoder->frameLength);
    }
#endif

    /* tns decoding */
    tns_decode_frame(ics1, &(ics1->tns), hDecoder->sf_index, hDecoder->object_type,
                     hDecoder->frameLength);
    tns_decode_frame(ics2, &(ics2->tns), hDecoder->sf_index, hDecoder->object_type,
                     hDecoder->frameLength);

    /* drc decoding */
    if (hDecoder->drc->present)
    {
        if (!hDecoder->drc->exclude_mask[channel] || !hDecoder->drc->excluded_chns_present)
            drc_decode(hDecoder->drc, ics1->buffer);
        /* apply DRC to the second channel's own buffer */
        if (!hDecoder->drc->exclude_mask[paired_channel] || !hDecoder->drc->excluded_chns_present)
            drc_decode(hDecoder->drc, ics2->buffer);
    }

    /* filter bank */
#ifdef SSR_DEC
    if (hDecoder->object_type != SSR)
    {
#endif
#ifdef LIBPAAC
        hDecoder->Lib.Filter(&hDecoder->Lib, ics1->window_sequence, ics1->window_shape,
                             hDecoder->window_shape_prev[channel],
                             hDecoder->time_out[channel], hDecoder->fb_intermed[channel],
                             hDecoder->d.buffer);
        hDecoder->Lib.Filter(&hDecoder->Lib, ics2->window_sequence, ics2->window_shape,
                             hDecoder->window_shape_prev[paired_channel],
                             hDecoder->time_out[paired_channel], hDecoder->fb_intermed[paired_channel],
                             hDecoder->d.buffer);
#else
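        /* Inverse filter bank: IMDCT with windowed overlap-add turns the spectral
           coefficients of each channel back into time-domain samples. */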
        ifilter_bank(hDecoder->fb, ics1->window_sequence, ics1->window_shape,
                     hDecoder->window_shape_prev[channel], ics1->buffer,
                     hDecoder->time_out[channel], hDecoder->fb_intermed[channel],
                     hDecoder->d.buffer, hDecoder->object_type, hDecoder->frameLength);
        ifilter_bank(hDecoder->fb, ics2->window_sequence, ics2->window_shape,
                     hDecoder->window_shape_prev[paired_channel], ics2->buffer,
                     hDecoder->time_out[paired_channel], hDecoder->fb_intermed[paired_channel],
                     hDecoder->d.buffer, hDecoder->object_type, hDecoder->frameLength);
#endif
#ifdef SSR_DEC
    } else {
        ssr_decode(&(ics1->ssr), hDecoder->fb, ics1->window_sequence, ics1->window_shape,
                   hDecoder->window_shape_prev[channel], ics1->buffer,
                   hDecoder->time_out[channel], hDecoder->ssr_overlap[channel],
                   hDecoder->ipqf_buffer[channel], hDecoder->prev_fmd[channel],
                   hDecoder->frameLength);
        ssr_decode(&(ics2->ssr), hDecoder->fb, ics2->window_sequence, ics2->window_shape,
                   hDecoder->window_shape_prev[paired_channel], ics2->buffer,
                   hDecoder->time_out[paired_channel], hDecoder->ssr_overlap[paired_channel],
                   hDecoder->ipqf_buffer[paired_channel], hDecoder->prev_fmd[paired_channel],
                   hDecoder->frameLength);
    }
#endif

    /* save window shape for next frame */
    hDecoder->window_shape_prev[channel] = ics1->window_shape;
    hDecoder->window_shape_prev[paired_channel] = ics2->window_shape;

#ifdef LTP_DEC
    if (is_ltp_ot(hDecoder->object_type))
    {
        lt_update_state(hDecoder->lt_pred_stat[channel], hDecoder->time_out[channel],
                        hDecoder->fb_intermed[channel], hDecoder->frameLength,
                        hDecoder->object_type);
        lt_update_state(hDecoder->lt_pred_stat[paired_channel], hDecoder->time_out[paired_channel],
                        hDecoder->fb_intermed[paired_channel], hDecoder->frameLength,
                        hDecoder->object_type);
    }
#endif

#ifdef SBR_DEC
    if (((hDecoder->sbr_present_flag == 1) || (hDecoder->forceUpSampling == 1)) &&
        hDecoder->sbr_alloced[hDecoder->fr_ch_ele])
    {
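        /* SBR decoding for the channel pair. As in the single-channel path,
           safestack() is assumed to run the named SBR routine (here
           sbrDecodeCouple, presumably covering both channels of the element)
           and to return its error code. */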
        retval = safestack(sbrDecodeCouple, hDecoder, ics1, &hDecoder->stack);
        if (retval > 0)
            return retval;
    } else if (((hDecoder->sbr_present_flag == 1) || (hDecoder->forceUpSampling == 1)) &&
               !hDecoder->sbr_alloced[hDecoder->fr_ch_ele]) {
        return 23;
    }
#endif

    return 0;
}