dts_parser.cpp
current_subframe = 0;
current_subsubframe = 0;
if (amode >= sizeof(amode2mask_tbl) / sizeof(amode2mask_tbl[0]))
return false;
/*
if (bs.get_type() == BITSTREAM_14LE ||
bs.get_type() == BITSTREAM_14BE)
frame_size = frame_size * 16 / 14;
int mask = amode2mask_tbl[amode];
int relation = amode2rel_tbl[amode];
if (lfe) mask |= CH_MASK_LFE;
spk.set(FORMAT_DTS, mask, dts_sample_rates[sample_rate], 1.0, relation);
// todo: support short frames
nsamples = sample_blocks * 32;
*/
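// Note on the disabled block above: 14-bit DTS bitstreams carry 14 payload
// bits per 16-bit word, so the frame size grows by 16/14 when converting;
// nsamples would be sample_blocks * 32 because each sample block yields
// 32 PCM samples per channel.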
return true;
}
///////////////////////////////////////////////////////////////////////////////
// SUBFRAME HEADER
///////////////////////////////////////////////////////////////////////////////
bool
DTSParser::parse_subframe_header()
{
// Primary audio coding side information
int ch, k;
// Subsubframe count
subsubframes = bs.get(2) + 1;
// Partial subsubframe sample count
partial_samples = bs.get(3);
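// A subframe holds 'subsubframes' subsubframes (1..4); each subsubframe
// carries 8 decimated samples per active subband, i.e. 256 PCM samples
// per channel through the 32-band filter bank.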
// Get prediction mode for each subband
for (ch = 0; ch < prim_channels; ch++)
for (k = 0; k < subband_activity[ch]; k++)
prediction_mode[ch][k] = bs.get(1);
// Get prediction codebook
for (ch = 0; ch < prim_channels; ch++)
for (k = 0; k < subband_activity[ch]; k++)
if (prediction_mode[ch][k] > 0)
// (Prediction coefficient VQ address)
prediction_vq[ch][k] = bs.get(12);
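// prediction_mode enables ADPCM prediction for a subband; the 12-bit VQ
// address selects the predictor coefficient set from a codebook (the
// coefficient table is not part of this excerpt).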
// Bit allocation index
for (ch = 0; ch < prim_channels; ch++)
{
for (k = 0; k < vq_start_subband[ch]; k++)
{
if (bitalloc_huffman[ch] == 6)
bitalloc[ch][k] = bs.get(5);
else if (bitalloc_huffman[ch] == 5)
bitalloc[ch][k] = bs.get(4);
else
bitalloc[ch][k] = InverseQ(bitalloc_12[bitalloc_huffman[ch]]);
if (bitalloc[ch][k] > 26)
{
fprintf (stderr, "bitalloc index [%i][%i] too big (%i)\n",
ch, k, bitalloc[ch][k]);
return false;
}
}
}
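// A bit allocation index of 0 means the subband carries no sample data in
// this subframe; nonzero values select the quantizer used later in
// parse_subsubframe(). Codebook 6 sends the index as a fixed 5-bit field,
// codebook 5 as a fixed 4-bit field, the rest Huffman-code it; anything
// above 26 is out of range.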
// Transition mode
for (ch = 0; ch < prim_channels; ch++)
{
for (k = 0; k < subband_activity[ch]; k++)
{
transition_mode[ch][k] = 0;
if (subsubframes > 1 && k < vq_start_subband[ch] && bitalloc[ch][k] > 0)
transition_mode[ch][k] = InverseQ(tmode[transient_huffman[ch]]);
}
}
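// A nonzero transition mode marks the subsubframe at which a transient
// starts; from that subsubframe onward the second scale factor is used
// (see the rscale selection in parse_subsubframe()).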
// Scale factors
for (ch = 0; ch < prim_channels; ch++)
{
const int *scale_table;
int scale_sum;
for (k = 0; k < subband_activity[ch]; k++)
{
scale_factor[ch][k][0] = 0;
scale_factor[ch][k][1] = 0;
}
if (scalefactor_huffman[ch] == 6)
scale_table = scale_factor_quant7;
else
scale_table = scale_factor_quant6;
// When huffman coded, only the difference is encoded
scale_sum = 0;
for (k = 0; k < subband_activity[ch]; k++)
{
if (k >= vq_start_subband[ch] || bitalloc[ch][k] > 0)
{
if (scalefactor_huffman[ch] < 5)
// huffman encoded
scale_sum += InverseQ(scales_129[scalefactor_huffman[ch]]);
else if (scalefactor_huffman[ch] == 5)
scale_sum = bs.get(6);
else if (scalefactor_huffman[ch] == 6)
scale_sum = bs.get(7);
scale_factor[ch][k][0] = scale_table[scale_sum];
}
if (k < vq_start_subband[ch] && transition_mode[ch][k])
{
// Get second scale factor
if (scalefactor_huffman[ch] < 5)
// huffman encoded
scale_sum += InverseQ(scales_129[scalefactor_huffman[ch]]);
else if (scalefactor_huffman[ch] == 5)
scale_sum = bs.get(6);
else if (scalefactor_huffman[ch] == 6)
scale_sum = bs.get(7);
scale_factor[ch][k][1] = scale_table[scale_sum];
}
}
}
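// Scale factor coding: codebooks 0..4 Huffman-code the difference from the
// previous value (accumulated in scale_sum), codebooks 5 and 6 send an
// absolute 6- or 7-bit index; the index is then mapped through
// scale_factor_quant6/quant7 to the actual scale.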
// Joint subband scale factor codebook select
for (ch = 0; ch < prim_channels; ch++)
if (joint_intensity[ch] > 0)
joint_huff[ch] = bs.get(3);
// Scale factors for joint subband coding
for (ch = 0; ch < prim_channels; ch++)
{
int source_channel;
// Transmitted only if joint subband coding enabled
if (joint_intensity[ch] > 0)
{
int scale = 0;
source_channel = joint_intensity[ch] - 1;
// When huffman coded, only the difference is encoded
// (is this valid as well for joint scales ???)
for (k = subband_activity[ch];
k < subband_activity[source_channel]; k++)
{
if (joint_huff[ch] < 5)
// huffman encoded
scale = InverseQ(scales_129[joint_huff[ch]]);
else if (joint_huff[ch] == 5)
scale = bs.get(6);
else if (joint_huff[ch] == 6)
scale = bs.get(7);
scale += 64; // bias
joint_scale_factor[ch][k] = scale; // joint_scale_table[scale];
}
//if (!(debug_flag & 0x02))
//{
// fprintf (stderr, "Joint stereo coding not supported\n");
// debug_flag |= 0x02;
//}
}
}
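// Intensity (joint subband) coding: subbands above this channel's own
// subband_activity are not transmitted and are meant to be rebuilt from
// the source channel's samples scaled by joint_scale_factor; the
// reconstruction itself happens elsewhere.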
// Stereo downmix coefficients
if (prim_channels > 2 && downmix)
{
for (ch = 0; ch < prim_channels; ch++)
{
// Two 7-bit downmix coefficient codes per channel (left/right); the
// mapping to actual gains is not documented in this file.
downmix_coef[ch][0] = bs.get(7);
downmix_coef[ch][1] = bs.get(7);
}
}
// Dynamic range coefficient
if (dynrange) dynrange_coef = bs.get(8);
// Side information CRC check word
if (crc_present)
bs.get(16);
/////////////////////////////////////////////////////////
// Primary audio data arrays
// VQ encoded high frequency subbands
for (ch = 0; ch < prim_channels; ch++)
for (k = vq_start_subband[ch]; k < subband_activity[ch]; k++)
// 1 vector -> 32 samples
high_freq_vq[ch][k] = bs.get(10);
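// For subbands at or above vq_start_subband no per-sample data is sent:
// the single 10-bit index selects a codebook vector that covers the whole
// subframe (32 samples), presumably scaled later by that subband's scale
// factor.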
// Low frequency effect data
if (lfe)
{
// LFE samples
int lfe_samples = 2 * lfe * subsubframes;
double lfe_scale;
for (k = lfe_samples; k < lfe_samples * 2; k++)
// Signed 8 bits int
lfe_data[k] = bs.get_signed(8);
// Scale factor index
lfe_scale_factor = scale_factor_quant7[bs.get(8)];
// Quantization step size * scale factor
lfe_scale = 0.035 * lfe_scale_factor;
for (k = lfe_samples; k < lfe_samples * 2; k++)
lfe_data[k] *= lfe_scale;
}
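// Worked example, assuming the lfe field encodes the interpolation factor
// as in other DTS decoders (1 -> 64x, 2 -> 128x decimation): with lfe == 2
// and subsubframes == 4 this reads 2 * 2 * 4 = 16 signed 8-bit samples,
// each scaled by 0.035 * lfe_scale_factor. The lower half of lfe_data is
// left untouched, presumably as history for the interpolation filter.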
return true;
}
///////////////////////////////////////////////////////////////////////////////
// SUBSUBFRAME
///////////////////////////////////////////////////////////////////////////////
bool
DTSParser::parse_subsubframe()
{
int ch, l;
int subsubframe = current_subsubframe;
const double *quant_step_table;
// FIXME
double subband_samples[DTS_PRIM_CHANNELS_MAX][DTS_SUBBANDS][8];
/////////////////////////////////////////////////////////
// Audio data
// Select quantization step size table
if (bit_rate == 0x1f)
quant_step_table = lossless_quant_d;
else
quant_step_table = lossy_quant_d;
for (ch = 0; ch < prim_channels; ch++)
{
for (l = 0; l < vq_start_subband[ch] ; l++)
{
int m;
// Select the mid-tread linear quantizer
int abits = bitalloc[ch][l];
double quant_step_size = quant_step_table[abits];
double rscale;
/////////////////////////////////////////////////////
// Determine quantization index code book and its type
// Select quantization index code book
int sel = quant_index_huffman[ch][abits];
// Determine its type
int q_type = 1; // (Assume Huffman type by default)
if (abits >= 11 || !bitalloc_select[abits][sel])
{
// Not Huffman type
if (abits <= 7)
q_type = 3; // Block code
else
q_type = 2; // No further encoding
}
if (abits == 0) q_type = 0; // No bits allocated
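// Summary of the selection above: abits == 0 -> nothing coded; if a
// Huffman codebook exists for (abits, sel) the indexes are Huffman-coded;
// otherwise abits 1..7 use a block code that packs four samples per code
// word, and larger abits send raw (abits - 3)-bit signed indexes.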
/////////////////////////////////////////////////////
// Extract bits from the bit stream
switch (q_type)
{
case 0: // No bits allocated
for (m=0; m<8; m++)
subband_samples[ch][l][m] = 0;
break;
case 1: // Huffman code
for (m=0; m<8; m++)
subband_samples[ch][l][m] = InverseQ(bitalloc_select[abits][sel]);
break;
case 2: // No further encoding
for (m=0; m<8; m++)
{
// Extract (signed) quantization index
int q_index = bs.get(abits - 3);
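// The top bit of the (abits - 3)-bit code is the sign; the two steps
// below compute q_index - (1 << (abits - 3)), i.e. ordinary
// two's-complement sign extension.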
if (q_index & (1 << (abits - 4)))
{
q_index = (1 << (abits - 3)) - q_index;
q_index = -q_index;
}
subband_samples[ch][l][m] = q_index;
}
break;
case 3: // Block code
{
int block_code1, block_code2, size, levels;
int block[8];
switch (abits)
{
case 1:
size = 7;
levels = 3;
break;
case 2:
size = 10;
levels = 5;
break;
case 3:
size = 12;
levels = 7;
break;
case 4:
size = 13;
levels = 9;
break;
case 5:
size = 15;
levels = 13;
break;
case 6:
size = 17;
levels = 17;
break;
case 7:
default:
size = 19;
levels = 25;
break;
}
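// A block code packs four quantization indexes into one base-'levels'
// number, and levels^4 always fits in 'size' bits (e.g. abits == 1:
// 3^4 = 81 <= 2^7 = 128). decode_blockcode() is expected to split the
// code word into four digits base 'levels' and re-center them around 0.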
block_code1 = bs.get(size);
// Should test return value
decode_blockcode (block_code1, levels, block);
block_code2 = bs.get(size);
decode_blockcode (block_code2, levels, &block[4]);
for (m=0; m<8; m++)
subband_samples[ch][l][m] = block[m];
}
break;
default: // Undefined
fprintf (stderr, "Unknown quantization index codebook");
return false;
} // switch (q_type)
/////////////////////////////////////////////////////
// Account for quantization step and scale factor
// Deal with transients
if (transition_mode[ch][l] && subsubframe >= transition_mode[ch][l])
rscale = quant_step_size * scale_factor[ch][l][1];
else
rscale = quant_step_size * scale_factor[ch][l][0];
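// Transient handling: before the transition subsubframe the first scale
// factor applies, from it onward the second one. rscale combines the
// quantizer step size with that scale factor for dequantizing this
// subband's samples.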