speaker.c
len = (int) d->buffer.buffer_len;
}
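/* Each of the codec-specific decompressors below (LPC, LPC-10, CELP)
   expands the packet back into u-law at 8000 samples per second in
   d->buffer, so the working length is refreshed from buffer_len after
   each call. */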
if (d->compression & fCompLPC) {
Assert(pClientData != NULL);
lpcdecomp(d, pClientData);
len = (int) d->buffer.buffer_len;
}
if (d->compression & fCompLPC10) {
Assert(pClientData != NULL);
lpc10decomp(d, pClientData);
len = (int) d->buffer.buffer_len;
}
if (d->compression & fCompCELP) {
Assert(pClientData != NULL);
celpdecomp(d, pClientData);
len = (int) d->buffer.buffer_len;
}
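/* fComp2X packets are sampled at half the canonical rate, so the
   per-connection rate converter expands them back here, roughly
   doubling the buffer length (hence olen = len * 2 as the output
   bound). */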
if (d->compression & fComp2X) {
int olen = len * 2;
Assert(olen < (sizeof auxbuf));
Assert(pClientData != NULL);
rate_flow(&pClientData->rateconv,
(unsigned char *) val, (unsigned char *) auxbuf,
&len, &olen);
len = olen;
memcpy(val, auxbuf, len);
d->buffer.buffer_len = len;
}
/* If we're performing rate adjustment on the audio to compensate
for receiving audio more rapidly than our hardware can play
it, convert the data in the sound buffer to the desired
rate. Note that we perform the conversion here at the moment
the sound buffer is in generic form: ulaw at 8000 samples
per second.
If the requested
rate would cause the output buffer to overflow, the length is
limited to that of the buffer, truncating any additional
audio in the packet. In practice, this only happens with
absurdly large rate reductions which would render the audio
incomprehensible in any case. */
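/* For example, resampling a 160 sample (20 millisecond) buffer from
   8000 samples per second to a currentOutputRate of 7750 gives
   os = ((160 * 7750) + 7999) / 8000 = 155 output samples, which play
   in about 19.4 milliseconds and so drain the queue slightly faster
   than real time. */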
if (currentOutputRate != EXCHANGE_SAMPLE_RATE) {
int is = len, os = ((len * currentOutputRate) + (EXCHANGE_SAMPLE_RATE - 1)) / EXCHANGE_SAMPLE_RATE;
rate_t r;
if (os > BUFL) {
os = BUFL; /* Constrain length to output buffer size */
}
rate_start(&r, EXCHANGE_SAMPLE_RATE, currentOutputRate);
rate_flow(&r, (unsigned char *) val, (unsigned char *) auxbuf, &is, &os);
memcpy(val, auxbuf, os);
len = os;
cPrintf("Resampled %ld to %ld samples at %d samples/sec.\n", d->buffer.buffer_len, len, currentOutputRate);
d->buffer.buffer_len = len;
}
/* If monitoring received audio, save original ulaw samples
in a buffer linked to the end of the wave packet so they
can be passed to the audio monitor right after they're
done playing. */
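/* The saved block consists of a leading WORD sample count followed
   immediately by the len u-law bytes themselves. */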
if (saveSamp && (hDlgSpectral && !spectrumTransmitOnly)) {
char *specbuf = malloc(len + sizeof(WORD));
if (specbuf != NULL) {
*((WORD *) specbuf) = len;
memcpy(specbuf + sizeof(WORD), val, len);
*specsamp = specbuf;
}
}
/* Compute time, in microseconds, it will take to play the samples in
this buffer. We compute this while the contents are in common 8000
samples per second u-law, before any transformation into device-specific
format. */
bufferMicroseconds = len * (1000000 / 8000);
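/* Each u-law sample at 8000 samples per second represents 125
   microseconds, so, for example, a 240 sample buffer accounts for
   240 * 125 = 30000 microseconds (30 milliseconds) of audio. */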
cPrintf("Microseconds in buffer: %ld", bufferMicroseconds);
if (samplesPerSecond == 11025) {
if (bitsPerSample == 16) {
/* Convert the resulting u-law samples in the sound buffer
to 16 bit signed linear format. */
wh->lpData = (LPSTR) GlobalAllocPtr(GMEM_MOVEABLE | GMEM_SHARE,
(DWORD) ((((BUFL * ((DWORD) sizeof(short)) * 12) / 8) + sizeof(int))));
if (wh->lpData == NULL) {
GlobalFreePtr(wh);
return;
}
sbuf = (short *) wh->lpData;
ulp = (unsigned char *) val;
for (i = 0; i < len; i++) {
int j = i & 7;
*sbuf++ = audio_u2s(*ulp);
/* Duplicate three samples in every group of eight, plus one
   extra every 320 samples: 441 outputs per 320 inputs, exactly
   the 11025/8000 ratio.  This loop could be unrolled for better
   optimisation. */
if (j > 0 && !(j & 1)) {
*sbuf++ = audio_u2s(*ulp);
} else if ((i % 320) == 319) {
*sbuf++ = audio_u2s(*ulp);
}
ulp++;
}
wh->dwBufferLength = wh->dwBytesRecorded =
(((LPSTR) sbuf) - wh->lpData);
} else if (bitsPerSample == 8) {
BYTE *bbuf;
/* Convert the resulting u-law samples in the sound buffer
to 8 bit PCM format. */
wh->lpData = (LPSTR) GlobalAllocPtr(GMEM_MOVEABLE | GMEM_SHARE,
(DWORD) ((((BUFL * ((DWORD) sizeof(short)) * 12) / 16)) + sizeof(int)));
if (wh->lpData == NULL) {
GlobalFreePtr(wh);
return;
}
bbuf = (BYTE *) wh->lpData;
ulp = (unsigned char *) val;
for (i = 0; i < len; i++) {
int j = i & 7;
*bbuf++ = audio_u2c(*ulp);
/* Same 441/320 sample duplication pattern as the 16 bit case
   above; this loop could likewise be unrolled for better
   optimisation. */
if (j > 0 && !(j & 1)) {
*bbuf++ = audio_u2c(*ulp);
} else if ((i % 320) == 319) {
*bbuf++ = audio_u2c(*ulp);
}
ulp++;
}
wh->dwBufferLength = wh->dwBytesRecorded =
(((LPSTR) bbuf) - wh->lpData);
}
} else { // samplesPerSecond == 8000
if (bitsPerSample == 16) {
/* Convert the resulting u-law samples in the sound buffer
to 16 bit signed linear format. */
wh->lpData = (LPSTR) GlobalAllocPtr(GMEM_MOVEABLE | GMEM_SHARE,
(DWORD) ((len * sizeof(short)) + sizeof(int)));
if (wh->lpData == NULL) {
GlobalFreePtr(wh);
return;
}
sbuf = (short *) wh->lpData;
ulp = (unsigned char *) val;
for (i = 0; i < len; i++) {
*sbuf++ = audio_u2s(*ulp++);
}
wh->dwBufferLength = wh->dwBytesRecorded =
(((LPSTR) sbuf) - wh->lpData);
} else if (bitsPerSample == 8) {
BYTE *bbuf;
/* Convert the resulting u-law samples in the sound buffer
to 8 bit PCM format. */
wh->lpData = (LPSTR) GlobalAllocPtr(GMEM_MOVEABLE | GMEM_SHARE,
(DWORD) ((len * sizeof(BYTE)) + sizeof(int)));
if (wh->lpData == NULL) {
GlobalFreePtr(wh);
return;
}
bbuf = (BYTE *) wh->lpData;
ulp = (unsigned char *) val;
for (i = 0; i < len; i++) {
*bbuf++ = audio_u2c(*ulp++);
}
wh->dwBufferLength = wh->dwBytesRecorded =
(((LPSTR) bbuf) - wh->lpData);
}
}
// Stuff the time it will take to play these samples at the end of the buffer
memcpy(wh->lpData + wh->dwBufferLength, &bufferMicroseconds, sizeof bufferMicroseconds);
}
/* PLAYSOUND -- Play a sound buffer, decrypting and decompressing
as required. */
void playSound(HWND hWnd, LPCLIENT_DATA pClientData, soundbuf *d,
int bitsPerSample, int samplesPerSecond)
{
LPWAVEHDR wh;
WORD stat;
int bufferMicroseconds;
/* If the message queue is close to exhaustion, ditch the output
buffer to avoid a hangup due to queue overflow. */
if (outputPending >= ((3 * messageQueueSize) / 4)) {
propeller(IDC_PH_INPUT_LOST, ++inputPacketsLost);
return;
}
wh = (LPWAVEHDR) GlobalAllocPtr(GMEM_MOVEABLE | GMEM_SHARE,
sizeof(WAVEHDR) + sizeof(CHAR *));
if (wh == NULL) {
return;
}
decodeSoundPacket(pClientData, d, bitsPerSample, samplesPerSecond, wh, TRUE);
/* Recover time it will take to play these samples which decodeSoundPacket
stuffed at the end of the sample data. */
memcpy(&bufferMicroseconds, wh->lpData + wh->dwBufferLength, sizeof bufferMicroseconds);
/* Give the answering machine a chance to save the buffer. Note that
at this point the sound buffer is in canonical form: no encryption,
and uncompressed u-law at 8000 samples/second. As playSound() is,
itself, invoked by the answering machine to replay sound packets
saved in the incoming message file, it must take care to set the
fPlayback bit so they're not stored right back into the answering
machine. */
if (!(d->compression & fPlayback)) {
char userId[1024];
strcpy(userId, pClientData->szHost);
/* Note: the only circumstances in which the userId buffer risks
overflow are in a direct attack. Consequently, we needn't go
beyond protecting ourselves here. */
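/* The composite identifier built below has the form
   "host;email;uname", with the e-mail address or user name field
   left empty when it is unknown or too long to fit. */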
if ((pClientData->email[0] != 0) || (pClientData->uname != NULL)) {
strcat(userId, ";");
if ((pClientData->email[0] != 0) &&
((strlen(userId) + strlen(pClientData->email) + 2) < (sizeof userId))) {
strcat(userId, pClientData->email);
}
strcat(userId, ";");
if ((pClientData->uname != NULL) &&
((strlen(userId) + strlen(pClientData->uname) + 2) < (sizeof userId))) {
strcat(userId, pClientData->uname);
}
}
answerSave(pClientData->inetSock.sin_addr, userId, d);
}
wh->dwFlags = 0;
waveOutPrepareHeader(hWaveOut, wh, sizeof(WAVEHDR));
#ifdef DELAY_OUTPUT
if (outputPending == 0)
waveOutPause(hWaveOut);
else if (outputPending == 10)
waveOutRestart(hWaveOut);
#endif
/* If output is paused due to anti-jitter, but we've used up half
our message queue with held packets, start output anyway. A
little jitter is better than packets discarded due to the
message queue limitation. */
if (jitterPause && (outputPending >= (messageQueueSize / 2))) {
jitterPause = FALSE;
waveOutRestart(hWaveOut);
//OutputDebugString("Restart for queue length\r\n");
}
stat = waveOutWrite(hWaveOut, wh, sizeof(WAVEHDR));
if (stat == 0) {
/* Increment number of pending output buffers and add the time,
   in microseconds, it will take to play this buffer at the
   nominal output sample rate.  This is then used to update an
   exponentially smoothed moving average of the length of the
   output queue in microseconds. */
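/* For example, with the 0.1 smoothing factor below, a smoothed
   estimate of 40000 microseconds and a new microsecondsPending of
   60000 moves the estimate to 40000 + (60000 - 40000) * 0.1 = 42000
   microseconds. */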
outputPending++;
microsecondsPending += bufferMicroseconds;
smoothedMicrosecondsPending = smoothedMicrosecondsPending + (microsecondsPending - smoothedMicrosecondsPending) * 0.1;
cPrintf("+ time pending = %.2g", smoothedMicrosecondsPending / 1000000.0);
/* Unless disabled by the workaround waAudioNoOutputRateAdjustment,
compare the smoothed estimate of the time it will take to clear the
output queue to the chosen jitter interval. If it's greater than
the jitter interval by a prescribed amount, adjust the output sample
rate to speed up the output rate of subsequently arriving samples. */
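/* Illustration only (these AORA_* values are hypothetical, and
   EXCHANGE_SAMPLE_RATE is assumed to be 8000): with
   AORA_MAXIMUM_RATE_PERCENT = 10, AORA_ACTION_DELAY = 500,
   AORA_MAXIMUM_RATE_DELAY = 2000 and an excess queue of 1500
   milliseconds, the output rate becomes
   8000 - (8000 * 10 * 1000) / (2000 * 100) = 7600 samples per second. */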
if (!waAudioNoOutputRateAdjustment) {
int excessQueue = (int) ((smoothedMicrosecondsPending / 1000.0) - min(jitterBuf, AORA_MINIMUM_DELAY));
if (excessQueue > AORA_ACTION_DELAY) {
currentOutputRate = EXCHANGE_SAMPLE_RATE -
((EXCHANGE_SAMPLE_RATE * AORA_MAXIMUM_RATE_PERCENT *
min(excessQueue - AORA_ACTION_DELAY, AORA_MAXIMUM_RATE_DELAY)) / (AORA_MAXIMUM_RATE_DELAY * 100));
} else {
currentOutputRate = EXCHANGE_SAMPLE_RATE;
}
cPrintf("+ Excess queue length: %d msec. Rate = %d", excessQueue, currentOutputRate);
}
// Update the output queue length and time in the extended status dialogue
if (hDlgPropeller != NULL) {
char s[80];
if (outputPending == 0) {
wsprintf(s, Format(6));
} else {
sprintf(s, Format(7), outputPending, microsecondsPending / 1000000.0);
}
SetDlgItemText(hDlgPropeller, IDC_PH_AUDIO_OUT_QUEUE, s);
}
} else {
char et[MAXERRORLENGTH];
waveOutGetErrorText(stat, et, sizeof et);
waveOutUnprepareHeader(hWaveOut, wh, sizeof(WAVEHDR));
GlobalFreePtr(wh);
MsgBox(hWnd, MB_OK | MB_ICONEXCLAMATION, Format(45), et);
return;
}
}
/* DECODEANSWERPACKET -- Decode a sound packet saved by the answering machine
so that it can be saved in a .WAV file. */
void decodeAnswerPacket(soundbuf *d, int bitsPerSample, int samplesPerSecond,
LPSTR *pcmData, DWORD *pcmLength)
{
WAVEHDR wh;
decodeSoundPacket(NULL, d, bitsPerSample, samplesPerSecond, &wh, FALSE);
*pcmData = wh.lpData;
*pcmLength = wh.dwBytesRecorded;
}
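/* Usage sketch (illustrative only, not part of the original source):
   how a caller might decode an answering machine packet and release
   the PCM buffer afterwards.  writeWaveData() is a hypothetical
   routine that appends raw PCM to an open .WAV file's data chunk. */
#if 0
static void saveAnswerPacket(soundbuf *d, int bitsPerSample,
                             int samplesPerSecond)
{
    LPSTR pcmData;
    DWORD pcmLength;

    decodeAnswerPacket(d, bitsPerSample, samplesPerSecond,
                       &pcmData, &pcmLength);
    writeWaveData(pcmData, pcmLength);      /* hypothetical .WAV writer */
    GlobalFreePtr(pcmData);                 /* buffer came from GlobalAllocPtr() */
}
#endif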