⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 maccoreaudio.cxx

📁 sloedgy open sip stack source code
💻 CXX
📖 第 1 页 / 共 4 页
字号:
 */ 
/*
 * Ask the HAL output unit for its current stream format, force it to the
 * non-interleaved layout this channel works with, and push that format
 * onto the unit's input scope so no implicit conversion happens inside
 * the unit. On return hwASBD holds the format the unit actually accepted.
 */
OSStatus PSoundChannelCoreAudio::MatchHALOutputFormat()
{
   OSStatus err = noErr;
   UInt32 size = sizeof (AudioStreamBasicDescription);

   memset(&hwASBD, 0, size);

   // Get the current stream format of the output
   err = AudioUnitGetProperty (mAudioUnit,
                           kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Output,
                           0,  // output bus
                           &hwASBD,
                           &size);
   checkStatus(err);

   // Remember whether the HAL currently delivers interleaved data
   // before we force the non-interleaved flag on.
   BOOL isInterleaved =
            !(hwASBD.mFormatFlags & kAudioFormatFlagIsNonInterleaved);

   hwASBD.mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
   if(isInterleaved){
      // Interleaved sizes count all channels in one buffer; according to
      // lists.apple.com you only multiply out by mChannelsPerFrame when
      // doing interleaved, so divide it back out for per-channel buffers.
      hwASBD.mBytesPerPacket /= hwASBD.mChannelsPerFrame;
      hwASBD.mBytesPerFrame  /= hwASBD.mChannelsPerFrame;
   }

   // Set the stream format of the unit's input scope to match
   err = AudioUnitSetProperty (mAudioUnit,
              kAudioUnitProperty_StreamFormat,
              kAudioUnitScope_Input,
              0,
              &hwASBD,
              size);
   // BUG FIX: this error was previously never checked -- it was silently
   // overwritten by the AudioUnitGetProperty call below.
   checkStatus(err);

   // Re-read so hwASBD reflects what the unit really accepted.
   size = sizeof (AudioStreamBasicDescription);
   err = AudioUnitGetProperty (mAudioUnit,
              kAudioUnitProperty_StreamFormat,
              kAudioUnitScope_Input,
              0,  // input bus
              &hwASBD,
              &size);

  return err;
}


/* 
 * Configure the builtin AudioConverter to provide data in non-interleaved 
 * format. Turn off SRC by setting the same sample rate at both ends.
 * See also general notes above
 */ 
OSStatus PSoundChannelCoreAudio::MatchHALInputFormat()
{
   OSStatus err = noErr;
   AudioStreamBasicDescription& asbd = hwASBD;
   UInt32 size = sizeof (AudioStreamBasicDescription);

   memset(&asbd, 0, size);

   // Get the current stream format of the input bus (bus 1).
   err = AudioUnitGetProperty (mAudioUnit,
                        kAudioUnitProperty_StreamFormat,
                        kAudioUnitScope_Input,
                        1,  // input bus
                        &asbd,
                        &size);
   // BUG FIX: err was previously ignored here; on failure asbd stayed
   // all-zero and the divisions below would divide by mChannelsPerFrame==0.
   checkStatus(err);

   /*
    * make it one-channel, non-interleaved, keeping same sample rate
    */
   BOOL isInterleaved =
            !(asbd.mFormatFlags & kAudioFormatFlagIsNonInterleaved);

   PTRACE_IF(5, isInterleaved, "channels are interleaved ");

   // mFormatID -> assume lpcm !!!
   asbd.mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
   if(isInterleaved){
      // Interleaved sizes count all channels in one buffer; according to
      // lists.apple.com you only multiply out by mChannelsPerFrame when
      // doing interleaved, so divide it back out for per-channel buffers.
      asbd.mBytesPerPacket /= asbd.mChannelsPerFrame;
      asbd.mBytesPerFrame  /= asbd.mChannelsPerFrame;
   }
   asbd.mChannelsPerFrame = 1;

   // Set it to output side of input bus
   size = sizeof (AudioStreamBasicDescription);
   err = AudioUnitSetProperty (mAudioUnit,
           kAudioUnitProperty_StreamFormat,
           kAudioUnitScope_Output,
           1,  // input bus
           &asbd,
           size);
   checkStatus(err);

   // Re-read the format so hwASBD reflects what the unit really accepted.
   size = sizeof (AudioStreamBasicDescription);
   err = AudioUnitGetProperty (mAudioUnit,
           kAudioUnitProperty_StreamFormat,
           kAudioUnitScope_Output,
           1,  // input bus
           &hwASBD,
           &size);

   return err;
}



/**
 * Set the PCM format pwlib will deliver/expect on this channel and build
 * the AudioConverter that translates between that format and the
 * hardware format obtained from the HAL.
 *
 * numChannels / sampleRate / bitsPerSample: requested PCM layout.
 * Currently only 8 kHz mono 16-bit is accepted (see PAssert below).
 *
 * Returns TRUE on success (also for the dummy device, which needs no
 * converter), FALSE when no device has been selected yet.
 */
BOOL PSoundChannelCoreAudio::SetFormat(unsigned numChannels,
                  unsigned sampleRate,
                  unsigned bitsPerSample)
{
   // making some assumptions about input format for now
   PAssert(sampleRate == 8000 && numChannels == 1 && bitsPerSample == 16,
      PUnsupportedFeature);

   if(state != open_){
      PTRACE(1, "Please select a device first");
      return FALSE;
   }
  
   /*
    * Setup the pwlibASBD -- the fixed format of the pwlib side
    */
   memset((void *)&pwlibASBD, 0, sizeof(AudioStreamBasicDescription)); 
  
   /* pwlibASBD->mReserved is left zeroed by the memset above */
   pwlibASBD.mFormatID          = kAudioFormatLinearPCM;
   pwlibASBD.mFormatFlags       = kLinearPCMFormatFlagIsSignedInteger;
   pwlibASBD.mFormatFlags      |= kLinearPCMFormatFlagIsNonInterleaved; 
#if PBYTE_ORDER == PBIG_ENDIAN
   // match the host byte order at compile time
   pwlibASBD.mFormatFlags      |= kLinearPCMFormatFlagIsBigEndian;
#endif
   pwlibASBD.mSampleRate        = sampleRate;
   pwlibASBD.mChannelsPerFrame  = numChannels;
   pwlibASBD.mBitsPerChannel    = bitsPerSample;
   pwlibASBD.mBytesPerFrame     = bitsPerSample / 8;
   pwlibASBD.mFramesPerPacket   = 1;
   pwlibASBD.mBytesPerPacket    = pwlibASBD.mBytesPerFrame;
  
  
   if(mDeviceID == kAudioDeviceDummy){
      PTRACE(1, "Dummy device");
      return TRUE;
   }

   // Fetch/set the hardware-side format (fills hwASBD).
   OSStatus err;
   if(direction == Player)
      err = MatchHALOutputFormat();  
   else 
      err = MatchHALInputFormat();
   checkStatus(err);
  
   /*
    * Sample Rate Conversion (SRC)
    * Create AudioConverters, input/output buffers, compute conversion rate 
    */
  
   PTRACE(3, "ASBD PwLib Format of "    << direction << endl << pwlibASBD);
   PTRACE(3, "ASBD Hardware Format of " << direction << endl << hwASBD);
  
  
   // how many samples has the output device compared to pwlib sample rate?
   rateTimes8kHz  = hwASBD.mSampleRate / pwlibASBD.mSampleRate;
  
  
   /*
    * Create Converter for Sample Rate conversion
    * (pwlib -> hardware when playing, hardware -> pwlib when recording)
    */
   if (direction == Player) 
     err = AudioConverterNew(&pwlibASBD, &hwASBD, &converter);
   else 
     err = AudioConverterNew(&hwASBD, &pwlibASBD, &converter);
   checkStatus(err);
  
   UInt32 quality = kAudioConverterQuality_Max;
   err = AudioConverterSetProperty(converter,
                kAudioConverterSampleRateConverterQuality,
                sizeof(UInt32),
                &quality);
   checkStatus(err);
  
   //if(direction == Recorder){
     // trying compute number of requested data more predictably also 
     // for the first request
     UInt32 primeMethod = kConverterPrimeMethod_None;
     err = AudioConverterSetProperty(converter,
                  kAudioConverterPrimeMethod,
                  sizeof(UInt32),
                  &primeMethod);
      checkStatus(err);
   //}

   state = setformat_;
   return TRUE;
}


/* Never gets called in practice, see sound.h:
 * baseChannel->PChannel::GetHandle(); 
 */
BOOL PSoundChannelCoreAudio::IsOpen() const
{
   // BUG FIX: the old test (state != init_ || state != destroy_) was a
   // tautology -- no state can equal both values at once, so the channel
   // always reported itself open. The channel is open iff it is in
   // neither the initial nor the destroyed state.
   return (state != init_ && state != destroy_);
}


/* Never gets called in practice, see sound.h:
 * baseChannel->PChannel::GetHandle(); 
 */
int PSoundChannelCoreAudio::GetHandle() const
{
   // A CoreAudio channel has no underlying OS file descriptor to hand
   // out, so always report an invalid handle.
   PTRACE(1, "GetHandle");
   return -1;
}

/**
 * Abort pending I/O on this channel. Not implemented for CoreAudio:
 * traces, asserts in debug builds and reports failure.
 */
BOOL PSoundChannelCoreAudio::Abort()
{
   PTRACE(1, "Abort");
   PAssert(0, PUnimplementedFunction);
   // consistency fix: the rest of the file uses FALSE/TRUE, not false/true
   return FALSE;
}




/**
 * SetBuffers creates the circular buffer requested by the caller,
 * plus all the hidden buffers used for Sample-Rate-Conversion (SRC).
 *
 * A device cannot be used right after calling Open(); SetBuffers()
 * must also be called before it can start working.
 *
 * size:    Size of each buffer
 * count:   Number of buffers
 *
 */
BOOL PSoundChannelCoreAudio::SetBuffers(PINDEX bufferSize,
                  PINDEX bufferCount)
{
   OSStatus err = noErr;

   if(state != setformat_){
      // use GetError
      PTRACE(1, "Please specify a format first");
      return FALSE;
   }

   PTRACE(3, __func__ << direction << " : "
        << bufferSize << " BufferSize "<< bufferCount << " BufferCount");

   PAssert(bufferSize > 0 && bufferCount > 0 && bufferCount < 65536, \
                                                       PInvalidParameter);

   this->bufferSizeBytes = bufferSize;
   this->bufferCount = bufferCount;

   if(mDeviceID == kAudioDeviceDummy){
      // abort here
      PTRACE(1, "Dummy device");
      return TRUE;
   }

   // Circular buffer shared between pwlib Read/Write and the callback.
   mCircularBuffer = new CircularBuffer(bufferSize * bufferCount );

   /** Register callback function */
   err = CallbackSetup();
   // BUG FIX: the result of CallbackSetup() was previously never checked.
   checkStatus(err);

   /*
    * Note: we deliberately do NOT tune the device's own buffer size here.
    * The buffer size for device input/output is not independent of each
    * other, so setting it per channel creates havoc when SetBuffers() is
    * called with different sizes for the Player and Recorder channels.
    */

   /**
    * Allocate the byte array passed as input to the converter, sized from
    * the device's current buffer frame size.
    *
    * BUG FIX: the locals are named dev* so they no longer shadow the
    * member bufferSizeBytes assigned above.
    */
   UInt32 devBufferSizeFrames, devBufferSizeBytes;
   UInt32 propertySize = sizeof(UInt32);
   err = AudioDeviceGetProperty( mDeviceID,
            0,  // output channel,  
            true,  // isInput 
            kAudioDevicePropertyBufferFrameSize,
            &propertySize,
            &devBufferSizeFrames);
   checkStatus(err);
   devBufferSizeBytes = devBufferSizeFrames * hwASBD.mBytesPerFrame;

   if (direction == Player) {
      // Ask the converter how many input bytes it needs per callback to
      // produce devBufferSizeBytes of output.
      propertySize = sizeof(UInt32);
      err = AudioConverterGetProperty(converter,
            kAudioConverterPropertyCalculateInputBufferSize,
            &propertySize,
            &devBufferSizeBytes);
      checkStatus(err);
      converter_buffer_size = devBufferSizeBytes;
   } else {
      // on each turn the device spits out devBufferSizeBytes bytes;
      // the input ringbuffer has at most MIN_INPUT_FILL frames in it,
      // all other frames were converted during the last callback
      converter_buffer_size = devBufferSizeBytes + 
                  2 * MIN_INPUT_FILL * hwASBD.mBytesPerFrame;
   }
   converter_buffer = (char*)malloc(converter_buffer_size);
   if(converter_buffer == NULL)
      PTRACE(1, "Failed to allocate converter_buffer");
   else
      PTRACE(2, "Allocated converter_buffer of size " 
            << converter_buffer_size );

   /** In case of Recording we need a couple of buffers more */
   if(direction == Recorder){
      SetupAdditionalRecordBuffers();
   }

   /*
    * AU Setup, allocates necessary buffers... 
    */
   err = AudioUnitInitialize(mAudioUnit);
   // BUG FIX: this check was mangled into the comment "//(err);", so
   // initialization failures were silently ignored.
   checkStatus(err);

   state = setbuffer_;

   return TRUE;
}

OSStatus PSoundChannelCoreAudio::SetupAdditionalRecordBuffers()
{

   OSStatus err = noErr;
   UInt32 bufferSizeFrames, bufferSizeBytes;
   
   /** 
    * build buffer list to take over the data from the microphone 
    */
   UInt32 propertySize = sizeof(UInt32);
   err = AudioDeviceGetProperty( mDeviceID,
      0,  // channel, probably all  
      true,  // isInput 
      //false,  // isInput ()
      kAudioDevicePropertyBufferFrameSize,
      &propertySize,
      &bufferSizeFrames);
   checkStatus(err);
   bufferSizeBytes = bufferSizeFrames * hwASBD.mBytesPerFrame;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -