⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 maccoreaudio.cxx

📁 安装 H323需要的pwlib库
💻 CXX
📖 第 1 页 / 共 4 页
字号:
/**
 * Open the sound channel on the named CoreAudio device.
 *
 * deviceName    - device to open; CA_DUMMY_DEVICE_NAME selects a no-op dummy
 * dir           - Player or Recorder (stored in this->direction)
 * numChannels / sampleRate / bitsPerSample - forwarded to SetFormat()
 *
 * Returns the result of SetFormat(); marks the channel as open first.
 */
BOOL PSoundChannelCoreAudio::Open(const PString & deviceName,
              Directions dir,
              unsigned numChannels,
              unsigned sampleRate,
              unsigned bitsPerSample)
{
  OSStatus err;

  /* Save whether this is a Player or Recorder */
  this->direction = dir;

  /*
   * Init the AudioUnit and assign it to the requested AudioDevice
   */
  if (strcmp(deviceName, CA_DUMMY_DEVICE_NAME) == 0) {
     /* Dummy device: no AudioUnit is set up at all.
      * NOTE(review): mDeviceID is set to kAudioDeviceUnknown here, but
      * SetFormat() below compares against kAudioDeviceDummy — confirm the
      * two constants are the same value. */
     PTRACE(6, "Dummy device " << direction);
     mDeviceID = kAudioDeviceUnknown;
  } else {
    /* Resolve the device and build the matching AUHAL unit for the
     * requested direction. err is only meaningful in this branch. */
    AudioDeviceID deviceID = GetDeviceID(deviceName, direction == Recorder);
    if(direction == Player)
       err = SetupOutputUnit(deviceID);
    else
        err = SetupInputUnit(deviceID);
    checkStatus(err);
  }

  //os_handle = mDeviceID;  // tell PChannel::IsOpen() that the channel is open.
  // Arbitrary non-negative sentinel so PChannel::IsOpen() reports open;
  // the real state is tracked in the 'state' member below.
  os_handle = 8;  // tell PChannel::IsOpen() that the channel is open.
  state = open_;

  return SetFormat(numChannels, sampleRate, bitsPerSample);
}

/* Audio Unit for Hardware Abstraction Layer(AUHAL) have builtin converters.
 * It would be nice if we could configure it to spit out/consume the data in
 * the format the data are passed by Read/Write function calls.
 *
 * Unfortunately this is not possible for the microphone, because this
 * converter does not have a buffer inside, so it cannot do any Sample
 * Rate Conversion(SRC). We would have to set the device nominal sample
 * rate itself to 8kHz. Unfortunately not all microphones can do that,
 * so this is not an option. Maybe there will be some change in the future
 * by Apple, so we leave it here.
 *
 * For the output we have the problem that we do not know currently how
 * to configure the channel map so that a mono input channel gets copied
 * to all output channels, so we still have to do the conversion ourselves
 * to copy the result onto all output channels.
* * Still the builtin converters can be used for something useful, such as  * converting from interleaved -> non-interleaved and to reduce the number of  * bits per sample to save space and time while copying  *//*  * Configure the builtin AudioConverter to accept non-interleaved data. * Turn off SRC by setting the same sample rate at both ends. * See also general notes above */ OSStatus PSoundChannelCoreAudio::MatchHALOutputFormat(){   OSStatus err = noErr;   //AudioStreamBasicDescription& asbd = hwASBD;   UInt32 size = sizeof (AudioStreamBasicDescription);   memset(&hwASBD, 0, size);           /*   err = AudioDeviceGetProperty(mDeviceID,          0,     // channel         //true,  // isInput         false,  // isInput         kAudioDevicePropertyStreamFormat,         &size, &hwASBD);   checkStatus(err);   */   //Get the current stream format of the output   err = AudioUnitGetProperty (mAudioUnit,                           kAudioUnitProperty_StreamFormat,                           kAudioUnitScope_Output,                           0,  // output bus                            &hwASBD,                           &size);   checkStatus(err);     //PTRACE(1, direction << "before " << endl << hwASBD);   // make sure it is non-interleaved   BOOL isInterleaved =             !(hwASBD.mFormatFlags & kAudioFormatFlagIsNonInterleaved);   hwASBD.mFormatFlags |= kAudioFormatFlagIsNonInterleaved;    if(isInterleaved){      // so its only one buffer containing all data, according to       // list.apple.com: You only multiply out by mChannelsPerFrame       // if you are doing interleaved.      
hwASBD.mBytesPerPacket /= hwASBD.mChannelsPerFrame;      hwASBD.mBytesPerFrame  /= hwASBD.mChannelsPerFrame;   }     //Set the stream format of the output to match the input   err = AudioUnitSetProperty (mAudioUnit,              kAudioUnitProperty_StreamFormat,              kAudioUnitScope_Input,              0,              &hwASBD,              size);                                                           //PTRACE(1, direction << "after" << endl << hwASBD);     // make sure we really know the current format   size = sizeof (AudioStreamBasicDescription);   err = AudioUnitGetProperty (mAudioUnit,              kAudioUnitProperty_StreamFormat,              kAudioUnitScope_Input,              0,  // input bus              &hwASBD,              &size);                return err;}/*  * Configure the builtin AudioConverter to provide data in non-interleaved  * format. Turn off SRC by setting the same sample rate at both ends. * See also general notes above */ OSStatus PSoundChannelCoreAudio::MatchHALInputFormat(){   OSStatus err = noErr;   AudioStreamBasicDescription& asbd = hwASBD;   UInt32 size = sizeof (AudioStreamBasicDescription);   memset(&asbd, 0, size);   /*   err = AudioDeviceGetProperty(mDeviceID,          0,     // channel         true,  // isInput         kAudioDevicePropertyStreamFormat,         &size,          &asbd);   checkStatus(err);   */   /* This code asks for the supported sample rates of the microphone   UInt32 count, numRanges;   err = AudioDeviceGetPropertyInfo ( mDeviceID,                0, true,             kAudioDevicePropertyAvailableNominalSampleRates,              &count, NULL );   numRanges = count / sizeof(AudioValueRange);   AudioValueRange* rangeArray = (AudioValueRange*)malloc ( count );   err = AudioDeviceGetProperty ( mDeviceID,          0, true,          kAudioDevicePropertyAvailableNominalSampleRates,          &count, (void*)rangeArray );   checkStatus(err);   */   //Get the current stream format of the output   err = 
AudioUnitGetProperty (mAudioUnit,                        kAudioUnitProperty_StreamFormat,                        kAudioUnitScope_Input,                        1,  // input bus/                        &asbd,                        &size);   /*    * make it one-channel, non-interleaved, keeping same sample rate     */   BOOL isInterleaved =             !(asbd.mFormatFlags & kAudioFormatFlagIsNonInterleaved);    PTRACE_IF(1, isInterleaved, "channels are interleaved ");   // mFormatID -> assume lpcm !!!   asbd.mFormatFlags |= kAudioFormatFlagIsNonInterleaved;      if(isInterleaved){      // so it's only one buffer containing all channels, according to       //list.apple.com: You only multiple out by mChannelsPerFrame       //if you are doing interleaved.      asbd.mBytesPerPacket /= asbd.mChannelsPerFrame;      asbd.mBytesPerFrame  /= asbd.mChannelsPerFrame;   }   asbd.mChannelsPerFrame = 1;      // Set it to output side of input bus   size = sizeof (AudioStreamBasicDescription);   err = AudioUnitSetProperty (mAudioUnit,           kAudioUnitProperty_StreamFormat,           kAudioUnitScope_Output,           1,  // input bus           &asbd,           size);   checkStatus(err);   // make sure we really know the current format   size = sizeof (AudioStreamBasicDescription);   err = AudioUnitGetProperty (mAudioUnit,           kAudioUnitProperty_StreamFormat,           kAudioUnitScope_Output,           1,  // input bus           &hwASBD,           &size);   return err;}BOOL PSoundChannelCoreAudio::SetFormat(unsigned numChannels,                  unsigned sampleRate,                  unsigned bitsPerSample){   // making some assumptions about input format for now   PAssert(sampleRate == 8000 && numChannels == 1 && bitsPerSample == 16,      PUnsupportedFeature);   if(state != open_){      PTRACE(1, "Please select a device first");      return FALSE;   }     /*    * Setup the pwlibASBD    */   memset((void *)&pwlibASBD, 0, sizeof(AudioStreamBasicDescription));      /* 
pwlibASBD->mReserved */   pwlibASBD.mFormatID          = kAudioFormatLinearPCM;   pwlibASBD.mFormatFlags       = kLinearPCMFormatFlagIsSignedInteger;   pwlibASBD.mFormatFlags      |= kLinearPCMFormatFlagIsNonInterleaved; #if PBYTE_ORDER == PBIG_ENDIAN   pwlibASBD.mFormatFlags      |= kLinearPCMFormatFlagIsBigEndian;#endif   pwlibASBD.mSampleRate        = sampleRate;   pwlibASBD.mChannelsPerFrame  = numChannels;   pwlibASBD.mBitsPerChannel    = bitsPerSample;   pwlibASBD.mBytesPerFrame     = bitsPerSample / 8;   pwlibASBD.mFramesPerPacket   = 1;   pwlibASBD.mBytesPerPacket    = pwlibASBD.mBytesPerFrame;       if(mDeviceID == kAudioDeviceDummy){      PTRACE(1, "Dummy device");      return TRUE;   }   OSStatus err;   if(direction == Player)      err = MatchHALOutputFormat();     else       err = MatchHALInputFormat();   checkStatus(err);     /*    * Sample Rate Conversion (SRC)    * Create AudioConverters, input/output buffers, compute conversion rate     */     PTRACE(2, "ASBD of PwLib Audio format:" << endl << pwlibASBD );   PTRACE(2, "ASBD of Hardware Audio format:" << endl << hwASBD);       // how many samples has the output device compared to pwlib sample rate?   
rateTimes8kHz  = hwASBD.mSampleRate / pwlibASBD.mSampleRate;       /*    * Create Converter for Sample Rate conversion    */   if (direction == Player)      err = AudioConverterNew(&pwlibASBD, &hwASBD, &converter);   else      err = AudioConverterNew(&hwASBD, &pwlibASBD, &converter);   checkStatus(err);     UInt32 quality = kAudioConverterQuality_Max;   err = AudioConverterSetProperty(converter,                kAudioConverterSampleRateConverterQuality,                sizeof(UInt32),                &quality);   checkStatus(err);     //if(direction == Recorder){     // trying compute number of requested data more predictably also      // for the first request     UInt32 primeMethod = kConverterPrimeMethod_None;     err = AudioConverterSetProperty(converter,                  kAudioConverterPrimeMethod,                  sizeof(UInt32),                  &primeMethod);      checkStatus(err);   //}   state = setformat_;   return TRUE;}/* gets never called, see sound.h: * baseChannel->PChannel::GetHandle();  */BOOL PSoundChannelCoreAudio::IsOpen() const{   //return (os_handle != -1);   return (state != init_ || state != destroy_);}/* gets never called, see sound.h: * baseChannel->PChannel::GetHandle();  */int PSoundChannelCoreAudio::GetHandle() const{   PTRACE(1, "GetHandle");   //return os_handle;    return -1;}BOOL PSoundChannelCoreAudio::Abort(){   PTRACE(1, "Abort");   PAssert(0, PUnimplementedFunction);   return false;}/** * SetBuffers is used to create the circular buffer as requested by the caller  * plus all the hidden buffers used for Sample-Rate-Conversion(SRC) * * A device can not be used after calling Open(), SetBuffers() must * also be called before it can start working. 
* * size:    Size of each buffer * count:   Number of buffers * */BOOL PSoundChannelCoreAudio::SetBuffers(PINDEX bufferSize,                  PINDEX bufferCount){   OSStatus err = noErr;   if(state != setformat_){      // use GetError      PTRACE(1, "Please specify a format first");      return FALSE;   }   PTRACE(1, __func__ << direction << " : "        << bufferSize << " BufferSize "<< bufferCount << " BufferCount");   PAssert(bufferSize > 0 && bufferCount > 0 && bufferCount < 65536, \                                                       PInvalidParameter);   this->bufferSizeBytes = bufferSize;   this->bufferCount = bufferCount;   if(mDeviceID == kAudioDeviceDummy){      // abort here      PTRACE(1, "Dummy device");      return TRUE;   }   mCircularBuffer = new CircularBuffer(bufferSize * bufferCount );       /** Register callback function */   err = CallbackSetup();   /**     * Tune the buffer size of the underlying audio device.    * The aim is to make the device request half of the buffer size on    * each callback.	 *	 * Not possible, because  buffer size for device input/output is not	 * independant of each other. Creates havoc in case SetBuffer is called with 	 * different buffer size for Player/Recorder Channel    */	/*   UInt32 targetSizeBytes = bufferSizeBytes / 2;   UInt32 size = sizeof(UInt32);   if (direction == Player) {     err = AudioConverterGetProperty(converter,             kAudioConverterPropertyCalculateOutputBufferSize,             &size, &targetSizeBytes);   } else {     err = AudioConverterGetProperty(converter,             kAudioConverterPropertyCalculateInputBufferSize,             &size, &targetSizeBytes);   }   checkStatus(err);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -