I have an IP camera that requires a custom library for connection and communication. I already have the video working, but I also want to give the user the option to listen to the audio the camera records.
I receive the audio as a byte stream (the audio is PCM u-law).
Since I'm not reading from a file and don't have a URL I can connect to, I figure I have to use something like Audio Units or OpenAL to play my audio.
I tried to implement it with Audio Units based on examples I found online, and this is what I have so far:
-(void) audioThread
{
    char buffer[1024];
    int size = 0;
    BOOL audioConfigured = NO;
    AudioComponentInstance audioUnit;
    while (running) {
        getAudioData(buffer, &size); // fill buffer with my audio and set size
        // decode the u-law bytes into 16-bit linear PCM
        int16_t *tempChar = (int16_t *)calloc(size, sizeof(int16_t));
        for (int i = 0; i < size; i++) {
            tempChar[i] = MuLaw_Decode(buffer[i]);
        }
        uint8_t *data = malloc(size * sizeof(int16_t));
        memcpy(data, tempChar, size * sizeof(int16_t));
        free(tempChar);
        CMBlockBufferRef blockBuffer = NULL;
        // NB: with kCFAllocatorNull as the block allocator, the block buffer
        // will not free 'data' for us when it is released
        size_t dataLength = size * sizeof(int16_t);
        OSStatus status = CMBlockBufferCreateWithMemoryBlock(NULL, data,
                                                             dataLength,
                                                             kCFAllocatorNull, NULL,
                                                             0,
                                                             dataLength,
                                                             0, &blockBuffer);
        CMSampleBufferRef sampleBuffer = NULL;
        // now I create my sample buffer from the block buffer
        if (status == noErr)
        {
            const size_t sampleSize = dataLength;
            // formatDesc (a CMAudioFormatDescriptionRef) is set up elsewhere (not shown)
            status = CMSampleBufferCreate(kCFAllocatorDefault,
                                          blockBuffer, true, NULL, NULL,
                                          formatDesc, 1, 0, NULL, 1,
                                          &sampleSize, &sampleBuffer);
        }
        AudioStreamBasicDescription audioBasic;
        audioBasic.mBitsPerChannel = 16;
        audioBasic.mBytesPerPacket = 2;
        audioBasic.mBytesPerFrame = 2;
        audioBasic.mChannelsPerFrame = 1;
        audioBasic.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        audioBasic.mFormatID = kAudioFormatLinearPCM;
        audioBasic.mFramesPerPacket = 1;
        audioBasic.mSampleRate = 48000;
        audioBasic.mReserved = 0;
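        // Note: the 48000 Hz above has to match the camera's actual output
        // rate; u-law IP-camera streams are commonly 8000 Hz, and a mismatch
        // makes playback run too fast or too slow.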
        if (!audioConfigured)
        {
            // initialize the circular buffer
            if (self.decodingBuffer == NULL)
                self.decodingBuffer = malloc(sizeof(TPCircularBuffer));
            if (!TPCircularBufferInit(self.decodingBuffer, 1024))
                continue;
            AudioComponentDescription componentDescription;
            componentDescription.componentType = kAudioUnitType_Output;
            componentDescription.componentSubType = kAudioUnitSubType_RemoteIO;
            componentDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
            componentDescription.componentFlags = 0;
            componentDescription.componentFlagsMask = 0;
            AudioComponent component = AudioComponentFindNext(NULL, &componentDescription);
            if (AudioComponentInstanceNew(component, &audioUnit) != noErr) {
                NSLog(@"Failed to initialize the AudioComponent");
                continue;
            }
            // enable IO for playback
            UInt32 flag = 1;
            if (AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO,
                                     kAudioUnitScope_Output, 0, &flag, sizeof(flag)) != noErr) {
                NSLog(@"Failed to enable IO for playback");
                continue;
            }
            // set the format for the output stream
            if (AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat,
                                     kAudioUnitScope_Output, 1, &audioBasic, sizeof(audioBasic)) != noErr) {
                NSLog(@"Failed to set the format for the output stream");
                continue;
            }
            // set output callback
            AURenderCallbackStruct callbackStruct;
            callbackStruct.inputProc = playbackCallback;
            callbackStruct.inputProcRefCon = (__bridge void *)self;
            if (AudioUnitSetProperty(audioUnit, kAudioUnitProperty_SetRenderCallback,
                                     kAudioUnitScope_Global, 0, &callbackStruct, sizeof(callbackStruct)) != noErr) {
                NSLog(@"Failed to set output callback");
                continue;
            }
            // disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
            flag = 0;
            status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_ShouldAllocateBuffer,
                                          kAudioUnitScope_Output, 1, &flag, sizeof(flag));
            if (AudioUnitInitialize(audioUnit) != noErr) {
                NSLog(@"Failed to initialize audioUnits");
            }
            if (AudioOutputUnitStart(audioUnit) != noErr) {
                NSLog(@"[thread_ReceiveAudio] Failed to start audio");
            }
            audioConfigured = true;
        }
        AudioBufferList bufferList;
        if (sampleBuffer != NULL) {
            CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &bufferList,
                sizeof(bufferList), NULL, NULL,
                kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment, &blockBuffer);
            UInt64 totalBytes = CMSampleBufferGetTotalSampleSize(sampleBuffer);
            // put audio into circular buffer
            TPCircularBufferProduceBytes(self.decodingBuffer, bufferList.mBuffers[0].mData, (int32_t)totalBytes);
            //TPCircularBufferCopyAudioBufferList(self.decodingBuffer, &bufferList, NULL, kTPCircularBufferCopyAll, NULL);
            CFRelease(sampleBuffer);
            CFRelease(blockBuffer);
        }
    }
    // stop playing audio
    if (audioConfigured) {
        if (AudioOutputUnitStop(audioUnit) != noErr) {
            NSLog(@"[thread_ReceiveAudio] Failed to stop audio");
        }
        else {
            // clean up audio
            AudioComponentInstanceDispose(audioUnit);
        }
    }
}
int16_t MuLaw_Decode(int8_t number)
{
    const uint16_t MULAW_BIAS = 33;
    uint8_t sign = 0, position = 0;
    int16_t decoded = 0;
    number = ~number;
    if (number & 0x80)
    {
        number &= ~(1 << 7);
        sign = -1;
    }
    position = ((number & 0xF0) >> 4) + 5;
    decoded = ((1 << position) | ((number & 0x0F) << (position - 4))
               | (1 << (position - 5))) - MULAW_BIAS;
    return (sign == 0) ? (decoded) : (-(decoded));
}
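For reference, here's a quick sanity check of the decoder (a minimal check, not part of the original post): in G.711 u-law the byte 0xFF encodes digital silence, so it should decode back to a PCM sample of 0.

#include <assert.h>

static void testMuLawDecode(void)
{
    assert(MuLaw_Decode((int8_t)0xFF) == 0); // u-law silence -> 0 PCM
}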
static OSStatus playbackCallback(void *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp *inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList *ioData) {
    // inRefCon is the bridged self pointer installed with the render callback
    MyAudioPlayer *player = (__bridge MyAudioPlayer *)inRefCon; // class name assumed
    int bytesToCopy = ioData->mBuffers[0].mDataByteSize;
    SInt16 *targetBuffer = (SInt16 *)ioData->mBuffers[0].mData;
    int32_t availableBytes;
    SInt16 *buffer = TPCircularBufferTail(player.decodingBuffer, &availableBytes);
    int byteCount = MIN(bytesToCopy, availableBytes);
    memcpy(targetBuffer, buffer, byteCount);
    TPCircularBufferConsume(player.decodingBuffer, byteCount);
    return noErr;
}
The code above doesn't produce any errors, but it also doesn't play any sound. I thought I could supply the audio through the bufferList in the callback, but the callback never gets called.
So my question is: how do I play audio from a byte stream on iOS?
Best Answer
I decided to look at the project with fresh eyes. I got rid of most of the code and it works now. It isn't pretty, but at least it runs. For example: I had to set my sample rate to 4000, otherwise it would play way too fast, and I still have performance issues. (In hindsight, this is probably because the camera sends 8000 Hz mono u-law while the format below declares two one-byte channels, so at 4000 frames per second the unit still consumes 8000 bytes per second.) Anyway, this is what I came up with:
#define BUFFER_SIZE 1024
#define NUM_CHANNELS 2
#define kOutputBus 0
#define kInputBus 1

-(void) main
{
    char buf[BUFFER_SIZE];
    int size = 0;
    while (self.running) {
        getAudioData(buf, &size);
        if (!self.configured) {
            if (![self activateAudioSession])
                continue;
            self.configured = true;
        }
        TPCircularBufferProduceBytes(self.decodingBuffer, buf, size);
    }
    // stop audio units
    AudioOutputUnitStop(self.audioUnit);
    AudioComponentInstanceDispose(self.audioUnit);
    if (self.decodingBuffer != NULL) {
        TPCircularBufferCleanup(self.decodingBuffer);
    }
}
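The post doesn't show how -main gets invoked; presumably it runs on a background thread so the receive loop doesn't block the UI. A minimal way to kick it off (a sketch, assuming -main lives on the same class):

// spawn the receive loop off the main thread
[NSThread detachNewThreadSelector:@selector(main) toTarget:self withObject:nil];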
static void audioSessionInterruptionCallback(void *inUserData, UInt32 interruptionState) {
    // inUserData is the bridged self pointer passed to AudioSessionInitialize
    MyAudioPlayer *player = (__bridge MyAudioPlayer *)inUserData; // class name assumed
    if (interruptionState == kAudioSessionEndInterruption) {
        AudioSessionSetActive(YES);
        AudioOutputUnitStart(player.audioUnit);
    }
    if (interruptionState == kAudioSessionBeginInterruption) {
        AudioOutputUnitStop(player.audioUnit);
    }
}
static OSStatus playbackCallback(void *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp *inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList *ioData) {
    // Notes: ioData contains buffers (there may be more than one!).
    // Fill them up as much as you can; remember to set the size value in each
    // buffer to match how much data is in it.
    MyAudioPlayer *player = (__bridge MyAudioPlayer *)inRefCon; // class name assumed
    if (!player.running) {
        return -1;
    }
    int bytesToCopy = ioData->mBuffers[0].mDataByteSize;
    SInt16 *targetBuffer = (SInt16 *)ioData->mBuffers[0].mData;
    // Pull audio from the playthrough buffer
    int32_t availableBytes;
    if (player.decodingBuffer == NULL || player.decodingBuffer->length < 1) {
        NSLog(@"buffer is empty");
        memset(targetBuffer, 0, bytesToCopy); // output silence, not garbage
        return noErr;
    }
    SInt16 *buffer = TPCircularBufferTail(player.decodingBuffer, &availableBytes);
    int byteCount = MIN(bytesToCopy, availableBytes);
    memcpy(targetBuffer, buffer, byteCount);
    // pad any underrun with silence
    if (byteCount < bytesToCopy)
        memset((uint8_t *)targetBuffer + byteCount, 0, bytesToCopy - byteCount);
    TPCircularBufferConsume(player.decodingBuffer, byteCount);
    return noErr;
}
- (BOOL) activateAudioSession {
    if (!self.activated_) {
        OSStatus result;
        result = AudioSessionInitialize(NULL,
                                        NULL,
                                        audioSessionInterruptionCallback,
                                        (__bridge void *)(self));
        if (kAudioSessionAlreadyInitialized != result)
            [self checkError:result message:@"Couldn't initialize audio session"];
        [self setupAudio];
        self.activated_ = YES;
    }
    return self.activated_;
}
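Note that AudioSessionInitialize and AudioSessionSetActive come from the C-based Audio Session API, which Apple deprecated in iOS 7. A rough AVAudioSession equivalent of this activation step (a sketch, not part of the original answer):

#import <AVFoundation/AVFoundation.h>

NSError *error = nil;
AVAudioSession *session = [AVAudioSession sharedInstance];
[session setCategory:AVAudioSessionCategoryPlayback error:&error]; // playback-only session
[session setActive:YES error:&error];                              // activate it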
- (BOOL) setupAudio
{
    OSStatus status;
    // Describe audio component
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    // Get component
    AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
    // Get audio unit
    AudioComponentInstanceNew(inputComponent, &_audioUnit);
//    // Enable IO for recording
//    UInt32 flag = 1;
//    status = AudioUnitSetProperty(audioUnit,
//                                  kAudioOutputUnitProperty_EnableIO,
//                                  kAudioUnitScope_Input,
//                                  kInputBus,
//                                  &flag,
//                                  sizeof(flag));
    // Enable IO for playback
    UInt32 flag = 1;
    AudioUnitSetProperty(_audioUnit,
                         kAudioOutputUnitProperty_EnableIO,
                         kAudioUnitScope_Output,
                         kOutputBus,
                         &flag,
                         sizeof(flag));
    // Describe format
    AudioStreamBasicDescription format;
    format.mSampleRate = 4000;
    format.mFormatID = kAudioFormatULaw;
    format.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    format.mBitsPerChannel = 8 * sizeof(char);
    format.mChannelsPerFrame = NUM_CHANNELS;
    format.mBytesPerFrame = sizeof(char) * NUM_CHANNELS;
    format.mFramesPerPacket = 1;
    format.mBytesPerPacket = format.mBytesPerFrame * format.mFramesPerPacket;
    format.mReserved = 0;
    self.audioFormat = format;
    // Apply format
    AudioUnitSetProperty(_audioUnit,
                         kAudioUnitProperty_StreamFormat,
                         kAudioUnitScope_Output,
                         kInputBus,
                         &_audioFormat,
                         sizeof(_audioFormat));
    AudioUnitSetProperty(_audioUnit,
                         kAudioUnitProperty_StreamFormat,
                         kAudioUnitScope_Input,
                         kOutputBus,
                         &_audioFormat,
                         sizeof(_audioFormat));
//    // Set input callback
//    AURenderCallbackStruct callbackStruct;
//    callbackStruct.inputProc = recordingCallback;
//    callbackStruct.inputProcRefCon = self;
//    status = AudioUnitSetProperty(audioUnit,
//                                  kAudioOutputUnitProperty_SetInputCallback,
//                                  kAudioUnitScope_Global,
//                                  kInputBus,
//                                  &callbackStruct,
//                                  sizeof(callbackStruct));
//    checkStatus(status);
    // Set output callback
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = playbackCallback;
    callbackStruct.inputProcRefCon = (__bridge void * _Nullable)(self);
    AudioUnitSetProperty(_audioUnit,
                         kAudioUnitProperty_SetRenderCallback,
                         kAudioUnitScope_Global,
                         kOutputBus,
                         &callbackStruct,
                         sizeof(callbackStruct));
    // Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
    flag = 0;
    status = AudioUnitSetProperty(_audioUnit,
                                  kAudioUnitProperty_ShouldAllocateBuffer,
                                  kAudioUnitScope_Output,
                                  kInputBus,
                                  &flag,
                                  sizeof(flag));
    // Initialize the circular buffer
    if (self.decodingBuffer == NULL)
        self.decodingBuffer = malloc(sizeof(TPCircularBuffer));
    if (!TPCircularBufferInit(self.decodingBuffer, 512 * 1024))
        return NO;
    // Initialize and start the unit
    status = AudioUnitInitialize(self.audioUnit);
    AudioOutputUnitStart(self.audioUnit);
    return YES;
}
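Since the AudioUnitSetProperty calls above ignore their return values, a silent failure is easy to miss. When debugging, one thing worth checking is which format the unit actually accepted (a sketch, not part of the original answer):

AudioStreamBasicDescription confirmed;
UInt32 propSize = sizeof(confirmed);
// read back the stream format on the input scope of the output bus
AudioUnitGetProperty(_audioUnit, kAudioUnitProperty_StreamFormat,
                     kAudioUnitScope_Input, kOutputBus,
                     &confirmed, &propSize);
NSLog(@"Unit accepted %.0f Hz, %u channel(s)",
      confirmed.mSampleRate, (unsigned)confirmed.mChannelsPerFrame);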
I found most of this by digging through examples on GitHub and on A Tasty Pixel.
Regarding "ios - How do I play live audio on iOS?", there is a similar question on Stack Overflow:
https://stackoverflow.com/questions/34023843/