Compare commits


1 commit
master...ios

Author  SHA1  Message  Date
nillerusr  79c3bc86bd  macos,ios: engine: use openal instead of old macos api  2023-06-03 22:50:43 +03:00
5 changed files with 3 additions and 1147 deletions
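The deleted device below fed the mixer's 16-bit stereo output into CoreAudio AudioQueue buffers; after this commit the engine's existing OpenAL backend (snd_dev_openal.cpp, still listed in the build script at the bottom) is the only macOS/iOS output path. As a hedged sketch of the streaming pattern that takes over, using standard OpenAL 1.1 calls rather than anything from this repository:

// Illustrative only -- assumed OpenAL 1.1 usage, not code from this commit.
// Stream 16-bit stereo PCM through a small ring of AL buffers, the role the
// removed CAudioDeviceAudioQueue used to play with AudioQueue buffers.
#include <AL/al.h>
#include <AL/alc.h>

enum { kChunkBytes = 0x0400 };                      // same 1 KB chunk size as the device below
static short s_chunk[kChunkBytes / sizeof(short)];  // placeholder for mixed output

void OpenALStreamSketch()
{
	ALCdevice  *dev = alcOpenDevice( NULL );        // default output device
	ALCcontext *ctx = alcCreateContext( dev, NULL );
	alcMakeContextCurrent( ctx );

	ALuint source, buffers[4];
	alGenSources( 1, &source );
	alGenBuffers( 4, buffers );
	for ( int i = 0; i < 4; i++ )
		alBufferData( buffers[i], AL_FORMAT_STEREO16, s_chunk, sizeof( s_chunk ), 44100 );
	alSourceQueueBuffers( source, 4, buffers );
	alSourcePlay( source );

	// Each mix tick: recycle processed buffers with freshly mixed PCM.
	ALint done = 0;
	alGetSourcei( source, AL_BUFFERS_PROCESSED, &done );
	while ( done-- > 0 )
	{
		ALuint buf;
		alSourceUnqueueBuffers( source, 1, &buf );
		alBufferData( buf, AL_FORMAT_STEREO16, s_chunk, sizeof( s_chunk ), 44100 );
		alSourceQueueBuffers( source, 1, &buf );
	}
}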

View File

@@ -1,599 +0,0 @@
//========= Copyright Valve Corporation, All rights reserved. ============//
//
// Purpose:
//
//===========================================================================//
#include "audio_pch.h"
#include <AudioToolbox/AudioQueue.h>
#include <AudioToolbox/AudioFile.h>
#include <AudioToolbox/AudioFormat.h>
// memdbgon must be the last include file in a .cpp file!!!
#include "tier0/memdbgon.h"
extern bool snd_firsttime;
extern bool MIX_ScaleChannelVolume( paintbuffer_t *ppaint, channel_t *pChannel, int volume[CCHANVOLUMES], int mixchans );
extern void S_SpatializeChannel( int volume[6], int master_vol, const Vector *psourceDir, float gain, float mono );
#define NUM_BUFFERS_SOURCES 128
#define BUFF_MASK (NUM_BUFFERS_SOURCES - 1 )
#define BUFFER_SIZE 0x0400
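// 128 one-kilobyte queue buffers give a 128 KB staging ring (m_sndBuffers below);
// NUM_BUFFERS_SOURCES is a power of two so BUFF_MASK can wrap the running buffer
// index with a cheap bitwise AND instead of a modulo.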
//-----------------------------------------------------------------------------
//
// NOTE: This only allows 16-bit, stereo wave out
//
//-----------------------------------------------------------------------------
class CAudioDeviceAudioQueue : public CAudioDeviceBase
{
public:
bool IsActive( void );
bool Init( void );
void Shutdown( void );
void PaintEnd( void );
int GetOutputPosition( void );
void ChannelReset( int entnum, int channelIndex, float distanceMod );
void Pause( void );
void UnPause( void );
float MixDryVolume( void );
bool Should3DMix( void );
void StopAllSounds( void );
int PaintBegin( float mixAheadTime, int soundtime, int paintedtime );
void ClearBuffer( void );
void UpdateListener( const Vector& position, const Vector& forward, const Vector& right, const Vector& up );
void MixBegin( int sampleCount );
void MixUpsample( int sampleCount, int filtertype );
void Mix8Mono( channel_t *pChannel, char *pData, int outputOffset, int inputOffset, fixedint rateScaleFix, int outCount, int timecompress );
void Mix8Stereo( channel_t *pChannel, char *pData, int outputOffset, int inputOffset, fixedint rateScaleFix, int outCount, int timecompress );
void Mix16Mono( channel_t *pChannel, short *pData, int outputOffset, int inputOffset, fixedint rateScaleFix, int outCount, int timecompress );
void Mix16Stereo( channel_t *pChannel, short *pData, int outputOffset, int inputOffset, fixedint rateScaleFix, int outCount, int timecompress );
void TransferSamples( int end );
void SpatializeChannel( int volume[CCHANVOLUMES/2], int master_vol, const Vector& sourceDir, float gain, float mono);
void ApplyDSPEffects( int idsp, portable_samplepair_t *pbuffront, portable_samplepair_t *pbufrear, portable_samplepair_t *pbufcenter, int samplecount );
const char *DeviceName( void ) { return "AudioQueue"; }
int DeviceChannels( void ) { return 2; }
int DeviceSampleBits( void ) { return 16; }
int DeviceSampleBytes( void ) { return 2; }
int DeviceDmaSpeed( void ) { return SOUND_DMA_SPEED; }
int DeviceSampleCount( void ) { return m_deviceSampleCount; }
void BufferCompleted() { m_buffersCompleted++; }
void SetRunning( bool bState ) { m_bRunning = bState; }
private:
void OpenWaveOut( void );
void CloseWaveOut( void );
bool ValidWaveOut( void ) const;
bool BIsPlaying();
AudioStreamBasicDescription m_DataFormat;
AudioQueueRef m_Queue;
AudioQueueBufferRef m_Buffers[NUM_BUFFERS_SOURCES];
int m_SndBufSize;
void *m_sndBuffers;
CInterlockedInt m_deviceSampleCount;
int m_buffersSent;
int m_buffersCompleted;
int m_pauseCount;
bool m_bSoundsShutdown;
bool m_bFailed;
bool m_bRunning;
};
CAudioDeviceAudioQueue *wave = NULL;
static void AudioCallback(void *pContext, AudioQueueRef pQueue, AudioQueueBufferRef pBuffer)
{
if ( wave )
wave->BufferCompleted();
}
IAudioDevice *Audio_CreateMacAudioQueueDevice( void )
{
wave = new CAudioDeviceAudioQueue;
if ( wave->Init() )
return wave;
delete wave;
wave = NULL;
return NULL;
}
void OnSndSurroundCvarChanged2( IConVar *pVar, const char *pOldString, float flOldValue );
void OnSndSurroundLegacyChanged2( IConVar *pVar, const char *pOldString, float flOldValue );
//-----------------------------------------------------------------------------
// Init, shutdown
//-----------------------------------------------------------------------------
bool CAudioDeviceAudioQueue::Init( void )
{
m_SndBufSize = 0;
m_sndBuffers = NULL;
m_pauseCount = 0;
m_bSurround = false;
m_bSurroundCenter = false;
m_bHeadphone = false;
m_buffersSent = 0;
m_buffersCompleted = 0;
m_pauseCount = 0;
m_bSoundsShutdown = false;
m_bFailed = false;
m_bRunning = false;
m_Queue = NULL;
static bool first = true;
if ( first )
{
snd_surround.SetValue( 2 );
snd_surround.InstallChangeCallback( &OnSndSurroundCvarChanged2 );
snd_legacy_surround.InstallChangeCallback( &OnSndSurroundLegacyChanged2 );
first = false;
}
OpenWaveOut();
if ( snd_firsttime )
{
DevMsg( "Wave sound initialized\n" );
}
return ValidWaveOut() && !m_bFailed;
}
void CAudioDeviceAudioQueue::Shutdown( void )
{
CloseWaveOut();
}
//-----------------------------------------------------------------------------
// WAV out device
//-----------------------------------------------------------------------------
inline bool CAudioDeviceAudioQueue::ValidWaveOut( void ) const
{
return m_sndBuffers != 0 && m_Queue;
}
//-----------------------------------------------------------------------------
// called by CoreAudio when the queue's kAudioQueueProperty_IsRunning state changes
// (e.g. after the queue stops because it ran out of playback buffers)
//-----------------------------------------------------------------------------
void AudioQueueIsRunningCallback( void* inClientData, AudioQueueRef inAQ, AudioQueuePropertyID inID)
{
CAudioDeviceAudioQueue* audioqueue = (CAudioDeviceAudioQueue*)inClientData;
UInt32 running = 0;
UInt32 size = sizeof(running);
OSStatus err = AudioQueueGetProperty(inAQ, kAudioQueueProperty_IsRunning, &running, &size);
audioqueue->SetRunning( running != 0 );
//DevWarning( "AudioQueueStart %d\n", running );
}
//-----------------------------------------------------------------------------
// Opens the AudioQueue output device
//-----------------------------------------------------------------------------
void CAudioDeviceAudioQueue::OpenWaveOut( void )
{
if ( m_Queue )
return;
m_buffersSent = 0;
m_buffersCompleted = 0;
m_DataFormat.mSampleRate = 44100;
m_DataFormat.mFormatID = kAudioFormatLinearPCM;
m_DataFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger|kAudioFormatFlagIsPacked;
m_DataFormat.mBytesPerPacket = 4; // 16-bit samples * 2 channels
m_DataFormat.mFramesPerPacket = 1;
m_DataFormat.mBytesPerFrame = 4; // 16-bit samples * 2 channels
m_DataFormat.mChannelsPerFrame = 2;
m_DataFormat.mBitsPerChannel = 16;
m_DataFormat.mReserved = 0;
// Create the audio queue that will be used to manage the array of audio
// buffers used to queue samples.
OSStatus err = AudioQueueNewOutput(&m_DataFormat, AudioCallback, this, NULL, NULL, 0, &m_Queue);
if ( err != noErr)
{
DevMsg( "Failed to create AudioQueue output %d\n", (int)err );
m_bFailed = true;
return;
}
for ( int i = 0; i < NUM_BUFFERS_SOURCES; ++i)
{
err = AudioQueueAllocateBuffer( m_Queue, BUFFER_SIZE,&(m_Buffers[i]));
if ( err != noErr)
{
DevMsg( "Failed to AudioQueueAllocateBuffer output %d (%i)\n",(int)err,i );
m_bFailed = true;
return;
}
m_Buffers[i]->mAudioDataByteSize = BUFFER_SIZE;
Q_memset( m_Buffers[i]->mAudioData, 0, BUFFER_SIZE );
}
err = AudioQueuePrime( m_Queue, 0, NULL);
if ( err != noErr)
{
DevMsg( "Failed to create AudioQueue output %d\n", (int)err );
m_bFailed = true;
return;
}
AudioQueueSetParameter( m_Queue, kAudioQueueParam_Volume, 1.0);
err = AudioQueueAddPropertyListener( m_Queue, kAudioQueueProperty_IsRunning, AudioQueueIsRunningCallback, this );
if ( err != noErr)
{
DevMsg( "Failed to create AudioQueue output %d\n", (int)err );
m_bFailed = true;
return;
}
m_SndBufSize = NUM_BUFFERS_SOURCES*BUFFER_SIZE;
m_deviceSampleCount = m_SndBufSize / DeviceSampleBytes();
if ( !m_sndBuffers )
{
m_sndBuffers = malloc( m_SndBufSize );
memset( m_sndBuffers, 0x0, m_SndBufSize );
}
}
//-----------------------------------------------------------------------------
// Closes the AudioQueue output device
//-----------------------------------------------------------------------------
void CAudioDeviceAudioQueue::CloseWaveOut( void )
{
if ( ValidWaveOut() )
{
AudioQueueStop(m_Queue, true);
m_bRunning = false;
AudioQueueRemovePropertyListener( m_Queue, kAudioQueueProperty_IsRunning, AudioQueueIsRunningCallback, this );
for ( int i = 0; i < NUM_BUFFERS_SOURCES; i++ )
AudioQueueFreeBuffer( m_Queue, m_Buffers[i]);
AudioQueueDispose( m_Queue, true);
m_Queue = NULL;
}
if ( m_sndBuffers )
{
free( m_sndBuffers );
m_sndBuffers = NULL;
}
}
//-----------------------------------------------------------------------------
// Mixing setup
//-----------------------------------------------------------------------------
int CAudioDeviceAudioQueue::PaintBegin( float mixAheadTime, int soundtime, int paintedtime )
{
// soundtime - total samples that have been played out to hardware at dmaspeed
// paintedtime - total samples that have been mixed at speed
// endtime - target for samples in mixahead buffer at speed
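// e.g. with mixAheadTime = 0.1 and a 44.1 kHz DMA speed this asks for roughly
// 4410 samples of lead, then clamps to the device buffer and rounds the span
// down to a multiple of 4 samples (values here are illustrative only)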
unsigned int endtime = soundtime + mixAheadTime * DeviceDmaSpeed();
int samps = DeviceSampleCount() >> (DeviceChannels()-1);
if ((int)(endtime - soundtime) > samps)
endtime = soundtime + samps;
if ((endtime - paintedtime) & 0x3)
{
// The difference between endtime and painted time should align on
// boundaries of 4 samples. This is important when upsampling from 11khz -> 44khz.
endtime -= (endtime - paintedtime) & 0x3;
}
return endtime;
}
//-----------------------------------------------------------------------------
// Actually performs the mixing
//-----------------------------------------------------------------------------
void CAudioDeviceAudioQueue::PaintEnd( void )
{
int cblocks = 4 << 1;
if ( m_bRunning && m_buffersSent == m_buffersCompleted )
{
// We are running the audio queue but have become starved of buffers.
// Stop the audio queue so we force a restart of it.
AudioQueueStop( m_Queue, true );
}
//
// submit a few new sound blocks
//
// 44K sound support
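// keep a fixed number of blocks in flight: whenever the gap between blocks
// submitted and blocks the callback has completed drops below cblocks, copy
// the next slice of m_sndBuffers into a queue buffer and enqueue it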
while (((m_buffersSent - m_buffersCompleted) >> SAMPLE_16BIT_SHIFT) < cblocks)
{
int iBuf = m_buffersSent&BUFF_MASK;
m_Buffers[iBuf]->mAudioDataByteSize = BUFFER_SIZE;
Q_memcpy( m_Buffers[iBuf]->mAudioData, (char *)m_sndBuffers + iBuf*BUFFER_SIZE, BUFFER_SIZE);
// Queue the buffer for playback.
OSStatus err = AudioQueueEnqueueBuffer( m_Queue, m_Buffers[iBuf], 0, NULL);
if ( err != noErr)
{
DevMsg( "Failed to AudioQueueEnqueueBuffer output %d\n", (int)err );
}
m_buffersSent++;
}
if ( !m_bRunning )
{
DevMsg( "Restarting sound playback\n" );
m_bRunning = true;
AudioQueueStart( m_Queue, NULL);
}
}
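// GetOutputPosition: convert the blocks submitted so far into a per-channel
// sample position, wrapping with DeviceSampleCount()-1 (valid because the
// buffer sample count is a power of two)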
int CAudioDeviceAudioQueue::GetOutputPosition( void )
{
int s = m_buffersSent * BUFFER_SIZE;
s >>= SAMPLE_16BIT_SHIFT;
s &= (DeviceSampleCount()-1);
return s / DeviceChannels();
}
//-----------------------------------------------------------------------------
// Pausing
//-----------------------------------------------------------------------------
void CAudioDeviceAudioQueue::Pause( void )
{
m_pauseCount++;
if (m_pauseCount == 1)
{
m_bRunning = false;
AudioQueueStop(m_Queue, true);
}
}
void CAudioDeviceAudioQueue::UnPause( void )
{
if ( m_pauseCount > 0 )
{
m_pauseCount--;
}
if ( m_pauseCount == 0 )
{
m_bRunning = true;
AudioQueueStart( m_Queue, NULL);
}
}
bool CAudioDeviceAudioQueue::IsActive( void )
{
return ( m_pauseCount == 0 );
}
float CAudioDeviceAudioQueue::MixDryVolume( void )
{
return 0;
}
bool CAudioDeviceAudioQueue::Should3DMix( void )
{
return false;
}
void CAudioDeviceAudioQueue::ClearBuffer( void )
{
if ( !m_sndBuffers )
return;
Q_memset( m_sndBuffers, 0x0, DeviceSampleCount() * DeviceSampleBytes() );
}
void CAudioDeviceAudioQueue::UpdateListener( const Vector& position, const Vector& forward, const Vector& right, const Vector& up )
{
}
bool CAudioDeviceAudioQueue::BIsPlaying()
{
UInt32 isRunning = 0;
UInt32 propSize = sizeof(isRunning);
OSStatus result = AudioQueueGetProperty( m_Queue, kAudioQueueProperty_IsRunning, &isRunning, &propSize);
return result == noErr && isRunning != 0;
}
void CAudioDeviceAudioQueue::MixBegin( int sampleCount )
{
MIX_ClearAllPaintBuffers( sampleCount, false );
}
void CAudioDeviceAudioQueue::MixUpsample( int sampleCount, int filtertype )
{
paintbuffer_t *ppaint = MIX_GetCurrentPaintbufferPtr();
int ifilter = ppaint->ifilter;
Assert (ifilter < CPAINTFILTERS);
S_MixBufferUpsample2x( sampleCount, ppaint->pbuf, &(ppaint->fltmem[ifilter][0]), CPAINTFILTERMEM, filtertype );
ppaint->ifilter++;
}
void CAudioDeviceAudioQueue::Mix8Mono( channel_t *pChannel, char *pData, int outputOffset, int inputOffset, fixedint rateScaleFix, int outCount, int timecompress )
{
int volume[CCHANVOLUMES];
paintbuffer_t *ppaint = MIX_GetCurrentPaintbufferPtr();
if (!MIX_ScaleChannelVolume( ppaint, pChannel, volume, 1))
return;
Mix8MonoWavtype( pChannel, ppaint->pbuf + outputOffset, volume, (byte *)pData, inputOffset, rateScaleFix, outCount );
}
void CAudioDeviceAudioQueue::Mix8Stereo( channel_t *pChannel, char *pData, int outputOffset, int inputOffset, fixedint rateScaleFix, int outCount, int timecompress )
{
int volume[CCHANVOLUMES];
paintbuffer_t *ppaint = MIX_GetCurrentPaintbufferPtr();
if (!MIX_ScaleChannelVolume( ppaint, pChannel, volume, 2 ))
return;
Mix8StereoWavtype( pChannel, ppaint->pbuf + outputOffset, volume, (byte *)pData, inputOffset, rateScaleFix, outCount );
}
void CAudioDeviceAudioQueue::Mix16Mono( channel_t *pChannel, short *pData, int outputOffset, int inputOffset, fixedint rateScaleFix, int outCount, int timecompress )
{
int volume[CCHANVOLUMES];
paintbuffer_t *ppaint = MIX_GetCurrentPaintbufferPtr();
if (!MIX_ScaleChannelVolume( ppaint, pChannel, volume, 1 ))
return;
Mix16MonoWavtype( pChannel, ppaint->pbuf + outputOffset, volume, pData, inputOffset, rateScaleFix, outCount );
}
void CAudioDeviceAudioQueue::Mix16Stereo( channel_t *pChannel, short *pData, int outputOffset, int inputOffset, fixedint rateScaleFix, int outCount, int timecompress )
{
int volume[CCHANVOLUMES];
paintbuffer_t *ppaint = MIX_GetCurrentPaintbufferPtr();
if (!MIX_ScaleChannelVolume( ppaint, pChannel, volume, 2 ))
return;
Mix16StereoWavtype( pChannel, ppaint->pbuf + outputOffset, volume, pData, inputOffset, rateScaleFix, outCount );
}
void CAudioDeviceAudioQueue::ChannelReset( int entnum, int channelIndex, float distanceMod )
{
}
void CAudioDeviceAudioQueue::TransferSamples( int end )
{
int lpaintedtime = g_paintedtime;
int endtime = end;
// resumes playback...
if ( m_sndBuffers )
{
S_TransferStereo16( m_sndBuffers, PAINTBUFFER, lpaintedtime, endtime );
}
}
void CAudioDeviceAudioQueue::SpatializeChannel( int volume[CCHANVOLUMES/2], int master_vol, const Vector& sourceDir, float gain, float mono )
{
VPROF("CAudioDeviceAudioQueue::SpatializeChannel");
S_SpatializeChannel( volume, master_vol, &sourceDir, gain, mono );
}
void CAudioDeviceAudioQueue::StopAllSounds( void )
{
m_bSoundsShutdown = true;
m_bRunning = false;
AudioQueueStop(m_Queue, true);
}
void CAudioDeviceAudioQueue::ApplyDSPEffects( int idsp, portable_samplepair_t *pbuffront, portable_samplepair_t *pbufrear, portable_samplepair_t *pbufcenter, int samplecount )
{
//SX_RoomFX( endtime, filter, timefx );
DSP_Process( idsp, pbuffront, pbufrear, pbufcenter, samplecount );
}
static uint32 GetOSXSpeakerConfig()
{
return 2;
}
static uint32 GetSpeakerConfigForSurroundMode( int surroundMode, const char **pConfigDesc )
{
uint32 newSpeakerConfig = 2;
*pConfigDesc = "stereo speaker";
return newSpeakerConfig;
}
void OnSndSurroundCvarChanged2( IConVar *pVar, const char *pOldString, float flOldValue )
{
// if the old value is -1, we're setting this from the detect routine for the first time
// no need to reset the device
if ( flOldValue == -1 )
return;
// get the user's previous speaker config
uint32 speaker_config = GetOSXSpeakerConfig();
// get the new config
uint32 newSpeakerConfig = 0;
const char *speakerConfigDesc = "";
ConVarRef var( pVar );
newSpeakerConfig = GetSpeakerConfigForSurroundMode( var.GetInt(), &speakerConfigDesc );
// make sure the config has changed
if (newSpeakerConfig == speaker_config)
return;
// set new configuration
//SetWindowsSpeakerConfig(newSpeakerConfig);
Msg("Speaker configuration has been changed to %s.\n", speakerConfigDesc);
// restart sound system so it takes effect
//g_pSoundServices->RestartSoundSystem();
}
void OnSndSurroundLegacyChanged2( IConVar *pVar, const char *pOldString, float flOldValue )
{
}

View File

@@ -11,10 +11,6 @@
 #endif
 #ifdef OSX
 #include "snd_dev_openal.h"
-#include "snd_dev_mac_audioqueue.h"
-ConVar snd_audioqueue( "snd_audioqueue", "1" );
 #endif
 // memdbgon must be the last include file in a .cpp file!!!
@@ -94,11 +90,6 @@ IAudioDevice *IAudioDevice::AutoDetectInit( bool waveOnly )
 pDevice = Audio_CreateWaveDevice();
 }
 #elif defined(OSX)
-if ( !CommandLine()->CheckParm( "-snd_openal" ) )
-{
-DevMsg( "Using AudioQueue Interface\n" );
-pDevice = Audio_CreateMacAudioQueueDevice();
-}
 if ( !pDevice )
 {
 DevMsg( "Using OpenAL Interface\n" );

View File

@@ -189,8 +189,6 @@ bool g_bUsingSteamVoice = false;
 #ifdef WIN32
 extern IVoiceRecord* CreateVoiceRecord_DSound(int nSamplesPerSec);
-#elif defined( OSX )
-extern IVoiceRecord* CreateVoiceRecord_AudioQueue(int sampleRate);
 #endif
 #ifdef POSIX
@@ -643,13 +641,8 @@ bool Voice_Init( const char *pCodecName, int nSampleRate )
 return false;
 // Get the voice input device.
-#ifdef OSX
+#if defined( OSX )
-g_pVoiceRecord = CreateVoiceRecord_AudioQueue( Voice_SamplesPerSec() );
-if ( !g_pVoiceRecord )
-{
-// Fall back to OpenAL
 g_pVoiceRecord = CreateVoiceRecord_OpenAL( Voice_SamplesPerSec() );
-}
 #elif defined( WIN32 )
 g_pVoiceRecord = CreateVoiceRecord_DSound( Voice_SamplesPerSec() );
 #elif defined( USE_SDL )
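With the AudioQueue recorder removed, the OSX branch of Voice_Init falls straight through to the OpenAL capture path; per the right-hand side of the hunk above it now reads roughly:

#if defined( OSX )
g_pVoiceRecord = CreateVoiceRecord_OpenAL( Voice_SamplesPerSec() );
#elif defined( WIN32 )
g_pVoiceRecord = CreateVoiceRecord_DSound( Voice_SamplesPerSec() );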

View File

@@ -1,528 +0,0 @@
//========= Copyright 1996-2009, Valve Corporation, All rights reserved. ============//
//
// Purpose:
//
// $NoKeywords: $
//
//=============================================================================//
// This module implements the voice record and compression functions
#include <Carbon/Carbon.h>
#include <AudioUnit/AudioUnit.h>
#include <AudioToolbox/AudioToolbox.h>
#include "tier0/platform.h"
#include "tier0/threadtools.h"
//#include "tier0/vcrmode.h"
#include "ivoicerecord.h"
#define kNumSecAudioBuffer 1.0f
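// one second of ring-buffer storage: InitalizeInterfaces() sizes m_SampleBuffer as
// kNumSecAudioBuffer * sample rate * 2 bytes per 16-bit mono sample, so e.g. a
// 22050 Hz capture rate would get roughly a 44100-byte circular buffer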
// ------------------------------------------------------------------------------
// VoiceRecord_AudioQueue
// ------------------------------------------------------------------------------
class VoiceRecord_AudioQueue : public IVoiceRecord
{
public:
VoiceRecord_AudioQueue();
virtual ~VoiceRecord_AudioQueue();
// IVoiceRecord.
virtual void Release();
virtual bool RecordStart();
virtual void RecordStop();
// Initialize. The format of the data we expect from the provider is
// 16-bit signed mono at the specified sample rate.
virtual bool Init( int nSampleRate );
virtual void Idle();
// Get the most recent N samples.
virtual int GetRecordedData(short *pOut, int nSamplesWanted );
AudioUnit GetAudioUnit() { return m_AudioUnit; }
AudioConverterRef GetConverter() { return m_Converter; }
void RenderBuffer( const short *pszBuf, int nSamples );
bool BRecording() { return m_bRecordingAudio; }
void ClearThreadHandle() { m_hThread = NULL; m_bFirstInit = false; }
AudioBufferList m_MicInputBuffer;
AudioBufferList m_ConverterBuffer;
void *m_pMicInputBuffer;
int m_nMicInputSamplesAvaialble;
float m_flSampleRateConversion;
int m_nBufferFrameSize;
int m_ConverterBufferSize;
int m_MicInputBufferSize;
int m_InputBytesPerPacket;
private:
bool InitalizeInterfaces(); // Initialize the AudioUnit capture buffers and other interfaces
void ReleaseInterfaces(); // Release AudioUnit buffers and other interfaces
void ClearInterfaces(); // Clear members.
private:
AudioUnit m_AudioUnit;
char *m_SampleBuffer;
int m_SampleBufferSize;
int m_nSampleRate;
bool m_bRecordingAudio;
bool m_bFirstInit;
ThreadHandle_t m_hThread;
AudioConverterRef m_Converter;
CInterlockedUInt m_SampleBufferReadPos;
CInterlockedUInt m_SampleBufferWritePos;
//UInt32 nPackets = 0;
//bool bHaveListData = false;
};
VoiceRecord_AudioQueue::VoiceRecord_AudioQueue() :
m_nSampleRate( 0 ), m_AudioUnit( NULL ), m_SampleBufferSize(0), m_SampleBuffer(NULL),
m_SampleBufferReadPos(0), m_SampleBufferWritePos(0), m_bRecordingAudio(false), m_hThread( NULL ), m_bFirstInit( true )
{
ClearInterfaces();
}
VoiceRecord_AudioQueue::~VoiceRecord_AudioQueue()
{
ReleaseInterfaces();
if ( m_hThread )
ReleaseThreadHandle( m_hThread );
m_hThread = NULL;
}
void VoiceRecord_AudioQueue::Release()
{
ReleaseInterfaces();
}
uintp StartAudio( void *pRecorder )
{
VoiceRecord_AudioQueue *vr = (VoiceRecord_AudioQueue *)pRecorder;
if ( vr )
{
//printf( "AudioOutputUnitStart\n" );
AudioOutputUnitStart( vr->GetAudioUnit() );
vr->ClearThreadHandle();
}
//printf( "StartAudio thread done\n" );
return 0;
}
bool VoiceRecord_AudioQueue::RecordStart()
{
if ( !m_AudioUnit )
return false;
if ( m_bFirstInit )
m_hThread = CreateSimpleThread( StartAudio, this );
else
AudioOutputUnitStart( m_AudioUnit );
m_SampleBufferReadPos = m_SampleBufferWritePos = 0;
m_bRecordingAudio = true;
//printf( "VoiceRecord_AudioQueue::RecordStart\n" );
return ( !m_bFirstInit || m_hThread != NULL );
}
void VoiceRecord_AudioQueue::RecordStop()
{
// Stop capturing.
if ( m_AudioUnit && m_bRecordingAudio )
{
AudioOutputUnitStop( m_AudioUnit );
//printf( "AudioOutputUnitStop\n" );
}
m_SampleBufferReadPos = m_SampleBufferWritePos = 0;
m_bRecordingAudio = false;
if ( m_hThread )
ReleaseThreadHandle( m_hThread );
m_hThread = NULL;
}
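// AudioConverter pull callback: the sample-rate converter asks for source packets
// and this hands it slices of the raw mic-input buffer, advancing the buffer
// pointer and byte count until the captured data is exhausted (then it returns -1
// so AudioConverterFillComplexBuffer stops early)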
OSStatus ComplexBufferFillPlayback( AudioConverterRef inAudioConverter,
UInt32 *ioNumberDataPackets,
AudioBufferList *ioData,
AudioStreamPacketDescription **outDataPacketDesc,
void *inUserData)
{
VoiceRecord_AudioQueue *vr = (VoiceRecord_AudioQueue *)inUserData;
if ( !vr->BRecording() )
return noErr;
if ( vr->m_nMicInputSamplesAvaialble )
{
int nBytesRequired = *ioNumberDataPackets * vr->m_InputBytesPerPacket;
int nBytesAvailable = vr->m_nMicInputSamplesAvaialble*vr->m_InputBytesPerPacket;
if ( nBytesRequired < nBytesAvailable )
{
ioData->mBuffers[0].mData = vr->m_MicInputBuffer.mBuffers[0].mData;
ioData->mBuffers[0].mDataByteSize = nBytesRequired;
vr->m_MicInputBuffer.mBuffers[0].mData = (char *)vr->m_MicInputBuffer.mBuffers[0].mData+nBytesRequired;
vr->m_MicInputBuffer.mBuffers[0].mDataByteSize = nBytesAvailable - nBytesRequired;
}
else
{
ioData->mBuffers[0].mData = vr->m_MicInputBuffer.mBuffers[0].mData;
ioData->mBuffers[0].mDataByteSize = nBytesAvailable;
vr->m_MicInputBuffer.mBuffers[0].mData = vr->m_pMicInputBuffer;
vr->m_MicInputBuffer.mBuffers[0].mDataByteSize = vr->m_MicInputBufferSize;
}
*ioNumberDataPackets = ioData->mBuffers[0].mDataByteSize / vr->m_InputBytesPerPacket;
vr->m_nMicInputSamplesAvaialble = nBytesAvailable / vr->m_InputBytesPerPacket - *ioNumberDataPackets;
}
else
{
*ioNumberDataPackets = 0;
return -1;
}
return noErr;
}
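// AudioUnit input callback: pull the captured frames with AudioUnitRender, run
// them through the sample-rate converter, and append the converted 16-bit mono
// samples to the circular sample buffer via RenderBuffer()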
static OSStatus recordingCallback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData)
{
VoiceRecord_AudioQueue *vr = (VoiceRecord_AudioQueue *)inRefCon;
if ( !vr->BRecording() )
return noErr;
OSStatus err = noErr;
if ( vr->m_nMicInputSamplesAvaialble == 0 )
{
err = AudioUnitRender( vr->GetAudioUnit(), ioActionFlags, inTimeStamp, 1, inNumberFrames, &vr->m_MicInputBuffer );
if ( err == noErr )
vr->m_nMicInputSamplesAvaialble = vr->m_MicInputBuffer.mBuffers[0].mDataByteSize / vr->m_InputBytesPerPacket;
}
if ( vr->m_nMicInputSamplesAvaialble > 0 )
{
UInt32 nConverterSamples = ceil(vr->m_nMicInputSamplesAvaialble/vr->m_flSampleRateConversion);
vr->m_ConverterBuffer.mBuffers[0].mDataByteSize = vr->m_ConverterBufferSize;
OSStatus err = AudioConverterFillComplexBuffer( vr->GetConverter(),
ComplexBufferFillPlayback,
vr,
&nConverterSamples,
&vr->m_ConverterBuffer,
NULL );
if ( err == noErr || err == -1 )
vr->RenderBuffer( (short *)vr->m_ConverterBuffer.mBuffers[0].mData, vr->m_ConverterBuffer.mBuffers[0].mDataByteSize/sizeof(short) );
}
return err;
}
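// RenderBuffer: circular write into m_SampleBuffer, split into two memcpys when
// the write wraps past the end; if the write position overtakes the read position,
// the read pointer is bumped half a buffer ahead to resynchronize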
void VoiceRecord_AudioQueue::RenderBuffer( const short *pszBuf, int nSamples )
{
int samplePos = m_SampleBufferWritePos;
int samplePosBefore = samplePos;
int readPos = m_SampleBufferReadPos;
bool bBeforeRead = false;
if ( samplePos < readPos )
bBeforeRead = true;
char *pOut = (char *)(m_SampleBuffer + samplePos);
int nFirstCopy = MIN( nSamples*sizeof(short), m_SampleBufferSize - samplePos );
memcpy( pOut, pszBuf, nFirstCopy );
samplePos += nFirstCopy;
if ( nSamples*sizeof(short) > nFirstCopy )
{
nSamples -= ( nFirstCopy / sizeof(short) );
samplePos = 0;
memcpy( m_SampleBuffer, (const char *)pszBuf + nFirstCopy, nSamples * sizeof(short) );
samplePos += nSamples * sizeof(short);
}
m_SampleBufferWritePos = samplePos%m_SampleBufferSize;
if ( (bBeforeRead && samplePos > readPos) )
{
m_SampleBufferReadPos = (readPos+m_SampleBufferSize/2)%m_SampleBufferSize; // if we crossed the read pointer then bump it forward
//printf( "Crossed %d %d (%d)\n", (int)samplePosBefore, (int)samplePos, readPos );
}
}
bool VoiceRecord_AudioQueue::InitalizeInterfaces()
{
//printf( "Initializing audio queue recorder\n" );
// Describe audio component
ComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_HALOutput;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
Component comp = FindNextComponent(NULL, &desc);
if (comp == NULL)
return false;
OSStatus status = OpenAComponent(comp, &m_AudioUnit);
if ( status != noErr )
return false;
// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty( m_AudioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input,
1, &flag, sizeof(flag));
if ( status != noErr )
return false;
// disable output on the device
flag = 0;
status = AudioUnitSetProperty( m_AudioUnit,kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output,
0, &flag,sizeof(flag));
if ( status != noErr )
return false;
UInt32 size = sizeof(AudioDeviceID);
AudioDeviceID inputDevice;
status = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice,&size, &inputDevice);
if ( status != noErr )
return false;
status =AudioUnitSetProperty( m_AudioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global,
0, &inputDevice, sizeof(inputDevice));
if ( status != noErr )
return false;
// Describe format
AudioStreamBasicDescription audioDeviceFormat;
size = sizeof(AudioStreamBasicDescription);
status = AudioUnitGetProperty( m_AudioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
1, // input bus
&audioDeviceFormat,
&size);
if ( status != noErr )
return false;
// we only want mono audio, so if they have a stereo input ask for mono
if ( audioDeviceFormat.mChannelsPerFrame == 2 )
{
audioDeviceFormat.mChannelsPerFrame = 1;
audioDeviceFormat.mBytesPerPacket /= 2;
audioDeviceFormat.mBytesPerFrame /= 2;
}
// Apply format
status = AudioUnitSetProperty( m_AudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
1, &audioDeviceFormat, sizeof(audioDeviceFormat) );
if ( status != noErr )
return false;
AudioStreamBasicDescription audioOutputFormat;
audioOutputFormat = audioDeviceFormat;
audioOutputFormat.mFormatID = kAudioFormatLinearPCM;
audioOutputFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioOutputFormat.mBytesPerPacket = 2; // 16-bit samples * 1 channels
audioOutputFormat.mFramesPerPacket = 1;
audioOutputFormat.mBytesPerFrame = 2; // 16-bit samples * 1 channels
audioOutputFormat.mChannelsPerFrame = 1;
audioOutputFormat.mBitsPerChannel = 16;
audioOutputFormat.mReserved = 0;
audioOutputFormat.mSampleRate = m_nSampleRate;
m_flSampleRateConversion = audioDeviceFormat.mSampleRate / audioOutputFormat.mSampleRate;
// setup sample rate conversion
status = AudioConverterNew( &audioDeviceFormat, &audioOutputFormat, &m_Converter );
if ( status != noErr )
return false;
UInt32 primeMethod = kConverterPrimeMethod_None;
status = AudioConverterSetProperty( m_Converter, kAudioConverterPrimeMethod, sizeof(UInt32), &primeMethod);
if ( status != noErr )
return false;
UInt32 quality = kAudioConverterQuality_Medium;
status = AudioConverterSetProperty( m_Converter, kAudioConverterSampleRateConverterQuality, sizeof(UInt32), &quality);
if ( status != noErr )
return false;
// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = this;
status = AudioUnitSetProperty( m_AudioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global,
0, &callbackStruct, sizeof(callbackStruct) );
if ( status != noErr )
return false;
UInt32 bufferFrameSize;
size = sizeof(bufferFrameSize);
status = AudioDeviceGetProperty( inputDevice, 1, 1, kAudioDevicePropertyBufferFrameSize, &size, &bufferFrameSize );
if ( status != noErr )
return false;
m_nBufferFrameSize = bufferFrameSize;
// allocate the input and conversion sound storage buffers
m_MicInputBuffer.mNumberBuffers = 1;
m_MicInputBuffer.mBuffers[0].mDataByteSize = m_nBufferFrameSize*audioDeviceFormat.mBitsPerChannel/8*audioDeviceFormat.mChannelsPerFrame;
m_MicInputBuffer.mBuffers[0].mData = malloc( m_MicInputBuffer.mBuffers[0].mDataByteSize );
m_MicInputBuffer.mBuffers[0].mNumberChannels = audioDeviceFormat.mChannelsPerFrame;
m_pMicInputBuffer = m_MicInputBuffer.mBuffers[0].mData;
m_MicInputBufferSize = m_MicInputBuffer.mBuffers[0].mDataByteSize;
m_InputBytesPerPacket = audioDeviceFormat.mBytesPerPacket;
m_ConverterBuffer.mNumberBuffers = 1;
m_ConverterBuffer.mBuffers[0].mDataByteSize = m_nBufferFrameSize*audioOutputFormat.mBitsPerChannel/8*audioOutputFormat.mChannelsPerFrame;
m_ConverterBuffer.mBuffers[0].mData = malloc( m_MicInputBuffer.mBuffers[0].mDataByteSize );
m_ConverterBuffer.mBuffers[0].mNumberChannels = 1;
m_ConverterBufferSize = m_ConverterBuffer.mBuffers[0].mDataByteSize;
m_nMicInputSamplesAvaialble = 0;
m_SampleBufferReadPos = m_SampleBufferWritePos = 0;
m_SampleBufferSize = ceil( kNumSecAudioBuffer * m_nSampleRate * audioOutputFormat.mBytesPerPacket );
m_SampleBuffer = (char *)malloc( m_SampleBufferSize );
memset( m_SampleBuffer, 0x0, m_SampleBufferSize );
DevMsg( "Initialized AudioQueue record interface\n" );
return true;
}
bool VoiceRecord_AudioQueue::Init( int nSampleRate )
{
if ( m_AudioUnit && m_nSampleRate != nSampleRate )
{
// Need to recreate interfaces with different sample rate
ReleaseInterfaces();
ClearInterfaces();
}
m_nSampleRate = nSampleRate;
// Re-initialize the capture buffer if necessary
if ( !m_AudioUnit )
{
InitalizeInterfaces();
}
m_SampleBufferReadPos = m_SampleBufferWritePos = 0;
//printf( "VoiceRecord_AudioQueue::Init()\n" );
// Initialise
OSStatus status = AudioUnitInitialize( m_AudioUnit );
if ( status != noErr )
return false;
return true;
}
void VoiceRecord_AudioQueue::ReleaseInterfaces()
{
AudioOutputUnitStop( m_AudioUnit );
AudioConverterDispose( m_Converter );
AudioUnitUninitialize( m_AudioUnit );
m_AudioUnit = NULL;
m_Converter = NULL;
}
void VoiceRecord_AudioQueue::ClearInterfaces()
{
m_AudioUnit = NULL;
m_Converter = NULL;
m_SampleBufferReadPos = m_SampleBufferWritePos = 0;
if ( m_SampleBuffer )
free( m_SampleBuffer );
m_SampleBuffer = NULL;
if ( m_MicInputBuffer.mBuffers[0].mData )
free( m_MicInputBuffer.mBuffers[0].mData );
if ( m_ConverterBuffer.mBuffers[0].mData )
free( m_ConverterBuffer.mBuffers[0].mData );
m_MicInputBuffer.mBuffers[0].mData = NULL;
m_ConverterBuffer.mBuffers[0].mData = NULL;
}
void VoiceRecord_AudioQueue::Idle()
{
}
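// GetRecordedData: drain the circular buffer from the read position up to the
// write position (handling wrap-around), copy at most nSamples 16-bit samples
// into pOut, and return the number of samples actually copied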
int VoiceRecord_AudioQueue::GetRecordedData(short *pOut, int nSamples )
{
if ( !m_SampleBuffer )
return 0;
int cbSamples = nSamples*2; // convert to bytes
int writePos = m_SampleBufferWritePos;
int readPos = m_SampleBufferReadPos;
int nOutstandingSamples = ( writePos - readPos );
if ( readPos > writePos ) // writing has wrapped around
{
nOutstandingSamples = writePos + ( m_SampleBufferSize - readPos );
}
if ( !nOutstandingSamples )
return 0;
if ( nOutstandingSamples < cbSamples )
cbSamples = nOutstandingSamples; // clamp to the number of samples we have available
memcpy( (char *)pOut, m_SampleBuffer + readPos, MIN( cbSamples, m_SampleBufferSize - readPos ) );
if ( cbSamples > ( m_SampleBufferSize - readPos ) )
{
int offset = m_SampleBufferSize - readPos;
cbSamples -= offset;
readPos = 0;
memcpy( (char *)pOut + offset, m_SampleBuffer, cbSamples );
}
readPos+=cbSamples;
m_SampleBufferReadPos = readPos%m_SampleBufferSize;
//printf( "Returning %d samples, %d %d (%d)\n", cbSamples/2, (int)m_SampleBufferReadPos, (int)m_SampleBufferWritePos, m_SampleBufferSize );
return cbSamples/2;
}
VoiceRecord_AudioQueue g_AudioQueueVoiceRecord;
IVoiceRecord* CreateVoiceRecord_AudioQueue( int sampleRate )
{
if ( g_AudioQueueVoiceRecord.Init( sampleRate ) )
{
return &g_AudioQueueVoiceRecord;
}
else
{
g_AudioQueueVoiceRecord.Release();
return NULL;
}
}

View File

@@ -342,7 +342,6 @@ def build(bld):
 source += [
 'audio/snd_dev_openal.cpp', # [$OSXALL]
 'audio/snd_dev_mac_audioqueue.cpp', # [$OSXALL]
-'audio/voice_record_mac_audioqueue.cpp', #[$OSXALL]
 ]
 includes = [