|
|
@@ -59,8 +59,9 @@
|
|
|
//---------------------------------------------------------------------------------- |
|
|
|
// Defines and Macros |
|
|
|
//---------------------------------------------------------------------------------- |
|
|
|
#define MAX_STREAM_BUFFERS 2 |
|
|
|
#define MAX_AUDIO_CONTEXTS 4 // Number of open AL sources |
|
|
|
#define MAX_STREAM_BUFFERS 2 // Number of buffers for each alSource |
|
|
|
#define MAX_MIX_CHANNELS 4 // Number of open AL sources |
|
|
|
#define MAX_MUSIC_STREAMS 2 // Number of simultaneous music sources
|
|
|
|
|
|
|
#if defined(PLATFORM_RPI) || defined(PLATFORM_ANDROID) |
|
|
|
// NOTE: On RPI and Android this should be lower to avoid frame stalls
|
|
@@ -76,37 +77,32 @@
|
|
|
// Types and Structures Definition |
|
|
|
//---------------------------------------------------------------------------------- |
|
|
|
|
|
|
|
// Music type (file streaming from memory) |
|
|
|
// NOTE: Anything longer than ~10 seconds should be streamed... |
|
|
|
typedef struct Music { |
|
|
|
stb_vorbis *stream; |
|
|
|
jar_xm_context_t *chipctx; // Stores jar_xm context |
|
|
|
|
|
|
|
ALuint buffers[MAX_STREAM_BUFFERS]; |
|
|
|
ALuint source; |
|
|
|
ALenum format; |
|
|
|
|
|
|
|
int channels; |
|
|
|
int sampleRate; |
|
|
|
int totalSamplesLeft; |
|
|
|
float totalLengthSeconds; |
|
|
|
bool loop; |
|
|
|
bool chipTune; // True if chiptune is loaded |
|
|
|
} Music; |
|
|
|
|
|
|
|
// Audio Context, used to create custom audio streams that are not bound to a sound file. There can be |
|
|
|
// no more than 4 concurrent audio contexts in use. This is due to each active context being tied to |
|
|
|
// a dedicated mix channel. All audio is 32bit floating point in stereo. |
|
|
|
typedef struct AudioContext_t { |
|
|
|
// Used to create custom audio streams that are not bound to a specific file. There can be |
|
|
|
// no more than 4 concurrent mix channels in use. This is due to each active mixc being tied to
|
|
|
// a dedicated mix channel. |
|
|
|
typedef struct MixChannel_t { |
|
|
|
unsigned short sampleRate; // default is 48000 |
|
|
|
unsigned char channels; // 1=mono,2=stereo |
|
|
|
unsigned char mixChannel; // 0-3 or mixA-mixD, each mix channel can receive up to one dedicated audio stream |
|
|
|
bool floatingPoint; // if false then the short datatype is used instead |
|
|
|
bool playing; |
|
|
|
bool playing; // false if paused |
|
|
|
ALenum alFormat; // openAL format specifier |
|
|
|
ALuint alSource; // openAL source |
|
|
|
ALuint alBuffer[MAX_STREAM_BUFFERS]; // openAL sample buffer |
|
|
|
} AudioContext_t; |
|
|
|
} MixChannel_t; |
|
|
|
|
|
|
|
// Music type (file streaming from memory) |
|
|
|
// NOTE: Anything longer than ~10 seconds should be streamed into a mix channel... |
|
|
|
typedef struct Music { |
|
|
|
stb_vorbis *stream; |
|
|
|
jar_xm_context_t *chipctx; // Stores jar_xm context
|
|
|
MixChannel_t *mixc; // mix channel |
|
|
|
|
|
|
|
int totalSamplesLeft; |
|
|
|
float totalLengthSeconds; |
|
|
|
bool loop; |
|
|
|
bool chipTune; // True if chiptune is loaded |
|
|
|
} Music; |
|
|
|
|
|
|
|
#if defined(AUDIO_STANDALONE) |
|
|
|
typedef enum { INFO = 0, ERROR, WARNING, DEBUG, OTHER } TraceLogType; |
|
|
@@ -115,23 +111,28 @@ typedef enum { INFO = 0, ERROR, WARNING, DEBUG, OTHER } TraceLogType;
|
|
|
//---------------------------------------------------------------------------------- |
|
|
|
// Global Variables Definition |
|
|
|
//---------------------------------------------------------------------------------- |
|
|
|
static AudioContext_t* mixChannelsActive_g[MAX_AUDIO_CONTEXTS]; // What mix channels are currently active |
|
|
|
static bool musicEnabled = false; |
|
|
|
static Music currentMusic; // Current music loaded |
|
|
|
// NOTE: Only one music file playing at a time |
|
|
|
static MixChannel_t* mixChannelsActive_g[MAX_MIX_CHANNELS]; // What mix channels are currently active |
|
|
|
static bool musicEnabled_g = false; |
|
|
|
static Music currentMusic[MAX_MUSIC_STREAMS]; // Current music loaded, up to two can play at the same time |
|
|
|
|
|
|
|
//---------------------------------------------------------------------------------- |
|
|
|
// Module specific Functions Declaration |
|
|
|
//---------------------------------------------------------------------------------- |
|
|
|
static Wave LoadWAV(const char *fileName); // Load WAV file |
|
|
|
static Wave LoadOGG(char *fileName); // Load OGG file |
|
|
|
static void UnloadWave(Wave wave); // Unload wave data |
|
|
|
static Wave LoadWAV(const char *fileName); // Load WAV file |
|
|
|
static Wave LoadOGG(char *fileName); // Load OGG file |
|
|
|
static void UnloadWave(Wave wave); // Unload wave data |
|
|
|
|
|
|
|
static bool BufferMusicStream(ALuint buffer); // Fill music buffers with data
|
|
|
static void EmptyMusicStream(void); // Empty music buffers |
|
|
|
static bool BufferMusicStream(int index, int numBuffers); // Fill music buffers with data
|
|
|
static void EmptyMusicStream(int index); // Empty music buffers |
|
|
|
|
|
|
|
static unsigned short FillAlBufferWithSilence(AudioContext_t *context, ALuint buffer);// fill buffer with zeros, returns number processed |
|
|
|
static void ResampleShortToFloat(short *shorts, float *floats, unsigned short len); // pass two arrays of the same length in
|
|
|
static void ResampleByteToFloat(char *chars, float *floats, unsigned short len); // pass two arrays of the same length in
|
|
|
|
|
|
|
static MixChannel_t* InitMixChannel(unsigned short sampleRate, unsigned char mixChannel, unsigned char channels, bool floatingPoint); // For streaming into mix channels. |
|
|
|
static void CloseMixChannel(MixChannel_t* mixc); // Frees mix channel |
|
|
|
static int BufferMixChannel(MixChannel_t* mixc, void *data, int numberElements); // Pushes more audio data into mixc mix channel, if NULL is passed it pauses |
|
|
|
static int FillAlBufferWithSilence(MixChannel_t *mixc, ALuint buffer); // Fill buffer with zeros, returns number processed |
|
|
|
static void ResampleShortToFloat(short *shorts, float *floats, unsigned short len); // Pass two arrays of the same length in
|
|
|
static void ResampleByteToFloat(char *chars, float *floats, unsigned short len); // Pass two arrays of the same length in
|
|
|
static int IsMusicStreamReadyForBuffering(int index); // Checks if music buffer is ready to be refilled |
|
|
|
|
|
|
|
#if defined(AUDIO_STANDALONE) |
|
|
|
const char *GetExtension(const char *fileName); // Get the extension for a filename |
|
|
@@ -142,7 +143,7 @@ void TraceLog(int msgType, const char *text, ...); // Outputs a trace log messa
|
|
|
// Module Functions Definition - Audio Device initialization and Closing |
|
|
|
//---------------------------------------------------------------------------------- |
|
|
|
|
|
|
|
// Initialize audio device and context |
|
|
|
// Initialize audio device and mixc |
|
|
|
void InitAudioDevice(void) |
|
|
|
{ |
|
|
|
// Open and initialize a device with default settings |
|
|
@@ -158,7 +159,7 @@ void InitAudioDevice(void)
|
|
|
|
|
|
|
alcCloseDevice(device); |
|
|
|
|
|
|
|
TraceLog(ERROR, "Could not setup audio context"); |
|
|
|
TraceLog(ERROR, "Could not setup mix channel"); |
|
|
|
} |
|
|
|
|
|
|
|
TraceLog(INFO, "Audio device and context initialized successfully: %s", alcGetString(device, ALC_DEVICE_SPECIFIER)); |
|
|
@@ -169,15 +170,19 @@ void InitAudioDevice(void)
|
|
|
alListener3f(AL_ORIENTATION, 0, 0, -1); |
|
|
|
} |
|
|
|
|
|
|
|
// Close the audio device for the current context, and destroys the context |
|
|
|
// Close the audio device for all contexts |
|
|
|
void CloseAudioDevice(void) |
|
|
|
{ |
|
|
|
StopMusicStream(); // Stop music streaming and close current stream |
|
|
|
for(int index=0; index<MAX_MUSIC_STREAMS; index++) |
|
|
|
{ |
|
|
|
if(currentMusic[index].mixc) StopMusicStream(index); // Stop music streaming and close current stream |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
ALCdevice *device; |
|
|
|
ALCcontext *context = alcGetCurrentContext(); |
|
|
|
|
|
|
|
if (context == NULL) TraceLog(WARNING, "Could not get current audio context for closing"); |
|
|
|
if (context == NULL) TraceLog(WARNING, "Could not get current mix channel for closing"); |
|
|
|
|
|
|
|
device = alcGetContextsDevice(context); |
|
|
|
|
|
|
@@ -202,187 +207,141 @@ bool IsAudioDeviceReady(void)
|
|
|
// Module Functions Definition - Custom audio output |
|
|
|
//---------------------------------------------------------------------------------- |
|
|
|
|
|
|
|
// Audio contexts are for outputting custom audio waveforms. This will shut down any other sound sources currently playing
|
|
|
// The mixChannel is what mix channel you want to operate on, 0-3 are the ones available. Each mix channel can only be used by one stream at a time.
|
|
|
// example usage is InitAudioContext(48000, 0, 2, true); // mix channel 0, 48kHz, stereo, floating point
|
|
|
n">AudioContext InitAudioContext(unsigned short sampleRate, unsigned char mixChannel, unsigned char channels, bool floatingPoint) |
|
|
|
// For streaming into mix channels. |
|
|
|
// The mixChannel is what audio muxing channel you want to operate on, 0-3 are the ones available. Each mix channel can only be used by one stream at a time.
|
|
|
// example usage is InitMixChannel(48000, 0, 2, true); // mix channel 0, 48kHz, stereo, floating point
|
|
|
k">static MixChannel_t* InitMixChannel(unsigned short sampleRate, unsigned char mixChannel, unsigned char channels, bool floatingPoint) |
|
|
|
{ |
|
|
|
if(mixChannel >= MAX_AUDIO_CONTEXTS) return NULL; |
|
|
|
if(mixChannel >= MAX_MIX_CHANNELS) return NULL; |
|
|
|
if(!IsAudioDeviceReady()) InitAudioDevice(); |
|
|
|
else StopMusicStream(); |
|
|
|
|
|
|
|
if(!mixChannelsActive_g[mixChannel]){ |
|
|
|
AudioContext_t *ac = (AudioContext_t*)malloc(sizeof(AudioContext_t)); |
|
|
|
ac->sampleRate = sampleRate; |
|
|
|
ac->channels = channels; |
|
|
|
ac->mixChannel = mixChannel; |
|
|
|
ac->floatingPoint = floatingPoint; |
|
|
|
mixChannelsActive_g[mixChannel] = ac; |
|
|
|
MixChannel_t *mixc = (MixChannel_t*)malloc(sizeof(MixChannel_t)); |
|
|
|
mixc->sampleRate = sampleRate; |
|
|
|
mixc->channels = channels; |
|
|
|
mixc->mixChannel = mixChannel; |
|
|
|
mixc->floatingPoint = floatingPoint; |
|
|
|
mixChannelsActive_g[mixChannel] = mixc; |
|
|
|
|
|
|
|
// setup openAL format |
|
|
|
if(channels == 1) |
|
|
|
{ |
|
|
|
if(floatingPoint) |
|
|
|
ac->alFormat = AL_FORMAT_MONO_FLOAT32; |
|
|
|
mixc->alFormat = AL_FORMAT_MONO_FLOAT32; |
|
|
|
else |
|
|
|
ac->alFormat = AL_FORMAT_MONO16; |
|
|
|
mixc->alFormat = AL_FORMAT_MONO16; |
|
|
|
} |
|
|
|
else if(channels == 2) |
|
|
|
{ |
|
|
|
if(floatingPoint) |
|
|
|
ac->alFormat = AL_FORMAT_STEREO_FLOAT32; |
|
|
|
mixc->alFormat = AL_FORMAT_STEREO_FLOAT32; |
|
|
|
else |
|
|
|
ac->alFormat = AL_FORMAT_STEREO16; |
|
|
|
mixc->alFormat = AL_FORMAT_STEREO16; |
|
|
|
} |
|
|
|
|
|
|
|
// Create an audio source |
|
|
|
alGenSources(1, &ac->alSource); |
|
|
|
alSourcef(ac->alSource, AL_PITCH, 1); |
|
|
|
alSourcef(ac->alSource, AL_GAIN, 1); |
|
|
|
alSource3f(ac->alSource, AL_POSITION, 0, 0, 0); |
|
|
|
alSource3f(ac->alSource, AL_VELOCITY, 0, 0, 0); |
|
|
|
alGenSources(1, &mixc->alSource); |
|
|
|
alSourcef(mixc->alSource, AL_PITCH, 1); |
|
|
|
alSourcef(mixc->alSource, AL_GAIN, 1); |
|
|
|
alSource3f(mixc->alSource, AL_POSITION, 0, 0, 0); |
|
|
|
alSource3f(mixc->alSource, AL_VELOCITY, 0, 0, 0); |
|
|
|
|
|
|
|
// Create Buffer |
|
|
|
alGenBuffers(MAX_STREAM_BUFFERS, ac->alBuffer); |
|
|
|
alGenBuffers(MAX_STREAM_BUFFERS, mixc->alBuffer); |
|
|
|
|
|
|
|
//fill buffers |
|
|
|
int x; |
|
|
|
for(x=0;x<MAX_STREAM_BUFFERS;x++) |
|
|
|
FillAlBufferWithSilence(ac, ac->alBuffer[x]); |
|
|
|
FillAlBufferWithSilence(mixc, mixc->alBuffer[x]); |
|
|
|
|
|
|
|
alSourceQueueBuffers(ac->alSource, MAX_STREAM_BUFFERS, ac->alBuffer); |
|
|
|
alSourcePlay(ac->alSource); |
|
|
|
ac->playing = true; |
|
|
|
alSourceQueueBuffers(mixc->alSource, MAX_STREAM_BUFFERS, mixc->alBuffer); |
|
|
|
mixc->playing = true; |
|
|
|
alSourcePlay(mixc->alSource); |
|
|
|
|
|
|
|
return ac; |
|
|
|
return mixc; |
|
|
|
} |
|
|
|
return NULL; |
|
|
|
} |
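
// Example usage (illustrative sketch only, assuming the requested mix channel slot is free and
// MUSIC_BUFFER_SIZE_FLOAT is defined elsewhere in this module):
//     MixChannel_t *mixc = InitMixChannel(48000, 0, 2, true);       // mix channel 0, 48kHz, stereo, float
//     if (mixc != NULL)
//     {
//         float samples[MUSIC_BUFFER_SIZE_FLOAT] = { 0.0f };        // caller-generated audio data
//         BufferMixChannel(mixc, samples, MUSIC_BUFFER_SIZE_FLOAT); // push one buffer worth of samples
//         CloseMixChannel(mixc);                                    // release the mix channel when done
//     }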
|
|
|
|
|
|
|
// Frees buffer in audio context |
|
|
|
void CloseAudioContext(AudioContext ctx) |
|
|
|
// Frees buffer in mix channel |
|
|
|
static void CloseMixChannel(MixChannel_t* mixc) |
|
|
|
{ |
|
|
|
AudioContext_t *context = (AudioContext_t*)ctx; |
|
|
|
if(context){ |
|
|
|
alSourceStop(context->alSource); |
|
|
|
context->playing = false; |
|
|
|
if(mixc){ |
|
|
|
alSourceStop(mixc->alSource); |
|
|
|
mixc->playing = false; |
|
|
|
|
|
|
|
//flush out all queued buffers |
|
|
|
ALuint buffer = 0; |
|
|
|
int queued = 0; |
|
|
|
alGetSourcei(context->alSource, AL_BUFFERS_QUEUED, &queued); |
|
|
|
alGetSourcei(mixc->alSource, AL_BUFFERS_QUEUED, &queued); |
|
|
|
while (queued > 0) |
|
|
|
{ |
|
|
|
alSourceUnqueueBuffers(context->alSource, 1, &buffer); |
|
|
|
alSourceUnqueueBuffers(mixc->alSource, 1, &buffer); |
|
|
|
queued--; |
|
|
|
} |
|
|
|
|
|
|
|
//delete source and buffers |
|
|
|
alDeleteSources(1, &context->alSource); |
|
|
|
alDeleteBuffers(MAX_STREAM_BUFFERS, context->alBuffer); |
|
|
|
mixChannelsActive_g[context->mixChannel] = NULL; |
|
|
|
free(context); |
|
|
|
ctx = NULL; |
|
|
|
alDeleteSources(1, &mixc->alSource); |
|
|
|
alDeleteBuffers(MAX_STREAM_BUFFERS, mixc->alBuffer); |
|
|
|
mixChannelsActive_g[mixc->mixChannel] = NULL; |
|
|
|
free(mixc); |
|
|
|
mixc = NULL; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
// Pushes more audio data into the context mix channel; if no data is ever pushed then zeros are fed in.
|
|
|
// Call "UpdateAudioContext(ctx, NULL, 0)" if you want to pause the audio. |
|
|
|
// Pushes more audio data into the mixc mix channel, only one buffer per call
|
|
|
// Call "BufferMixChannel(mixc, NULL, 0)" if you want to pause the audio. |
|
|
|
// @Returns number of samples that were processed.
|
|
|
t">unsigned short UpdateAudioContext(AudioContext ctx, void *data, unsigned short numberElements) |
|
|
|
">static int BufferMixChannel(MixChannel_t* mixc, void *data, int numberElements) |
|
|
|
{ |
|
|
|
AudioContext_t *context = (AudioContext_t*)ctx; |
|
|
|
|
|
|
|
if(!context || (context->channels == 2 && numberElements % 2 != 0)) return 0; // when there are two channels there must be an even number of samples
|
|
|
if(!mixc || mixChannelsActive_g[mixc->mixChannel] != mixc) return 0; // only the currently active mix channel can receive data
|
|
|
|
|
|
|
if (!data || !numberElements) |
|
|
|
{ // pauses audio until data is given |
|
|
|
alSourcePause(context->alSource); |
|
|
|
context->playing = false; |
|
|
|
if(mixc->playing){ |
|
|
|
alSourcePause(mixc->alSource); |
|
|
|
mixc->playing = false; |
|
|
|
} |
|
|
|
return 0; |
|
|
|
} |
|
|
|
else |
|
|
|
else if(!mixc->playing) |
|
|
|
{ // restart audio otherwise |
|
|
|
ALint state; |
|
|
|
alGetSourcei(context->alSource, AL_SOURCE_STATE, &state); |
|
|
|
if (state != AL_PLAYING){ |
|
|
|
alSourcePlay(context->alSource); |
|
|
|
context->playing = true; |
|
|
|
} |
|
|
|
alSourcePlay(mixc->alSource); |
|
|
|
mixc->playing = true; |
|
|
|
} |
|
|
|
|
|
|
|
if (context && context->playing && mixChannelsActive_g[context->mixChannel] == context) |
|
|
|
|
|
|
|
ALuint buffer = 0; |
|
|
|
|
|
|
|
alSourceUnqueueBuffers(mixc->alSource, 1, &buffer); |
|
|
|
if(!buffer) return 0; |
|
|
|
if(mixc->floatingPoint) // process float buffers |
|
|
|
{ |
|
|
|
ALint processed = 0; |
|
|
|
ALuint buffer = 0; |
|
|
|
unsigned short numberProcessed = 0; |
|
|
|
unsigned short numberRemaining = numberElements; |
|
|
|
|
|
|
|
|
|
|
|
alGetSourcei(context->alSource, AL_BUFFERS_PROCESSED, &processed); // Get the number of already processed buffers (if any) |
|
|
|
if(!processed) return 0; // nothing to process, queue is still full |
|
|
|
|
|
|
|
|
|
|
|
while (processed > 0) |
|
|
|
{ |
|
|
|
if(context->floatingPoint) // process float buffers |
|
|
|
{ |
|
|
|
float *ptr = (float*)data; |
|
|
|
alSourceUnqueueBuffers(context->alSource, 1, &buffer); |
|
|
|
if(numberRemaining >= MUSIC_BUFFER_SIZE_FLOAT) |
|
|
|
{ |
|
|
|
alBufferData(buffer, context->alFormat, &ptr[numberProcessed], MUSIC_BUFFER_SIZE_FLOAT*sizeof(float), context->sampleRate); |
|
|
|
numberProcessed+=MUSIC_BUFFER_SIZE_FLOAT; |
|
|
|
numberRemaining-=MUSIC_BUFFER_SIZE_FLOAT; |
|
|
|
} |
|
|
|
else |
|
|
|
{ |
|
|
|
alBufferData(buffer, context->alFormat, &ptr[numberProcessed], numberRemaining*sizeof(float), context->sampleRate); |
|
|
|
numberProcessed+=numberRemaining; |
|
|
|
numberRemaining=0; |
|
|
|
} |
|
|
|
alSourceQueueBuffers(context->alSource, 1, &buffer); |
|
|
|
processed--; |
|
|
|
} |
|
|
|
else if(!context->floatingPoint) // process short buffers |
|
|
|
{ |
|
|
|
short *ptr = (short*)data; |
|
|
|
alSourceUnqueueBuffers(context->alSource, 1, &buffer); |
|
|
|
if(numberRemaining >= MUSIC_BUFFER_SIZE_SHORT) |
|
|
|
{ |
|
|
|
alBufferData(buffer, context->alFormat, &ptr[numberProcessed], MUSIC_BUFFER_SIZE_FLOAT*sizeof(short), context->sampleRate); |
|
|
|
numberProcessed+=MUSIC_BUFFER_SIZE_SHORT; |
|
|
|
numberRemaining-=MUSIC_BUFFER_SIZE_SHORT; |
|
|
|
} |
|
|
|
else |
|
|
|
{ |
|
|
|
alBufferData(buffer, context->alFormat, &ptr[numberProcessed], numberRemaining*sizeof(short), context->sampleRate); |
|
|
|
numberProcessed+=numberRemaining; |
|
|
|
numberRemaining=0; |
|
|
|
} |
|
|
|
alSourceQueueBuffers(context->alSource, 1, &buffer); |
|
|
|
processed--; |
|
|
|
} |
|
|
|
else |
|
|
|
break; |
|
|
|
} |
|
|
|
return numberProcessed; |
|
|
|
float *ptr = (float*)data; |
|
|
|
alBufferData(buffer, mixc->alFormat, ptr, numberElements*sizeof(float), mixc->sampleRate); |
|
|
|
} |
|
|
|
else // process short buffers |
|
|
|
{ |
|
|
|
short *ptr = (short*)data; |
|
|
|
alBufferData(buffer, mixc->alFormat, ptr, numberElements*sizeof(short), mixc->sampleRate); |
|
|
|
} |
|
|
|
return 0; |
|
|
|
alSourceQueueBuffers(mixc->alSource, 1, &buffer); |
|
|
|
|
|
|
|
return numberElements; |
|
|
|
} |
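
// Example (illustrative; mixc, samples and numSamples are placeholder names):
//     BufferMixChannel(mixc, NULL, 0);              // passing NULL pauses output on this mix channel
//     BufferMixChannel(mixc, samples, numSamples);  // the next real buffer resumes playback automatically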
|
|
|
|
|
|
|
// fill buffer with zeros, returns number processed |
|
|
|
static unsigned short FillAlBufferWithSilence(AudioContext_t *context, ALuint buffer) |
|
|
|
static int FillAlBufferWithSilence(MixChannel_t *mixc, ALuint buffer) |
|
|
|
{ |
|
|
|
if(context->floatingPoint){ |
|
|
|
if(mixc->floatingPoint){ |
|
|
|
float pcm[MUSIC_BUFFER_SIZE_FLOAT] = {0.f}; |
|
|
|
alBufferData(buffer, context->alFormat, pcm, MUSIC_BUFFER_SIZE_FLOAT*sizeof(float), context->sampleRate); |
|
|
|
alBufferData(buffer, mixc->alFormat, pcm, MUSIC_BUFFER_SIZE_FLOAT*sizeof(float), mixc->sampleRate); |
|
|
|
return MUSIC_BUFFER_SIZE_FLOAT; |
|
|
|
} |
|
|
|
else |
|
|
|
{ |
|
|
|
short pcm[MUSIC_BUFFER_SIZE_SHORT] = {0}; |
|
|
|
alBufferData(buffer, context->alFormat, pcm, MUSIC_BUFFER_SIZE_SHORT*sizeof(short), context->sampleRate); |
|
|
|
alBufferData(buffer, mixc->alFormat, pcm, MUSIC_BUFFER_SIZE_SHORT*sizeof(short), mixc->sampleRate); |
|
|
|
return MUSIC_BUFFER_SIZE_SHORT; |
|
|
|
} |
|
|
|
} |
|
|
@@ -417,6 +376,42 @@ static void ResampleByteToFloat(char *chars, float *floats, unsigned short len)
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
// used to output raw audio streams, returns negative numbers on error |
|
|
|
// if floatingPoint is false the data type is 16bit short, otherwise it is 32bit float
|
|
|
RawAudioContext InitRawAudioContext(int sampleRate, int channels, bool floatingPoint) |
|
|
|
{ |
|
|
|
int mixIndex; |
|
|
|
for(mixIndex = 0; mixIndex < MAX_MIX_CHANNELS; mixIndex++) // find empty mix channel slot |
|
|
|
{ |
|
|
|
if(mixChannelsActive_g[mixIndex] == NULL) break; |
|
|
|
else if(mixIndex == MAX_MIX_CHANNELS - 1) return -1; // error
|
|
|
} |
|
|
|
|
|
|
|
if(InitMixChannel(sampleRate, mixIndex, channels, floatingPoint)) |
|
|
|
return mixIndex; |
|
|
|
else |
|
|
|
return -2; // error |
|
|
|
} |
|
|
|
|
|
|
|
void CloseRawAudioContext(RawAudioContext ctx) |
|
|
|
{ |
|
|
|
if(mixChannelsActive_g[ctx]) |
|
|
|
CloseMixChannel(mixChannelsActive_g[ctx]); |
|
|
|
} |
|
|
|
|
|
|
|
int BufferRawAudioContext(RawAudioContext ctx, void *data, int numberElements) |
|
|
|
{ |
|
|
|
int numBuffered = 0; |
|
|
|
if(ctx >= 0) |
|
|
|
{ |
|
|
|
MixChannel_t* mixc = mixChannelsActive_g[ctx]; |
|
|
|
numBuffered = BufferMixChannel(mixc, data, numberElements); |
|
|
|
} |
|
|
|
return numBuffered; |
|
|
|
} |
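
// Example usage (illustrative sketch, assuming a 16bit mono stream; the sampleData buffer and
// how often it is refilled are up to the caller):
//     RawAudioContext ctx = InitRawAudioContext(22050, 1, false);
//     if (ctx >= 0)
//     {
//         short sampleData[1024];
//         // ...fill sampleData with generated audio every frame...
//         BufferRawAudioContext(ctx, sampleData, 1024);
//         // ...when finished...
//         CloseRawAudioContext(ctx);
//     }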
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//---------------------------------------------------------------------------------- |
|
|
@@ -767,205 +762,215 @@ void SetSoundPitch(Sound sound, float pitch)
|
|
|
//---------------------------------------------------------------------------------- |
|
|
|
|
|
|
|
// Start music playing (open stream) |
|
|
|
void PlayMusicStream(char *fileName) |
|
|
|
// returns 0 on success |
|
|
|
int PlayMusicStream(int musicIndex, char *fileName) |
|
|
|
{ |
|
|
|
int mixIndex; |
|
|
|
|
|
|
|
if(currentMusic[musicIndex].stream || currentMusic[musicIndex].chipctx) return 1; // error |
|
|
|
|
|
|
|
for(mixIndex = 0; mixIndex < MAX_MIX_CHANNELS; mixIndex++) // find empty mix channel slot |
|
|
|
{ |
|
|
|
if(mixChannelsActive_g[mixIndex] == NULL) break; |
|
|
|
else if(mixIndex == MAX_MIX_CHANNELS - 1) return 2; // error
|
|
|
} |
|
|
|
|
|
|
|
if (strcmp(GetExtension(fileName),"ogg") == 0) |
|
|
|
{ |
|
|
|
// Stop current music, clean buffers, unload current stream |
|
|
|
StopMusicStream(); |
|
|
|
|
|
|
|
// Open audio stream |
|
|
|
currentMusic.stream = stb_vorbis_open_filename(fileName, NULL, NULL); |
|
|
|
currentMusic[musicIndex].stream = stb_vorbis_open_filename(fileName, NULL, NULL); |
|
|
|
|
|
|
|
if (currentMusic.stream == NULL) |
|
|
|
if (currentMusic[musicIndex].stream == NULL) |
|
|
|
{ |
|
|
|
TraceLog(WARNING, "[%s] OGG audio file could not be opened", fileName); |
|
|
|
return 3; // error |
|
|
|
} |
|
|
|
else |
|
|
|
{ |
|
|
|
// Get file info |
|
|
|
stb_vorbis_info info = stb_vorbis_get_info(currentMusic.stream); |
|
|
|
|
|
|
|
currentMusic.channels = info.channels; |
|
|
|
currentMusic.sampleRate = info.sample_rate; |
|
|
|
stb_vorbis_info info = stb_vorbis_get_info(currentMusic[musicIndex].stream); |
|
|
|
|
|
|
|
TraceLog(INFO, "[%s] Ogg sample rate: %i", fileName, info.sample_rate); |
|
|
|
TraceLog(INFO, "[%s] Ogg channels: %i", fileName, info.channels); |
|
|
|
TraceLog(DEBUG, "[%s] Temp memory required: %i", fileName, info.temp_memory_required); |
|
|
|
|
|
|
|
if (info.channels == 2) currentMusic.format = AL_FORMAT_STEREO16; |
|
|
|
else currentMusic.format = AL_FORMAT_MONO16; |
|
|
|
|
|
|
|
currentMusic.loop = true; // We loop by default |
|
|
|
musicEnabled = true; |
|
|
|
|
|
|
|
// Create an audio source |
|
|
|
alGenSources(1, ¤tMusic.source); // Generate pointer to audio source |
|
|
|
|
|
|
|
alSourcef(currentMusic.source, AL_PITCH, 1); |
|
|
|
alSourcef(currentMusic.source, AL_GAIN, 1); |
|
|
|
alSource3f(currentMusic.source, AL_POSITION, 0, 0, 0); |
|
|
|
alSource3f(currentMusic.source, AL_VELOCITY, 0, 0, 0); |
|
|
|
//alSourcei(currentMusic.source, AL_LOOPING, AL_TRUE); // ERROR: Buffers do not queue! |
|
|
|
|
|
|
|
// Generate two OpenAL buffers |
|
|
|
alGenBuffers(2, currentMusic.buffers); |
|
|
|
|
|
|
|
// Fill buffers with music... |
|
|
|
BufferMusicStream(currentMusic.buffers[0]); |
|
|
|
BufferMusicStream(currentMusic.buffers[1]); |
|
|
|
|
|
|
|
// Queue buffers and start playing |
|
|
|
alSourceQueueBuffers(currentMusic.source, 2, currentMusic.buffers); |
|
|
|
alSourcePlay(currentMusic.source); |
|
|
|
|
|
|
|
// NOTE: Regularly, we must check if a buffer has been processed and refill it: UpdateMusicStream() |
|
|
|
currentMusic[musicIndex].loop = true; // We loop by default |
|
|
|
musicEnabled_g = true; |
|
|
|
|
|
|
|
|
|
|
|
currentMusic.totalSamplesLeft = stb_vorbis_stream_length_in_samples(currentMusic.stream) * currentMusic.channels; |
|
|
|
currentMusic.totalLengthSeconds = stb_vorbis_stream_length_in_seconds(currentMusic.stream); |
|
|
|
currentMusic[musicIndex].totalSamplesLeft = stb_vorbis_stream_length_in_samples(currentMusic[musicIndex].stream) * info.channels; |
|
|
|
currentMusic[musicIndex].totalLengthSeconds = stb_vorbis_stream_length_in_seconds(currentMusic[musicIndex].stream); |
|
|
|
|
|
|
|
if (info.channels == 2){ |
|
|
|
currentMusic[musicIndex].mixc = InitMixChannel(info.sample_rate, mixIndex, 2, false); |
|
|
|
currentMusic[musicIndex].mixc->playing = true; |
|
|
|
} |
|
|
|
else{ |
|
|
|
currentMusic[musicIndex].mixc = InitMixChannel(info.sample_rate, mixIndex, 1, false); |
|
|
|
currentMusic[musicIndex].mixc->playing = true; |
|
|
|
} |
|
|
|
if(!currentMusic[musicIndex].mixc) return 4; // error |
|
|
|
} |
|
|
|
} |
|
|
|
else if (strcmp(GetExtension(fileName),"xm") == 0) |
|
|
|
{ |
|
|
|
// Stop current music, clean buffers, unload current stream |
|
|
|
StopMusicStream(); |
|
|
|
|
|
|
|
// new song settings for xm chiptune |
|
|
|
currentMusic.chipTune = true; |
|
|
|
currentMusic.channels = 2; |
|
|
|
currentMusic.sampleRate = 48000; |
|
|
|
currentMusic.loop = true; |
|
|
|
|
|
|
|
// only stereo is supported for xm |
|
|
|
if(!jar_xm_create_context_from_file(¤tMusic.chipctx, currentMusic.sampleRate, fileName)) |
|
|
|
if(!jar_xm_create_context_from_file(¤tMusic[musicIndex].chipctx, 48000, fileName)) |
|
|
|
{ |
|
|
|
currentMusic.format = AL_FORMAT_STEREO16; |
|
|
|
jar_xm_set_max_loop_count(currentMusic.chipctx, 0); // infinite number of loops |
|
|
|
currentMusic.totalSamplesLeft = jar_xm_get_remaining_samples(currentMusic.chipctx); |
|
|
|
currentMusic.totalLengthSeconds = ((float)currentMusic.totalSamplesLeft) / ((float)currentMusic.sampleRate); |
|
|
|
musicEnabled = true; |
|
|
|
currentMusic[musicIndex].chipTune = true; |
|
|
|
currentMusic[musicIndex].loop = true; |
|
|
|
jar_xm_set_max_loop_count(currentMusic[musicIndex].chipctx, 0); // infinite number of loops |
|
|
|
currentMusic[musicIndex].totalSamplesLeft = jar_xm_get_remaining_samples(currentMusic[musicIndex].chipctx); |
|
|
|
currentMusic[musicIndex].totalLengthSeconds = ((float)currentMusic[musicIndex].totalSamplesLeft) / 48000.f; |
|
|
|
musicEnabled_g = true; |
|
|
|
|
|
|
|
TraceLog(INFO, "[%s] XM number of samples: %i", fileName, currentMusic.totalSamplesLeft); |
|
|
|
TraceLog(INFO, "[%s] XM track length: %11.6f sec", fileName, currentMusic.totalLengthSeconds); |
|
|
|
TraceLog(INFO, "[%s] XM number of samples: %i", fileName, currentMusic[musicIndex].totalSamplesLeft); |
|
|
|
TraceLog(INFO, "[%s] XM track length: %11.6f sec", fileName, currentMusic[musicIndex].totalLengthSeconds); |
|
|
|
|
|
|
|
// Set up OpenAL |
|
|
|
alGenSources(1, ¤tMusic.source); |
|
|
|
alSourcef(currentMusic.source, AL_PITCH, 1); |
|
|
|
alSourcef(currentMusic.source, AL_GAIN, 1); |
|
|
|
alSource3f(currentMusic.source, AL_POSITION, 0, 0, 0); |
|
|
|
alSource3f(currentMusic.source, AL_VELOCITY, 0, 0, 0); |
|
|
|
alGenBuffers(2, currentMusic.buffers); |
|
|
|
BufferMusicStream(currentMusic.buffers[0]); |
|
|
|
BufferMusicStream(currentMusic.buffers[1]); |
|
|
|
alSourceQueueBuffers(currentMusic.source, 2, currentMusic.buffers); |
|
|
|
alSourcePlay(currentMusic.source); |
|
|
|
|
|
|
|
// NOTE: Regularly, we must check if a buffer has been processed and refill it: UpdateMusicStream() |
|
|
|
currentMusic[musicIndex].mixc = InitMixChannel(48000, mixIndex, 2, false); |
|
|
|
if(!currentMusic[musicIndex].mixc) return 5; // error |
|
|
|
currentMusic[musicIndex].mixc->playing = true; |
|
|
|
} |
|
|
|
else TraceLog(WARNING, "[%s] XM file could not be opened", fileName); |
|
|
|
else |
|
|
|
{ |
|
|
|
TraceLog(WARNING, "[%s] XM file could not be opened", fileName); |
|
|
|
return 6; // error |
|
|
|
} |
|
|
|
} |
|
|
|
else |
|
|
|
{ |
|
|
|
TraceLog(WARNING, "[%s] Music extension not recognized, it can't be loaded", fileName); |
|
|
|
return 7; // error |
|
|
|
} |
|
|
|
else TraceLog(WARNING, "[%s] Music extension not recognized, it can't be loaded", fileName); |
|
|
|
return 0; // normal return |
|
|
|
} |
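
// Example usage (illustrative sketch; the file path is a placeholder):
//     if (PlayMusicStream(0, "resources/audio.ogg") == 0)    // 0 means success, non-zero is an error code
//     {
//         // ...once per frame...
//         UpdateMusicStream(0);                              // keep the stream buffers filled
//     }
//     StopMusicStream(0);                                    // close the stream when done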
|
|
|
|
|
|
|
// Stop music playing (close stream) |
|
|
|
void StopMusicStream(void) |
|
|
|
// Stop music playing k">for individual music index of currentMusic array (close stream) |
|
|
|
void StopMusicStream(int index) |
|
|
|
{ |
|
|
|
if (musicEnabled) |
|
|
|
if (index < MAX_MUSIC_STREAMS && currentMusic[index].mixc) |
|
|
|
{ |
|
|
|
alSourceStop(currentMusic.source); |
|
|
|
EmptyMusicStream(); // Empty music buffers |
|
|
|
alDeleteSources(1, ¤tMusic.source); |
|
|
|
alDeleteBuffers(2, currentMusic.buffers); |
|
|
|
CloseMixChannel(currentMusic[index].mixc); |
|
|
|
|
|
|
|
if (currentMusic.chipTune) |
|
|
|
if (currentMusic[index].chipTune) |
|
|
|
{ |
|
|
|
jar_xm_free_context(currentMusic.chipctx); |
|
|
|
jar_xm_free_context(currentMusic[index].chipctx); |
|
|
|
} |
|
|
|
else |
|
|
|
{ |
|
|
|
stb_vorbis_close(currentMusic.stream); |
|
|
|
stb_vorbis_close(currentMusic[index].stream); |
|
|
|
} |
|
|
|
|
|
|
|
if(!getMusicStreamCount()) musicEnabled_g = false; |
|
|
|
if(currentMusic[index].stream || currentMusic[index].chipctx) |
|
|
|
{ |
|
|
|
currentMusic[index].stream = NULL; |
|
|
|
currentMusic[index].chipctx = NULL; |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
musicEnabled = false; |
|
|
|
// Get the number of music channels active at this time; this does not mean they are playing
|
|
|
int getMusicStreamCount(void) |
|
|
|
{ |
|
|
|
int musicCount = 0; |
|
|
|
for(int musicIndex = 0; musicIndex < MAX_MUSIC_STREAMS; musicIndex++) // count active music slots
|
|
|
if(currentMusic[musicIndex].stream != NULL || currentMusic[musicIndex].chipTune) musicCount++; |
|
|
|
|
|
|
|
return musicCount; |
|
|
|
} |
|
|
|
|
|
|
|
// Pause music playing |
|
|
|
void PauseMusicStream(void) |
|
|
|
void PauseMusicStream(int index) |
|
|
|
{ |
|
|
|
// Pause music stream if music available! |
|
|
|
if (musicEnabled) |
|
|
|
if (index < MAX_MUSIC_STREAMS && currentMusic[index].mixc && musicEnabled_g) |
|
|
|
{ |
|
|
|
TraceLog(INFO, "Pausing music stream"); |
|
|
|
alSourcePause(currentMusic.source); |
|
|
|
musicEnabled = false; |
|
|
|
alSourcePause(currentMusic[index].mixc->alSource);
|
|
|
currentMusic[index].mixc->playing = false; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
// Resume music playing |
|
|
|
void ResumeMusicStream(void) |
|
|
|
void ResumeMusicStream(int index) |
|
|
|
{ |
|
|
|
// Resume music playing... if music available! |
|
|
|
ALenum state; |
|
|
|
alGetSourcei(currentMusic.source, AL_SOURCE_STATE, &state); |
|
|
|
|
|
|
|
if (state == AL_PAUSED) |
|
|
|
{ |
|
|
|
TraceLog(INFO, "Resuming music stream"); |
|
|
|
alSourcePlay(currentMusic.source); |
|
|
|
musicEnabled = true; |
|
|
|
if(index < MAX_MUSIC_STREAMS && currentMusic[index].mixc){ |
|
|
|
alGetSourcei(currentMusic[index].mixc->alSource, AL_SOURCE_STATE, &state); |
|
|
|
if (state == AL_PAUSED) |
|
|
|
{ |
|
|
|
TraceLog(INFO, "Resuming music stream"); |
|
|
|
alSourcePlay(currentMusic[index].mixc->alSource); |
|
|
|
currentMusic[index].mixc->playing = true; |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
// Check if music is playing |
|
|
|
bool IsMusicPlaying(void) |
|
|
|
// Check if any music is playing |
|
|
|
bool IsMusicPlaying(int index) |
|
|
|
{ |
|
|
|
bool playing = false; |
|
|
|
ALint state; |
|
|
|
|
|
|
|
alGetSourcei(currentMusic.source, AL_SOURCE_STATE, &state); |
|
|
|
if (state == AL_PLAYING) playing = true; |
|
|
|
|
|
|
|
if(index < MAX_MUSIC_STREAMS && currentMusic[index].mixc){ |
|
|
|
alGetSourcei(currentMusic[index].mixc->alSource, AL_SOURCE_STATE, &state); |
|
|
|
if (state == AL_PLAYING) playing = true; |
|
|
|
} |
|
|
|
|
|
|
|
return playing; |
|
|
|
} |
|
|
|
|
|
|
|
// Set volume for music |
|
|
|
void SetMusicVolume(float volume) |
|
|
|
void SetMusicVolume(int index, float volume) |
|
|
|
{ |
|
|
|
alSourcef(currentMusic.source, AL_GAIN, volume); |
|
|
|
if(index < MAX_MUSIC_STREAMS && currentMusic[index].mixc){ |
|
|
|
alSourcef(currentMusic[index].mixc->alSource, AL_GAIN, volume); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
void SetMusicPitch(int index, float pitch) |
|
|
|
{ |
|
|
|
if(index < MAX_MUSIC_STREAMS && currentMusic[index].mixc){ |
|
|
|
alSourcef(currentMusic[index].mixc->alSource, AL_PITCH, pitch); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
// Get current music time length (in seconds) |
|
|
|
float GetMusicTimeLength(void) |
|
|
|
float GetMusicTimeLength(int index) |
|
|
|
{ |
|
|
|
float totalSeconds; |
|
|
|
if (currentMusic.chipTune) |
|
|
|
if (currentMusic[index].chipTune) |
|
|
|
{ |
|
|
|
totalSeconds = currentMusic.totalLengthSeconds; |
|
|
|
totalSeconds = currentMusic[index].totalLengthSeconds; |
|
|
|
} |
|
|
|
else |
|
|
|
{ |
|
|
|
totalSeconds = stb_vorbis_stream_length_in_seconds(currentMusic.stream); |
|
|
|
totalSeconds = stb_vorbis_stream_length_in_seconds(currentMusic[index].stream); |
|
|
|
} |
|
|
|
|
|
|
|
return totalSeconds; |
|
|
|
} |
|
|
|
|
|
|
|
// Get current music time played (in seconds) |
|
|
|
float GetMusicTimePlayed(void) |
|
|
|
float GetMusicTimePlayed(int index) |
|
|
|
{ |
|
|
|
float secondsPlayed; |
|
|
|
if (currentMusic.chipTune) |
|
|
|
if(index < MAX_MUSIC_STREAMS && currentMusic[index].mixc) |
|
|
|
{ |
|
|
|
uint64_t samples; |
|
|
|
jar_xm_get_position(currentMusic.chipctx, NULL, NULL, NULL, &samples); |
|
|
|
secondsPlayed = (float)samples / (currentMusic.sampleRate * currentMusic.channels); // Not sure if this is the correct value |
|
|
|
} |
|
|
|
else |
|
|
|
{ |
|
|
|
int totalSamples = stb_vorbis_stream_length_in_samples(currentMusic.stream) * currentMusic.channels; |
|
|
|
int samplesPlayed = totalSamples - currentMusic.totalSamplesLeft; |
|
|
|
secondsPlayed = (float)samplesPlayed / (currentMusic.sampleRate * currentMusic.channels); |
|
|
|
if (currentMusic[index].chipTune) |
|
|
|
{ |
|
|
|
uint64_t samples; |
|
|
|
jar_xm_get_position(currentMusic[index].chipctx, NULL, NULL, NULL, &samples); |
|
|
|
secondsPlayed = (float)samples / (48000 * currentMusic[index].mixc->channels); // Not sure if this is the correct value |
|
|
|
} |
|
|
|
else |
|
|
|
{ |
|
|
|
int totalSamples = stb_vorbis_stream_length_in_samples(currentMusic[index].stream) * currentMusic[index].mixc->channels; |
|
|
|
int samplesPlayed = totalSamples - currentMusic[index].totalSamplesLeft; |
|
|
|
secondsPlayed = (float)samplesPlayed / (currentMusic[index].mixc->sampleRate * currentMusic[index].mixc->channels); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
@@ -977,116 +982,118 @@ float GetMusicTimePlayed(void)
|
|
|
//---------------------------------------------------------------------------------- |
|
|
|
|
|
|
|
// Fill music buffers with new data from music stream |
|
|
|
static bool BufferMusicStream(ALuint buffer)
|
|
|
static bool BufferMusicStream(int index, int numBuffers)
|
|
|
{ |
|
|
|
short pcm[MUSIC_BUFFER_SIZE_SHORT]; |
|
|
|
float pcmf[MUSIC_BUFFER_SIZE_FLOAT]; |
|
|
|
|
|
|
|
int size = 0; // Total size of data streamed (in bytes)
|
|
|
int streamedBytes = 0; // samples of data obtained, channels are not included in calculation |
|
|
|
int size = 0; // Total size of data streamed in L+R samples for xm floats, individual L or R for ogg shorts
|
|
|
bool active = true; // We can get more data from stream (not finished) |
|
|
|
|
|
|
|
if (musicEnabled) |
|
|
|
|
|
|
|
if (currentMusic[index].chipTune) // There is no end of stream for xm files; once the end is reached zeros are generated for non-looped chiptunes.
|
|
|
{ |
|
|
|
if (currentMusic.chipTune) // There is no end of stream for xm files; once the end is reached zeros are generated for non-looped chiptunes.
|
|
|
{ |
|
|
|
int readlen = MUSIC_BUFFER_SIZE_SHORT / 2; |
|
|
|
jar_xm_generate_samples_16bit(currentMusic.chipctx, pcm, readlen); // reads 2*readlen shorts and moves them to buffer+size memory location |
|
|
|
size += readlen * currentMusic.channels; // Not sure if this is what it needs |
|
|
|
} |
|
|
|
if(currentMusic[index].totalSamplesLeft >= MUSIC_BUFFER_SIZE_SHORT) |
|
|
|
size = MUSIC_BUFFER_SIZE_SHORT / 2; |
|
|
|
else |
|
|
|
size = currentMusic[index].totalSamplesLeft / 2; |
|
|
|
|
|
|
|
for(int x=0; x<numBuffers; x++) |
|
|
|
{ |
|
|
|
while (size < MUSIC_BUFFER_SIZE_SHORT) |
|
|
|
jar_xm_generate_samples_16bit(currentMusic[index].chipctx, pcm, size); // generates size stereo frames (2*size shorts) into the pcm buffer
|
|
|
BufferMixChannel(currentMusic[index].mixc, pcm, size * 2); |
|
|
|
currentMusic[index].totalSamplesLeft -= size * 2; |
|
|
|
if(currentMusic[index].totalSamplesLeft <= 0) |
|
|
|
{ |
|
|
|
streamedBytes = stb_vorbis_get_samples_short_interleaved(currentMusic.stream, currentMusic.channels, pcm + size, MUSIC_BUFFER_SIZE_SHORT - size); |
|
|
|
if (streamedBytes > 0) size += (streamedBytes*currentMusic.channels); |
|
|
|
else break; |
|
|
|
active = false; |
|
|
|
break; |
|
|
|
} |
|
|
|
} |
|
|
|
TraceLog(DEBUG, "Streaming music data to buffer. Bytes streamed: %i", size); |
|
|
|
} |
|
|
|
|
|
|
|
if (size > 0) |
|
|
|
{ |
|
|
|
alBufferData(buffer, currentMusic.format, pcm, size*sizeof(short), currentMusic.sampleRate); |
|
|
|
currentMusic.totalSamplesLeft -= size; |
|
|
|
|
|
|
|
if(currentMusic.totalSamplesLeft <= 0) active = false; // end if no more samples left |
|
|
|
} |
|
|
|
else |
|
|
|
{ |
|
|
|
active = false; |
|
|
|
TraceLog(WARNING, "No more data obtained from stream"); |
|
|
|
if(currentMusic[index].totalSamplesLeft >= MUSIC_BUFFER_SIZE_SHORT) |
|
|
|
size = MUSIC_BUFFER_SIZE_SHORT; |
|
|
|
else |
|
|
|
size = currentMusic[index].totalSamplesLeft; |
|
|
|
|
|
|
|
for(int x=0; x<numBuffers; x++) |
|
|
|
{ |
|
|
|
int streamedBytes = stb_vorbis_get_samples_short_interleaved(currentMusic[index].stream, currentMusic[index].mixc->channels, pcm, size); |
|
|
|
BufferMixChannel(currentMusic[index].mixc, pcm, streamedBytes * currentMusic[index].mixc->channels); |
|
|
|
currentMusic[index].totalSamplesLeft -= streamedBytes * currentMusic[index].mixc->channels; |
|
|
|
if(currentMusic[index].totalSamplesLeft <= 0) |
|
|
|
{ |
|
|
|
active = false; |
|
|
|
break; |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
return active; |
|
|
|
} |
|
|
|
|
|
|
|
// Empty music buffers |
|
|
|
static void EmptyMusicStream(void) |
|
|
|
static void EmptyMusicStream(int index) |
|
|
|
{ |
|
|
|
ALuint buffer = 0; |
|
|
|
int queued = 0; |
|
|
|
|
|
|
|
alGetSourcei(currentMusic.source, AL_BUFFERS_QUEUED, &queued); |
|
|
|
alGetSourcei(currentMusic[index].mixc->alSource, AL_BUFFERS_QUEUED, &queued);
|
|
|
|
|
|
|
while (queued > 0) |
|
|
|
{ |
|
|
|
alSourceUnqueueBuffers(currentMusic.source, 1, &buffer); |
|
|
|
alSourceUnqueueBuffers(currentMusic[index].mixc->alSource, 1, &buffer);
|
|
|
|
|
|
|
queued--; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
// Update (re-fill) music buffers if data already processed |
|
|
|
t">void UpdateMusicStream(void) |
|
|
|
// Determine if a music stream is ready to be written to
|
|
|
">static int IsMusicStreamReadyForBuffering(int index) |
|
|
|
{ |
|
|
|
ALuint buffer = 0; |
|
|
|
ALint processed = 0; |
|
|
|
bool active = true; |
|
|
|
alGetSourcei(currentMusic[index].mixc->alSource, AL_BUFFERS_PROCESSED, &processed); |
|
|
|
return processed; |
|
|
|
} |
|
|
|
|
|
|
|
if (musicEnabled) |
|
|
|
// Update (re-fill) music buffers if data already processed |
|
|
|
void UpdateMusicStream(int index) |
|
|
|
{ |
|
|
|
ALenum state; |
|
|
|
bool active = true; |
|
|
|
int numBuffers = IsMusicStreamReadyForBuffering(index); |
|
|
|
|
|
|
|
if (index < MAX_MUSIC_STREAMS && currentMusic[index].mixc && currentMusic[index].mixc->playing && musicEnabled_g && numBuffers)
|
|
|
{ |
|
|
|
// Get the number of already processed buffers (if any) |
|
|
|
alGetSourcei(currentMusic.source, AL_BUFFERS_PROCESSED, &processed); |
|
|
|
|
|
|
|
while (processed > 0) |
|
|
|
active = BufferMusicStream(index, numBuffers); |
|
|
|
|
|
|
|
if (!active && currentMusic[index].loop) |
|
|
|
{ |
|
|
|
// Recover processed buffer for refill |
|
|
|
alSourceUnqueueBuffers(currentMusic.source, 1, &buffer); |
|
|
|
|
|
|
|
// Refill buffer |
|
|
|
active = BufferMusicStream(buffer); |
|
|
|
|
|
|
|
// If no more data to stream, restart music (if loop) |
|
|
|
if ((!active) && (currentMusic.loop)) |
|
|
|
if (currentMusic[index].chipTune) |
|
|
|
{ |
|
|
|
if(currentMusic.chipTune) |
|
|
|
{ |
|
|
|
currentMusic.totalSamplesLeft = currentMusic.totalLengthSeconds * currentMusic.sampleRate; |
|
|
|
} |
|
|
|
else |
|
|
|
{ |
|
|
|
stb_vorbis_seek_start(currentMusic.stream); |
|
|
|
currentMusic.totalSamplesLeft = stb_vorbis_stream_length_in_samples(currentMusic.stream)*currentMusic.channels; |
|
|
|
} |
|
|
|
active = BufferMusicStream(buffer); |
|
|
|
currentMusic[index].totalSamplesLeft = currentMusic[index].totalLengthSeconds * 48000; |
|
|
|
} |
|
|
|
|
|
|
|
// Add refilled buffer to queue again... don't let the music stop! |
|
|
|
alSourceQueueBuffers(currentMusic.source, 1, &buffer); |
|
|
|
|
|
|
|
if (alGetError() != AL_NO_ERROR) TraceLog(WARNING, "Error buffering data..."); |
|
|
|
|
|
|
|
processed--; |
|
|
|
else |
|
|
|
{ |
|
|
|
stb_vorbis_seek_start(currentMusic[index].stream); |
|
|
|
currentMusic[index].totalSamplesLeft = stb_vorbis_stream_length_in_samples(currentMusic[index].stream) * currentMusic[index].mixc->channels; |
|
|
|
} |
|
|
|
active = true; |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
ALenum state; |
|
|
|
alGetSourcei(currentMusic.source, AL_SOURCE_STATE, &state); |
|
|
|
if (alGetError() != AL_NO_ERROR) TraceLog(WARNING, "Error buffering data..."); |
|
|
|
|
|
|
|
alGetSourcei(currentMusic[index].mixc->alSource, AL_SOURCE_STATE, &state); |
|
|
|
|
|
|
|
if (p">(state != AL_PLAYING) && active) alSourcePlay(currentMusic.source); |
|
|
|
if (state != AL_PLAYING && active) alSourcePlay(currentMusic[index].mixc->alSource);
|
|
|
|
|
|
|
if (!active) StopMusicStream(); |
|
|
|
if (!active) StopMusicStream(index); |
|
|
|
|
|
|
|
} |
|
|
|
else |
|
|
|
return; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
// Load WAV file into Wave structure |
|
|
|