|
|
@@ -230,6 +230,7 @@ typedef struct tagBITMAPINFOHEADER {
|
|
|
|
|
|
|
#define QOA_IMPLEMENTATION |
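// NOTE: qoa.h is an stb-style single-file header library; QOA_IMPLEMENTATION must be defined in exactly one translation unit before including it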
|
|
|
#include "external/qoa.h" // QOA loading and saving functions |
|
|
|
#include "external/qoaplay.c" // QOA stream playing helper functions |
|
|
|
#endif |
|
|
|
|
|
|
|
#if defined(SUPPORT_FILEFORMAT_FLAC) |
|
|
@@ -287,21 +288,6 @@ typedef struct tagBITMAPINFOHEADER {
|
|
|
//---------------------------------------------------------------------------------- |
|
|
|
// Types and Structures Definition |
|
|
|
//---------------------------------------------------------------------------------- |
|
|
|
|
|
|
|
// Music context type |
|
|
|
// NOTE: Depends on data structure provided by the library |
|
|
|
// in charge of reading the different file types |
|
|
|
typedef enum { |
|
|
|
MUSIC_AUDIO_NONE = 0, // No audio context loaded |
|
|
|
MUSIC_AUDIO_WAV, // WAV audio context |
|
|
|
MUSIC_AUDIO_OGG, // OGG audio context |
|
|
|
MUSIC_AUDIO_FLAC, // FLAC audio context |
|
|
|
MUSIC_AUDIO_MP3, // MP3 audio context |
|
|
|
MUSIC_AUDIO_QOA, // QOA audio context |
|
|
|
MUSIC_MODULE_XM, // XM module audio context |
|
|
|
MUSIC_MODULE_MOD // MOD module audio context |
|
|
|
} MusicContextType; |
|
|
|
|
|
|
|
#if defined(RAUDIO_STANDALONE) |
|
|
|
// Trace log level |
|
|
|
// NOTE: Organized by priority level |
|
|
@@ -317,6 +303,20 @@ typedef enum {
|
|
|
} TraceLogLevel; |
|
|
|
#endif |
|
|
|
|
|
|
|
// Music context type |
|
|
|
// NOTE: Depends on data structure provided by the library |
|
|
|
// in charge of reading the different file types |
|
|
|
typedef enum { |
|
|
|
MUSIC_AUDIO_NONE = 0, // No audio context loaded |
|
|
|
MUSIC_AUDIO_WAV, // WAV audio context |
|
|
|
MUSIC_AUDIO_OGG, // OGG audio context |
|
|
|
MUSIC_AUDIO_FLAC, // FLAC audio context |
|
|
|
MUSIC_AUDIO_MP3, // MP3 audio context |
|
|
|
MUSIC_AUDIO_QOA, // QOA audio context |
|
|
|
MUSIC_MODULE_XM, // XM module audio context |
|
|
|
MUSIC_MODULE_MOD // MOD module audio context |
|
|
|
} MusicContextType; |
|
|
|
|
|
|
|
// NOTE: Different logic is used when feeding data to the playback device |
|
|
|
// depending on whether data is streamed (Music vs Sound) |
|
|
|
typedef enum { |
|
|
@@ -1322,7 +1322,7 @@ void UnloadWaveSamples(float *samples)
|
|
|
} |
|
|
|
|
|
|
|
//---------------------------------------------------------------------------------- |
|
|
|
// Module Functions Definition - Music loading and stream playing (.OGG) |
|
|
|
// Module Functions Definition - Music loading and stream playing |
|
|
|
//---------------------------------------------------------------------------------- |
|
|
|
|
|
|
|
// Load music stream from file |
|
|
@@ -1395,21 +1395,16 @@ Music LoadMusicStream(const char *fileName)
|
|
|
#if defined(SUPPORT_FILEFORMAT_QOA) |
|
|
|
else if (IsFileExtension(fileName, ".qoa")) |
|
|
|
{ |
|
|
|
qoa_desc *ctxQoa = RL_CALLOC(1, sizeof(qoa_desc)); |
|
|
|
|
|
|
|
// TODO: QOA stream support: Init context from file |
|
|
|
int result = 0; |
|
|
|
|
|
|
|
qoaplay_desc *ctxQoa = qoaplay_open(fileName); |
|
|
|
music.ctxType = MUSIC_AUDIO_QOA; |
|
|
|
music.ctxData = ctxQoa; |
|
|
|
|
|
|
|
if (result > 0) |
|
|
|
if (ctxQoa->file != NULL) |
|
|
|
{ |
|
|
|
music.stream = LoadAudioStream(ctxQoa->samplerate, 16, ctxQoa->channels); |
|
|
|
|
|
|
|
// TODO: Read next frame(s) from QOA stream |
|
|
|
//music.frameCount = qoa_decode_frame(const unsigned char *bytes, unsigned int size, ctxQoa, short *sample_data, unsigned int *frame_len); |
|
|
|
|
|
|
|
// NOTE: We are loading samples as 32bit float normalized data, so
|
|
|
// we configure the output audio stream to also use float 32bit |
|
|
|
music.stream = LoadAudioStream(ctxQoa->info.samplerate, 32, ctxQoa->info.channels); |
|
|
|
music.frameCount = ctxQoa->info.samples; |
|
|
|
music.looping = true; // Looping enabled by default |
|
|
|
musicLoaded = true; |
|
|
|
} |
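// Illustrative sketch (not part of this patch) of the qoaplay helper pattern wired in above,
// assuming the qoaplay_open()/qoaplay_decode()/qoaplay_close() API from external/qoaplay.c:
//
//   qoaplay_desc *qoa = qoaplay_open("music.qoa");
//   if ((qoa != NULL) && (qoa->file != NULL))
//   {
//       float *frames = (float *)RL_MALLOC(4096*qoa->info.channels*sizeof(float));
//       unsigned int framesRead = qoaplay_decode(qoa, frames, 4096);   // Decode up to 4096 frames as normalized 32bit floats
//       RL_FREE(frames);
//       qoaplay_close(qoa);
//   }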
|
|
@@ -1594,21 +1589,16 @@ Music LoadMusicStreamFromMemory(const char *fileType, const unsigned char *data,
|
|
|
#if defined(SUPPORT_FILEFORMAT_QOA) |
|
|
|
else if (strcmp(fileType, ".qoa") == 0) |
|
|
|
{ |
|
|
|
qoa_desc *ctxQoa = RL_CALLOC(1, sizeof(qoa_desc)); |
|
|
|
|
|
|
|
// TODO: Init QOA context data |
|
|
|
int result = 0; |
|
|
|
|
|
|
|
qoaplay_desc *ctxQoa = qoaplay_open_memory(data, dataSize); |
|
|
|
music.ctxType = MUSIC_AUDIO_QOA; |
|
|
|
music.ctxData = ctxQoa; |
|
|
|
|
|
|
|
if (result > 0)
|
|
|
if ((ctxQoa->file_data != NULL) && (ctxQoa->file_data_size != 0)) |
|
|
|
{ |
|
|
|
music.stream = LoadAudioStream(ctxQoa->samplerate, 16, ctxQoa->channels); |
|
|
|
|
|
|
|
// TODO: Read next frame(s) from QOA stream |
|
|
|
//music.frameCount = qoa_decode_frame(const unsigned char *bytes, unsigned int size, ctxQoa, short *sample_data, unsigned int *frame_len); |
|
|
|
|
|
|
|
// NOTE: We are loading samples as 32bit float normalized data, so
|
|
|
// we configure the output audio stream to also use float 32bit |
|
|
|
music.stream = LoadAudioStream(ctxQoa->info.samplerate, 32, ctxQoa->info.channels); |
|
|
|
music.frameCount = ctxQoa->info.samples; |
|
|
|
music.looping = true; // Looping enabled by default |
|
|
|
musicLoaded = true; |
|
|
|
} |
|
|
@@ -1697,27 +1687,27 @@ Music LoadMusicStreamFromMemory(const char *fileType, const unsigned char *data,
|
|
|
if (!musicLoaded) |
|
|
|
{ |
|
|
|
if (false) { } |
|
|
|
#if defined(SUPPORT_FILEFORMAT_WAV) |
|
|
|
#if defined(SUPPORT_FILEFORMAT_WAV) |
|
|
|
else if (music.ctxType == MUSIC_AUDIO_WAV) drwav_uninit((drwav *)music.ctxData); |
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_OGG) |
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_OGG) |
|
|
|
else if (music.ctxType == MUSIC_AUDIO_OGG) stb_vorbis_close((stb_vorbis *)music.ctxData); |
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_MP3) |
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_MP3) |
|
|
|
else if (music.ctxType == MUSIC_AUDIO_MP3) { drmp3_uninit((drmp3 *)music.ctxData); RL_FREE(music.ctxData); } |
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_QOA) |
|
|
|
else if (music.ctxType == MUSIC_AUDIO_QOA) { /*TODO: Release QOA context*/ RL_FREE(music.ctxData); }
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_FLAC) |
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_QOA) |
|
|
|
else if (music.ctxType == MUSIC_AUDIO_QOA) qoaplay_close((qoaplay_desc *)music.ctxData);
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_FLAC) |
|
|
|
else if (music.ctxType == MUSIC_AUDIO_FLAC) drflac_free((drflac *)music.ctxData, NULL); |
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_XM) |
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_XM) |
|
|
|
else if (music.ctxType == MUSIC_MODULE_XM) jar_xm_free_context((jar_xm_context_t *)music.ctxData); |
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_MOD) |
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_MOD) |
|
|
|
else if (music.ctxType == MUSIC_MODULE_MOD) { jar_mod_unload((jar_mod_context_t *)music.ctxData); RL_FREE(music.ctxData); } |
|
|
|
#endif |
|
|
|
#endif |
|
|
|
|
|
|
|
music.ctxData = NULL; |
|
|
|
TRACELOG(LOG_WARNING, "FILEIO: Music data could not be loaded"); |
|
|
@@ -1763,7 +1753,7 @@ void UnloadMusicStream(Music music)
|
|
|
else if (music.ctxType == MUSIC_AUDIO_MP3) { drmp3_uninit((drmp3 *)music.ctxData); RL_FREE(music.ctxData); } |
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_QOA) |
|
|
|
else if (music.ctxType == MUSIC_AUDIO_QOA) { /*TODO: Release QOA context*/ RL_FREE(music.ctxData); }
|
|
|
else if (music.ctxType == MUSIC_AUDIO_QOA) qoaplay_close((qoaplay_desc *)music.ctxData);
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_FLAC) |
|
|
|
else if (music.ctxType == MUSIC_AUDIO_FLAC) drflac_free((drflac *)music.ctxData, NULL); |
|
|
@@ -1821,7 +1811,7 @@ void StopMusicStream(Music music)
|
|
|
case MUSIC_AUDIO_MP3: drmp3_seek_to_start_of_stream((drmp3 *)music.ctxData); break; |
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_QOA) |
|
|
|
case MUSIC_AUDIO_QOA: /*TODO: Restart QOA context to beginning*/ break;
|
|
|
case MUSIC_AUDIO_QOA: qoaplay_rewind((qoaplay_desc *)music.ctxData); break;
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_FLAC) |
|
|
|
case MUSIC_AUDIO_FLAC: drflac__seek_to_first_frame((drflac *)music.ctxData); break; |
|
|
@@ -1856,7 +1846,7 @@ void SeekMusicStream(Music music, float position)
|
|
|
case MUSIC_AUDIO_MP3: drmp3_seek_to_pcm_frame((drmp3 *)music.ctxData, positionInFrames); break; |
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_QOA) |
|
|
|
case MUSIC_AUDIO_QOA: /*TODO: Seek to specific QOA frame*/ break;
|
|
|
case MUSIC_AUDIO_QOA: qoaplay_seek_frame((qoaplay_desc *)music.ctxData, positionInFrames); break;
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_FLAC) |
|
|
|
case MUSIC_AUDIO_FLAC: drflac_seek_to_pcm_frame((drflac *)music.ctxData, positionInFrames); break; |
|
|
@@ -1892,11 +1882,13 @@ void UpdateMusicStream(Music music)
|
|
|
|
|
|
|
unsigned int framesLeft = music.frameCount - music.stream.buffer->framesProcessed; // Frames left to be processed |
|
|
|
unsigned int framesToStream = 0; // Total frames to be streamed |
|
|
|
|
|
|
|
if ((framesLeft >= subBufferSizeInFrames) || music.looping) framesToStream = subBufferSizeInFrames; |
|
|
|
else framesToStream = framesLeft; |
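// NOTE: When looping is enabled a full sub-buffer is always requested, even if fewer frames remain;
// each decoder case below is expected to rewind its stream and keep reading until the request is filled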
|
|
|
|
|
|
|
int frameCountStillNeeded = framesToStream; |
|
|
|
int frameCountRedTotal = 0; |
|
|
|
int frameCountReadTotal = 0; |
|
|
|
|
|
|
|
switch (music.ctxType) |
|
|
|
{ |
|
|
|
#if defined(SUPPORT_FILEFORMAT_WAV) |
|
|
@@ -1906,8 +1898,8 @@ void UpdateMusicStream(Music music)
|
|
|
{ |
|
|
|
while (true) |
|
|
|
{ |
|
|
|
int frameCountRed = (int)drwav_read_pcm_frames_s16((drwav *)music.ctxData, frameCountStillNeeded, (short *)((char *)AUDIO.System.pcmBuffer + frameCountRedTotal*frameSize)); |
|
|
|
frameCountRedTotal += frameCountRed; |
|
|
|
int frameCountRed = (int)drwav_read_pcm_frames_s16((drwav *)music.ctxData, frameCountStillNeeded, (short *)((char *)AUDIO.System.pcmBuffer + frameCountReadTotal*frameSize)); |
|
|
|
frameCountReadTotal += frameCountRed; |
|
|
|
frameCountStillNeeded -= frameCountRed; |
|
|
|
if (frameCountStillNeeded == 0) break; |
|
|
|
else drwav_seek_to_first_pcm_frame((drwav *)music.ctxData); |
|
|
@@ -1917,8 +1909,8 @@ void UpdateMusicStream(Music music)
|
|
|
{ |
|
|
|
while (true) |
|
|
|
{ |
|
|
|
int frameCountRed = (int)drwav_read_pcm_frames_f32((drwav *)music.ctxData, frameCountStillNeeded, (float *)((char *)AUDIO.System.pcmBuffer + frameCountRedTotal*frameSize)); |
|
|
|
frameCountRedTotal += frameCountRed; |
|
|
|
int frameCountRed = (int)drwav_read_pcm_frames_f32((drwav *)music.ctxData, frameCountStillNeeded, (float *)((char *)AUDIO.System.pcmBuffer + frameCountReadTotal*frameSize)); |
|
|
|
frameCountReadTotal += frameCountRed; |
|
|
|
frameCountStillNeeded -= frameCountRed; |
|
|
|
if (frameCountStillNeeded == 0) break; |
|
|
|
else drwav_seek_to_first_pcm_frame((drwav *)music.ctxData); |
|
|
@@ -1931,8 +1923,8 @@ void UpdateMusicStream(Music music)
|
|
|
{ |
|
|
|
while (true) |
|
|
|
{ |
|
|
|
int frameCountRed = stb_vorbis_get_samples_short_interleaved((stb_vorbis *)music.ctxData, music.stream.channels, (short *)((char *)AUDIO.System.pcmBuffer + frameCountRedTotal*frameSize), frameCountStillNeeded*music.stream.channels); |
|
|
|
frameCountRedTotal += frameCountRed; |
|
|
|
int frameCountRed = stb_vorbis_get_samples_short_interleaved((stb_vorbis *)music.ctxData, music.stream.channels, (short *)((char *)AUDIO.System.pcmBuffer + frameCountReadTotal*frameSize), frameCountStillNeeded*music.stream.channels); |
|
|
|
frameCountReadTotal += frameCountRed; |
|
|
|
frameCountStillNeeded -= frameCountRed; |
|
|
|
if (frameCountStillNeeded == 0) break; |
|
|
|
else stb_vorbis_seek_start((stb_vorbis *)music.ctxData); |
|
|
@@ -1944,9 +1936,9 @@ void UpdateMusicStream(Music music)
|
|
|
{ |
|
|
|
while (true) |
|
|
|
{ |
|
|
|
int frameCountRed = (int)drmp3_read_pcm_frames_f32((drmp3 *)music.ctxData, frameCountStillNeeded, (float *)((char *)AUDIO.System.pcmBuffer + frameCountRedTotal*frameSize)); |
|
|
|
frameCountRedTotal += frameCountRed; |
|
|
|
frameCountStillNeeded -= frameCountRed; |
|
|
|
int frameCountRead = (int)drmp3_read_pcm_frames_f32((drmp3 *)music.ctxData, frameCountStillNeeded, (float *)((char *)AUDIO.System.pcmBuffer + frameCountReadTotal*frameSize)); |
|
|
|
frameCountReadTotal += frameCountRead; |
|
|
|
frameCountStillNeeded -= frameCountRead; |
|
|
|
if (frameCountStillNeeded == 0) break; |
|
|
|
else drmp3_seek_to_start_of_stream((drmp3 *)music.ctxData); |
|
|
|
} |
|
|
@@ -1955,7 +1947,18 @@ void UpdateMusicStream(Music music)
|
|
|
#if defined(SUPPORT_FILEFORMAT_QOA) |
|
|
|
case MUSIC_AUDIO_QOA: |
|
|
|
{ |
|
|
|
// TODO: Read the required QOA frame count to fill the buffer and keep music playing
|
|
|
unsigned int frameCountRead = qoaplay_decode((qoaplay_desc *)music.ctxData, (float *)AUDIO.System.pcmBuffer, framesToStream); |
|
|
|
frameCountReadTotal += frameCountRead; |
|
|
|
/* |
|
|
|
while (true) |
|
|
|
{ |
|
|
|
int frameCountRead = (int)qoaplay_decode((qoaplay_desc *)music.ctxData, (float *)((char *)AUDIO.System.pcmBuffer + frameCountReadTotal*frameSize), frameCountStillNeeded); |
|
|
|
frameCountReadTotal += frameCountRead; |
|
|
|
frameCountStillNeeded -= frameCountRead; |
|
|
|
if (frameCountStillNeeded == 0) break; |
|
|
|
else qoaplay_rewind((qoaplay_desc *)music.ctxData); |
|
|
|
} |
|
|
|
*/ |
|
|
|
} break; |
|
|
|
#endif |
|
|
|
#if defined(SUPPORT_FILEFORMAT_FLAC) |
|
|
@@ -1964,8 +1967,8 @@ void UpdateMusicStream(Music music)
|
|
|
while (true) |
|
|
|
{ |
|
|
|
int frameCountRed = drflac_read_pcm_frames_s16((drflac *)music.ctxData, frameCountStillNeeded, (short *)((char *)AUDIO.System.pcmBuffer + frameCountRedTotal*frameSize)); |
|
|
|
frameCountRedTotal += frameCountRed; |
|
|
|
frameCountStillNeeded -= frameCountRed; |
|
|
|
int frameCountRead = drflac_read_pcm_frames_s16((drflac *)music.ctxData, frameCountStillNeeded, (short *)((char *)AUDIO.System.pcmBuffer + frameCountReadTotal*frameSize));
frameCountReadTotal += frameCountRead;
|
|
|
frameCountStillNeeded -= frameCountRead; |
|
|
|
if (frameCountStillNeeded == 0) break; |
|
|
|
else drflac__seek_to_first_frame((drflac *)music.ctxData); |
|
|
|
} |
|
|
@@ -1978,7 +1981,6 @@ void UpdateMusicStream(Music music)
|
|
|
if (AUDIO_DEVICE_FORMAT == ma_format_f32) jar_xm_generate_samples((jar_xm_context_t *)music.ctxData, (float *)AUDIO.System.pcmBuffer, framesToStream); |
|
|
|
else if (AUDIO_DEVICE_FORMAT == ma_format_s16) jar_xm_generate_samples_16bit((jar_xm_context_t *)music.ctxData, (short *)AUDIO.System.pcmBuffer, framesToStream); |
|
|
|
else if (AUDIO_DEVICE_FORMAT == ma_format_u8) jar_xm_generate_samples_8bit((jar_xm_context_t *)music.ctxData, (char *)AUDIO.System.pcmBuffer, framesToStream); |
|
|
|
|
|
|
|
//jar_xm_reset((jar_xm_context_t *)music.ctxData); |
|
|
|
|
|
|
|
} break; |
|
|
@@ -1988,7 +1990,6 @@ void UpdateMusicStream(Music music)
|
|
|
{ |
|
|
|
// NOTE: 3rd parameter (nbsample) specifies the number of stereo 16bit samples you want, so sampleCount/2
|
|
|
jar_mod_fillbuffer((jar_mod_context_t *)music.ctxData, (short *)AUDIO.System.pcmBuffer, framesToStream, 0); |
|
|
|
|
|
|
|
//jar_mod_seek_start((jar_mod_context_t *)music.ctxData); |
|
|
|
|
|
|
|
} break; |
|
|
@@ -2056,7 +2057,7 @@ float GetMusicTimePlayed(Music music)
|
|
|
float secondsPlayed = 0.0f; |
|
|
|
if (music.stream.buffer != NULL) |
|
|
|
{ |
|
|
|
#if defined(SUPPORT_FILEFORMAT_XM) |
|
|
|
#if defined(SUPPORT_FILEFORMAT_XM) |
|
|
|
if (music.ctxType == MUSIC_MODULE_XM) |
|
|
|
{ |
|
|
|
uint64_t framesPlayed = 0; |
|
|
@@ -2065,7 +2066,7 @@ float GetMusicTimePlayed(Music music)
|
|
|
secondsPlayed = (float)framesPlayed/music.stream.sampleRate; |
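// e.g. framesPlayed = 88200 on a 44100 Hz stream -> 88200/44100 = 2.0 seconds played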
|
|
|
} |
|
|
|
else |
|
|
|
#endif |
|
|
|
#endif |
|
|
|
{ |
|
|
|
//ma_uint32 frameSizeInBytes = ma_get_bytes_per_sample(music.stream.buffer->dsp.formatConverterIn.config.formatIn)*music.stream.buffer->dsp.formatConverterIn.config.channels; |
|
|
|
int framesProcessed = (int)music.stream.buffer->framesProcessed; |
|
|
@@ -2115,7 +2116,7 @@ AudioStream LoadAudioStream(unsigned int sampleRate, unsigned int sampleSize, un
|
|
|
} |
|
|
|
|
|
|
|
// Checks if an audio stream is ready |
|
|
|
RLAPI bool IsAudioStreamReady(AudioStream stream) |
|
|
|
bool IsAudioStreamReady(AudioStream stream) |
|
|
|
{ |
|
|
|
return ((stream.buffer != NULL) && // Validate stream buffer |
|
|
|
(stream.sampleRate > 0) && // Validate sample rate is supported |
|
|
@@ -2277,6 +2278,7 @@ void AttachAudioStreamProcessor(AudioStream stream, AudioCallback process)
|
|
|
ma_mutex_unlock(&AUDIO.System.lock); |
|
|
|
} |
|
|
|
|
|
|
|
// Remove processor from audio stream |
|
|
|
void DetachAudioStreamProcessor(AudioStream stream, AudioCallback process) |
|
|
|
{ |
|
|
|
ma_mutex_lock(&AUDIO.System.lock); |
|
|
@@ -2304,9 +2306,8 @@ void DetachAudioStreamProcessor(AudioStream stream, AudioCallback process)
|
|
|
} |
|
|
|
|
|
|
|
// Add processor to audio pipeline. Order of processors is important |
|
|
|
// Works the same way as {Attach,Detach}AudioStreamProcessor functions, except |
|
|
|
// these two work on the already mixed output just before sending it to the |
|
|
|
// sound hardware. |
|
|
|
// Works the same way as {Attach,Detach}AudioStreamProcessor() functions, except |
|
|
|
// these two work on the already mixed output just before sending it to the sound hardware |
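// Illustrative sketch (not part of this patch) of a processor that could be attached here, assuming
// the raylib AudioCallback signature void (*)(void *bufferData, unsigned int frames) and the 32bit
// float, 2 channel device format configured in this file:
//
//   static void HalveMixedVolume(void *bufferData, unsigned int frames)
//   {
//       float *samples = (float *)bufferData;
//       for (unsigned int i = 0; i < frames*2; i++) samples[i] *= 0.5f;   // 2 samples per frame (stereo)
//   }
//
//   AttachAudioMixedProcessor(HalveMixedVolume);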
|
|
|
void AttachAudioMixedProcessor(AudioCallback process) |
|
|
|
{ |
|
|
|
ma_mutex_lock(&AUDIO.System.lock); |
|
|
@@ -2330,6 +2331,7 @@ void AttachAudioMixedProcessor(AudioCallback process)
|
|
|
ma_mutex_unlock(&AUDIO.System.lock); |
|
|
|
} |
|
|
|
|
|
|
|
// Remove processor from audio pipeline |
|
|
|
void DetachAudioMixedProcessor(AudioCallback process) |
|
|
|
{ |
|
|
|
ma_mutex_lock(&AUDIO.System.lock); |
|
|
@@ -2508,7 +2510,6 @@ static ma_uint32 ReadAudioBufferFramesInMixingFormat(AudioBuffer *audioBuffer, f
|
|
|
return totalOutputFramesProcessed; |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
// Sending audio data to device callback function |
|
|
|
// This function will be called when miniaudio needs more data |
|
|
|
// NOTE: All the mixing takes place here |
|
|
|