
Removed OpenAL backend

pull/669/head
Ray 6 years ago
parent
commit
c2aa1fed7b
6 changed files with 12 additions and 642 deletions
  1. +0 -1    .travis.yml
  2. +2 -7    src/CMakeLists.txt
  3. +0 -5    src/CMakeOptions.txt
  4. +1 -23   src/Makefile
  5. +9 -604  src/audio.c
  6. +0 -2    src/config.h.in
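
Note: the change is internal to the audio module. The public raylib audio API is unchanged by this commit; only the OpenAL build options and code paths go away, leaving mini_al as the single backend. A minimal C usage sketch (the asset path is a placeholder, for illustration only):

    #include "raylib.h"   // Public raylib API, unaffected by the backend removal

    int main(void)
    {
        InitAudioDevice();                            // Now always initializes the mini_al backend
        Sound fx = LoadSound("resources/sound.wav");  // Placeholder path, for illustration only
        PlaySound(fx);
        while (IsSoundPlaying(fx)) { }                // Sketch only: busy-wait until playback ends
        UnloadSound(fx);
        CloseAudioDevice();
        return 0;
    }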

+0 -1  .travis.yml

@@ -120,7 +120,6 @@ script:
-DBUILD_EXAMPLES=ON -DBUILD_GAMES=ON
-DUSE_EXTERNAL_GLFW=$USE_EXTERNAL_GLFW
-DUSE_WAYLAND=$WAYLAND
-DUSE_OPENAL_BACKEND=$OPENAL
-DINCLUDE_EVERYTHING=ON
..
- $RUNNER make VERBOSE=1

+2 -7  src/CMakeLists.txt

@@ -45,13 +45,8 @@ endif()
add_definitions("-DRAYLIB_CMAKE=1")
if(USE_AUDIO)
if (NOT USE_OPENAL_BACKEND)
file(GLOB mini_al external/mini_al.c)
MESSAGE(STATUS "Audio Backend: mini_al")
else()
find_package(OpenAL REQUIRED)
MESSAGE(STATUS "Audio Backend: OpenAL")
endif()
file(GLOB mini_al external/mini_al.c)
MESSAGE(STATUS "Audio Backend: mini_al")
file(GLOB stb_vorbis external/stb_vorbis.c)
set(sources ${raylib_sources} ${mini_al} ${stb_vorbis})
else()

+0 -5  src/CMakeOptions.txt

@@ -12,11 +12,6 @@ option(SHARED "Build raylib as a dynamic library" OFF)
option(STATIC "Build raylib as a static library" ON)
option(MACOS_FATLIB "Build fat library for both i386 and x86_64 on macOS" OFF)
option(USE_AUDIO "Build raylib with audio module" ON)
if(${PLATFORM} MATCHES "Web")
cmake_dependent_option(USE_OPENAL_BACKEND "Link raylib with openAL instead of mini-al" ON "USE_AUDIO" OFF)
else()
cmake_dependent_option(USE_OPENAL_BACKEND "Link raylib with openAL instead of mini-al" OFF "USE_AUDIO" OFF)
endif()
enum_option(USE_EXTERNAL_GLFW "OFF;IF_POSSIBLE;ON" "Link raylib against system GLFW instead of embedded one")
if(UNIX AND NOT APPLE)

+1 -23  src/Makefile

@@ -63,14 +63,6 @@ RAYLIB_BUILD_MODE ?= RELEASE
# NOTE: Some programs like tools could not require audio support
INCLUDE_AUDIO_MODULE ?= TRUE
# Use OpenAL Soft backend for audio
USE_OPENAL_BACKEND ?= FALSE
# OpenAL Soft audio backend forced on HTML5 and OSX (see below)
ifeq ($(PLATFORM),PLATFORM_WEB)
USE_OPENAL_BACKEND = TRUE
endif
# Use external GLFW library instead of rglfw module
# TODO: Review usage of examples on Linux.
USE_EXTERNAL_GLFW ?= FALSE
@@ -154,13 +146,6 @@ endif
# RAYLIB_PATH ?= /home/pi/raylib
#endif
# Force OpenAL Soft audio backend for OSX platform
# NOTE 1: mini_al library does not support CoreAudio yet
# NOTE 2: Required OpenAL libraries should be available on OSX
ifeq ($(PLATFORM_OS),OSX)
USE_OPENAL_BACKEND = TRUE
endif
ifeq ($(PLATFORM),PLATFORM_WEB)
# Emscripten required variables
EMSDK_PATH = C:/emsdk
@@ -343,11 +328,6 @@ ifeq ($(RAYLIB_LIBTYPE),SHARED)
CFLAGS += -fPIC -DBUILD_LIBTYPE_SHARED
endif
# Use OpenAL Soft backend instead of mini_al
ifeq ($(USE_OPENAL_BACKEND),TRUE)
CFLAGS += -DUSE_OPENAL_BACKEND
endif
# Use Wayland display on Linux desktop
ifeq ($(PLATFORM),PLATFORM_DESKTOP)
ifeq ($(PLATFORM_OS), LINUX)
@@ -426,9 +406,7 @@ endif
ifeq ($(INCLUDE_AUDIO_MODULE),TRUE)
OBJS += audio.o
OBJS += stb_vorbis.o
ifeq ($(USE_OPENAL_BACKEND),FALSE)
OBJS += mini_al.o
endif
OBJS += mini_al.o
endif
ifeq ($(PLATFORM),PLATFORM_ANDROID)

+9 -604  src/audio.c

@@ -16,9 +16,6 @@
* Define to use the module as standalone library (independently of raylib).
* Required types and functions are defined in the same module.
*
* #define USE_OPENAL_BACKEND
* Use OpenAL Soft audio backend
*
* #define SUPPORT_FILEFORMAT_WAV
* #define SUPPORT_FILEFORMAT_OGG
* #define SUPPORT_FILEFORMAT_XM
@@ -82,25 +79,9 @@
#include "utils.h" // Required for: fopen() Android mapping
#endif
#if !defined(USE_OPENAL_BACKEND)
#define USE_MINI_AL 1 // Set to 1 to use mini_al; 0 to use OpenAL.
#endif
#include "external/mini_al.h" // Implemented in mini_al.c. Cannot implement this here because it conflicts with Win32 APIs such as CloseWindow(), etc.
#if !defined(USE_MINI_AL) || (USE_MINI_AL == 0)
#if defined(__APPLE__)
#include "OpenAL/al.h" // OpenAL basic header
#include "OpenAL/alc.h" // OpenAL context header (like OpenGL, OpenAL requires a context to work)
#else
#include "AL/al.h" // OpenAL basic header
#include "AL/alc.h" // OpenAL context header (like OpenGL, OpenAL requires a context to work)
//#include "AL/alext.h" // OpenAL extensions header, required for AL_EXT_FLOAT32 and AL_EXT_MCFORMATS
#endif
// OpenAL extension: AL_EXT_FLOAT32 - Support for 32bit float samples
// OpenAL extension: AL_EXT_MCFORMATS - Support for multi-channel formats (Quad, 5.1, 6.1, 7.1)
#endif
#include "external/mini_al.h" // mini_al audio library
// NOTE: Cannot be implement here because it conflicts with
// Win32 APIs: Rectangle, CloseWindow(), ShowCursor(), PlaySoundA()
#include <stdlib.h> // Required for: malloc(), free()
#include <string.h> // Required for: strcmp(), strncmp()
@@ -147,15 +128,6 @@
// In case of music-stalls, just increase this number
#define AUDIO_BUFFER_SIZE 4096 // PCM data samples (i.e. 16bit, Mono: 8Kb)
// Support uncompressed PCM data in 32-bit float IEEE format
// NOTE: This definition is included in "AL/alext.h", but some OpenAL implementations
// could not provide the extensions header (Android), so its defined here
#if !defined(AL_EXT_float32)
#define AL_EXT_float32 1
#define AL_FORMAT_MONO_FLOAT32 0x10010
#define AL_FORMAT_STEREO_FLOAT32 0x10011
#endif
//----------------------------------------------------------------------------------
// Types and Structures Definition
//----------------------------------------------------------------------------------
@@ -233,8 +205,6 @@ void TraceLog(int msgType, const char *text, ...); // Show trace lo
//----------------------------------------------------------------------------------
// mini_al AudioBuffer Functionality
//----------------------------------------------------------------------------------
#if USE_MINI_AL
#define DEVICE_FORMAT mal_format_f32
#define DEVICE_CHANNELS 2
#define DEVICE_SAMPLE_RATE 44100
@@ -487,7 +457,6 @@ static void MixAudioFrames(float *framesOut, const float *framesIn, mal_uint32 f
}
}
}
#endif
//----------------------------------------------------------------------------------
// Module Functions Definition - Audio Device initialization and Closing
@@ -495,7 +464,6 @@ static void MixAudioFrames(float *framesOut, const float *framesIn, mal_uint32 f
// Initialize audio device
void InitAudioDevice(void)
{
#if USE_MINI_AL
// Context.
mal_context_config contextConfig = mal_context_config_init(OnLog);
mal_result result = mal_context_init(NULL, 0, &contextConfig, &context);
@@ -545,45 +513,11 @@ void InitAudioDevice(void)
TraceLog(LOG_INFO, "Audio buffer size: %d", device.bufferSizeInFrames);
isAudioInitialized = MAL_TRUE;
#else
// Open and initialize a device with default settings
ALCdevice *device = alcOpenDevice(NULL);
if (!device) TraceLog(LOG_ERROR, "Audio device could not be opened");
else
{
ALCcontext *context = alcCreateContext(device, NULL);
if ((context == NULL) || (alcMakeContextCurrent(context) == ALC_FALSE))
{
if (context != NULL) alcDestroyContext(context);
alcCloseDevice(device);
TraceLog(LOG_ERROR, "Could not initialize audio context");
}
else
{
TraceLog(LOG_INFO, "Audio device and context initialized successfully: %s", alcGetString(device, ALC_DEVICE_SPECIFIER));
// Listener definition (just for 2D)
alListener3f(AL_POSITION, 0.0f, 0.0f, 0.0f);
alListener3f(AL_VELOCITY, 0.0f, 0.0f, 0.0f);
alListener3f(AL_ORIENTATION, 0.0f, 0.0f, -1.0f);
alListenerf(AL_GAIN, 1.0f);
if (alIsExtensionPresent("AL_EXT_float32")) TraceLog(LOG_INFO, "[EXTENSION] AL_EXT_float32 supported");
else TraceLog(LOG_INFO, "[EXTENSION] AL_EXT_float32 not supported");
}
}
#endif
}
// Close the audio device for all contexts
void CloseAudioDevice(void)
{
#if USE_MINI_AL
if (!isAudioInitialized)
{
TraceLog(LOG_WARNING, "Could not close audio device because it is not currently initialized");
@@ -593,18 +527,6 @@ void CloseAudioDevice(void)
mal_mutex_uninit(&audioLock);
mal_device_uninit(&device);
mal_context_uninit(&context);
#else
ALCdevice *device;
ALCcontext *context = alcGetCurrentContext();
if (context == NULL) TraceLog(LOG_WARNING, "Could not get current audio context for closing");
device = alcGetContextsDevice(context);
alcMakeContextCurrent(NULL);
alcDestroyContext(context);
alcCloseDevice(device);
#endif
TraceLog(LOG_INFO, "Audio device closed successfully");
}
@@ -612,20 +534,7 @@ void CloseAudioDevice(void)
// Check if device has been initialized successfully
bool IsAudioDeviceReady(void)
{
#if USE_MINI_AL
return isAudioInitialized;
#else
ALCcontext *context = alcGetCurrentContext();
if (context == NULL) return false;
else
{
ALCdevice *device = alcGetContextsDevice(context);
if (device == NULL) return false;
else return true;
}
#endif
}
// Set master volume (listener)
@@ -634,17 +543,13 @@ void SetMasterVolume(float volume)
if (volume < 0.0f) volume = 0.0f;
else if (volume > 1.0f) volume = 1.0f;
#if USE_MINI_AL
masterVolume = volume;
#else
alListenerf(AL_GAIN, volume);
#endif
}
//----------------------------------------------------------------------------------
// Module Functions Definition - Audio Buffer management
//----------------------------------------------------------------------------------
#if USE_MINI_AL
// Create a new audio buffer. Initially filled with silence
AudioBuffer *CreateAudioBuffer(mal_format format, mal_uint32 channels, mal_uint32 sampleRate, mal_uint32 bufferSizeInFrames, AudioBufferUsage usage)
{
@@ -843,7 +748,6 @@ void UntrackAudioBuffer(AudioBuffer *audioBuffer)
mal_mutex_unlock(&audioLock);
}
#endif
//----------------------------------------------------------------------------------
// Module Functions Definition - Sounds loading and playing (.WAV)
@@ -909,7 +813,6 @@ Sound LoadSoundFromWave(Wave wave)
if (wave.data != NULL)
{
#if USE_MINI_AL
// When using mini_al we need to do our own mixing. To simplify this we need convert the format of each sound to be consistent with
// the format used to open the playback device. We can do this two ways:
//
@@ -931,61 +834,6 @@ Sound LoadSoundFromWave(Wave wave)
if (frameCount == 0) TraceLog(LOG_WARNING, "LoadSoundFromWave() : Format conversion failed");
sound.audioBuffer = audioBuffer;
#else
ALenum format = 0;
// The OpenAL format is worked out by looking at the number of channels and the sample size (bits per sample)
if (wave.channels == 1)
{
switch (wave.sampleSize)
{
case 8: format = AL_FORMAT_MONO8; break;
case 16: format = AL_FORMAT_MONO16; break;
case 32: format = AL_FORMAT_MONO_FLOAT32; break; // Requires OpenAL extension: AL_EXT_FLOAT32
default: TraceLog(LOG_WARNING, "Wave sample size not supported: %i", wave.sampleSize); break;
}
}
else if (wave.channels == 2)
{
switch (wave.sampleSize)
{
case 8: format = AL_FORMAT_STEREO8; break;
case 16: format = AL_FORMAT_STEREO16; break;
case 32: format = AL_FORMAT_STEREO_FLOAT32; break; // Requires OpenAL extension: AL_EXT_FLOAT32
default: TraceLog(LOG_WARNING, "Wave sample size not supported: %i", wave.sampleSize); break;
}
}
else TraceLog(LOG_WARNING, "Wave number of channels not supported: %i", wave.channels);
// Create an audio source
ALuint source;
alGenSources(1, &source); // Generate pointer to audio source
alSourcef(source, AL_PITCH, 1.0f);
alSourcef(source, AL_GAIN, 1.0f);
alSource3f(source, AL_POSITION, 0.0f, 0.0f, 0.0f);
alSource3f(source, AL_VELOCITY, 0.0f, 0.0f, 0.0f);
alSourcei(source, AL_LOOPING, AL_FALSE);
// Convert loaded data to OpenAL buffer
//----------------------------------------
ALuint buffer;
alGenBuffers(1, &buffer); // Generate pointer to buffer
unsigned int dataSize = wave.sampleCount*wave.channels*wave.sampleSize/8; // Size in bytes
// Upload sound data to buffer
alBufferData(buffer, format, wave.data, dataSize, wave.sampleRate);
// Attach sound buffer to source
alSourcei(source, AL_BUFFER, buffer);
TraceLog(LOG_INFO, "[SND ID %i][BUFR ID %i] Sound data loaded successfully (%i Hz, %i bit, %s)", source, buffer, wave.sampleRate, wave.sampleSize, (wave.channels == 1) ? "Mono" : "Stereo");
sound.source = source;
sound.buffer = buffer;
sound.format = format;
#endif
}
return sound;
@@ -1002,14 +850,7 @@ void UnloadWave(Wave wave)
// Unload sound
void UnloadSound(Sound sound)
{
#if USE_MINI_AL
DeleteAudioBuffer((AudioBuffer *)sound.audioBuffer);
#else
alSourceStop(sound.source);
alDeleteSources(1, &sound.source);
alDeleteBuffers(1, &sound.buffer);
#endif
TraceLog(LOG_INFO, "[SND ID %i][BUFR ID %i] Unloaded sound data from RAM", sound.source, sound.buffer);
}
@@ -1018,8 +859,8 @@ void UnloadSound(Sound sound)
// NOTE: data must match sound.format
void UpdateSound(Sound sound, const void *data, int samplesCount)
{
#if USE_MINI_AL
AudioBuffer *audioBuffer = (AudioBuffer *)sound.audioBuffer;
if (audioBuffer == NULL)
{
TraceLog(LOG_ERROR, "UpdateSound() : Invalid sound - no audio buffer");
@@ -1030,29 +871,6 @@ void UpdateSound(Sound sound, const void *data, int samplesCount)
// TODO: May want to lock/unlock this since this data buffer is read at mixing time.
memcpy(audioBuffer->buffer, data, samplesCount*audioBuffer->dsp.formatConverterIn.config.channels*mal_get_bytes_per_sample(audioBuffer->dsp.formatConverterIn.config.formatIn));
#else
ALint sampleRate, sampleSize, channels;
alGetBufferi(sound.buffer, AL_FREQUENCY, &sampleRate);
alGetBufferi(sound.buffer, AL_BITS, &sampleSize); // It could also be retrieved from sound.format
alGetBufferi(sound.buffer, AL_CHANNELS, &channels); // It could also be retrieved from sound.format
TraceLog(LOG_DEBUG, "UpdateSound() : AL_FREQUENCY: %i", sampleRate);
TraceLog(LOG_DEBUG, "UpdateSound() : AL_BITS: %i", sampleSize);
TraceLog(LOG_DEBUG, "UpdateSound() : AL_CHANNELS: %i", channels);
unsigned int dataSize = samplesCount*channels*sampleSize/8; // Size of data in bytes
alSourceStop(sound.source); // Stop sound
alSourcei(sound.source, AL_BUFFER, 0); // Unbind buffer from sound to update
//alDeleteBuffers(1, &sound.buffer); // Delete current buffer data
//alGenBuffers(1, &sound.buffer); // Generate new buffer
// Upload new data to sound buffer
alBufferData(sound.buffer, sound.format, data, dataSize, sampleRate);
// Attach sound buffer to source again
alSourcei(sound.source, AL_BUFFER, sound.buffer);
#endif
}
// Export wave data to file
@@ -1141,102 +959,48 @@ void ExportWave(Wave wave, const char *fileName)
// Play a sound
void PlaySound(Sound sound)
{
#if USE_MINI_AL
PlayAudioBuffer((AudioBuffer *)sound.audioBuffer);
#else
alSourcePlay(sound.source); // Play the sound
#endif
//TraceLog(LOG_INFO, "Playing sound");
// Find the current position of the sound being played
// NOTE: Only work when the entire file is in a single buffer
//int byteOffset;
//alGetSourcei(sound.source, AL_BYTE_OFFSET, &byteOffset);
//
//int sampleRate;
//alGetBufferi(sound.buffer, AL_FREQUENCY, &sampleRate); // AL_CHANNELS, AL_BITS (bps)
//float seconds = (float)byteOffset/sampleRate; // Number of seconds since the beginning of the sound
//or
//float result;
//alGetSourcef(sound.source, AL_SEC_OFFSET, &result); // AL_SAMPLE_OFFSET
}
// Pause a sound
void PauseSound(Sound sound)
{
#if USE_MINI_AL
PauseAudioBuffer((AudioBuffer *)sound.audioBuffer);
#else
alSourcePause(sound.source);
#endif
}
// Resume a paused sound
void ResumeSound(Sound sound)
{
#if USE_MINI_AL
ResumeAudioBuffer((AudioBuffer *)sound.audioBuffer);
#else
ALenum state;
alGetSourcei(sound.source, AL_SOURCE_STATE, &state);
if (state == AL_PAUSED) alSourcePlay(sound.source);
#endif
}
// Stop reproducing a sound
void StopSound(Sound sound)
{
#if USE_MINI_AL
StopAudioBuffer((AudioBuffer *)sound.audioBuffer);
#else
alSourceStop(sound.source);
#endif
}
// Check if a sound is playing
bool IsSoundPlaying(Sound sound)
{
#if USE_MINI_AL
return IsAudioBufferPlaying((AudioBuffer *)sound.audioBuffer);
#else
bool playing = false;
ALint state;
alGetSourcei(sound.source, AL_SOURCE_STATE, &state);
if (state == AL_PLAYING) playing = true;
return playing;
#endif
}
// Set volume for a sound
void SetSoundVolume(Sound sound, float volume)
{
#if USE_MINI_AL
SetAudioBufferVolume((AudioBuffer *)sound.audioBuffer, volume);
#else
alSourcef(sound.source, AL_GAIN, volume);
#endif
}
// Set pitch for a sound
void SetSoundPitch(Sound sound, float pitch)
{
#if USE_MINI_AL
SetAudioBufferPitch((AudioBuffer *)sound.audioBuffer, pitch);
#else
alSourcef(sound.source, AL_PITCH, pitch);
#endif
}
// Convert wave data to desired format
void WaveFormat(Wave *wave, int sampleRate, int sampleSize, int channels)
{
#if USE_MINI_AL
mal_format formatIn = ((wave->sampleSize == 8) ? mal_format_u8 : ((wave->sampleSize == 16) ? mal_format_s16 : mal_format_f32));
mal_format formatOut = (( sampleSize == 8) ? mal_format_u8 : (( sampleSize == 16) ? mal_format_s16 : mal_format_f32));
@@ -1264,87 +1028,6 @@ void WaveFormat(Wave *wave, int sampleRate, int sampleSize, int channels)
wave->channels = channels;
free(wave->data);
wave->data = data;
#else
// Format sample rate
// NOTE: Only supported 22050 <--> 44100
if (wave->sampleRate != sampleRate)
{
// TODO: Resample wave data (upsampling or downsampling)
// NOTE 1: To downsample, you have to drop samples or average them.
// NOTE 2: To upsample, you have to interpolate new samples.
wave->sampleRate = sampleRate;
}
// Format sample size
// NOTE: Only supported 8 bit <--> 16 bit <--> 32 bit
if (wave->sampleSize != sampleSize)
{
void *data = malloc(wave->sampleCount*wave->channels*sampleSize/8);
for (int i = 0; i < wave->sampleCount; i++)
{
for (int j = 0; j < wave->channels; j++)
{
if (sampleSize == 8)
{
if (wave->sampleSize == 16) ((unsigned char *)data)[wave->channels*i + j] = (unsigned char)(((float)(((short *)wave->data)[wave->channels*i + j])/32767.0f)*256);
else if (wave->sampleSize == 32) ((unsigned char *)data)[wave->channels*i + j] = (unsigned char)(((float *)wave->data)[wave->channels*i + j]*127.0f + 127);
}
else if (sampleSize == 16)
{
if (wave->sampleSize == 8) ((short *)data)[wave->channels*i + j] = (short)(((float)(((unsigned char *)wave->data)[wave->channels*i + j] - 127)/256.0f)*32767);
else if (wave->sampleSize == 32) ((short *)data)[wave->channels*i + j] = (short)((((float *)wave->data)[wave->channels*i + j])*32767);
}
else if (sampleSize == 32)
{
if (wave->sampleSize == 8) ((float *)data)[wave->channels*i + j] = (float)(((unsigned char *)wave->data)[wave->channels*i + j] - 127)/256.0f;
else if (wave->sampleSize == 16) ((float *)data)[wave->channels*i + j] = (float)(((short *)wave->data)[wave->channels*i + j])/32767.0f;
}
}
}
wave->sampleSize = sampleSize;
free(wave->data);
wave->data = data;
}
// Format channels (interlaced mode)
// NOTE: Only supported mono <--> stereo
if (wave->channels != channels)
{
void *data = malloc(wave->sampleCount*wave->sampleSize/8*channels);
if ((wave->channels == 1) && (channels == 2)) // mono ---> stereo (duplicate mono information)
{
for (int i = 0; i < wave->sampleCount; i++)
{
for (int j = 0; j < channels; j++)
{
if (wave->sampleSize == 8) ((unsigned char *)data)[channels*i + j] = ((unsigned char *)wave->data)[i];
else if (wave->sampleSize == 16) ((short *)data)[channels*i + j] = ((short *)wave->data)[i];
else if (wave->sampleSize == 32) ((float *)data)[channels*i + j] = ((float *)wave->data)[i];
}
}
}
else if ((wave->channels == 2) && (channels == 1)) // stereo ---> mono (mix stereo channels)
{
for (int i = 0, j = 0; i < wave->sampleCount; i++, j += 2)
{
if (wave->sampleSize == 8) ((unsigned char *)data)[i] = (((unsigned char *)wave->data)[j] + ((unsigned char *)wave->data)[j + 1])/2;
else if (wave->sampleSize == 16) ((short *)data)[i] = (((short *)wave->data)[j] + ((short *)wave->data)[j + 1])/2;
else if (wave->sampleSize == 32) ((float *)data)[i] = (((float *)wave->data)[j] + ((float *)wave->data)[j + 1])/2.0f;
}
}
// TODO: Add/remove additional interlaced channels
wave->channels = channels;
free(wave->data);
wave->data = data;
}
#endif
}
// Copy a wave to a new wave
@@ -1578,8 +1261,8 @@ void UnloadMusicStream(Music music)
// Start music playing (open stream)
void PlayMusicStream(Music music)
{
#if USE_MINI_AL
AudioBuffer *audioBuffer = (AudioBuffer *)music->stream.audioBuffer;
if (audioBuffer == NULL)
{
TraceLog(LOG_ERROR, "PlayMusicStream() : No audio buffer");
@@ -1595,61 +1278,25 @@ void PlayMusicStream(Music music)
PlayAudioStream(music->stream); // <-- This resets the cursor position.
audioBuffer->frameCursorPos = frameCursorPos;
#else
alSourcePlay(music->stream.source);
#endif
}
// Pause music playing
void PauseMusicStream(Music music)
{
#if USE_MINI_AL
PauseAudioStream(music->stream);
#else
alSourcePause(music->stream.source);
#endif
}
// Resume music playing
void ResumeMusicStream(Music music)
{
#if USE_MINI_AL
ResumeAudioStream(music->stream);
#else
ALenum state;
alGetSourcei(music->stream.source, AL_SOURCE_STATE, &state);
if (state == AL_PAUSED)
{
TraceLog(LOG_INFO, "[AUD ID %i] Resume music stream playing", music->stream.source);
alSourcePlay(music->stream.source);
}
#endif
}
// Stop music playing (close stream)
// TODO: To clear a buffer, make sure they have been already processed!
void StopMusicStream(Music music)
{
#if USE_MINI_AL
StopAudioStream(music->stream);
#else
alSourceStop(music->stream.source);
/*
// Clear stream buffers
// WARNING: Queued buffers must have been processed before unqueueing and reloaded with data!!!
void *pcm = calloc(AUDIO_BUFFER_SIZE*music->stream.sampleSize/8*music->stream.channels, 1);
for (int i = 0; i < MAX_STREAM_BUFFERS; i++)
{
//UpdateAudioStream(music->stream, pcm, AUDIO_BUFFER_SIZE); // Update one buffer at a time
alBufferData(music->stream.buffers[i], music->stream.format, pcm, AUDIO_BUFFER_SIZE*music->stream.sampleSize/8*music->stream.channels, music->stream.sampleRate);
}
free(pcm);
*/
#endif
// Restart music context
switch (music->ctxType)
@@ -1677,7 +1324,6 @@ void StopMusicStream(Music music)
// TODO: Make sure buffers are ready for update... check music state
void UpdateMusicStream(Music music)
{
#if USE_MINI_AL
bool streamEnding = false;
unsigned int subBufferSizeInFrames = ((AudioBuffer *)music->stream.audioBuffer)->bufferSizeInFrames/2;
@@ -1761,139 +1407,24 @@ void UpdateMusicStream(Music music)
// just make sure to play again on window restore
if (IsMusicPlaying(music)) PlayMusicStream(music);
}
#else
ALenum state;
ALint processed = 0;
alGetSourcei(music->stream.source, AL_SOURCE_STATE, &state); // Get music stream state
alGetSourcei(music->stream.source, AL_BUFFERS_PROCESSED, &processed); // Get processed buffers
if (processed > 0)
{
bool streamEnding = false;
// NOTE: Using dynamic allocation because it could require more than 16KB
void *pcm = calloc(AUDIO_BUFFER_SIZE*music->stream.sampleSize/8*music->stream.channels, 1);
int numBuffersToProcess = processed;
int samplesCount = 0; // Total size of data steamed in L+R samples for xm floats,
// individual L or R for ogg shorts
for (int i = 0; i < numBuffersToProcess; i++)
{
if (music->samplesLeft >= AUDIO_BUFFER_SIZE) samplesCount = AUDIO_BUFFER_SIZE;
else samplesCount = music->samplesLeft;
// TODO: Really don't like ctxType thingy...
switch (music->ctxType)
{
case MUSIC_AUDIO_OGG:
{
// NOTE: Returns the number of samples to process (be careful! we ask for number of shorts!)
int numSamplesOgg = stb_vorbis_get_samples_short_interleaved(music->ctxOgg, music->stream.channels, (short *)pcm, samplesCount*music->stream.channels);
} break;
#if defined(SUPPORT_FILEFORMAT_FLAC)
case MUSIC_AUDIO_FLAC:
{
// NOTE: Returns the number of samples to process
unsigned int numSamplesFlac = (unsigned int)drflac_read_s16(music->ctxFlac, samplesCount*music->stream.channels, (short *)pcm);
} break;
#endif
#if defined(SUPPORT_FILEFORMAT_MP3)
case MUSIC_AUDIO_MP3:
{
// NOTE: Returns the number of samples to process
unsigned int numSamplesMp3 = (unsigned int)drmp3_read_f32(&music->ctxMp3, samplesCount*music->stream.channels, (float *)pcm);
} break;
#endif
#if defined(SUPPORT_FILEFORMAT_XM)
case MUSIC_MODULE_XM: jar_xm_generate_samples_16bit(music->ctxXm, pcm, samplesCount); break;
#endif
#if defined(SUPPORT_FILEFORMAT_MOD)
case MUSIC_MODULE_MOD: jar_mod_fillbuffer(&music->ctxMod, pcm, samplesCount, 0); break;
#endif
default: break;
}
UpdateAudioStream(music->stream, pcm, samplesCount);
music->samplesLeft -= samplesCount;
if (music->samplesLeft <= 0)
{
streamEnding = true;
break;
}
}
// Free allocated pcm data
free(pcm);
// Reset audio stream for looping
if (streamEnding)
{
StopMusicStream(music); // Stop music (and reset)
// Decrease loopCount to stop when required
if (music->loopCount > 0)
{
music->loopCount--; // Decrease loop count
PlayMusicStream(music); // Play again
}
else
{
if (music->loopCount == -1)
{
PlayMusicStream(music);
}
}
}
else
{
// NOTE: In case window is minimized, music stream is stopped,
// just make sure to play again on window restore
if (state != AL_PLAYING) PlayMusicStream(music);
}
}
#endif
}
// Check if any music is playing
bool IsMusicPlaying(Music music)
{
#if USE_MINI_AL
return IsAudioStreamPlaying(music->stream);
#else
bool playing = false;
ALint state;
alGetSourcei(music->stream.source, AL_SOURCE_STATE, &state);
if (state == AL_PLAYING) playing = true;
return playing;
#endif
}
// Set volume for music
void SetMusicVolume(Music music, float volume)
{
#if USE_MINI_AL
SetAudioStreamVolume(music->stream, volume);
#else
alSourcef(music->stream.source, AL_GAIN, volume);
#endif
}
// Set pitch for music
void SetMusicPitch(Music music, float pitch)
{
#if USE_MINI_AL
SetAudioStreamPitch(music->stream, pitch);
#else
alSourcef(music->stream.source, AL_PITCH, pitch);
#endif
}
// Set music loop count (loop repeats)
@@ -1939,8 +1470,6 @@ AudioStream InitAudioStream(unsigned int sampleRate, unsigned int sampleSize, un
stream.channels = 1; // Fallback to mono channel
}
#if USE_MINI_AL
mal_format formatIn = ((stream.sampleSize == 8) ? mal_format_u8 : ((stream.sampleSize == 16) ? mal_format_s16 : mal_format_f32));
// The size of a streaming buffer must be at least double the size of a period.
@@ -1957,52 +1486,6 @@ AudioStream InitAudioStream(unsigned int sampleRate, unsigned int sampleSize, un
audioBuffer->looping = true; // Always loop for streaming buffers.
stream.audioBuffer = audioBuffer;
#else
// Setup OpenAL format
if (stream.channels == 1)
{
switch (sampleSize)
{
case 8: stream.format = AL_FORMAT_MONO8; break;
case 16: stream.format = AL_FORMAT_MONO16; break;
case 32: stream.format = AL_FORMAT_MONO_FLOAT32; break; // Requires OpenAL extension: AL_EXT_FLOAT32
default: TraceLog(LOG_WARNING, "Init audio stream: Sample size not supported: %i", sampleSize); break;
}
}
else if (stream.channels == 2)
{
switch (sampleSize)
{
case 8: stream.format = AL_FORMAT_STEREO8; break;
case 16: stream.format = AL_FORMAT_STEREO16; break;
case 32: stream.format = AL_FORMAT_STEREO_FLOAT32; break; // Requires OpenAL extension: AL_EXT_FLOAT32
default: TraceLog(LOG_WARNING, "Init audio stream: Sample size not supported: %i", sampleSize); break;
}
}
// Create an audio source
alGenSources(1, &stream.source);
alSourcef(stream.source, AL_PITCH, 1.0f);
alSourcef(stream.source, AL_GAIN, 1.0f);
alSource3f(stream.source, AL_POSITION, 0.0f, 0.0f, 0.0f);
alSource3f(stream.source, AL_VELOCITY, 0.0f, 0.0f, 0.0f);
// Create Buffers (double buffering)
alGenBuffers(MAX_STREAM_BUFFERS, stream.buffers);
// Initialize buffer with zeros by default
// NOTE: Using dynamic allocation because it requires more than 16KB
void *pcm = calloc(AUDIO_BUFFER_SIZE*stream.sampleSize/8*stream.channels, 1);
for (int i = 0; i < MAX_STREAM_BUFFERS; i++)
{
alBufferData(stream.buffers[i], stream.format, pcm, AUDIO_BUFFER_SIZE*stream.sampleSize/8*stream.channels, stream.sampleRate);
}
free(pcm);
alSourceQueueBuffers(stream.source, MAX_STREAM_BUFFERS, stream.buffers);
#endif
TraceLog(LOG_INFO, "[AUD ID %i] Audio stream loaded successfully (%i Hz, %i bit, %s)", stream.source, stream.sampleRate, stream.sampleSize, (stream.channels == 1) ? "Mono" : "Stereo");
@@ -2012,28 +1495,7 @@ AudioStream InitAudioStream(unsigned int sampleRate, unsigned int sampleSize, un
// Close audio stream and free memory
void CloseAudioStream(AudioStream stream)
{
#if USE_MINI_AL
DeleteAudioBuffer((AudioBuffer *)stream.audioBuffer);
#else
// Stop playing channel
alSourceStop(stream.source);
// Flush out all queued buffers
int queued = 0;
alGetSourcei(stream.source, AL_BUFFERS_QUEUED, &queued);
ALuint buffer = 0;
while (queued > 0)
{
alSourceUnqueueBuffers(stream.source, 1, &buffer);
queued--;
}
// Delete source and buffers
alDeleteSources(1, &stream.source);
alDeleteBuffers(MAX_STREAM_BUFFERS, stream.buffers);
#endif
TraceLog(LOG_INFO, "[AUD ID %i] Unloaded audio stream data", stream.source);
}
@@ -2043,7 +1505,6 @@ void CloseAudioStream(AudioStream stream)
// NOTE 2: To unqueue a buffer it needs to be processed: IsAudioBufferProcessed()
void UpdateAudioStream(AudioStream stream, const void *data, int samplesCount)
{
#if USE_MINI_AL
AudioBuffer *audioBuffer = (AudioBuffer *)stream.audioBuffer;
if (audioBuffer == NULL)
{
@@ -2054,6 +1515,7 @@ void UpdateAudioStream(AudioStream stream, const void *data, int samplesCount)
if (audioBuffer->isSubBufferProcessed[0] || audioBuffer->isSubBufferProcessed[1])
{
mal_uint32 subBufferToUpdate;
if (audioBuffer->isSubBufferProcessed[0] && audioBuffer->isSubBufferProcessed[1])
{
// Both buffers are available for updating. Update the first one and make sure the cursor is moved back to the front.
@@ -2073,6 +1535,7 @@ void UpdateAudioStream(AudioStream stream, const void *data, int samplesCount)
if (subBufferSizeInFrames >= (mal_uint32)samplesCount)
{
mal_uint32 framesToWrite = subBufferSizeInFrames;
if (framesToWrite > (mal_uint32)samplesCount) framesToWrite = (mal_uint32)samplesCount;
mal_uint32 bytesToWrite = framesToWrite*stream.channels*(stream.sampleSize/8);
@@ -2080,6 +1543,7 @@ void UpdateAudioStream(AudioStream stream, const void *data, int samplesCount)
// Any leftover frames should be filled with zeros.
mal_uint32 leftoverFrameCount = subBufferSizeInFrames - framesToWrite;
if (leftoverFrameCount > 0)
{
memset(subBuffer + bytesToWrite, 0, leftoverFrameCount*stream.channels*(stream.sampleSize/8));
@@ -2098,24 +1562,11 @@ void UpdateAudioStream(AudioStream stream, const void *data, int samplesCount)
TraceLog(LOG_ERROR, "Audio buffer not available for updating");
return;
}
#else
ALuint buffer = 0;
alSourceUnqueueBuffers(stream.source, 1, &buffer);
// Check if any buffer was available for unqueue
if (alGetError() != AL_INVALID_VALUE)
{
alBufferData(buffer, stream.format, data, samplesCount*stream.sampleSize/8*stream.channels, stream.sampleRate);
alSourceQueueBuffers(stream.source, 1, &buffer);
}
else TraceLog(LOG_WARNING, "[AUD ID %i] Audio buffer not available for unqueuing", stream.source);
#endif
}
// Check if any audio stream buffers requires refill
bool IsAudioBufferProcessed(AudioStream stream)
{
#if USE_MINI_AL
AudioBuffer *audioBuffer = (AudioBuffer *)stream.audioBuffer;
if (audioBuffer == NULL)
{
@@ -2124,92 +1575,46 @@ bool IsAudioBufferProcessed(AudioStream stream)
}
return audioBuffer->isSubBufferProcessed[0] || audioBuffer->isSubBufferProcessed[1];
#else
ALint processed = 0;
// Determine if music stream is ready to be written
alGetSourcei(stream.source, AL_BUFFERS_PROCESSED, &processed);
return (processed > 0);
#endif
}
// Play audio stream
void PlayAudioStream(AudioStream stream)
{
#if USE_MINI_AL
PlayAudioBuffer((AudioBuffer *)stream.audioBuffer);
#else
alSourcePlay(stream.source);
#endif
}
// Play audio stream
void PauseAudioStream(AudioStream stream)
{
#if USE_MINI_AL
PauseAudioBuffer((AudioBuffer *)stream.audioBuffer);
#else
alSourcePause(stream.source);
#endif
}
// Resume audio stream playing
void ResumeAudioStream(AudioStream stream)
{
#if USE_MINI_AL
ResumeAudioBuffer((AudioBuffer *)stream.audioBuffer);
#else
ALenum state;
alGetSourcei(stream.source, AL_SOURCE_STATE, &state);
if (state == AL_PAUSED) alSourcePlay(stream.source);
#endif
}
// Check if audio stream is playing.
bool IsAudioStreamPlaying(AudioStream stream)
{
#if USE_MINI_AL
return IsAudioBufferPlaying((AudioBuffer *)stream.audioBuffer);
#else
bool playing = false;
ALint state;
alGetSourcei(stream.source, AL_SOURCE_STATE, &state);
if (state == AL_PLAYING) playing = true;
return playing;
#endif
}
// Stop audio stream
void StopAudioStream(AudioStream stream)
{
#if USE_MINI_AL
StopAudioBuffer((AudioBuffer *)stream.audioBuffer);
#else
alSourceStop(stream.source);
#endif
}
void SetAudioStreamVolume(AudioStream stream, float volume)
{
#if USE_MINI_AL
SetAudioBufferVolume((AudioBuffer *)stream.audioBuffer, volume);
#else
alSourcef(stream.source, AL_GAIN, volume);
#endif
}
void SetAudioStreamPitch(AudioStream stream, float pitch)
{
#if USE_MINI_AL
SetAudioBufferPitch((AudioBuffer *)stream.audioBuffer, pitch);
#else
alSourcef(stream.source, AL_PITCH, pitch);
#endif
}
//----------------------------------------------------------------------------------
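
The raw-stream path kept above double-buffers PCM data behind AudioStream via mini_al. A rough usage sketch built only from the functions retained in this diff; the buffer size and the 440 Hz test tone are illustrative assumptions, and the refill loop is deliberately simplified (no timing, phase not carried between buffers):

    #include <math.h>
    #include "raylib.h"

    #define SAMPLES_PER_UPDATE 512

    int main(void)
    {
        InitAudioDevice();

        AudioStream stream = InitAudioStream(44100, 16, 1);   // 44.1 kHz, 16 bit, mono
        short data[SAMPLES_PER_UPDATE] = { 0 };

        PlayAudioStream(stream);

        for (int frame = 0; frame < 100000; frame++)          // Sketch only: feed the stream for a while
        {
            if (IsAudioBufferProcessed(stream))               // One of the two sub-buffers is free for refill
            {
                for (int i = 0; i < SAMPLES_PER_UPDATE; i++)
                    data[i] = (short)(32000.0f*sinf(2.0f*PI*440.0f*(float)i/44100.0f));
                UpdateAudioStream(stream, data, SAMPLES_PER_UPDATE);
            }
        }

        CloseAudioStream(stream);
        CloseAudioDevice();
        return 0;
    }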

+0 -2  src/config.h.in

@@ -1,7 +1,5 @@
/* config.h.in */
#cmakedefine USE_OPENAL_BACKEND 1
// core.c
/* Camera module is included (camera.h) and multiple predefined cameras are available: free, 1st/3rd person, orbital */
#cmakedefine SUPPORT_CAMERA_SYSTEM 1
